platform | repo_name | name | ext | path | size | source_encoding | md5 | text |
---|---|---|---|---|---|---|---|---|
github | lcnhappe/happe-master | elasticlin.m | .m | happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/elasticnet/elasticlin.m | 4,041 | utf_8 | 5795b073168c65dbf9bb3d3425e075f4 |
function [beta,beta0,conv] = elasticlin(X,Y,nu,lambda,options,beta,beta0)
% ELASTICLIN Elastic net implementation using coordinate descent,
% particularly suited for sparse high-dimensional models
% X: ninput x nsamples input data
% Y: 1 x nsamples output data
% nu: ninput x 1 weights for L1 penalty
% lambda: ninput x ninput matrix for ridge penalty
% options: struct with fields
% offset: 1 if offset is learned as well [1]
% maxiter: maximum number of iterations [10000]
% tol: tolerance (mean absolute difference) [1e-3]
% beta: initial beta
% beta0: initial offset
%
% beta: ninput x 1 vector
% beta0: offset ([] if not applicable)
% conv: convergence
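%
% Example (a minimal usage sketch on random data; the penalty values
% nu=0.1 and lambda=1 are illustrative, not recommended defaults):
%   X = randn(20,100);                     % 20 inputs, 100 samples
%   Y = randn(1,100);                      % continuous targets
%   [beta,beta0,conv] = elasticlin(X,Y,0.1,1);
%   plot(conv);                            % inspect convergence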
% Parsing inputs
if nargin < 5,
options = make_options;
else
options = make_options(options);
end
[ninput,nsamples] = size(X);
if nargin < 4,
lambda = 1;
end
if isscalar(lambda),
lambda = lambda*ones(ninput,1);
end
if any(size(lambda) == 1),
lambda = diag(lambda);
end
if nargin < 3,
nu = 1;
end
if isscalar(nu),
nu = nu*ones(ninput,1);
end
% Incorporating bias
nvar = ninput;
if options.offset,
X = [X;ones(1,nsamples)]; % expand data with 1
nvar = nvar+1;
lambda(nvar,nvar) = 0;
nu = [nu;0]; % expand nu with 0 (no regularization)
end
% Initialization
if nargin < 7,
if options.offset,
beta0 = 0;
else
beta0 = [];
end
end
if nargin < 6,
beta = zeros(ninput,1);
end
beta = [beta;beta0];
% numerator of Eq. 10
T = X*Y'/nsamples; % nvar x 1
Q = zeros(nvar,nvar);
qcomputed = zeros(nvar,1);
activeset = (beta ~= 0);
qcomputed(activeset) = 1;
Q(:,activeset) = X*X(activeset,:)'/nsamples + lambda(:,activeset); % precompute Q for nonzero beta
Q(diag(activeset)) = 0;
V = T-Q*beta;
% denominator of Eq. 10 for nonstandardized variables
U = diag(lambda) + mean(X.^2,2);
% Start iterations
betaold = beta;
iter = 0;
activeset = true(1,nvar);
conv = zeros(options.maxiter,1);
while true
iter = iter+1;
% one full pass with all variables and then only with active set
activeset = coorddescent(activeset);
% fprintf('iteration %d of %d: %g\n',iter,options.maxiter,sum(abs(beta-betaold)));
conv(iter) = sum(abs(beta-betaold));
if iter==options.maxiter, break; end
if sum(abs(beta-betaold)) < options.tol
% again one full pass with all variables
oldset = activeset;
activeset = coorddescent(true(1,nvar));
% if the active sets are the same we are done; otherwise continue
if all(oldset==activeset)
break;
end
end
betaold = beta;
end
if iter == options.maxiter,
fprintf(' be careful: maximum number of iterations reached\n');
end
% Parsing output
if nargout > 1,
if options.offset,
beta0 = beta(end);
beta = beta(1:ninput);
else
beta0 = [];
end
end
function newset = coorddescent(activeset)
beta(activeset) = 0;
newset = false(1,nvar);
for i=fliplr(find(activeset)) % run over weights and start with offset, if applicable
if abs(V(i)) > nu(i) % active
if V(i) > 0,
beta(i) = (V(i) - nu(i))/U(i);
else
beta(i) = (V(i) + nu(i))/U(i);
end
newset(i) = 1;
end
if beta(i) ~= betaold(i)
if ~qcomputed(i), % compute Q when needed
Q(:,i) = X*X(i,:)'/nsamples + lambda(:,i);
Q(i,i) = 0;
qcomputed(i) = 1;
end
V = V - Q(:,i)*(beta(i) - betaold(i));
end
end
end
end
%%%%%%%%%%%%%%%%%%%%%%%
function options = make_options(options)
if nargin < 1,
options = struct;
end
fnames = {'offset','maxiter','tol'};
defaults = {1,1e4,1e-3};
for i=1:length(fnames),
if ~isfield(options,fnames{i}),
options = setfield(options,fnames{i},defaults{i});
end
end
end
|
github | lcnhappe/happe-master | elasticlog.m | .m | happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/elasticnet/elasticlog.m | 4,626 | utf_8 | 1515764e86a85226867842c7a06c18d0 |
function [beta,beta0,conv] = elasticlog(X,Y,nu,lambda,options,beta,beta0)
% ELASTICLOG Elastic net implementation using coordinate descent,
% particularly suited for sparse high-dimensional models
% X: ninput x nsamples input data
% Y: 1 x nsamples output data (class labels 0 and 1)
% nu: ninput x 1 weights for L1 penalty
% lambda: ninput x ninput matrix for ridge penalty
% options: struct with fields
% offset: 1 if offset is learned as well [1]
% maxiter: maximum number of iterations [10000]
% tol: tolerance (mean absolute difference) [1e-6]
% beta: initial beta
% beta0: initial offset
%
% beta: ninput x 1 vector
% beta0: offset ([] if not applicable)
% conv: convergence
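%
% Example (a minimal usage sketch on random data; penalty values are
% illustrative, labels must be 0/1):
%   X = randn(20,100);
%   Y = double(randn(1,100) > 0);          % class labels 0 and 1
%   [beta,beta0] = elasticlog(X,Y,0.1,1);
%   p = 1./(1+exp(-(beta'*X + beta0)));    % predicted probabilities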
% Parsing inputs
if nargin < 5,
options = make_options;
else
options = make_options(options);
end
[ninput,nsamples] = size(X);
if nargin < 4,
lambda = 1;
end
if isscalar(lambda),
lambda = lambda*ones(ninput,1);
end
if any(size(lambda) == 1),
lambda = diag(lambda);
end
if nargin < 3,
nu = 1;
end
if isscalar(nu),
nu = nu*ones(ninput,1);
end
% Incorporating bias
nvar = ninput;
if options.offset,
X = [X;ones(1,nsamples)]; % expand data with 1
nvar = nvar+1;
lambda(nvar,nvar) = 0;
nu = [nu;0]; % expand nu with 0 (no regularization)
end
% Initialization
if nargin < 7,
if options.offset,
beta0 = 0;
else
beta0 = [];
end
end
if nargin < 6
beta = zeros(ninput,1);
end
beta = [beta; beta0];
% start iterations
V = []; U = []; qcomputed = []; w = []; Q=[];
betaold = beta;
iter = 0;
activeset = true(1,nvar);
conv = zeros(options.maxiter,1);
while true
iter = iter+1;
quadratic_approximation();
% one full pass with all variables and then only with active set
activeset = coorddescent(activeset);
% fprintf('iteration %d of %d: %g\n',iter,options.maxiter,sum(abs(beta-betaold)));
conv(iter) = sum(abs(beta-betaold));
if iter==options.maxiter, break; end
if sum(abs(beta-betaold)) < options.tol
quadratic_approximation();
% again one full pass with all variables
oldset = activeset;
activeset = coorddescent(true(1,nvar));
% if the active sets are the same we are done; otherwise continue
if all(oldset==activeset)
break;
end
end
betaold = beta;
end
if iter == options.maxiter,
fprintf(' be careful: maximum number of iterations reached\n');
end
% Parsing output
if nargout > 1,
if options.offset,
beta0 = beta(end);
beta = beta(1:ninput);
else
beta0 = [];
end
end
function newset = coorddescent(activeset)
beta(activeset) = 0;
newset = false(1,nvar);
for i=fliplr(find(activeset)) % run over weights and start with offset, if applicable
if abs(V(i)) > nu(i) % active
if V(i) > 0,
beta(i) = (V(i) - nu(i))/U(i); % z - gamma of soft thresholding
else
beta(i) = (V(i) + nu(i))/U(i); % z + gamma of soft thresholding
end
newset(i) = 1;
end
if beta(i) ~= betaold(i)
if ~qcomputed(i), % compute Q when needed
Q(:,i) = bsxfun(@times,X,w)*X(i,:)' + lambda(:,i);
Q(i,i) = 0;
qcomputed(i) = 1;
end
V = V - Q(:,i)*(beta(i) - betaold(i));
end
end
end
function quadratic_approximation()
ptild=1./(1+exp(- beta'*X)); % 1 x nsamples
ptild(ptild<1e-5)=0;
ptild(ptild>0.99999)=1;
w=ptild.*(1-ptild); % 1 x nsamples weights (17)
w(w==0)=1e-5;
Z = beta'*X + (Y-ptild)./w; % working response (16)
% numerator of Eq. 10; needs updating after each change in w
T = bsxfun(@times,X,w)*Z'; % nvar x 1
Q = zeros(nvar,nvar);
qcomputed = zeros(nvar,1);
activeset = (beta ~= 0)';
qcomputed(activeset) = 1;
Q(:,activeset) = bsxfun(@times,X,w)*X(activeset,:)' + lambda(:,activeset); % precompute Q for nonzero beta
Q(diag(activeset)) = 0;
V = T-Q*beta;
% denominator of Eq. 10; needs updating after each change in w
U = diag(lambda) + X.^2 * w';
end
end
%%%%%%%%%%%%%%%%%%%%%%%
function options = make_options(options)
if nargin < 1,
options = struct;
end
fnames = {'offset','maxiter','tol'};
defaults = {1,1e4,1e-6};
for i=1:length(fnames),
if ~isfield(options,fnames{i}),
options = setfield(options,fnames{i},defaults{i});
end
end
end
|
github | lcnhappe/happe-master | bls.m | .m | happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/pls/bls.m | 2,337 | utf_8 | 9e3e3838737dacc7f5b2dace9b43e117 |
function [A,B,Ypredict] = bls(X,Y,nhidden,algorithm)
% BLS Bottleneck least squares aka sparse orthogonalized least squares
%
% X: ninput x nsamples input data matrix
% Y: noutput x nsamples output data matrix
% nhidden: dimension of the hidden (bottleneck) layer
%
% A: noutput x nhidden weight matrix
% B: ninput x nhidden weight matrix
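%
% Example (a minimal usage sketch on random data; the rank-2
% bottleneck is illustrative):
%   X = randn(10,200);                     % 10 inputs, 200 samples
%   Y = randn(3,200);                      % 3 outputs
%   [A,B,Yp] = bls(X,Y,2,'direct');        % 2 hidden dimensions
%   norm(Y-Yp,'fro')                       % residual of the rank-2 fit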
if nargin < 3,
nhidden = min(size(Y,1),2);
end
if nargin < 4,
algorithm = 'direct';
end
[ninput,nsamples] = size(X);
Sxx = symmetric(X*X')/nsamples;
switch algorithm
case 'direct'
Syx = Y*X'/nsamples;
Sxy = Syx';
Sxxinv = symmetric(pinv(Sxx));
S = symmetric(Syx * Sxxinv * Sxy);
opts.disp = 0;
opts.issym = 'true';
[A,D] = eigs(S,nhidden,'LM',opts); % first nhidden eigenvectors of S
B = Sxxinv*Sxy*A;
case 'indirect' % same result and subspace as 'direct', but then different linear combination
Syx = Y*X'/nsamples;
Sxy = Syx';
Sxxinv = symmetric(pinv(Sxx));
% initialize randomly
B = randn(ninput,nhidden);
iter = 0;
maxiter = 1000;
tol = nhidden*ninput*(1e-10);
Bold = B;
while iter < maxiter,
Z = B'*X;
% Szz = symmetric(Z*Z')/nsamples;
Szz = symmetric(B'*Sxx*B);
% Syz = symmetric(Y*Z')/nsamples;
Syz = Syx*B;
A = Syz*pinv(Szz);
B = Sxxinv*Sxy*A*pinv(A'*A);
if sumsqr(B - Bold) < tol,
fprintf('done after %d iterations!!\n',iter+1);
iter = maxiter;
else
iter = iter + 1;
Bold = B;
end
end
case 'sequential' % same result as 'direct', up to minus sign per hidden unit
R = Y;
noutput = size(Y,1);
A = zeros(noutput,nhidden);
B = zeros(ninput,nhidden);
for i=1:nhidden,
[A(:,i),B(:,i)] = bls(X,R,1,'direct');
if i < nhidden,
R = R - A(:,i)*B(:,i)'*X;
end
end
end
% transform to make latent variable orthonormal
Szz = symmetric(B'*Sxx*B);
Szzinv = symmetric(pinv(Szz));
sqrtSzzinv = chol(Szzinv(end:-1:1,end:-1:1)); % such that the rescaled B(i,:) is a linear combi of the unscaled B(1:i,:); this only makes sense for the direct method
sqrtSzzinv = sqrtSzzinv(end:-1:1,end:-1:1);
B = B*sqrtSzzinv;
A = A*pinv(sqrtSzzinv);
if nargout > 2,
Ypredict = A*B'*X;
end
function A = symmetric(A)
A = (A+A')/2;
|
github | lcnhappe/happe-master | elastic.m | .m | happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/pls/elastic.m | 3,250 | utf_8 | 0a8ac22b23d8aab23f77c4e49a0644a4 |
function [beta,beta0] = elastic(X,Y,nu,lambda,options,beta,beta0)
% ELASTIC Elastic net implementation using coordinate descent,
% particularly suited for sparse high-dimensional models
% X: ninput x nsamples input data
% Y: 1 x nsamples output data
% nu: ninput x 1 weights for L1 penalty
% lambda: ninput x ninput matrix for ridge penalty
% options: struct with fields
% offset: 1 if offset is learned as well [1]
% maxiter: maximum number of iterations [10000]
% tol: tolerance (mean absolute difference) [1e-3]
% beta: initial beta
% beta0: initial offset
%
% beta: ninput x 1 vector
% beta0: offset ([] if not applicable)
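%
% Example (a minimal usage sketch on random data; penalty values are
% illustrative):
%   X = randn(20,100);
%   Y = randn(1,100);
%   [beta,beta0] = elastic(X,Y,0.1,1);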
% Parsing inputs
if nargin < 5,
options = make_options;
else
options = make_options(options);
end
[ninput,nsamples] = size(X);
if nargin < 4,
lambda = 1;
end
if isscalar(lambda),
lambda = lambda*ones(ninput,1);
end
if any(size(lambda) == 1),
lambda = diag(lambda);
end
if nargin < 3,
nu = 1;
end
if isscalar(nu),
nu = nu*ones(ninput,1);
end
% Incorporating bias
nvar = ninput;
if options.offset,
X = [X;ones(1,nsamples)]; % expand data with 1
nvar = nvar+1;
lambda(nvar,nvar) = 0;
nu = [nu;0]; % expand nu with 0 (no regularization)
end
% Initialization
T = X*Y'/nsamples; % nvar x 1
U = diag(lambda) + mean(X.^2,2);
if nargin < 7,
if options.offset,
beta0 = 0;
else
beta0 = [];
end
end
if nargin < 6,
beta = zeros(ninput,1);
end
beta = [beta;beta0];
Q = zeros(nvar,nvar);
qcomputed = zeros(nvar,1);
active = find(beta);
qcomputed(active) = 1;
Q(:,active) = X*X(active,:)'/nsamples + lambda(:,active); % precompute Q for nonzero beta
for i=1:length(active),
Q(active(i),active(i)) = 0; % zero on diagonal
end
V = T-Q*beta;
% Start iterations
betaold = beta;
iter = 0;
done = 0;
while ~done,
iter = iter+1;
for i=nvar:-1:1, % run over weights and start with offset, if applicable
if abs(V(i)) <= nu(i), % inactive
beta(i) = 0;
else % active
if V(i) > 0,
beta(i) = (V(i) - nu(i))/U(i);
else
beta(i) = (V(i) + nu(i))/U(i);
end
end
if beta(i) ~= betaold(i)
if ~qcomputed(i), % compute Q when needed
Q(:,i) = X*X(i,:)'/nsamples + lambda(:,i);
Q(i,i) = 0;
qcomputed(i) = 1;
end
V = V - Q(:,i)*(beta(i) - betaold(i));
end
end
done = (iter == options.maxiter | sum(abs(beta-betaold)) < options.tol);
betaold = beta;
end
if iter == options.maxiter,
fprintf(' be careful: maximum number of iterations reached\n');
end
% Parsing output
if nargout > 1,
if options.offset,
beta0 = beta(end);
beta = beta(1:ninput);
else
beta0 = [];
end
end
%%%%%%%%%%%%%%%%%%%%%%%
function options = make_options(options)
if nargin < 1,
options = struct;
end
fnames = {'offset','maxiter','tol'};
defaults = {1,1e4,1e-3};
for i=1:length(fnames),
if ~isfield(options,fnames{i}),
options = setfield(options,fnames{i},defaults{i});
end
end
|
github | lcnhappe/happe-master | opls.m | .m | happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/pls/opls.m | 2,338 | utf_8 | 413ad9957ca8651409ebf4479f96f580 |
function [A,B,Ypredict] = opls(X,Y,nhidden,algorithm)
% OPLS Bottleneck least squares aka sparse orthogonalized least squares
%
% X: ninput x nsamples input data matrix
% Y: noutput x nsamples output data matrix
% nhidden: dimension of the hidden (bottleneck) layer
%
% A: noutput x nhidden weight matrix
% B: ninput x nhidden weight matrix
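%
% Example (a minimal usage sketch on random data):
%   X = randn(10,200); Y = randn(3,200);
%   [A,B,Yp] = opls(X,Y,2);                % default 'direct' algorithm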
if nargin < 3,
nhidden = min(size(Y,1),2);
end
if nargin < 4,
algorithm = 'direct';
end
[ninput,nsamples] = size(X);
Sxx = symmetric(X*X')/nsamples;
switch algorithm
case 'direct'
Syx = Y*X'/nsamples;
Sxy = Syx';
Sxxinv = symmetric(pinv(Sxx));
S = symmetric(Syx * Sxxinv * Sxy);
opts.disp = 0;
opts.issym = 'true';
[A,D] = eigs(S,nhidden,'LM',opts); % first nhidden eigenvectors of S
B = Sxxinv*Sxy*A;
case 'indirect' % same result and subspace as 'direct', but then different linear combination
Syx = Y*X'/nsamples;
Sxy = Syx';
Sxxinv = symmetric(pinv(Sxx));
% initialize randomly
B = randn(ninput,nhidden);
iter = 0;
maxiter = 1000;
tol = nhidden*ninput*(1e-10);
Bold = B;
while iter < maxiter,
Z = B'*X;
% Szz = symmetric(Z*Z')/nsamples;
Szz = symmetric(B'*Sxx*B);
% Syz = symmetric(Y*Z')/nsamples;
Syz = Syx*B;
A = Syz*pinv(Szz);
B = Sxxinv*Sxy*A*pinv(A'*A);
if sumsqr(B - Bold) < tol,
fprintf('done after %d iterations!!\n',iter+1);
iter = maxiter;
else
iter = iter + 1;
Bold = B;
end
end
case 'sequential' % same result as 'direct', up to minus sign per hidden unit
R = Y;
noutput = size(Y,1);
A = zeros(noutput,nhidden);
B = zeros(ninput,nhidden);
for i=1:nhidden,
[A(:,i),B(:,i)] = bls(X,R,1,'direct');
if i < nhidden,
R = R - A(:,i)*B(:,i)'*X;
end
end
end
% transform to make latent variable orthonormal
Szz = symmetric(B'*Sxx*B);
Szzinv = symmetric(pinv(Szz));
sqrtSzzinv = chol(Szzinv(end:-1:1,end:-1:1)); % such that the rescaled B(i,:) is a linear combi of the unscaled B(1:i,:); this only makes sense for the direct method
sqrtSzzinv = sqrtSzzinv(end:-1:1,end:-1:1);
B = B*sqrtSzzinv;
A = A*pinv(sqrtSzzinv);
if nargout > 2,
Ypredict = A*B'*X;
end
function A = symmetric(A)
A = (A+A')/2;
|
github | lcnhappe/happe-master | dvComp.m | .m | happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/svm/dvComp.m | 1,449 | utf_8 | 935cb0c3970f3d1dbfa2c2a9c2d9d6c6 |
function [dv]=dvComp(Xtst,Xtrn,kernel,alphab,varargin)
% Compute the decision values for a kernel classifier
%
% [dv]=dvComp(Xtst,Xtrn,kernel,alphab,...)
%
% dv = K(Xtrn,Xtst)*alphab(1:end-1)+alphab(end)
%
% Inputs:
% Xtst -- [N x d] test set
% Xtrn -- [Ntrn x d] training set
% kernel -- the kernel function *as used in training*,
% will be passed to compKernel
% alphab -- [Ntrn+1 x 1] set of decision function parameters [alpha;b]
% ... -- additional parameters to pass to compKernel
%
% Outputs:
% dv -- [N x 1] decision values
%
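% Example (a minimal sketch; assumes alphab was obtained by training a
% kernel classifier on Xtrn with the same kernel, as in the test case
% at the bottom of this file):
%   dv = dvComp(Xtst,Xtrn,'nlinear',alphab);
%   yest = sign(dv);                       % predicted labels
%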
% Copyright 2006- by Jason D.R. Farquhar ([email protected])
% Permission is granted for anyone to copy, use, or modify this
% software and accompanying documents for any uncommercial
% purposes, provided this copyright notice is retained, and note is
% made of any changes that have been made. This software and
% documents are distributed without any warranty, express or
% implied
K = compKernel(Xtst,Xtrn,kernel,varargin{:});
dv = K*alphab(1:end-1)+alphab(end);
return;
%----------------------------------------------------------
function testCase()
[X,Y]=mkMultiClassTst([-1 0; 1 0; .2 .5],[400 400 50],[.3 .3; .3 .3; .2 .2],[],[-1 1 1]);[dim,N]=size(X);
trnInd = randn(N,1)>0;
K=compKernel(X(:,trnInd)',[],'nlinear');
[alphab,f,J]=rkls(K,Y(trnInd),1);
dv = dvComp(X(:,trnInd)',[],'nlinear',alphab);
max(abs(dv-f))
dv = dvComp(X(:,~trnInd)',X(:,trnInd)','nlinear',alphab);
|
github | lcnhappe/happe-master | l2svm_cg.m | .m | happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/svm/l2svm_cg.m | 15,343 | utf_8 | 9d8b76e63b49da516da17d4dfcc04574 |
function [wb,f,J,obj]=l2svm_cg(K,Y,C,varargin);
% [alphab,f,J]=l2svm_cg(K,Y,C,varargin)
% Quadratic Loss Support Vector machine using a pre-conditioned conjugate
% gradient solver so extends to large input kernels.
%
% J = C(1) w' K w + sum_i max(0 , 1 - y_i ( w'*K_i + b ) ).^2
%
% Inputs:
% K - [NxN] kernel matrix
% Y - [Nx1] matrix of -1/0/+1 labels, (0 label pts are implicitly ignored)
% C - the regularisation parameter, roughly max allowed length of the weight vector
% good default is: .1*var(data) = .1*(mean(diag(K))-mean(K(:)))
%
% Outputs:
% alphab - [(N+1)x1] matrix of the kernel weights and the bias [alpha;b]
% f - [Nx1] vector of decision values
% J - the final objective value
% obj - [J Ed Ew]
% p - [Nx1] vector of conditional probabilities, Pr(y|x)
%
% Options:
% alphab - [(N+1)x1] initial guess at the kernel parameters, [alpha;b] ([])
% ridge - [float] ridge to add to the kernel to improve convergence.
% ridge<0 -- absolute ridge value
% ridge>0 -- size relative to the mean kernel eigenvalue
% maxEval - [int] max number for function evaluations (N*5)
% maxIter - [int] max number of CG steps to do (inf)
% maxLineSrch - [int] max number of line search iterations to perform (50)
% objTol0 - [float] relative objective gradient tolerance (1e-5)
% objTol - [float] absolute objective gradient tolerance (0)
% tol0 - [float] relative gradient tolerance, w.r.t. initial value (0)
% lstol0 - [float] line-search relative gradient tolerance, w.r.t. initial value (1e-2)
% tol - [float] absolute gradient tolerance (0)
% verb - [int] verbosity (0)
% step - initial step size guess (1)
% wght - point weights [Nx1] vector of label accuracy probabilities ([])
% [2x1] for per class weightings
% [1x1] relative weight of the positive class
% nobias - [bool] flag we don't want the bias computed (false)
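%
% Example (a minimal sketch with a linear kernel on random data; C=1 is
% illustrative, see above for a better default):
%   X = randn(5,100); Y = sign(randn(100,1));
%   K = X'*X;                              % [NxN] linear kernel
%   [alphab,f,J] = l2svm_cg(K,Y,1,'verb',0);
%   dv = K*alphab(1:end-1)+alphab(end);    % decision values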
% Copyright 2006- by Jason D.R. Farquhar ([email protected])
% Permission is granted for anyone to copy, use, or modify this
% software and accompanying documents for any uncommercial
% purposes, provided this copyright notice is retained, and note is
% made of any changes that have been made. This software and
% documents are distributed without any warranty, express or
% implied
if ( nargin < 3 ) C(1)=0; end;
opts=struct('alphab',[],'nobias',0,'dim',[],...
'maxIter',inf,'maxEval',[],'tol',0,'tol0',0,'lstol0',1e-4,'objTol',0,'objTol0',1e-5,...
'verb',0,'step',0,'wght',[],'X',[],'ridge',0,'maxLineSrch',50,...
'maxStep',3,'minStep',5e-2,'weightDecay',0,'marate',.95,'bPC',[],'incThresh',.75);
[opts,varargin]=parseOpts(opts,varargin{:});
opts.ridge=opts.ridge(:);
if ( isempty(opts.maxEval) ) opts.maxEval=5*sum(Y(:)~=0); end
% Ensure all inputs have a consistent precision
if(isa(K,'double') & isa(Y,'single') ) Y=double(Y); end;
if(isa(K,'single')) eps=1e-7; else eps=1e-16; end;
opts.tol=max(opts.tol,eps); % gradient magnitude tolerance
[dim,N]=size(K); Y=Y(:); % ensure Y is col vector
% check for degenerate inputs
if ( all(Y>=0) || all(Y<=0) )
warning('Degenerate inputs, 1 class problem');
end
if ( opts.ridge>0 ) % make the ridge relative to the max eigen-value
opts.ridge = opts.ridge*median(abs(diag(K)));
ridge = opts.ridge;
else % negative value means absolute ridge
ridge = abs(opts.ridge);
end
% generate an initial seed solution if needed
wb=opts.alphab; % N.B. set the initial solution
if ( isempty(wb) )
wb=zeros(N+1,1,class(K));
% prototype classifier seed
wb(Y>0)=.5./sum(Y>0); wb(Y<0)=-.5./sum(Y<0); % vector between pos/neg class centers
wK = wb(1:end-1)'*K;
wb(end) = -(wK(Y<0)*abs(wb(Y<0))*sum(Y>0) + wK(Y>0)*abs(wb(Y>0))*sum(Y<0))./sum(Y~=0); % weighted mean
% find least squares optimal scaling and bias
sb = pinv([C(1)*wK*wb(1:end-1)+wK*wK' sum(wK); sum(wK) sum(Y~=0)])*[wK*Y; sum(Y)];
wb(1:end-1)=wb(1:end-1)*sb(1); wb(end)=sb(2);
end
if ( opts.nobias ) wb(end)=0; end;
% check if it's more efficient to sub-set the kernel, because of lots of ignored points
oK=K; oY=Y;
incIdx=(Y(:)~=0);
if ( sum(incIdx)./numel(Y) < opts.incThresh ) % if enough ignored to be worth it
if ( sum(incIdx)==0 ) error('Empty training set!'); end;
K=K(incIdx,incIdx); Y=Y(incIdx); wb=wb([incIdx; true]);
end
wght=1; wghtY=Y;
if ( ~isempty(opts.wght) ) % point weighting -- only needed in wghtY
if ( numel(opts.wght)==1 ) % weight ratio between classes
wght=zeros(size(Y));
wght(Y<0)=1./sum(Y<0); wght(Y>0)=(1./sum(Y>0))*opts.wght;
wght = wght*sum(abs(Y))./sum(abs(wght)); % ensure total weighting is unchanged
elseif ( numel(opts.wght)==2 ) % per class weights
wght=zeros(size(Y));
wght(Y<0)=1*opts.wght(1); wght(Y>0)=1*opts.wght(2);
elseif ( numel(opts.wght)==N )
else
error('Weight must be 2 or N elements long');
end
wghtY=wght.*Y;
else
wght=1;
end
% Normalise the kernel to prevent rounding issues causing convergence problems
% = average kernel eigen-value + regularisation const = ave row norm
diagK= K(1:size(K,1)+1:end);
if ( sum(incIdx)<size(K,1) ) diagK=diagK(incIdx); end;
muEig=median(diagK); % approx hessian scaling, for numerical precision
% adjust alpha and regul-constant to leave solution unchanged
wb(1:end-1)=wb(1:end-1)*muEig;
C(1) = C(1)./muEig;
% set the bias (i.e. b) pre-conditioner
bPC=opts.bPC;
if ( isempty(bPC) ) % bias pre-condn with the diagonal of the hessian
bPC = sqrt(abs(muEig + 2*C(1))./muEig); % N.B. use sqrt for safety?
bPC = 1./bPC;
%fprintf('bPC=%g\n',bPC);
end
% include ridge and re-scale by muEig for numerical stability
wK = (wb(1:end-1)'*K + ridge'.*wb(1:end-1)')./muEig; % include ridge
err = 1-Y.*(wK'+wb(end)); svs=err>0 & Y~=0;
Yerr = wghtY.*err; % weighted error
% pre-conditioned gradient,
% K^-1*dJdw = K^-1(2 C(1)Kw - 2 K I_sv(Y-f)) = 2*(C(1)w - Isv (Y-f) )
MdJ = [(2*C(1)*wb(1:end-1) - 2*(Yerr.*svs)); ...
-2*sum(Yerr(svs))./bPC];
dJ = [(K*MdJ(1:end-1)+ridge*MdJ(1:end-1))./muEig; ...
-2*sum(Yerr(svs))];
if ( opts.nobias ) MdJ(end)=0; dJ(end)=0; end;
Mr =-MdJ;
d = Mr;
dtdJ =-(d'*dJ);
r2 = dtdJ;
r02 = r2;
Ed = (wght.*err(svs))'*err(svs);
Ew = wK*wb(1:end-1);
J = Ed + C(1)*Ew; % SVM objective
% Set the initial line-search step size
step=opts.step;
if( step<=0 ) step=min(sqrt(abs(J/max(dtdJ,eps))),1); end %init step assuming opt is at 0
step=abs(step); tstep=step;
neval=1; lend='\r';
if(opts.verb>0) % debug code
if ( opts.verb>1 ) lend='\n'; else fprintf('\n'); end;
fprintf(['%3d) %3d x=[%5f,%5f,.] J=%5f (%5f+%5f) |dJ|=%8g\n'],0,neval,wb(1),wb(2),J,Ew./muEig,Ed,r2);
end
% pre-cond non-lin CG iteration
J0=J; madJ=abs(J); % init-grad est is init val
wb0=wb; Kd=zeros(size(wb),class(wb)); dJ=zeros(size(wb),class(wb));
for iter=1:min(opts.maxIter,2e6); % stop some matlab versions complaining about index too big
oJ= J; oMr = Mr; or2=r2; owb=wb; % record info about prev result we need
%---------------------------------------------------------------------
% Secant method for the root search.
if ( opts.verb > 2 )
fprintf('.%d %g=%g @ %g (%g+%g)\n',0,0,dtdJ,J,Ed,Ew./muEig);
if ( opts.verb>3 )
hold off;plot(0,dtdJ,'r*');hold on;text(0,double(dtdJ),num2str(0));
grid on;
end
end;
ostep=inf;step=tstep;%max(tstep,abs(1e-6/dtdJ)); % prev step size is first guess!
odtdJ=dtdJ; % one step before is same as current
wK0 = wK;
dK = (d(1:end-1)'*K+d(1:end-1)'.*ridge')./muEig; % N.B. v'*M is 50% faster than M*v'!!!
db = d(end);
dKw = dK*wb(1:end-1); dKd=dK*d(1:end-1);
% wb = wb + step*d;
% Kd(1:end-1) = (d(1:end-1)'*K+d(1:end-1)'.*ridge')./muEig; % N.B. v'*M is 50% faster than M*v'!!!
% Kd(end) = bPC*d(end);
%Kd = [;bPC*d(end)];%cache, so don't comp dJ
dtdJ0=abs(dtdJ); % initial gradient, for Wolfe 2 convergence test
for j=1:opts.maxLineSrch;
neval=neval+1;
oodtdJ=odtdJ; odtdJ=dtdJ; % prev and 1 before grad values
% Eval the gradient at this point. N.B. only gradient needed for secant
%wK = (wb(1:end-1)'*K + ridge'.*wb(1:end-1)')./muEig; % include ridge
wK = wK0 + tstep*dK;
err = 1-Y.*(wK'+wb(end)+tstep*d(end)); svs=err>0 & Y~=0;
Yerr = wghtY.*err;
% MdJ = [(2*C(1)*wb(1:end-1) - 2*(Yerr.*svs)); ...
% -2*sum(Yerr(svs))./bPC];
% if ( opts.nobias ) MdJ(end)=0; end;
% dtdJ =-Kd'*MdJ; % gradient along the line @ new position
dtdJ =-(2*C(1)*(dKw+tstep*dKd) - 2*dK*(Yerr.*svs) + -2*db*sum(Yerr(svs))); % gradient along the line @ new position
if ( opts.verb > 2 )
Ed = (wght.*err(svs))'*err(svs);
Ew = wK*wb(1:end-1);
J = Ed + C(1)*Ew; % SVM objective
fprintf('.%d %g=%g @ %g (%g+%g)\n',j,tstep,dtdJ,J,Ed,Ew./muEig);
if ( opts.verb > 3 )
plot(tstep,dtdJ,'*'); text(double(tstep),double(dtdJ),num2str(j));
end
end;
% convergence test, and numerical res test
if(iter>1|j>3) % Ensure we do decent line search for 1st step size!
if ( abs(dtdJ) < opts.lstol0*abs(dtdJ0) | ... % Wolfe 2, gradient enough smaller
abs(dtdJ*step) <= opts.tol ) % numerical resolution
break;
end
end
% now compute the new step size
% bracketing check, so it always decreases
if ( oodtdJ*odtdJ < 0 & odtdJ*dtdJ > 0 ... % oodtdJ still brackets
& abs(step*dtdJ) > abs(odtdJ-dtdJ)*(abs(ostep+step)) ) % would jump outside
step = ostep + step; % make as if we jumped here directly.
% use the geometric mean with the previous point's gradient; this is necessary to stop a very steep original gradient preventing decent step sizes
odtdJ = -sign(odtdJ)*sqrt(abs(odtdJ))*sqrt(abs(oodtdJ)); % geometric mean
end
ostep = step;
% *RELATIVE* secant step size
ddtdJ = odtdJ-dtdJ;
if ( ddtdJ~=0 ) nstep = dtdJ/ddtdJ; end; % secant step size, guard div by 0
nstep = sign(nstep)*max(opts.minStep,min(abs(nstep),opts.maxStep)); % bound growth/min-step size
step = step * nstep ; % absolute step
tstep = tstep + step; % total step size
% % move to the new point
% wb = wb + step*d ;
end
if ( opts.verb > 2 ) fprintf('\n'); end;
% update the solution with this step
wb = wb + tstep*d;
% compute the other bits needed for CG iteration
MdJ = [(2*C(1)*wb(1:end-1) - 2*(Yerr.*svs)); ...
-2*sum(Yerr(svs))./bPC];
dJ(1:end-1) = (MdJ(1:end-1)'*K+MdJ(1:end-1)'.*ridge)./muEig;
dJ(end) = bPC*MdJ(end);
% dJ = [(K*MdJ(1:end-1)+ridge.*MdJ(1:end-1))./muEig; ...
% bPC*MdJ(end)];
if ( opts.nobias ) dJ(end)=0; end;
Mr =-MdJ;
r2 =abs(Mr'*dJ);
% compute the function evaluation
Ed = (wght.*err(svs))'*err(svs);
Ew = wK*wb(1:end-1);
J = Ed + C(1)*Ew; % SVM objective
if(opts.verb>0) % debug code
fprintf(['%3d) %3d x=[%8f,%8f,.] J=%5f (%5f+%5f) |dJ|=%8g' lend],...
iter,neval,wb(1),wb(2),J,Ew./muEig,Ed,r2);
end
if ( J > oJ*(1+1e-3) || isnan(J) ) % check for stuckness
if ( opts.verb>=1 ) warning('Line-search Non-reduction - aborted'); end;
J=oJ; wb=owb; break;
end;
%------------------------------------------------
% convergence test
if ( iter==1 ) madJ=abs(oJ-J); dJ0=max(abs(madJ),eps); r02=r2;
elseif( iter<5 ) dJ0=max(dJ0,abs(oJ-J)); r02=max(r02,r2); % conv if smaller than best single step
end
madJ=madJ*(1-opts.marate)+abs(oJ-J)*(opts.marate);%move-ave objective grad est
if ( r2<=opts.tol || ... % small gradient + numerical precision
r2< r02*opts.tol0 || ... % Wolfe condn 2, gradient enough smaller
neval > opts.maxEval || ... % abs(odtdJ-dtdJ) < eps || ... % numerical resolution
madJ <= opts.objTol || madJ < opts.objTol0*dJ0 ) % objective function change
break;
end;
%------------------------------------------------
% conjugate direction selection
delta = max((Mr-oMr)'*(-dJ)/or2,0); % Polak-Ribier
%delta = max(r2/or2,0); % Fletcher-Reeves
d = Mr+delta*d; % conj grad direction
dtdJ =-d'*dJ; % new search dir grad.
if( dtdJ <= 0 ) % non-descent dir switch to steepest
if ( opts.verb >= 2 ) fprintf('non-descent dir\n'); end;
d=Mr; dtdJ=-d'*dJ;
end;
if ( opts.weightDecay > 0 ) % decay term to 0 non-svs faster
pts=~svs' & wb(1:end-1).*d(1:end-1) < 0;
%wb(pts)=wb(pts)*opts.weightDecay;
d(pts) =d(pts)*opts.weightDecay;
end
end;
if ( opts.verb >= 0 )
fprintf(['%3d) %3d x=[%8f,%8f,.] J=%5f (%5f+%5f) |dJ|=%8g\n'],...
iter,neval,wb(1),wb(2),J,Ew./muEig,Ed,r2);
end
if ( J > J0*(1+1e-4) || isnan(J) )
if ( opts.verb>=0 ) warning('Non-reduction'); end;
wb=wb0;
end;
% fix the stabilising K normalisation
wb(1:end-1) = wb(1:end-1)./muEig;
% compute final decision values.
if ( numel(Y)~=numel(incIdx) ) % map back to the full kernel space, if needed
nwb=zeros(size(oK,1)+1,1); nwb(incIdx)=wb(1:end-1); nwb(end)=wb(end); wb=nwb;
K=oK; Y=oY;
end
f = wb(1:end-1)'*K + wb(end); f = reshape(f,size(Y));
obj = [J Ew./muEig Ed];
return;
%-----------------------------------------------------------------------
function [opts,varargin]=parseOpts(opts,varargin)
% refined and simplified option parser with structure flatten
i=1;
while i<=numel(varargin);
if ( iscell(varargin{i}) ) % flatten cells
varargin={varargin{1:i} varargin{i}{:} varargin{i+1:end}};
elseif ( isstruct(varargin{i}) )% flatten structures
cellver=[fieldnames(varargin{i})'; struct2cell(varargin{i})'];
varargin={varargin{1:i} cellver{:} varargin{i+1:end} };
elseif( isfield(opts,varargin{i}) ) % assign fields
opts.(varargin{i})=varargin{i+1}; i=i+1;
else
error('Unrecognised option');
end
i=i+1;
end
return;
%-----------------------------------------------------------------------------
function []=testCase()
%TESTCASE 1) hinge vs. logistic (unregularised)
[X,Y]=mkMultiClassTst([-1 0; 1 0; .2 .5],[400 400 50],[.3 .3; .3 .3; .2 .2],[],[-1 1 1]);
K=X'*X; % Simple linear kernel
N=size(X,2);
fInds=gennFold(Y,10,'perm',1); trnInd=any(fInds(:,1:9),2); tstInd=fInds(:,10);
[alphab,J]=l2svm_cg(K(trnInd,trnInd),Y(trnInd),1,'verb',2);
dv=K(tstInd,trnInd)*alphab(1:end-1)+alphab(end);
dv2conf(Y(tstInd),dv)
% for linear kernel
alpha=zeros(N,1);alpha(find(trnInd))=alphab(1:end-1); % equiv alpha
w=X(:,trnInd)*alphab(1:end-1); b=alphab(end); plotLinDecisFn(X,Y,w,b,alpha);
% check the primal dual gap!
w=alphab(1:end-1); b=alphab(end);
Jd = C(1)*(2*sum(w.*Y(trnInd)) - C(1)*w'*w - w'*K(trnInd,trnInd)*w);
% imbalance test
[X,Y]=mkMultiClassTst([-1 0; 1 0],[400 400],[.5 .5; .5 .5],[],[-1 1]);
[X,Y]=mkMultiClassTst([-1 0; 1 0],[400 40],[.5 .5; .5 .5],[],[-1 1]);
[alphab,J]=l2svm_cg(K(trnInd,trnInd),Y(trnInd),1,'verb',2,'wght',[.1 1]);
|
github | lcnhappe/happe-master | csp.m | .m | happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/svm/csp.m | 6,469 | utf_8 | 17b337166f090c24d162dfc7e010f835 |
function [sf,d,Sigmai,Sigmac,SigmaAll]=csp(X,Y,dim,cent,ridge,singThresh)
% Generate spatial filters using CSP
%
% [sf,d,Sigmai,Sigmac,SigmaAll]=csp(X,Y,[dim]);
% N.B. if inputs are singular then d will contain 0 eigenvalues & sf==0
% Inputs:
% X -- n-d data matrix, e.g. [nCh x nSamp x nTrials] data set, OR
% [nCh x nCh x nTrials] set of *trial* covariance matrices, OR
% [nCh x nCh x nClass ] set of *class* covariance matrices
% Y -- [nTrials x 1] set of trial labels, with nClass unique labels, OR
% [nTrials x nClass] set of +/-1 (&0) trial labels per class, OR
% [nClass x 1] set of class labels when X=[nCh x nCh x nClass] (1:size(X,dim))
% N.B. in all cases a label of 0 indicates ignored trial
% dim -- [1 x 2] dimension of X which contains the trials, and
% (optionally) the the one which contains the channels. If
% channel dim not given the next available dim is used. ([-1 1])
% cent -- [bool] center the data (0)
% ridge -- [float] size of ridge (as fraction of mean eigenvalue) to add for numerical stability (1e-7)
% singThresh -- [float] threshold to detect singular values in inputs (1e-3)
% Outputs:
% sf -- [nCh x nCh x nClass] sets of 1-vs-rest spatial *filters*
% sorted in order of decreasing eigenvalue.
% N.B. sf is normalised such that: mean_i sf'*cov(X_i)*sf = I
% N.B. to obtain spatial *patterns* just use, sp = Sigma*sf ;
% d -- [nCh x nClass] spatial filter eigen values
% Sigmai-- [nCh x nCh x nTrials] set of *trial* covariance matrices
% Sigmac-- [nCh x nCh x nClass] set of *class* covariance matrices
% SigmaAll -- [nCh x nCh] all (non excluded) data covariance matrix
%
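% Example (a minimal sketch on random [nCh x nSamp x nTrials] data;
% keeping the first 4 filters of sub-problem 1 is illustrative):
%   X = randn(32,100,50); Y = sign(randn(50,1));
%   [sf,d] = csp(X,Y);                     % 1-vs-rest spatial filters
%   Xf = zeros(4,size(X,2),size(X,3));     % spatially filtered data
%   for t=1:size(X,3); Xf(:,:,t) = sf(:,1:4,1)'*X(:,:,t); end
%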
if ( nargin < 3 || isempty(dim) ) dim=[-1 1]; end;
if ( numel(dim) < 2 ) if ( dim(1)==1 ) dim(2)=2; else dim(2)=1; end; end
dim(dim<0)=ndims(X)+dim(dim<0)+1; % convert negative dims
if ( nargin < 4 || isempty(cent) ) cent=0; end;
if ( nargin < 5 || isempty(ridge) )
if ( isequal(class(X),'single') ) ridge=1e-7; else ridge=0; end;
end;
if ( nargin < 6 || isempty(singThresh) ) singThresh=1e-3; end
nCh = size(X,dim(2)); N=size(X,dim(1)); nSamp=prod(size(X))./nCh./N;
% compute the per-trial covariances
if ( ~isequal(dim,[3 1]) || ndims(X)>3 || nCh ~= size(X,2) )
idx1=-(1:ndims(X)); idx2=-(1:ndims(X)); % sum out everything but ch, trials
idx1(dim(1))=3; idx2(dim(1))=3; % linear over trial dimension
idx1(dim(2))=1; idx2(dim(2))=2; % Outer product over ch dimension
Sigmai = tprod(X,idx1,[],idx2,'n');
if ( cent ) % center the co-variances, N.B. tprod to comp means for mem
error('Unsupported -- numerically unsound, center before instead');
% sizeX=size(X); muSz=sizeX; muSz(dim)=1; mu=ones(muSz,class(X));
% idx2(dim)=0; mu=tprod(X,idx1,mu,idx2); % nCh x 1 x nTr
% % subtract the means
% Sigmai = Sigmai - tprod(mu,[1 0 3],[],[2 0 3])/prod(muSz);
end
% Fallback code
% if(dim(1)==3) for i=1:size(X,3); Sigmai(:,:,i)=X(:,:,i)*X(:,:,i)'; end
% elseif(dim(1)==1) for i=1:size(X,1); Sigmai(:,:,i)=shiftdim(X(i,:,:))*shiftdim(X(i,:,:))'; end
% end
else
Sigmai = X;
end
% N.B. Sigmai *must* be [nCh x nCh x N]
if ( ndims(Y)==2 && min(size(Y))==1 && ~(all(Y(:)==-1 | Y(:)==0 | Y(:)==1)) )
oY=Y;
Y=lab2ind(Y,[],[],[],0);
end;
nClass=size(Y,2);
if ( nClass==2 ) nClass=1; end; % only 1 for binary problems
allY0 = all(Y==0,2); % trials which have label 0 in all sub-prob
SigmaAll = sum(double(Sigmai(:,:,~allY0)),3); % sum all non-0 labeled trials
sf = zeros([nCh,nCh,nClass],class(X)); d=zeros(nCh,nClass,class(X));
for c=1:nClass; % generate sf's for each sub-problem
Sigmac(:,:,c) = sum(double(Sigmai(:,:,Y(:,c)>0)),3); % +class covariance
if ( isequal(Y(:,c)==0,allY0) ) % rest covariance, i.e. excludes 0 class
Sigma=SigmaAll; % can use sum of everything
else
Sigma=sum(Sigmai(:,:,Y(:,c)~=0),3); % rest for this class
end
Sigma=double(Sigma);
% solve the generalised eigenvalue problem,
if ( ridge>0 ) % Add ridge if wanted to help with numeric issues in the inv
Sigmac(:,:,c)=Sigmac(:,:,c)+eye(size(Sigma))*ridge*mean(diag(Sigmac(:,:,c)));
Sigma =Sigma +eye(size(Sigma))*ridge*mean(diag(Sigma));
end
% N.B. use double to avoid rounding issues with the inv(Sigma) bit
[W D]=eig(Sigmac(:,:,c),Sigma);D=diag(D);
[dc,di]=sort(D,'descend'); W=W(:,di); % order in decreasing eigenvalue
% Check for and correct for singular inputs
% singular if eigval out of range
nf = sum(W.*(Sigma*W),1)'; % eig-value for this direction in full cov
si= dc>1-singThresh | dc<0+singThresh | imag(dc)~=0 | isnan(dc) | nf<1e-4*sum(abs(nf));
if ( sum(si)>0 ) % remove the singular eigen values & effect on other sf's
% Identify singular directions which leak redundant information into
% the other eigenvectors, i.e. are mapped to 0 by both Sigmac and Sigma
% N.B. if the numerics are OK this is probably uncessary!
Na = sum((double(Sigmac(:,:,c))*W(:,si)).^2)./sum(W(:,si).^2);
ssi=find(si);ssi=ssi(abs(Na)<singThresh & imag(Na)==0);%ssi=dc>1-singThresh|dc<0+singThresh;
if ( ~isempty(ssi) ) % remove anything in this dir in other eigenvectors
% Compute the projection of the rest onto the singular direction(s)
Pssi = repop(W(:,ssi)'*W(:,~si),'./',sum(W(:,ssi).*W(:,ssi),1)');
W(:,~si)= W(:,~si) - W(:,ssi)*Pssi; %remove this singular contribution
end
W=W(:,~si); dc=dc(~si); nf=nf(~si);% discard singular components
end
%Normalise, so that diag(W'*Sigma*W)=N, i.e.mean_i W'*(X_i*X_i')*W/nSamp = 1
% i.e. so that the resulting features have unit variance (and are
% approx white?)
W = repop(W,'*',nf'.^-.5)*sqrt(sum(Y(:,c)~=0)*nSamp);
% Save the normalised filters & eigenvalues
sf(:,1:size(W,2),c)= W;
d(1:size(W,2),c) = dc;
end
% Compute last class covariance if wanted
if ( nClass==1 & nargout>3 ) Sigmac(:,:,2)=sum(double(Sigmai(:,:,Y(:,1)<0)),3);end;
return;
%-----------------------------------------------------------------------------
function []=testCase()
nCh = 64; nSamp = 100; N=300;
X=randn(nCh,nSamp,N);
Y=sign(randn(N,1));
[sf,d,Sigmai,Sigmac]=jf_csp(X,Y);
[sf,d,Sigmai,Sigmac]=jf_csp(X,Y,1); % with data centering
[sf2,d2]=jf_csp(Sigmac,[-1 1]);
[sf3,d3]=csp(Sigmac);
mimage(sf,sf2,'diff',1,'clim','limits')
|
github | lcnhappe/happe-master | whiten.m | .m | happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/svm/whiten.m | 7,608 | utf_8 | 37ffeab940403c215817ba5e7251cfd4 |
function [W,D,wX,U,mu,Sigma,alpha]=whiten(X,dim,alpha,centerp,stdp,symp,linMapMx,tol,unitCov,order)
% whiten the input data
%
% [W,D,wX,U,mu,Sigma,alpha]=whiten(X,dim[,alpha,center,stdp,symp,linMapMx,tol,unitCov,order])
%
% Inputs:
% X - n-d input data set
% dim - dim(1)=dimension to whiten
% dim(2:end) whiten per each entry in these dim
% alpha - [float] regularisation parameter: (1)
% \Sigma' = (alpha)*\Sigma + (1-alpha) I * sum(\Sigma)
% 0=no-whitening, 1=normal-whitening, 'opt'= Optimal-Shrinkage est
% alpha<0 -> 0=no-whitening, -1=normal-whitening, regularise with the alpha'th entry of the spectrum
% centerp- [bool] flag if we should center the data before whitening (1)
% stdp - [bool] flag if we should standardize input for numerical stability before whitening (0)
% symp - [bool] generate the symmetric whitening transform (0)
% linMapMx - [size(X,dim(2:end)) x size(X,dim(2:end))] linear mapping over non-acc dim
% use to smooth over these dimensions ([])
% tol - [float] relative tolerance w.r.t. largest eigenvalue used to
% reject eigen-values as being effectively 0. (1e-6)
% unitCov - [bool] make the covariance have unit norm for numerical accuracy (1)
% order - [float] order of inverse to use (-.5)
% Outputs:
% W - [size(X,dim(1)) x nF x size(X,dim(2:end))]
% whitening matrix which maps from dim(1) to its whitened version
% with number of factors nF
% N.B. whitening matrix: W = U*diag(D.^order);
% and inverse whitening matrix: W^-1 = U*diag(D.^-order);
% D - [nF x size(X,dim(2:end))] original eigenvalues for each coefficient
% wX - [size(X)] the whitened version of X
% U - [size(X,dim(1)) x nF x size(X,dim(2:end))]
% eigen-decomp of the inputs
% Sigma- [size(X,dim(1)) size(X,dim(1)) x size(X,dim(2:end))] the
% covariance matrices for dim(1) for each dim(2:end)
% mu - [size(X) with dim(2:end)==1] mean to center everything else
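%
% Example (a minimal sketch, whitening the channel dimension of
% [nCh x nSamp x nTrials] data; mirrors the test case below):
%   X = randn(16,100,30);
%   [W,D,wX] = whiten(X,1);                % whiten over dimension 1
%   C = wX(:,:)*wX(:,:)'./size(wX(:,:),2); % output covariance, ~identity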
if ( nargin < 3 || isempty(alpha) ) alpha=1; elseif(isnumeric(alpha)) alpha=sign(alpha)*min(1,max(abs(alpha),0)); end;
if ( nargin < 4 || isempty(centerp) ) centerp=1; end;
if ( nargin < 5 || isempty(stdp) ) stdp=0; end;
if ( nargin < 6 || isempty(symp) ) symp=0; end;
if ( nargin < 7 ) linMapMx=[]; end;
if ( nargin < 8 || isempty(tol) ) % set the tolerance
if ( isa(X,'single') ) tol=1e-6; else tol=1e-9; end;
end
if ( nargin < 9 || isempty(unitCov) ) unitCov=1; end; % improve condition number before inversion
if ( nargin < 10 || isempty(order) ) order=-.5; end;
dim(dim<0)=dim(dim<0)+ndims(X)+1;
sz=size(X); sz(end+1:max(dim))=1; % pad with unit dims as necessary
accDims=setdiff(1:ndims(X),dim); % set the dims we should accumulate over
N = prod(sz(accDims));
% covariance + eigenvalue method
idx1 = -(1:ndims(X)); idx1(dim)=[1 1+(2:numel(dim))]; % skip for OP dim
idx2 = -(1:ndims(X)); idx2(dim)=[2 1+(2:numel(dim))]; % skip for OP dim
if ( isreal(X) ) % work with complex inputs
XX = tprod(X,idx1,[],idx2,'n');%[szX(dim(1)) szX(dim(1)) x szX(dim(2:end))]
else
XX = tprod(real(X),idx1,[],idx2,'n') + tprod(imag(X),idx1,[],idx2,'n');
end
if ( centerp ) % centered
sX = msum(X,accDims); % size(X)\dim
sXsX = tprod(double(real(sX)),idx1,[],idx2,'n');
if( ~isreal(sX) ) sXsX = sXsX + tprod(double(imag(sX)),idx1,[],idx2,'n'); end
Sigma= (double(XX) - sXsX/N)/N;
else % uncentered
sX=[];
Sigma= double(XX)/N;
end
clear XX;
if ( ischar(alpha) )
switch (alpha);
case 'opt'; % optimal shrinkage regularisation estimate
alpha=optShrinkage(X,dim(1),sum(Sigma,3),sum(sX,2)./N,centerp);
alpha=1-alpha; % invert type of alpha to be strength of whitening
error('not fixed yet!');
otherwise; error('Unrec alpha type');
end
end
if ( stdp ) % standardise the channels before whitening
X2 = tprod(real(X),idx1,[],idx1,'n'); % var each entry
if( ~isreal(X) ) X2 = X2 + tprod(imag(X),idx1,[],idx1,'n'); end % add imaginary-part variance for complex inputs
if ( centerp ) % include the centering correction
sX2 = tprod(real(sX),idx1,[],idx1,'n'); % var mean
if ( ~isreal(X) ) sX2=sX2 + tprod(imag(sX),idx1,[],idx1,'n'); end
varX = (double(X2) - sX2/N)/N; % channel variance
else
varX = X2./N;
end
istdX = 1./sqrt(max(varX,eps)); % inverse stdX
% pre+post mult to correct
szstdX=size(istdX);
Sigma = repop(istdX,'*',repop(Sigma,'*',reshape(istdX,[szstdX(2) szstdX([1 3:end])])));
end
if ( ~isempty(linMapMx) ) % smooth the covariance estimates
Sigma=tprod(Sigma,[1 2 -(2+(1:ndims(Sigma)-2))],full(linMapMx),[-(2+(1:ndims(Sigma)-2)) 2+(1:ndims(Sigma)-2)]); % smooth covariances over dim(2:end)
end
% give the covariance matrix unit norm to improve numerical accuracy
if ( unitCov ) unitCov=median(diag(Sigma)); Sigma=Sigma./unitCov; end;
W=zeros(size(Sigma),class(X));
if(numel(dim)>1) Dsz=[sz(dim(1)) sz(dim(2:end))];else Dsz=[sz(dim(1)) 1];end
D=zeros(Dsz,class(X));
nF=0;
for dd=1:size(Sigma(:,:,:),3); % for each dir
[Udd,Ddd]=eig(Sigma(:,:,dd)); Ddd=diag(Ddd);
[ans,si]=sort(abs(Ddd),'descend'); Ddd=Ddd(si); Udd=Udd(:,si); % dec abs order
if( alpha>=0 ) % regularise the covariance
Ddd = alpha*Ddd + (1-alpha)*mean(Ddd); % absolute factor to add
elseif ( alpha<0 ) % percentage of spectrum to use
%s = exp(log(Ddd(1))*(1+alpha)+(-alpha)*log(Ddd(end)));%1-s(round(numel(s)*alpha))./sum(s); % strength is % to leave
t = Ddd(round(-alpha*numel(Ddd))); % strength we want
Ddd = (Ddd + t)*sum(Ddd)./(sum(Ddd)+t);
end
% only eig sufficiently big are selected
si=Ddd>max(abs(Ddd))*tol; % remove small and negative eigenvalues
rDdd=ones(size(Ddd),class(Ddd)); if( order==-.5 ) rDdd(si) = 1./sqrt(Ddd(si)); else rDdd(si)=power(Ddd(si),order); end;
if ( symp ) % symmetric whiten
W(:,:,dd) = repop(Udd(:,si),'*',rDdd(si)')*Udd(:,si)';
nF=size(W,1);
else % non-symmetric
W(:,1:sum(si),dd) = repop(Udd(:,si),'*',rDdd(si)');
nF = max(nF,sum(si)); % record the max number of factors actually used
end
U(:,1:sum(si),dd) = Udd(:,si);
D(1:sum(si),dd) = Ddd(si);
end
% Only keep the max nF
W=reshape(W(:,1:nF,:),[sz(dim(1)) nF sz(dim(2:end)) 1]);
D=reshape(D(1:nF,:),[nF sz(dim(2:end)) 1]);
% undo the effects of the standardisation
if ( stdp ) W=repop(W,'*',istdX); end
% undo numerical re-scaling
if ( unitCov ) W=W./sqrt(unitCov); D=D.*unitCov; end
if ( nargout>2 ) % compute the whitened output if wanted
if (centerp) wX = repop(X,'-',sX./N); else wX=X; end % center the data
% N.B. would be nice to use the sparsity of W to speed this up
idx1 = 1:ndims(X); idx1(dim(1))=-dim(1);
wX = tprod(wX,idx1,W,[-dim(1) dim(1) dim(2:end)]); % apply whitening
end
if ( nargout>3 && centerp) mu=sX/N; else mu=[]; end;
return;
%------------------------------------------------------
function testCase()
z=jf_mksfToy();
clf;image3ddi(z.X,z.di,1,'colorbar','nw','ticklabs','sw');packplots('sizes','equal');
[W,D,wX,U,mu,Sigma]=whiten(z.X,1);
imagesc(wX(:,:)*wX(:,:)'./size(wX(:,:),2)); % plot output covariance
[W,D,wX,U,mu,Sigma]=whiten(z.X,1,'opt'); % opt-shrinkage
% check that the std-code works
A=randn(11,10); A(1,:)=A(1,:)*1000; % spatial filter with lin dependence
sX=randn(10,1000); sC=sX*sX'; [sU,sD]=eig(sC); sD=diag(sD); wsU=repop(sU,'./',sqrt(sD)');
X=A*sX; C=X*X'; [U,D]=eig(C); D=diag(D); wU=repop(U,'./',sqrt(D)');
mimage(wU'*C*wU,wsU'*sC*wsU)
mimage(repop(1./d,'*',wsU)'*C*repop(1./d,'*',wsU))
[W,D,wX,Sigma]=whiten(X,1,0,0);
[sW,sD,swX,sSigma]=whiten(X,1,0,1);
|
github | lcnhappe/happe-master | repop_testcases.m | .m | happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/svm/repop/repop_testcases.m | 11,158 | utf_8 | 79bc2813b0a64a4efbc64e27cf0ef960 |
function []=repop_testcases(testType)
%
% This file contains lots of test-cases to test the performance of the repop
% files vs. the matlab built-ins.
%
% N.B. there appears to be a bug in MATLAB when comparing mixed
% complex/real + double/single values in a max/min
%
% Copyright 2006- by Jason D.R. Farquhar ([email protected])
% Permission is granted for anyone to copy, use, or modify this
% software and accompanying documents for any uncommercial
% purposes, provided this copyright notice is retained, and note is
% made of any changes that have been made. This software and
% documents are distributed without any warranty, express or
% implied
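%
% For reference, repop broadcasts a binary operator over replicated
% dimensions; the equivalence below is what the accuracy tests check:
%   X = randn(10,100); v = randn(10,1);
%   Z1 = repop(X,'+',v);                   % broadcast v along columns
%   Z2 = X + repmat(v,[1,size(X,2)]);      % MATLAB equivalent
%   max(abs(Z1(:)-Z2(:)))                  % should be ~0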
if ( nargin<1 || isempty(testType) ) testType={'acc','timing'}; end;
if ( ~isempty(strmatch('acc',testType)) )
fprintf('------------------- Accuracy tests -------------------\n');
X=complex(randn(10,100),randn(10,100));
Y=complex(randn(size(X)),randn(size(X)));
fprintf('\n****************\n Double Real X, Double Real Y\n******************\n')
accuracyTests(real(X),real(Y),'dRdR')
fprintf('\n****************\n Double Complex X, Double Real Y\n******************\n')
accuracyTests(X,real(Y),'dCdR')
fprintf('\n****************\n Double Real X, Double Complex Y\n******************\n')
accuracyTests(real(X),Y,'dRdC')
fprintf('\n****************\n Double Complex X, Double Complex Y\n******************\n')
accuracyTests(X,Y,'dCdC')
fprintf('\n****************\n Double Real X, Single Real Y\n******************\n')
accuracyTests(real((X)),real(single(Y)),'dRsR')
fprintf('\n****************\n Double Complex X, Single Real Y\n******************\n')
accuracyTests((X),real(single(Y)),'dCsR')
fprintf('\n****************\n Double Real X, Single Complex Y\n******************\n')
accuracyTests((real(X)),single(Y),'dRsC')
fprintf('\n****************\n Double Complex X, Single Complex Y\n******************\n')
accuracyTests((X),single(Y),'dCsC')
fprintf('\n****************\n Single Real X, Double Real Y\n******************\n')
accuracyTests(real(single(X)),real((Y)),'sRdR')
fprintf('\n****************\n Single Complex X, Double Real Y\n******************\n')
accuracyTests(single(X),real((Y)),'sCdR')
fprintf('\n****************\n Single Real X, Double Complex Y\n******************\n')
accuracyTests(single(real(X)),(Y),'sRdC')
fprintf('\n****************\n Single Complex X,Double Complex Y\n******************\n')
accuracyTests(single(X),(Y),'sCdC')
fprintf('\n****************\n Single Real X, Single Real Y\n******************\n')
accuracyTests(real(single(X)),real(single(Y)),'sRsR')
fprintf('\n****************\n Single Complex X, Single Real Y\n******************\n')
accuracyTests(single(X),real(single(Y)),'sCsR')
fprintf('\n****************\n Single Real X, Single Complex Y\n******************\n')
accuracyTests(single(real(X)),single(Y),'sRsC')
fprintf('\n****************\n Single Complex X, Single Complex Y\n******************\n')
accuracyTests(single(X),single(Y),'sCsC')
fprintf('All tests passed\n');
end
if ( ~isempty(strmatch('timing',testType)) )
fprintf('------------------- Timing tests -------------------\n');
X=complex(randn(100,1000),randn(100,1000));
Y=complex(randn(size(X)),randn(size(X)));
timingTests(real(X),real(Y),'[100x1000] RR');
timingTests(X,Y,'[100x1000] CC');
end
return;
function []=accuracyTests(X,Y,str)
% PLUS
unitTest([str ' Matx + col Vec'],X,'+',Y(:,1),repop(X,'+',Y(:,1)),X+repmat(Y(:,1),[1,size(X,2)]));
unitTest([str ' Matx + row Vec'],X,'+',Y(1,:),repop(X,'+',Y(1,:)),X+repmat(Y(1,:),[size(X,1),1]));
unitTest([str ' Matx + Matx(:,1:2)'],X,'+',Y(:,1:2),repop(X,'+',Y(:,1:2),'m'),X+repmat(Y(:,1:2),[1,size(X,2)/2]));
unitTest([str ' Matx + Matx'],X,'+',Y(:,:),repop(X,'+',Y(:,:)),X+repmat(Y(:,:),[1,1]));
% TIMES
unitTest([str ' Matx * col Vec'],X,'*',Y(:,1),repop(X,'*',Y(:,1)),X.*repmat(Y(:,1),[1,size(X,2)]));
unitTest([str ' Matx * row Vec'],X,'*',Y(1,:),repop(X,'*',Y(1,:)),X.*repmat(Y(1,:),[size(X,1),1]));
unitTest([str ' Matx * Matx(:,1:2)'],X,'*',Y(:,1:2),repop(X,'*',Y(:,1:2),'m'),X.*repmat(Y(:,1:2),[1,size(X,2)/2]));
unitTest([str ' Matx * Matx'],X,'*',Y(:,:),repop(X,'*',Y(:,:)),X.*repmat(Y(:,:),[1,1]));
% other operations
unitTest([str ' Matx - row vec'],X,'-',Y(:,1),repop(X,'-',Y(:,1)),X-repmat(Y(:,1),[1,size(X,2)]));
unitTest([str ' Matx / row vec'],X,'/',Y(:,1),repop(X,'/',Y(:,1)),X./repmat(Y(:,1),[1,size(X,2)]));
unitTest([str ' Matx \ row vec'],X,'\',Y(:,1),repop(X,'\',Y(:,1)),X.\repmat(Y(:,1),[1,size(X,2)]));
unitTest([str ' Matx ^ row vec'],X,'^',Y(:,1),repop(X,'^',Y(:,1)),X.^repmat(Y(:,1),[1,size(X,2)]),1e-5);
unitTest([str ' Matx == row vec'],X,'==',Y(:,1),repop(X,'==',Y(:,1)),X==repmat(Y(:,1),[1,size(X,2)]));
unitTest([str ' Matx ~= row vec'],X,'~=',Y(:,1),repop(X,'~=',Y(:,1)),X~=repmat(Y(:,1),[1,size(X,2)]));
% N.B. repop tests with complex inputs use the magnitude of the input!
tX=X; tY=Y; if( ~isreal(X) | ~isreal(Y) ) tX=abs(X); tY=abs(Y); end;
unitTest([str ' Matx < row vec'],X,'<',Y(:,1),repop(X,'<',Y(:,1)),tX<repmat(tY(:,1),[1,size(X,2)]));
unitTest([str ' Matx > row vec'],X,'>',Y(:,1),repop(X,'>',Y(:,1)),tX>repmat(tY(:,1),[1,size(X,2)]));
unitTest([str ' Matx <= row vec'],X,'<=',Y(:,1),repop(X,'<=',Y(:,1)),tX<=repmat(tY(:,1),[1,size(X,2)]));
unitTest([str ' Matx >= row vec'],X,'>=',Y(:,1),repop(X,'>=',Y(:,1)),tX>=repmat(tY(:,1),[1,size(X,2)]));
%unitTest([str ' min Matx, row vec'],repop('min',X,Y(:,1)),min(X,repmat(Y(:,1),[1, size(X,2)])));
%unitTest([str ' max Matx, row vec'],repop('max',X,Y(:,1)),max(X,repmat(Y(:,1),[1,size(X,2)])));
%return;
%function []=inplaceaccuracyTests(X,Y,str)
return;
% Inplace operations test
% PLUS
% N.B. need the Z(1)=Z(1); to force to make a "deep" copy, i.e. not just
% equal pointers
Z=X;unitTest([str ' Matx + col Vec (inplace)'],Z,'+',Y(:,1),repop(Z,'+',Y(:,1),'i'),X+repmat(Y(:,1),[1,size(X,2)]));
Z=X;unitTest([str ' Matx + row Vec (inplace)'],Z,'+',Y(1,:),repop(Z,'+',Y(1,:),'i'),X+repmat(Y(1,:),[size(X,1),1]));
Z=X;unitTest([str ' Matx + Matx(:,1:2) (inplace)'],Z,'+',Y(:,1:2),repop(Z,'+',Y(:,1:2),'mi'),X+repmat(Y(:,1:2),[1,size(X,2)/2]));
Z=X;unitTest([str ' Matx + Matx (inplace)'],Z,'+',Y(:,:),repop(Z,'+',Y(:,:),'i'),X+repmat(Y(:,:),[1,1]));
% TIMES
Z=X;unitTest([str ' Matx * col Vec (inplace)'],Z,'*',Y(:,1),repop(Z,'*',Y(:,1),'i'),X.*repmat(Y(:,1),[1,size(X,2)]));
Z=X;unitTest([str ' Matx * row Vec (inplace)'],Z,'*',Y(1,:),repop(Z,'*',Y(1,:),'i'),X.*repmat(Y(1,:),[size(X,1),1]));
Z=X;unitTest([str ' Matx * Matx(:,1:2) (inplace)'],Z,'*',Y(:,1:2),repop(Z,'*',Y(:,1:2),'mi'),X.*repmat(Y(:,1:2),[1,size(X,2)/2]));
Z=X;unitTest([str ' Matx * Matx (inplace)'],Z,'*',Y(:,:),repop(Z,'*',Y(:,:),'i'),X.*repmat(Y(:,:),[1,1]));
% other operations
Z=X;unitTest([str ' Matx - row vec (inplace)'],Z,'-',Y(:,1),repop(Z,'-',Y(:,1),'i'),X-repmat(Y(:,1),[1,size(X,2)]));
Z=X;unitTest([str ' Matx \ row vec (inplace)'],Z,'\',Y(:,1),repop(Z,'\',Y(:,1),'i'),X.\repmat(Y(:,1),[1,size(X,2)]));
Z=X;unitTest([str ' Matx / row vec (inplace)'],Z,'/',Y(:,1),repop(Z,'/',Y(:,1),'i'),X./repmat(Y(:,1),[1,size(X,2)]));
Z=X;unitTest([str ' Matx ^ row vec (inplace)'],Z,'^',Y(:,1),repop(Z,'^',Y(:,1),'i'),X.^repmat(Y(:,1),[1,size(X,2)]),1e-5);
%unitTest([str ' min Matx, row vec (inplace)'],repop('min',X,Y(:,1),'i'),min(X,repmat(Y(:,1),[1, size(X,2)])));
%unitTest([str ' max Matx, row vec (inplace)'],repop('max',X,Y(:,1),'i'),max(X,repmat(Y(:,1),[1,size(X,2)])));
function []=timingTests(X,Y,str)
%PLUS
fprintf('%s Matx + Scalar\n',str);
tic; for i=1:1000; Z=repop(X,'+',10); end;
fprintf('%30s %gs\n','repop',toc/1000);
tic; Z=X;Z(1)=Z(1);for i=1:1000; Z=repop(Z,'+',10,'i'); end;
fprintf('%30s %gs\n','repop (inplace)',toc/1000);
tic; for i=1:1000; T=X+10;end;
fprintf('%30s %gs\n','MATLAB',toc/1000);
fprintf('%s Matx + col vec\n',str);
tic, for i=1:1000; Z=repop(X,'+',Y(:,1)); end;
fprintf('%30s %gs\n','repop',toc/1000); % = .05 / .01
tic, Z=X;Z(1)=Z(1); for i=1:1000; Z=repop(Z,'+',Y(:,1),'i'); end;
fprintf('%30s %gs\n','repop (inplace)',toc/1000); % = .05 / .01
tic, for i=1:1000; Z=X+repmat(Y(:,1),1,size(X,2));end;
fprintf('%30s %gs\n','MATLAB',toc/1000); % = .05 / .01
fprintf('%s Matx + row vec\n',str);
tic, for i=1:1000; Z=repop(X,'+',Y(1,:)); end;
fprintf('%30s %gs\n','repop',toc/1000); % = .05 / .01
tic, Z=X;Z(1)=Z(1); for i=1:1000; Z=repop(Z,'+',Y(1,:),'i'); end;
fprintf('%30s %gs\n','repop (inplace)',toc/1000); % = .05 / .01
tic, for i=1:1000; Z=X+repmat(Y(1,:),size(X,1),1);end;
fprintf('%30s %gs\n','MATLAB',toc/1000); % = .05 / .01
fprintf('%s Matx + Matx(:,1:2)\n',str);
tic; for i=1:1000; Z=repop(X,'+',Y(:,1:2),'m'); end;
fprintf('%30s %gs\n','repop',toc/1000); % = .05 / .01
tic, Z=X;Z(1)=Z(1); for i=1:1000; Z=repop(Z,'+',Y(:,1:2),'im'); end;
fprintf('%30s %gs\n','repop (inplace)',toc/1000); % = .05 / .01
tic; for i=1:1000; T=X+repmat(Y(:,1:2),[1,size(X,2)/2]);end;
fprintf('%30s %gs\n','MATLAB',toc/1000); % = .05 / .01
%TIMES
fprintf('%s Matx * col Vec\n',str);
tic; for i=1:1000; Z=repop(X,'*',Y(:,1)); end;
fprintf('%30s %gs\n','repop',toc/1000);
tic; Z=X;Z(1)=Z(1); for i=1:1000; Z=repop(Z,'*',Y(:,1),'i'); end;
fprintf('%30s %gs\n','repop (inplace)',toc/1000);
tic; for i=1:1000; Z=X.*repmat(Y(:,1),1,size(X,2));end;
fprintf('%30s %gs\n','MATLAB',toc/1000);
tic; for i=1:1000; Z=spdiags(Y(:,1),0,size(X,1),size(X,1))*X;end;
fprintf('%30s %gs\n','MATLAB (spdiags)',toc/1000);
fprintf('%s Matx * row Vec\n',str);
tic; for i=1:1000; Z=repop(X,'*',Y(1,:)); end;
fprintf('%30s %gs\n','repop',toc/1000);
tic; Z=X;Z(1)=Z(1); for i=1:1000; Z=repop(Z,'*',Y(1,:),'i'); end;
fprintf('%30s %gs\n','repop (inplace)',toc/1000);
tic; for i=1:1000; Z=X.*repmat(Y(:,1),1,size(X,2));end;
fprintf('%30s %gs\n','MATLAB',toc/1000);
tic; for i=1:1000; Z=X*spdiags(Y(1,:)',0,size(X,2),size(X,2));end;
fprintf('%30s %gs\n','MATLAB (spdiags)',toc/1000);
fprintf('%s Matx * Matx(:,1:2)\n',str);
tic; for i=1:1000; Z=repop(X,'*',Y); end;
fprintf('%30s %gs\n','repop',toc/1000);
tic; Z=X;Z(1)=Z(1); for i=1:1000; Z=repop(Z,'*',Y,'i'); end;
fprintf('%30s %gs\n','repop (inplace)',toc/1000);
tic; for i=1:1000; Z=X.*repmat(Y(:,1:2),[1,size(X,2)/2]);end;
fprintf('%30s %gs\n','MATLAB',toc/1000);
return;
% simple function to check the accuracy of a test and report the result
function [testRes,trueRes,diff]=unitTest(testStr,A,op,B,testRes,trueRes,tol)
global LOGFILE;
if ( ~isempty(LOGFILE) ) % write tests and result to disc
writeMxInfo(LOGFILE,A);
fprintf(LOGFILE,'%s\n',op);
writeMxInfo(LOGFILE,B);
fprintf(LOGFILE,'=\n');
writeMxInfo(LOGFILE,trueRes);
fprintf(LOGFILE,'\n');
end
if ( nargin < 7 )
if ( isa(trueRes,'double') ) tol=1e-11;
elseif ( isa(trueRes,'single') ) tol=1e-5;
elseif ( isa(trueRes,'integer') )
warning('Integer inputs!'); tol=1;
elseif ( isa(trueRes,'logical') ) tol=0;
end
end;
diff=abs(testRes-trueRes)./max(1,abs(testRes+trueRes));
fprintf('%45s = %0.3g ',testStr,max(diff(:)));
if ( max(diff(:)) > tol )
if ( exist('mimage') )
mimage(squeeze(testRes),squeeze(trueRes),squeeze(diff))
end
warning([testStr ': failed!']);
fprintf('Type return to continue\n'); keyboard;
else
fprintf('Passed \n');
end
|
github | lcnhappe/happe-master | tprod_testcases.m | .m | happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/svm/tprod/tprod_testcases.m | 17,197 | utf_8 | 26b0e1b54b6867e5d62e8956c84e01bd |
function []=tprod_testcases(testCases,debugin)
% This file contains lots of test-cases to test the performance of the tprod
% files vs. the matlab built-ins.
%
%
% Copyright 2006- by Jason D.R. Farquhar ([email protected])
% Permission is granted for anyone to copy, use, or modify this
% software and accompanying documents for any uncommercial
% purposes, provided this copyright notice is retained, and note is
% made of any changes that have been made. This software and
% documents are distributed without any warranty, express or
% implied
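%
% For reference, tprod sums over negative indices; the equivalence
% below is one of the cases the accuracy tests check:
%   X = randn(10,10); Y = randn(10,10);
%   Z1 = tprod(X,[1 -1],Y,[-1 2]);         % matrix product X*Y
%   Z2 = X*Y;
%   max(abs(Z1(:)-Z2(:)))                  % should be ~0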
if ( nargin < 1 || isempty(testCases) ) testCases={'acc','timing','blksz'}; end;
if ( ~iscell(testCases) ) testCases={testCases}; end;
if ( nargin < 2 ) debugin=1; end;
global DEBUG; DEBUG=debugin;
% fprintf('------------------ memory execption test ------------\n');
% ans=tprod(randn(100000,1),1,randn(100000,1)',1);
% ans=tprod(randn(100000,1),1,randn(100000,1)',1,'m');
%-----------------------------------------------------------------------
% Real , Real
if ( ~isempty(strmatch('acc',testCases)) )
fprintf('------------------- Accuracy tests -------------------\n');
X=complex(randn(101,101,101),randn(101,101,101));
Y=complex(randn(101,101),randn(101,101));
fprintf('\n****************\n Double Real X, Double Real Y\n******************\n')
accuracyTests(real(X),real(Y),'dRdR');
fprintf('\n****************\n Double Real X, Double Complex Y\n******************\n')
accuracyTests(real(X),Y,'dRdC');
fprintf('\n****************\n Double Complex X, Double Real Y\n******************\n')
accuracyTests(X,real(Y),'dCdR');
fprintf('\n****************\n Double Complex X, Double Complex Y\n******************\n')
accuracyTests(X,Y,'dCdC');
fprintf('\n****************\n Double Real X, Single Real Y\n******************\n')
accuracyTests(real(X),single(real(Y)),'dRsR');
fprintf('\n****************\n Double Real X, Single Complex Y\n******************\n')
accuracyTests(real(X),single(Y),'dRsC');
fprintf('\n****************\n Double Complex X, Single Real Y\n******************\n')
accuracyTests(X,single(real(Y)),'dCsR');
fprintf('\n****************\n Double Complex X, Single Complex Y\n******************\n')
accuracyTests(X,single(Y),'dCsC');
fprintf('\n****************\n Single Real X, Double Real Y\n******************\n')
accuracyTests(single(real(X)),real(Y),'sRdR');
fprintf('\n****************\n Single Real X, Double Complex Y\n******************\n')
accuracyTests(single(real(X)),Y,'sRdC');
fprintf('\n****************\n Single Complex X, Double Real Y\n******************\n')
accuracyTests(single(X),real(Y),'sCdR');
fprintf('\n****************\n Single Complex X, Double Complex Y\n******************\n')
accuracyTests(single(X),Y,'sCdC');
fprintf('\n****************\n Single Real X, Single Real Y\n******************\n')
accuracyTests(single(real(X)),single(real(Y)),'sRsR');
fprintf('\n****************\n Single Real X, Single Complex Y\n******************\n')
accuracyTests(single(real(X)),single(Y),'sRsC');
fprintf('\n****************\n Single Complex X, Single Real Y\n******************\n')
accuracyTests(single(X),single(real(Y)),'sCsR');
fprintf('\n****************\n Single Complex X, Single Complex Y\n******************\n')
accuracyTests(single(X),single(Y),'sCsC');
fprintf('All tests passed\n');
end
% fprintf('------------------- Timing tests -------------------\n');
if ( ~isempty(strmatch('timing',testCases)) )
%Timing tests
X=complex(randn(101,101,101),randn(101,101,101));
Y=complex(randn(101,101),randn(101,101));
fprintf('\n****************\n Real X, Real Y\n******************\n')
timingTests(real(X),real(Y),'RR');
fprintf('\n****************\n Real X, Complex Y\n******************\n')
timingTests(real(X),Y,'RC');
fprintf('\n****************\n Complex X, Real Y\n******************\n')
timingTests(X,real(Y),'CR');
fprintf('\n****************\n Complex X, Complex Y\n******************\n')
timingTests(X,Y,'CC');
end
% fprintf('------------------- Scaling tests -------------------\n');
% scalingTests([32,64,128,256]);
if( ~isempty(strmatch('blksz',testCases)) )
fprintf('------------------- Blksz tests -------------------\n');
blkSzTests([128 96 64 48 40 32 24 16 0],[128,256,512,1024,2048]);
end
return;
function []=accuracyTests(X,Y,str)
unitTest([str ' OuterProduct, [1],[2]'],tprod(X(:,1),1,Y(:,1),2,'m'),X(:,1)*Y(:,1).');
unitTest([str ' Inner product, [-1],[-1]'],tprod(X(:,1),-1,Y(:,1),-1,'m'),X(:,1).'*Y(:,1));
unitTest([str ' Matrix product, [1 -1],[-1 2]'],tprod(X(:,:,1),[1 -1],Y,[-1 2],'m'),X(:,:,1)*Y);
unitTest([str ' transposed matrix product, [-1 1],[-1 2]'],tprod(X(:,:,1),[-1 1],Y,[-1 2],'m'),X(:,:,1).'*Y);
unitTest([str ' Matrix frobenius norm, [-1 -2],[-1 -2]'],tprod(X(:,:,1),[-1 -2],Y,[-1 -2],'m'),sum(sum(X(:,:,1).*Y)));
unitTest([str ' transposed matrix frobenius norm, [-1 -2],[-2 -1]'],tprod(X(:,:,1),[-1 -2],Y,[-2 -1],'m'),sum(sum(X(:,:,1).'.*Y)));
unitTest([str ' ignored dims, [0 -2],[-2 2 1]'],tprod(Y(1,:),[0 -2],X(:,:,:),[-2 2 1],'m'),reshape(Y(1,:)*reshape(X,size(X,1),[]),size(X,2),size(X,3)).');
% Higher order matrix operations
unitTest([str ' spatio-temporal filter [-1 -2 1],[-1 -2]'],tprod(X,[-1 -2 1],Y,[-1 -2],'m'),reshape(Y(:).'*reshape(X,size(X,1)*size(X,2),size(X,3)),[size(X,3) 1]));
unitTest([str ' spatio-temporal filter (fallback) [-1 -2 1],[-1 -2]'],tprod(X,[-1 -2 1],Y,[-1 -2]),reshape(Y(:).'*reshape(X,size(X,1)*size(X,2),size(X,3)),[size(X,3) 1]));
unitTest([str ' spatio-temporal filter (order) [-1 -2],[-1 -2 1]'],tprod(Y,[-1 -2],X,[-1 -2 1]),reshape(Y(:).'*reshape(X,size(X,1)*size(X,2),size(X,3)),[size(X,3) 1]));
unitTest([str ' spatio-temporal filter (order) [-1 -2],[-1 -2 3]'],tprod(Y,[-1 -2],X,[-1 -2 3]),reshape(Y(:).'*reshape(X,size(X,1)*size(X,2),size(X,3)),[1 1 size(X,3)]));
unitTest([str ' transposed spatio-temporal filter [1 -2 -3],[-2 -3]'],tprod(X,[1 -2 -3],Y,[-2 -3],'m'),reshape(reshape(X,size(X,1),size(X,2)*size(X,3))*Y(:),[size(X,1) 1]));
unitTest([str ' matrix-vector product [-1 1 2][-1]'],tprod(X,[-1 1 2],Y(:,1),[-1],'m'),reshape(Y(:,1).'*reshape(X,[size(X,1) size(X,2)*size(X,3)]),[size(X,2) size(X,3)]));
unitTest([str ' spatial filter (fallback): [-1 2 3],[-1 1]'],tprod(X,[-1 2 3],Y,[-1 1]),reshape(Y.'*reshape(X,[size(X,1) size(X,2)*size(X,3)]),[size(Y,2) size(X,2) size(X,3)]));
unitTest([str ' spatial filter: [-1 2 3],[-1 1]'],tprod(X,[-1 2 3],Y,[-1 1],'m'),reshape(Y.'*reshape(X,[size(X,1) size(X,2)*size(X,3)]),[size(Y,2) size(X,2) size(X,3)]));
unitTest([str ' temporal filter [1 -2 3],[2 -2]'],tprod(X,[1 -2 3],Y(1,:),[2 -2],'m'),sum(X.*repmat(Y(1,:),[size(X,1) 1 size(X,3)]),2));
unitTest([str ' temporal filter [2 -2],[1 -2 3]'],tprod(Y(1,:),[2 -2],X,[1 -2 3],'m'),sum(X.*repmat(Y(1,:),[size(X,1) 1 size(X,3)]),2));
unitTest([str ' temporal filter [-2 2],[1 -2 3]'],tprod(Y(1,:).',[-2 2],X,[1 -2 3],'m'),sum(X.*repmat(Y(1,:),[size(X,1) 1 size(X,3)]),2));
unitTest([str ' temporal filter [-2 2],[1 -2 3]'],tprod(Y(1,:).',[-2 2],X,[1 -2 3],'m'),sum(X.*repmat(Y(1,:),[size(X,1) 1 size(X,3)]),2));
Xp=permute(X,[1 3 2]);
unitTest([str ' blk-code [-1 1 -2][-1 2 -2]'],tprod(X,[-1 1 -2],X,[-1 2 -2],'m'),reshape(Xp,[],size(Xp,3)).'*reshape(Xp,[],size(Xp,3)));
return;
function []=timingTests(X,Y,str);
% outer product simulation
fprintf([str ' OuterProduct [1][2]\n']);
A=complex(randn(size(X)),randn(size(X))); % flush cache?
tic; for i=1:1000; Z=tprod(X(:,1),1,Y(:,1),2); end
fprintf('%30s %gs\n','tprod',toc/1000); % = .05 / .01
A=complex(randn(size(X)),randn(size(X))); % flush cache?
tic; for i=1:1000; Zm=tprod(X(:,1),1,Y(:,1),2,'m'); end
fprintf('%30s %gs\n','tprod m',toc/1000); % = .05 / .01
A=complex(randn(size(X)),randn(size(X))); % flush cache?
tic, for i=1:1000;T=X(:,1)*Y(:,1).';end;
fprintf('%30s %gs\n','MATLAB',toc/1000); % = .03 / .01
% matrix product
fprintf([str ' MatrixProduct [1 -1][-1 2]\n']);
A=complex(randn(size(X)),randn(size(X))); % flush cache?
tic, for i=1:1000;Z=tprod(X(:,:,1),[1 -1],Y,[-1 2]);end
fprintf('%30s %gs\n','tprod',toc/1000);% = .28 / .06
A=complex(randn(size(X)),randn(size(X))); % flush cache?
tic, for i=1:1000;Zm=tprod(X(:,:,1),[1 -1],Y,[-1 2],'m');end
fprintf('%30s %gs\n','tprod m',toc/1000);% = .28 / .06
A=complex(randn(size(X)),randn(size(X))); % flush cache?
tic, for i=1:1000;T=X(:,:,1)*Y;end
fprintf('%30s %gs\n','MATLAB',toc/1000);% = .17 / .06
% transposed matrix product simulation
fprintf([str ' transposed Matrix Product [-1 1][2 -1]\n']);
A=complex(randn(size(X)),randn(size(X))); % flush cache?
tic, for i=1:1000;Z=tprod(X(:,:,1),[-1 1],Y,[2 -1]);end
fprintf('%30s %gs\n','tprod',toc/1000);% =.3 / .06
A=complex(randn(size(X)),randn(size(X))); % flush cache?
tic, for i=1:1000;Zm=tprod(X(:,:,1),[-1 1],Y,[2 -1],'m');end
fprintf('%30s %gs\n','tprod m',toc/1000);% =.3 / .06
A=complex(randn(size(X)),randn(size(X))); % flush cache?
tic, for i=1:1000;T=X(:,:,1).'*Y.';end
fprintf('%30s %gs\n','MATLAB',toc/1000); % =.17 / .06
% Higher order matrix operations % times: P3-m 1.8Ghz 2048k / P4 2.4Ghz 512k
fprintf([str ' spatio-temporal filter [-1 -2 1] [-1 -2]\n']);
A=complex(randn(size(X)),randn(size(X))); % flush cache?
tic,for i=1:500;Z=tprod(X,[-1 -2 1],Y,[-1 -2]);end,
fprintf('%30s %gs\n','tprod',toc/500);% =.26 / .18
A=complex(randn(size(X)),randn(size(X))); % flush cache?
tic,for i=1:500;Zm=tprod(X,[-1 -2 1],Y,[-1 -2],'m');end,
fprintf('%30s %gs\n','tprod m',toc/500);% =.26 / .18
A=complex(randn(size(X)),randn(size(X))); % flush cache?
tic,for i=1:500;T=reshape(Y(:).'*reshape(X,size(X,1)*size(X,2),size(X,3)),[size(X,3) 1]);end,
fprintf('%30s %gs\n','MATLAB',toc/500); %=.21 / .18
fprintf([str ' transposed spatio-temporal filter [1 -2 -3] [-2 -3]\n']);
A=complex(randn(size(X)),randn(size(X))); % flush cache?
tic,for i=1:50;Z=tprod(X,[1 -2 -3],Y,[-2 -3]);end,
fprintf('%30s %gs\n','tprod',toc/50);% =.27 / .28
A=complex(randn(size(X)),randn(size(X))); % flush cache?
tic,for i=1:50;Zm=tprod(X,[1 -2 -3],Y,[-2 -3],'m');end,
fprintf('%30s %gs\n','tprod m',toc/50);% =.27 / .28
A=complex(randn(size(X)),randn(size(X))); % flush cache?
tic,for i=1:50;T=reshape(reshape(X,size(X,1),size(X,2)*size(X,3))*Y(:),[size(X,1) 1]);end,
fprintf('%30s %gs\n','MATLAB',toc/50); %=.24 / .26
% MATRIX vector product
fprintf([str ' matrix-vector product [-1 1 2] [-1]\n']);
A=complex(randn(size(X)),randn(size(X))); % flush cache?
tic,for i=1:500; Z=tprod(X,[-1 1 2],Y(:,1),[-1]);end,
fprintf('%30s %gs\n','tprod',toc/500); %=.27 / .26
A=complex(randn(size(X)),randn(size(X))); % flush cache?
tic,for i=1:500; Zm=tprod(X,[-1 1 2],Y(:,1),[-1],'m');end,
fprintf('%30s %gs\n','tprod m',toc/500); %=.27 / .26
A=complex(randn(size(X)),randn(size(X))); % flush cache?
tic,for i=1:500; T=reshape(Y(:,1).'*reshape(X,[size(X,1) size(X,2)*size(X,3)]),[1 size(X,2) size(X,3)]);end,
fprintf('%30s %gs\n','MATLAB (reshape)',toc/500);%=.21 / .28
A=complex(randn(size(X)),randn(size(X))); % flush cache?
tic,for i=1:500; T=zeros([size(X,1),1,size(X,3)]);for k=1:size(X,3); T(:,:,k)=Y(:,1).'*X(:,:,k); end,end,
fprintf('%30s %gs\n','MATLAB (loop)',toc/500); %=.49 /
% spatial filter
fprintf([str ' Spatial filter: [-1 2 3],[-1 1]\n']);
A=complex(randn(size(X)),randn(size(X))); % flush cache?
tic;for i=1:500;Z=tprod(X,[-1 2 3],Y(:,1:2),[-1 1]);end;
fprintf('%30s %gs\n','tprod',toc/500);%=.39/.37
A=complex(randn(size(X)),randn(size(X))); % flush cache?
tic;for i=1:500;Zm=tprod(X,[-1 2 3],Y(:,1:2),[-1 1],'m');end;
fprintf('%30s %gs\n','tprod m',toc/500);%=.39/.37
A=complex(randn(size(X)),randn(size(X))); % flush cache?
tic;for i=1:500; T=reshape(Y(:,1:2).'*reshape(X,[size(X,1) size(X,2)*size(X,3)]),[2 size(X,2) size(X,3)]);end;
fprintf('%30s %gs\n','MATLAB',toc/500);
A=complex(randn(size(X)),randn(size(X))); % flush cache?
tic;for i=1:500;T=zeros(2,size(X,2),size(X,3));for k=1:size(X,3);T(:,:,k)=Y(:,1:2).'*X(:,:,k);end;end;
fprintf('%30s %gs\n','MATLAB (loop)',toc/500); %=.76/.57
% temporal filter
fprintf([str ' Temporal filter: [1 -2 3],[2 -2]\n']);
A=complex(randn(size(X)),randn(size(X))); % flush cache?
tic,for i=1:50; Z=tprod(X,[1 -2 3],Y,[2 -2]);end
fprintf('%30s %gs\n','tprod',toc/50); %=.27 / .31
A=complex(randn(size(X)),randn(size(X))); % flush cache?
tic,for i=1:50; T=zeros([size(X,1),size(Y,1),size(X,3)]);for k=1:size(X,3); T(:,:,k)=X(:,:,k)*Y(:,:).'; end,end,
fprintf('%30s %gs\n','MATLAB (loop)',toc/50); %= .50 /
%tic,for i=1:50; T=zeros([size(X,1),size(Y,1),size(X,3)]); for k=1:size(Y,1); T(:,k,:)=sum(X.*repmat(Y(k,:),[size(X,1) 1 size(X,3)]),2);end,end;
%fprintf('%30s %gs\n','MATLAB (repmat)',toc/50); %=3.9 / 3.3
fprintf([str ' Temporal filter2: [1 -2 3],[-2 2]\n']);
A=complex(randn(size(X)),randn(size(X))); % flush cache?
tic,for i=1:50; Z=tprod(X,[1 -2 3],Y,[-2 2]);end
fprintf('%30s %gs\n','tprod',toc/50); %=.27 / .31
A=complex(randn(size(X)),randn(size(X))); % flush cache?
tic,for i=1:50; T=zeros([size(X,1),size(Y,1),size(X,3)]);for k=1:size(X,3); T(:,:,k)=X(:,:,k)*Y(:,:); end,end,
fprintf('%30s %gs\n','MATLAB (loop)',toc/50); %= .50 /
% Data Covariances
fprintf([str ' Channel-covariance/trial(3) [1 -1 3] [2 -1 3]\n']);
A=complex(randn(size(X)),randn(size(X))); % flush cache?
tic;for i=1:50;Z=tprod(X,[1 -1 3],[],[2 -1 3]);end;
fprintf('%30s %gs\n','tprod',toc/50); %=8.36/7.6
A=complex(randn(size(X)),randn(size(X))); % flush cache?
tic,for i=1:50;T=zeros(size(X,1),size(X,1),size(X,3));for k=1:size(X,3); T(:,:,k)=X(:,:,k)*X(:,:,k)';end;end,
fprintf('%30s %gs\n','MATLAB (loop)',toc/50); %=9.66/7.0
% N.B. --- aligned over dim 1 takes 2x longer!
fprintf([str ' Channel-covariance/trial(1) [1 -1 2] [1 -1 3]\n']);
A=complex(randn(size(X)),randn(size(X))); % flush cache?
tic;for i=1:10;Z=tprod(X,[1 -1 2],[],[1 -1 3]);end;
fprintf('%30s %gs\n','tprod',toc/10); %= /37.2
A=complex(randn(size(X)),randn(size(X))); % flush cache?
tic,for i=1:10;T=zeros(size(X,1),size(X,1),size(X,3));for k=1:size(X,3);T(k,:,:)=shiftdim(X(k,:,:))'*shiftdim(X(k,:,:));end;end,
fprintf('%30s %gs\n','MATLAB',toc/10); %=17.2/25.8
return;
function []=scalingTests(Ns);
% Scaling test
fprintf('Simple test of the effect of the acc size\n');
for N=Ns;
fprintf('X=[%d x %d]\n',N,N*N);X=randn(N,N*N);Y=randn(N*N,1);
fprintf('[1 -1][-1]\n');
tic, for i=1:1000;Z=tprod(X,[1 -1],Y,[-1],'n');end
fprintf('%30s %gs\n','tprod',toc/1000);% = .28 / .06
tic, for i=1:1000;Z=tprod(X,[1 -1],Y,[-1],'mn');end
fprintf('%30s %gs\n','tprod m',toc/1000);% = .28 / .06
end
fprintf('Simple mat vs non mat tests\n');
for N=Ns;
fprintf('N=%d\n',N);X=randn(N,N,N);Y=randn(N,N);
fprintf('[1 -1 -2][-1 -2]\n');
tic, for i=1:1000;Z=tprod(X,[1 -1 -2],Y,[-1 -2]);end
fprintf('%30s %gs\n','tprod',toc/1000);% = .28 / .06
tic, for i=1:1000;Z=tprod(X,[1 -1 -2],Y,[-1 -2],'m');end
fprintf('%30s %gs\n','tprod m',toc/1000);% = .28 / .06
fprintf('[-1 -2 1][-1 -2]\n');
tic, for i=1:1000;Z=tprod(X,[-1 -2 1],Y,[-1 -2]);end
fprintf('%30s %gs\n','tprod',toc/1000);% = .28 / .06
tic, for i=1:1000;Z=tprod(X,[-1 -2 1],Y,[-1 -2],'m');end
fprintf('%30s %gs\n','tprod m',toc/1000);% = .28 / .06
fprintf('[-1 2 3][-1 1]\n');
tic, for i=1:100;Z=tprod(X,[-1 2 3],Y,[-1 1]);end
fprintf('%30s %gs\n','tprod',toc/100);% = .28 / .06
tic, for i=1:100;Z=tprod(X,[-1 2 3],Y,[-1 1],'m');end
fprintf('%30s %gs\n','tprod m',toc/100);% = .28 / .06
end
function []=blkSzTests(blkSzs,Ns);
fprintf('Blksz optimisation tests\n');
%blkSzs=[64 48 40 32 24 16 0];
tptime=zeros(numel(blkSzs)+2,1);
for N=Ns;%[128,256,512,1024,2048];
X=randn(N,N);Y=randn(size(X));
for i=1:5;
% use tprod without matlab
for b=1:numel(blkSzs); blkSz=blkSzs(b);
clear T;T=randn(1024,1024); % clear cache
tic,for j=1:3;tprod(X,[1 -1],Y,[-1 2],'mn',blkSz);end;
tptime(b)=tptime(b)+toc;
end;
% tprod with defaults & matlab if possible
clear T;T=randn(1024,1024); % clear cache
tic,for j=1:3;tprod(X,[1 -1],Y,[-1 2]);end;
tptime(numel(blkSzs)+1)=tptime(numel(blkSzs)+1)+toc;
% the pure matlab code
clear T;T=randn(1024,1024); % clear cache
tic,for j=1:3; Z=X*Y;end;
tptime(numel(blkSzs)+2)=tptime(numel(blkSzs)+2)+toc;
end;
for j=1:numel(blkSzs);
fprintf('blk=%d, N = %d -> %gs \n',blkSzs(j),N,tptime(j));
end
fprintf('tprod, N = %d -> %gs \n',N,tptime(numel(blkSzs)+1));
fprintf('MATLAB, N = %d -> %gs \n',N,tptime(numel(blkSzs)+2));
fprintf('\n');
end;
return;
% simple function to check the accuracy of a test and report the result
function [testRes,trueRes,diff]=unitTest(testStr,testRes,trueRes,tol)
global DEBUG;
if ( nargin < 4 )
if ( isa(trueRes,'double') ) tol=1e-11;
elseif ( isa(trueRes,'single') ) tol=1e-5;
elseif ( isa(trueRes,'integer') )
warning('Integer inputs!'); tol=1;
elseif ( isa(trueRes,'logical') ) tol=0;
end
end
diff=abs(testRes-trueRes)./max(1,abs(testRes+trueRes));
fprintf('%60s = %0.3g ',testStr,max(diff(:)));
if ( max(diff(:)) > tol )
if ( exist('mimage') )
mimage(squeeze(testRes),squeeze(trueRes),squeeze(diff))
end
fprintf(' **FAILED***\n');
if ( DEBUG>0 )
warning([testStr ': failed!']),
fprintf('Type return to continue\n');keyboard;
end;
else
fprintf('Passed \n');
end
|
github
|
lcnhappe/happe-master
|
etprod.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/svm/tprod/etprod.m
| 3,882 |
utf_8
|
2f529c6b86be54251a59ae7f9db81d98
|
function [C,Atp,Btp]=etprod(Cidx,A,Aidx,B,Bidx)
% tprod wrapper to make calls more similar to Einstein Summation Convention
%
% [C,Atp,Btp]=etprod(Cidx,A,Aidx,B,Bidx);
% Wrapper function for tprod to map between Einstein summation
% convention (ESC) and tprod's numeric calling convention, e.g.
% 1) Matrix Matrix product:
% ESC: C_ij = A_ik B_kj <=> C = etprod('ij',A,'ik',B,'kj');
% 2) Vector outer product
% ESC: C_ij = A_i B_j <=> C = etprod('ij',A,'i',B,'j'); % A,B col vec
% C = etprod('ij',A,' i',B,' j'); % A,B row vec
% N.B. use spaces ' ' to indicate empty/ignored *singleton* dimensions
% 3) Matrix vector product
% ESC: C_i = A_ik B_k <=> C = etprod('i',A,'ik',B,'k');
% 4) Spatial Filtering
% ESC: FX_fte = A_cte B_cf <=> C = etprod('fte',A,'cte',B,'cf')
% OR:
% C = etprod({'feat','time','epoch'},A,{'ch','time','epoch'},B,{'ch','feat'})
%
% Inputs:
% Cidx -- the list of dimension labels for the output
% A -- [n-d] array of the A values
% Aidx -- [ndims(A) x 1] (array, string, or cell array of strings)
% list of dimension labels for A array
% B -- [m-d] array of the B values
% Bidx -- [ndims(B) x 1] (array, string or cell array of strings)
% list of dimension labels for B array
% Outputs:
% C -- [p-d] array of output values. Dimension labels are as in Cidx
% Atp -- [ndims(A) x 1] A's dimspec as used in the core tprod call
% Btp -- [ndims(B) x 1] B's dimspec as used in the core tprod call
%
% See Also: tprod, tprod_testcases
%
% Copyright 2006- by Jason D.R. Farquhar ([email protected])
% Permission is granted for anyone to copy, use, or modify this
% software and accompanying documents for any uncommercial
% purposes, provided this copyright notice is retained, and note is
% made of any changes that have been made. This software and
% documents are distributed without any warranty, express or
% implied
if ( iscell(Aidx)~=iscell(Bidx) || iscell(Cidx)~=iscell(Aidx) )
error('Aidx, Bidx and Cidx must all be of the same type: all cells or all arrays');
end
Atp = zeros(size(Aidx));
Btp = zeros(size(Bidx));
% Map inner product dimensions, to unique *negative* index
for i=1:numel(Aidx)
if ( iscell(Aidx) ) Bmatch = strcmp(Aidx{i}, Bidx);
else Bmatch = (Aidx(i)==Bidx);
end
if ( any(Bmatch) ) Btp(Bmatch)=-i; Atp(i)=-i; end;
end
% Spaces/empty values in the input become 0's, i.e. ignored dimensions
if ( iscell(Aidx) )
Btp(strcmp(' ',Bidx))=0;Btp(strcmp('',Bidx))=0;
Atp(strcmp(' ',Aidx))=0;Atp(strcmp('',Aidx))=0;
else
Btp(' '==Bidx)=0;
Atp(' '==Aidx)=0;
end
% Map to output position numbers, to correct *positive* index
for i=1:numel(Cidx);
if ( iscell(Aidx) )
Atp(strcmp(Cidx{i}, Aidx))=i;
Btp(strcmp(Cidx{i}, Bidx))=i;
else
Atp(Cidx(i)==Aidx)=i;
Btp(Cidx(i)==Bidx)=i;
end
end
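% For intuition: Cidx='ij', Aidx='ik', Bidx='kj' yields Atp=[1 -2] and
% Btp=[-2 2], so the call below becomes tprod(A,[1 -2],B,[-2 2]), i.e.
% the ordinary matrix product A*B (cf. the testCase at the end of this file).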
% now do the tprod call.
global LOG; if ( isempty(LOG) ) LOG=0; end; % N.B. set LOG to valid fd to log
if ( LOG>0 )
fprintf(LOG,'tprod(%s,[%s], %s,[%s])\n',mxPrint(A),sprintf('%d ',Atp),mxPrint(B),sprintf('%d ',Btp));
end
C=tprod(A,Atp,B,Btp,'n');
return;
function [str]=mxPrint(mx)
sz=size(mx);
if ( isa(mx,'double') ) str='d'; else str='s'; end;
if ( isreal(mx)) str=[str 'r']; else str=[str 'c']; end;
str=[str ' [' sprintf('%dx',sz(1:end-1)) sprintf('%d',sz(end)) '] '];
return;
%----------------------------------------------------------------------------
function testCase();
A=randn(10,10); B=randn(10,10);
C2 = tprod(A,[1 -2],B,[-2 2]);
C = etprod('ij',A,'ik',B,'kj'); mad(C2,C)
C = etprod({'i' 'j'},A,{'i' 'k'},B,{'k' 'j'});
A=randn(100,100);B=randn(100,100,4);
C3 = tprod(A,[-1 -2],B,[-1 -2 3]);
C3 = tprod(B,[-1 -2 1],A,[-1 -2]);
C3 = tprod(A,[-1 -2],B,[-1 -2 1]);
C = etprod('3',A,'12',B,'123');
C = etprod([3],A,[1 2],B,[1 2 3]);
C = etprod({'3'},A,{'1' '2'},B,{'1' '2' '3'})
|
github
|
lcnhappe/happe-master
|
covshrinkKPM.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/slda/covshrinkKPM.m
| 2,367 |
utf_8
|
5dec20c5cd59e71f053003124ae98c46
|
function [s, lam] = covshrinkKPM(x, shrinkvar)
% Shrinkage estimate of a covariance matrix, using optimal shrinkage coefficient.
% INPUT:
% x is n*p data matrix
% shrinkvar :
% 0: corshrink (default)
% 1: varshrink
%
% OUTPUT:
% s is the posdef p*p cov matrix
% lam is the shrinkage coefficient
%
% See J. Schaefer and K. Strimmer. 2005. A shrinkage approach to
% large-scale covariance matrix estimation and implications
% for functional genomics. Statist. Appl. Genet. Mol. Biol. 4:32.
% This code is based on their original code http://strimmerlab.org/software.html
% but has been vectorized and simplified by Kevin Murphy.
% Adapted by Marcel van Gerven
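% Example usage (hypothetical sizes): x = randn(50,200) for n=50 samples of
% p=200 variables, then [s, lam] = covshrinkKPM(x) returns the shrunken
% p*p covariance estimate and the shrinkage coefficient used.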
if nargin < 2, shrinkvar = 0; end
[n p] = size(x);
if p==1, s=var(x); return; end
switch num2str(shrinkvar)
case '1' % Eqns 10 and 11 of Opgen-Rhein and Strimmer (2007)
[v, lam] = varshrink(x);
dsv = diag(sqrt(v));
r = corshrink(x);
s = dsv*r*dsv;
otherwise % case 'D' of Schafer and Strimmer
v = var(x);
dsv = diag(sqrt(v));
[r, lam] = corshrink(x);
s = dsv*r*dsv;
end
%%%%%%%%
function [sv, lambda] = varshrink (x)
% Eqns 10 and 11 of Opgen-Rhein and Strimmer (2007)
[v, vv] = varcov(x);
v = diag(v); vv = diag(vv);
vtarget = median(v);
numerator = sum(vv);
denominator = sum((v-vtarget).^2);
lambda = numerator/denominator;
lambda = min(lambda, 1); lambda = max(lambda, 0);
sv = (1-lambda)*v + lambda*vtarget;
function [Rhat, lambda] = corshrink(x)
% Eqn on p4 of Schafer and Strimmer 2005
[n, p] = size(x);
sx = makeMeanZero(x); sx = makeStdOne(sx); % convert S to R
[r, vr] = varcov(sx);
offdiagsumrij2 = sum(sum(tril(r,-1).^2));
offdiagsumvrij = sum(sum(tril(vr,-1)));
lambda = offdiagsumvrij/offdiagsumrij2;
lambda = min(lambda, 1); lambda = max(lambda, 0);
Rhat = (1-lambda)*r;
Rhat(logical(eye(p))) = 1;
function [S, VS] = varcov(x)
% S(i,j) = sample covariance between columns i and j of x
% VS(i,j) = estimated variance of S(i,j)
[n,p] = size(x);
xc = makeMeanZero(x);
S = cov(xc);
XC1 = repmat(reshape(xc', [p 1 n]), [1 p 1]); % size p*p*n !
XC2 = repmat(reshape(xc', [1 p n]), [p 1 1]); % size p*p*n !
VS = var(XC1 .* XC2, 0, 3) * n/((n-1)^2);
function xc = makeMeanZero(x)
% make column means zero
[n,p] = size(x);
m = mean(x);
xc = x - ones(n, 1)*m;
function xc = makeStdOne(x)
% make column variances one
[n,p] = size(x);
sd = ones(n, 1)*std(x);
xc = x ./ sd;
|
github
|
lcnhappe/happe-master
|
glmnet.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/glmnet/glmnet.m
| 12,230 |
utf_8
|
8b349969d0712566867f87fff7e75396
|
function fit = glmnet(x, y, family, options)
%--------------------------------------------------------------------------
% glmnet.m: fit an elasticnet model path
%--------------------------------------------------------------------------
%
% DESCRIPTION:
% Fit a regularization path for the elasticnet at a grid of values for
% the regularization parameter lambda. Can deal with all shapes of data.
% Fits linear, logistic and multinomial regression models.
%
% USAGE:
% fit = glmnet(x, y)
% fit = glmnet(x, y, family, options)
%
% EXTERNAL FUNCTIONS:
% options = glmnetSet; provided with glmnet.m
%
% INPUT ARGUMENTS:
% x Input matrix, of dimension nobs x nvars; each row is an
% observation vector. Can be in sparse column format.
% y Response variable. Quantitative for family =
% 'gaussian'. For family = 'binomial' should be either a vector
% of two levels, or a two-column matrix of counts or
% proportions. For family = 'multinomial', can be either a
% vector of nc>=2 levels, or a matrix with nc columns of counts
% or proportions.
% family Response type. (See above). Default is 'gaussian'.
% options A structure that may be set and altered by glmnetSet (type
% help glmnetSet).
%
% OUTPUT ARGUMENTS:
% fit A structure.
% fit.a0 Intercept sequence of length length(fit.lambda).
% fit.beta For "elnet" and "lognet" models, a nvars x length(lambda)
% matrix of coefficients. For "multnet", a list of nc such
% matrices, one for each class.
% fit.lambda The actual sequence of lambda values used.
% fit.dev The fraction of (null) deviance explained (for "elnet", this
% is the R-square).
% fit.nulldev Null deviance (per observation).
% fit.df The number of nonzero coefficients for each value of lambda.
% For "multnet", this is the number of variables with a nonzero
% coefficient for any class.
% fit.dfmat For "multnet" only. A matrix consisting of the number of
% nonzero coefficients per class.
% fit.dim Dimension of coefficient matrix (ices).
% fit.npasses Total passes over the data summed over all lambda values.
% fit.jerr Error flag, for warnings and errors (largely for internal
% debugging).
% fit.class Type of regression - internal usage.
%
% DETAILS:
% The sequence of models implied by lambda is fit by coordinate descent.
% For family = 'gaussian' this is the lasso sequence if alpha = 1, else
% it is the elasticnet sequence. For family = 'binomial' or family =
% "multinomial", this is a lasso or elasticnet regularization path for
% fitting the linear logistic or multinomial logistic regression paths.
% Sometimes the sequence is truncated before options.nlambda values of
% lambda have been used, because of instabilities in the logistic or
% multinomial models near a saturated fit. glmnet(..., family =
% 'binomial') fits a traditional logistic regression model for the
% log-odds. glmnet(..., family = 'multinomial') fits a symmetric
% multinomial model, where each class is represented by a linear model
% (on the log-scale). The penalties take care of redundancies. A
% two-class "multinomial" model will produce the same fit as the
% corresponding "binomial" model, except the pair of coefficient
% matrices will be equal in magnitude and opposite in sign, and half the
% "binomial" values. Note that the objective function for
% "gaussian" is
% 1 / (2 * nobs) * RSS + lambda * penalty,
% and for the logistic models it is
% -(1 / nobs) * loglik + lambda * penalty.
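% As a check (variable names here are hypothetical, not part of the
% interface), the "gaussian" objective at a single lambda is
% sum((y - a0 - x*beta).^2) / (2 * nobs) + ...
% lambda * sum(alpha*abs(beta) + (1-alpha)/2*beta.^2)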
%
% LICENSE: GPL-2
%
% DATE: 14 Jul 2009
%
% AUTHORS:
% Algorithm was designed by Jerome Friedman, Trevor Hastie and Rob Tibshirani
% Fortran code was written by Jerome Friedman
% R wrapper (from which the MATLAB wrapper was adapted) was written by Trevor Hastie
% MATLAB wrapper was written and maintained by Hui Jiang, [email protected]
% Department of Statistics, Stanford University, Stanford, California, USA.
%
% REFERENCES:
% Friedman, J., Hastie, T. and Tibshirani, R. (2009)
% Regularization Paths for Generalized Linear Models via Coordinate Descent.
% Journal of Statistical Software, 33(1), 2010
%
% SEE ALSO:
% glmnetSet, glmnetPrint, glmnetPlot, glmnetPredict and glmnetCoef methods.
%
% EXAMPLES:
% x=randn(100,20);
% y=randn(100,1);
% g2=randsample(2,100,true);
% g4=randsample(4,100,true);
% fit1=glmnet(x,y);
% glmnetPrint(fit1);
% glmnetCoef(fit1,0.01) % extract coefficients at a single value of lambda
% glmnetPredict(fit1,'response',x(1:10,:),[0.01,0.005]') % make predictions
% fit2=glmnet(x,g2,'binomial');
% fit3=glmnet(x,g4,'multinomial');
%
% DEVELOPMENT:
% 14 Jul 2009: Original version of glmnet.m written.
% 26 Jan 2010: Fixed a bug in the description of y, pointed out by
% Peter Rijnbeek from Erasmus University.
% 09 Mar 2010: Fixed a bug of printing "ka = 2", pointed out by
% Ramon Casanova from Wake Forest University.
% 25 Mar 2010: Fixed a bug when p > n in multinomial fitting, pointed
% out by Gerald Quon from University of Toronto
% Check input arguments
if nargin < 2
error('more input arguments needed.');
end
if nargin < 3
family = 'gaussian';
end
if nargin < 4
options = glmnetSet;
end
% Prepare parameters
nlam = options.nlambda;
[nobs,nvars] = size(x);
weights = options.weights;
if isempty(weights)
weights = ones(nobs,1);
end
maxit = options.maxit;
if strcmp(family, 'binomial') || strcmp(family, 'multinomial')
[noo,nc] = size(y);
kopt = double(options.HessianExact);
if noo ~= nobs
error('x and y have different number of rows');
end
if nc == 1
classes = unique(y);
nc = length(classes);
indexes = eye(nc);
y = indexes(y,:);
end
if strcmp(family, 'binomial')
if nc > 2
error ('More than two classes; use multinomial family instead');
end
nc = 1; % for calling multinet
end
if ~isempty(weights)
% check if any are zero
o = weights > 0;
if ~all(o) %subset the data
y = y(o,:);
x = x(o,:);
weights = weights(o);
nobs = sum(o);
end
[my,ny] = size(y);
y = y .* repmat(weights,1,ny);
end
% Compute the null deviance
prior = sum(y,1);
sumw = sum(sum(y));
prior = prior / sumw;
nulldev = -2 * sum(sum(y .* (ones(nobs, 1) * log(prior)))) / sumw;
elseif strcmp(family, 'gaussian')
% Compute the null deviance
ybar = y' * weights/ sum(weights);
nulldev = (y' - ybar).^2 * weights / sum(weights);
if strcmp(options.type, 'covariance')
ka = 1;
elseif strcmp(options.type, 'naive')
ka = 2;
else
error('unrecognized type');
end
else
error('unrecognized family');
end
ne = options.dfmax;
if ne == 0
ne = nvars + 1;
end
nx = options.pmax;
if nx == 0
nx = min(ne * 1.2, nvars);
end
exclude = options.exclude;
if ~isempty(exclude)
exclude = unique(exclude);
if ~all(exclude > 0 & exclude <= nvars)
error('Some excluded variables out of range');
end
jd = [length(exclude); exclude];
else
jd = 0;
end
vp = options.penalty_factor;
if isempty(vp)
vp = ones(nvars,1);
end
isd = double(options.standardize);
thresh = options.thresh;
lambda = options.lambda;
lambda_min = options.lambda_min;
if lambda_min == 0
if nobs < nvars
lambda_min = 5e-2;
else
lambda_min = 1e-4;
end
end
if isempty(lambda)
if (lambda_min >= 1)
error('lambda_min should be less than 1');
end
flmin = lambda_min;
ulam = 0;
else
flmin = 1.0;
if any(lambda < 0)
error ('lambdas should be non-negative');
end
ulam = -sort(-lambda);
nlam = length(lambda);
end
parm = options.alpha;
if strcmp(family, 'gaussian')
[a0,ca,ia,nin,rsq,alm,nlp,jerr] = glmnetMex(parm,x,y,jd,vp,ne,nx,nlam,flmin,ulam,thresh,isd,weights,ka);
else
[a0,ca,ia,nin,dev,alm,nlp,jerr] = glmnetMex(parm,x,y,jd,vp,ne,nx,nlam,flmin,ulam,thresh,isd,nc,maxit,kopt);
end
% Prepare output
lmu = length(alm);
ninmax = max(nin);
lam = alm;
if isempty(options.lambda)
lam = fix_lam(lam); % first lambda is infinity; changed to entry point
end
errmsg = err(jerr, maxit, nx);
if errmsg.n == 1
error(errmsg.msg);
elseif errmsg.n == -1
warning(errmsg.msg);
end
if strcmp(family, 'multinomial')
beta_list = {};
a0 = a0 - repmat(mean(a0), nc, 1);
dfmat=a0;
dd=[nvars, lmu];
if ninmax > 0
ca = reshape(ca, nx, nc, lmu);
ca = ca(1:ninmax,:,:);
ja = ia(1:ninmax);
[ja1,oja] = sort(ja);
df = any(abs(ca) > 0, 2);
df = sum(df, 1);
df = df(:);
for k=1:nc
ca1 = reshape(ca(:,k,:), ninmax, lmu);
cak = ca1(oja,:);
dfmat(k,:) = sum(abs(cak) > 0, 1); % nonzero count per lambda value for this class
beta = zeros(nvars, lmu);
beta(ja1,:) = cak;
beta_list{k} = beta;
end
else
for k = 1:nc
dfmat(k,:) = zeros(1,lmu);
beta_list{k} = zeros(nvars, lmu);
end
end
fit.a0 = a0;
fit.beta = beta_list;
fit.dev = dev;
fit.nulldev = nulldev;
fit.dfmat = dfmat;
fit.df = df';
fit.lambda = lam;
fit.npasses = nlp;
fit.jerr = jerr;
fit.dim = dd;
fit.class = 'multnet';
else
dd=[nvars, lmu];
if ninmax > 0
ca = ca(1:ninmax,:);
df = sum(abs(ca) > 0, 1);
ja = ia(1:ninmax);
[ja1,oja] = sort(ja);
beta = zeros(nvars, lmu);
beta (ja1, :) = ca(oja,:);
else
beta = zeros(nvars,lmu);
df = zeros(1,lmu);
end
if strcmp(family, 'binomial')
a0 = -a0;
fit.a0 = a0;
fit.beta = -beta; % sign flip so the coefficients refer to the second (target) class
fit.dev = dev;
fit.nulldev = nulldev;
fit.df = df';
fit.lambda = lam;
fit.npasses = nlp;
fit.jerr = jerr;
fit.dim = dd;
fit.class = 'lognet';
else
fit.a0 = a0;
fit.beta = beta;
fit.dev = rsq;
fit.nulldev = nulldev;
fit.df = df';
fit.lambda = lam;
fit.npasses = nlp;
fit.jerr = jerr;
fit.dim = dd;
fit.class = 'elnet';
end
end
%------------------------------------------------------------------
% End function glmnet
%------------------------------------------------------------------
function new_lam = fix_lam(lam)
new_lam = lam;
llam=log(lam);
new_lam(1)=exp(2*llam(2)-llam(3));
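% e.g. for a log-spaced path with lam(2)=0.5 and lam(3)=0.25 this gives
% new_lam(1)=exp(2*log(0.5)-log(0.25))=1, extending the grid by one step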
%------------------------------------------------------------------
% End private function fix_lam
%------------------------------------------------------------------
function output = err(n,maxit,pmax)
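% Decode glmnet's numeric error flag n into a message. For example, a
% (hypothetical) flag of n=-5 is the non-fatal warning that convergence
% was not reached for the 5th lambda value.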
if n==0
output.n=0;
output.msg='';
elseif n>0 %fatal error
if n<7777
msg='Memory allocation error; contact package maintainer';
elseif n==7777
msg='All used predictors have zero variance';
elseif (8000<n) && (n<9000)
msg=sprintf('Null probability for class %d < 1.0e-5', n-8000);
elseif (9000<n) && (n<10000)
msg=sprintf('Null probability for class %d > 1.0 - 1.0e-5', n-9000);
elseif n==10000
msg='All penalty factors are <= 0';
end
output.n=1;
output.msg=['in glmnet fortran code - ',msg];
elseif n<0 %non fatal error
if n > -10000
msg=sprintf('Convergence for %dth lambda value not reached after maxit=%d iterations; solutions for larger lambdas returned', -n, maxit);
elseif n < -10000
msg=sprintf('Number of nonzero coefficients along the path exceeds pmax=%d at %dth lambda value; solutions for larger lambdas returned', pmax, -n-10000);
end
output.n=-1;
output.msg=['from glmnet fortran code - ',msg];
end
%------------------------------------------------------------------
% End private function err
%------------------------------------------------------------------
|
github
|
lcnhappe/happe-master
|
glmnetPlot.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/glmnet/glmnetPlot.m
| 3,485 |
utf_8
|
25bef119d3957074e024d7afafff205f
|
function glmnetPlot( x, xvar, label )
%--------------------------------------------------------------------------
% glmnetPlot.m: plot coefficients from a "glmnet" object
%--------------------------------------------------------------------------
%
% DESCRIPTION:
% Produces a coefficient profile plot of the coefficient paths for a
% fitted "glmnet" object.
%
% USAGE:
% glmnetPlot(fit);
% glmnetPlot(fit, xvar);
% glmnetPlot(fit, xvar, label);
%
% INPUT ARGUMENTS:
% x fitted "glmnet" model.
% xvar What is on the X-axis. "norm" plots against the L1-norm of
% the coefficients, "lambda" against the log-lambda sequence,
% and "dev" against the percent deviance explained.
% label if TRUE, label the curves with variable sequence numbers.
%
% DETAILS:
% A coefficient profile plot is produced. If x is a multinomial model, a
% coefficient plot is produced for each class.
%
% LICENSE: GPL-2
%
% DATE: 14 Jul 2009
%
% AUTHORS:
% Algorithm was designed by Jerome Friedman, Trevor Hastie and Rob Tibshirani
% Fortran code was written by Jerome Friedman
% R wrapper (from which the MATLAB wrapper was adapted) was written by Trevor Hastie
% MATLAB wrapper was written and maintained by Hui Jiang, [email protected]
% Department of Statistics, Stanford University, Stanford, California, USA.
%
% REFERENCES:
% Friedman, J., Hastie, T. and Tibshirani, R. (2009)
% Regularization Paths for Generalized Linear Models via Coordinate Descent.
% Journal of Statistical Software, 33(1), 2010
%
% SEE ALSO:
% glmnet, glmnetSet, glmnetPrint, glmnetPredict and glmnetCoef methods.
%
% EXAMPLES:
% x=randn(100,20);
% y=randn(100,1);
% g2=randsample(2,100,true);
% g4=randsample(4,100,true);
% fit1=glmnet(x,y);
% glmnetPlot(fit1);
% glmnetPlot(fit1, 'lambda', true);
% fit3=glmnet(x,g4,'multinomial');
% glmnetPlot(fit3);
%
% DEVELOPMENT: 14 Jul 2009: Original version of glmnet.m written.
if nargin < 2
xvar = 'norm';
end
if nargin < 3
label = false;
end
if strcmp(x.class,'multnet')
beta=x.beta;
if strcmp(xvar,'norm')
norm = 0;
for i=1:length(beta);
which = nonzeroCoef(beta{i});
beta{i} = beta{i}(which,:);
norm = norm + sum(abs(beta{i}),1);
end
else
norm = 0;
end
dfmat=x.dfmat;
ncl=size(dfmat,1);
for i=1:ncl
plotCoef(beta{i},norm,x.lambda,dfmat(i,:),x.dev,label,xvar,'',sprintf('Coefficients: Class %d', i));
end
else
plotCoef(x.beta,[],x.lambda,x.df,x.dev,label,xvar,'','Coefficients');
end
%----------------------------------------------------------------
% End function glmnetPlot
%----------------------------------------------------------------
function plotCoef(beta,norm,lambda,df,dev,label,xvar,xlab,ylab)
which = nonzeroCoef(beta);
beta = beta(which,:);
if strcmp(xvar, 'norm')
if isempty(norm)
index = sum(abs(beta),1);
else
index = norm;
end
iname = 'L1 Norm';
elseif strcmp(xvar, 'lambda')
index=log(lambda);
iname='Log Lambda';
elseif strcmp(xvar, 'dev')
index=dev;
iname='Fraction Deviance Explained';
end
if isempty(xlab)
xlab = iname;
end
plot(index,transpose(beta));
xlabel(xlab);
ylabel(ylab);
%----------------------------------------------------------------
% End private function plotCoef
%----------------------------------------------------------------
|
github
|
lcnhappe/happe-master
|
glmnetPredict.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/glmnet/glmnetPredict.m
| 9,023 |
utf_8
|
fd2d04fce352a52a0c6616c95fc90bd1
|
function result = glmnetPredict(object, type, newx, s)
%--------------------------------------------------------------------------
% glmnetPredict.m: make predictions from a "glmnet" object.
%--------------------------------------------------------------------------
%
% DESCRIPTION:
% Similar to other predict methods, this function predicts fitted
% values, logits, coefficients and more from a fitted "glmnet" object.
%
% USAGE:
% glmnetPredict(object)
% glmnetPredict(object, type)
% glmnetPredict(object, type, newx)
% glmnetPredict(object, type, newx, s)
%
% INPUT ARGUMENTS:
% fit Fitted "glmnet" model object.
% type Type of prediction required. Type "link" gives the linear
% predictors for "binomial" or "multinomial" models; for
% "gaussian" models it gives the fitted values. Type "response"
% gives the fitted probabilities for "binomial" or
% "multinomial"; for "gaussian" type "response" is equivalent
% to type "link". Type "coefficients" computes the coefficients
% at the requested values for s. Note that for "binomial"
% models, results are returned only for the class corresponding
% to the second level of the factor response. Type "class"
% applies only to "binomial" or "multinomial" models, and
% produces the class label corresponding to the maximum
% probability. Type "nonzero" returns a list of the indices of
% the nonzero coefficients for each value of s.
% newx Matrix of new values for x at which predictions are to be
% made. Must be a matrix. This argument is not used for
% type 'coefficients' or 'nonzero'.
% s Value(s) of the penalty parameter lambda at which predictions
% are required. Default is the entire sequence used to create
% the model.
%
% DETAILS:
% The shape of the objects returned are different for "multinomial"
% objects. glmnetCoef(fit, ...) is equivalent to glmnetPredict(fit, "coefficients", ...)
%
% LICENSE: GPL-2
%
% DATE: 14 Jul 2009
%
% AUTHORS:
% Algorithm was designed by Jerome Friedman, Trevor Hastie and Rob Tibshirani
% Fortran code was written by Jerome Friedman
% R wrapper (from which the MATLAB wrapper was adapted) was written by Trevor Hastie
% MATLAB wrapper was written and maintained by Hui Jiang, [email protected]
% Department of Statistics, Stanford University, Stanford, California, USA.
%
% REFERENCES:
% Friedman, J., Hastie, T. and Tibshirani, R. (2009)
% Regularization Paths for Generalized Linear Models via Coordinate Descent.
% Journal of Statistical Software, 33(1), 2010
%
% SEE ALSO:
% glmnet, glmnetSet, glmnetPrint, glmnetPlot and glmnetCoef methods.
%
% EXAMPLES:
% x=randn(100,20);
% y=randn(100,1);
% g2=randsample(2,100,true);
% g4=randsample(4,100,true);
% fit1=glmnet(x,y);
% glmnetPredict(fit1,'link',x(1:5,:),[0.01,0.005]') % make predictions
% glmnetPredict(fit1,'coefficients')
% fit2=glmnet(x,g2,'binomial');
% glmnetPredict(fit2, 'response', x(2:5,:))
% glmnetPredict(fit2, 'nonzero')
% fit3=glmnet(x,g4,'multinomial');
% glmnetPredict(fit3, 'response', x(1:3,:), 0.01)
%
% DEVELOPMENT:
% 14 Jul 2009: Original version of glmnet.m written.
% 20 Oct 2009: Fixed a bug in binomial response, pointed out by Ramon
% Casanova from Wake Forest University.
% 26 Jan 2010: Fixed a bug in multinomial link and class, pointed out by
% Peter Rijnbeek from Erasmus University.
if nargin < 2
type = 'link';
end
if nargin < 3
newx = [];
end
if nargin < 4
s = object.lambda;
end
if strcmp(object.class, 'elnet')
a0=transpose(object.a0);
nbeta=[a0; object.beta];
if nargin == 4
lambda=object.lambda;
lamlist=lambda_interp(lambda,s);
nbeta=nbeta(:,lamlist.left).*repmat(lamlist.frac',size(nbeta,1),1) +nbeta(:,lamlist.right).*(1-repmat(lamlist.frac',size(nbeta,1),1));
end
if strcmp(type, 'coefficients')
result = nbeta;
elseif strcmp(type, 'link')
result = [ones(size(newx,1),1), newx] * nbeta;
elseif strcmp(type, 'response')
result = [ones(size(newx,1),1), newx] * nbeta;
elseif strcmp(type, 'nonzero')
result = nonzeroCoef(nbeta(2:size(nbeta,1),:), true);
else
error('Unrecognized type');
end
elseif strcmp(object.class, 'lognet')
a0=transpose(object.a0);
nbeta=[a0; object.beta]; % use the transposed intercepts, as in the elnet branch
if nargin == 4
lambda=object.lambda;
lamlist=lambda_interp(lambda,s);
nbeta=nbeta(:,lamlist.left).*repmat(lamlist.frac',size(nbeta,1),1) +nbeta(:,lamlist.right).*(1-repmat(lamlist.frac',size(nbeta,1),1));
end
%%% remember that although the fortran lognet makes predictions
%%% for the first class, we make predictions for the second class
%%% to avoid confusion with 0/1 responses.
%%% glmnet flipped the signs of the coefficients
if strcmp(type,'coefficients')
result = nbeta;
elseif strcmp(type,'nonzero')
result = nonzeroCoef(nbeta(2:size(nbeta,1),:), true);
else
nfit = [ones(size(newx,1),1), newx] * nbeta;
if strcmp(type,'response')
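% logistic link: p = 1 ./ (1 + exp(-eta))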
pp=exp(-nfit);
result = 1./(1+pp);
elseif strcmp(type,'link')
result = nfit;
elseif strcmp(type,'class')
result = (nfit > 0) * 2 + (nfit <= 0) * 1;
else
error('Unrecognized type');
end
end
elseif strcmp(object.class, 'multnet')
a0=object.a0;
nbeta=object.beta;
nclass=size(a0,1);
nlambda=length(s);
if nargin == 4
lambda=object.lambda;
lamlist=lambda_interp(lambda,s);
for i=1:nclass
kbeta=[a0(i,:); nbeta{i}];
kbeta=kbeta(:,lamlist.left).*repmat(lamlist.frac',size(kbeta,1),1) +kbeta(:,lamlist.right).*(1-repmat(lamlist.frac',size(kbeta,1),1)); % elementwise, as in the elnet branch
nbeta{i}=kbeta;
end
else
for i=1:nclass
nbeta{i} = [a0(i,:);nbeta{i}];
end
end
if strcmp(type, 'coefficients')
result = nbeta;
elseif strcmp(type, 'nonzero')
for i=1:nclass
result{i}=nonzeroCoef(nbeta{i}(2:size(nbeta{i},1),:),true);
end
else
npred=size(newx,1);
dp = zeros(nclass,nlambda,npred);
for i=1:nclass
fitk = [ones(size(newx,1),1), newx] * nbeta{i};
dp(i,:,:)=dp(i,:,:)+reshape(transpose(fitk),1,nlambda,npred);
end
if strcmp(type, 'response')
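% softmax over classes: p_k = exp(eta_k) ./ sum_j exp(eta_j)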
pp=exp(dp);
psum=sum(pp,1);
result = permute(pp./repmat(psum,nclass,1),[3,1,2]);
elseif strcmp(type, 'link')
result=permute(dp,[3,1,2]);
elseif strcmp(type, 'class')
dp=permute(dp,[3,1,2]);
result = [];
for i=1:size(dp,3)
result = [result, softmax(dp(:,:,i))];
end
else
error('Unrecognized type');
end
end
else
error('Unrecognized class');
end
%-------------------------------------------------------------
% End private function glmnetPredict
%-------------------------------------------------------------
function result = lambda_interp(lambda,s)
% lambda is the index sequence that is produced by the model
% s is the new vector at which evaluations are required.
% the value is a vector of left and right indices, and a vector of fractions.
% the new values are interpolated between the two using the fraction.
% Note: lambda decreases. You take:
% sfrac*left + (1-sfrac)*right
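% Worked example (hypothetical values): lambda=[1 0.5 0.1] and s=0.3 give
% left=2, right=3 and frac=0.5, so the interpolated coefficients are
% 0.5*beta(:,2) + 0.5*beta(:,3).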
if length(lambda)==1 % degenerate case of only one lambda
nums=length(s);
left=ones(nums,1);
right=left;
sfrac=ones(nums,1);
else
s(s > max(lambda)) = max(lambda);
s(s < min(lambda)) = min(lambda);
k=length(lambda);
sfrac =(lambda(1)-s)/(lambda(1) - lambda(k));
lambda = (lambda(1) - lambda)/(lambda(1) - lambda(k));
coord = interp1(lambda, 1:length(lambda), sfrac);
left = floor(coord);
right = ceil(coord);
sfrac=(sfrac-lambda(right))./(lambda(left) - lambda(right));
sfrac(left==right)=1;
end
result.left = left;
result.right = right;
result.frac = sfrac;
%-------------------------------------------------------------
% End private function lambda_interp
%-------------------------------------------------------------
function result = softmax(x, gap)
if nargin < 2
gap = false;
end
d = size(x);
maxdist = x(:, 1);
pclass = ones(d(1), 1);
for i =2:d(2)
l = x(:, i) > maxdist;
pclass(l) = i;
maxdist(l) = x(l, i);
end
if gap
% distance of each class score from the per-row maximum
x = abs(repmat(maxdist, 1, d(2)) - x);
% exclude the winning class itself, then take the row minimum:
% the gap to the closest competing class
x(sub2ind(d, (1:d(1))', pclass)) = Inf;
gaps = min(x, [], 2);
end
if gap
result = {pclass, gaps};
else
result = pclass;
end
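% Example: softmax([0.2 0.9 0.1; 0.5 0.1 0.4]) returns [2; 1], the
% per-row index of the largest entry.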
%-------------------------------------------------------------
% End private function softmax
%-------------------------------------------------------------
|
github
|
lcnhappe/happe-master
|
m2kml.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/misc/m2kml.m
| 1,807 |
utf_8
|
6ab3b7f8f1cac70d62e51ac3b881ffdf
|
% M2KML Converts GP prediction results to a KML file
%
% Input:
% input_file - Name of the .mat file containing GP results
% cellsize - Size of the cells in meters
% output_file - Name of the output file without the file extension!
% If output is not given, the name of the input file is used
% and the results are written to <input_name>.kmz
%
% Assumes that the .mat-file contains (at least) the following variables:
% Ef - Logarithm of the relative risk to be displayed
% X1 - Grid of cells. The size of the data-array is determined from this.
% xxii - Indexes of non-zero elements in the cell grid
%
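% Example (hypothetical file names):
%   m2kml('gp_results.mat', 5000, 'risk_map') % writes risk_map.kml and risk_map.kmz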
function m2kml(input_file,cellsize,output_file)
if nargin < 3
name = input_file(1:end-4);
zip_name = [name, '.kmz'];
output_file = [name, '.kml'];
else
zip_name = [output_file, '.kmz'];
output_file = [output_file, '.kml'];
end
use ge_toolbox
addpath /proj/bayes/software/jmjharti/maps
load(input_file)
% Form the data for output
data = zeros(size(X1));
data = data(:);
data(:) = NaN;
data(xxii) = exp(Ef);
if nargin < 2 || isempty(cellsize), cellsize = 5000; end % default cell size in meters
% cell-centre coordinates in the Finnish YKJ grid (cf. ge_imagesc_ykj below)
x=(3050000:cellsize:3750000-1)+cellsize;
y=(6600000:cellsize:7800000-1)+cellsize;
data = reshape(data,length(y),length(x));
% create kml code for polygon overlay
output = ge_imagesc_ykj(x, y, data, ...
'altitudeMode', 'clampToGround', ...
'transparency', 'ff');
% Write the KML string to output file
ge_output(output_file, [output]);
% Zip the KML file
zip(zip_name,output_file);
movefile([zip_name,'.zip'],zip_name);
|
github
|
lcnhappe/happe-master
|
mapcolor2.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/misc/mapcolor2.m
| 1,973 |
utf_8
|
808a8a3bca47cf7634126af39ec9c1a2
|
function map = mapcolor2(A, breaks)
%MAPCOLOR2 Create a blue-gray-red colormap.
% MAPCOLOR2(A, BREAKS), when A is a matrix and BREAKS a vector, returns a
% colormap that can be used as a parameter in COLORMAP function. BREAKS
% has to contain six break values for a 7-class colormap. The break
% values define the points at which the scheme changes color. The first
% three colors are blue, the middle one gray and the last three ones
% are red.
%
% Example: A = ones(100, 100);
% for i=1:10:100
% A(i:end, i:end) = A(i,i) + 5;
% end
% A = A + rand(100, 100);
% map = mapcolor2(A, [10 15 20 25 30 35]);
% pcolor(A), shading flat
% colormap(map), colorbar
%
% Copyright (c) 2006 Markus Siivola
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
% there must be 6 break values for seven colors
if length(breaks) ~= 6
error('Break point vector must have 6 elements.');
end
% sort break values in ascending order if necessary
if ~issorted(breaks)
breaks = sort(breaks);
end
% check the value range consumed by the data
rng = [min(A(:)) max(A(:))];
if any(breaks > rng(2)) || any(breaks < rng(1))
error('Break point out of value range');
end
% a color scheme from blue through gray to red
colors = flipud([ 140 0 0;
194 80 68;
204 143 151;
191 191 191;
128 142 207;
70 84 158;
7 39 115 ] / 255);
% create a colormap with 256 colors
map = []; n = 256; ibeg = 1;
for i = 1:6
iend = round(n * (breaks(i) - rng(1)) / (rng(2) - rng(1)));
map = [map; repmat(colors(i,:), [iend-ibeg+1 1])];
ibeg = iend+1;
end
map = [map; repmat(colors(7,:), [n - size(map, 1) 1])];
|
github
|
lcnhappe/happe-master
|
test_regression_sparse1.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/xunit/test_regression_sparse1.m
| 2,394 |
utf_8
|
ed030b7ce8f86b52cfc2037d0c1d216d
|
function test_suite = test_regression_sparse1
% Run specific demo and save values for comparison.
%
% See also
% TEST_ALL, DEMO_REGRESSION_SPARSE1
initTestSuite;
function testDemo
% Set the random number stream so that a failure isn't due to randomness.
% Run the demo & save test values.
prevstream=setrandstream(0);
disp('Running: demo_regression_sparse1')
demo_regression_sparse1
path = which('test_regression_sparse1');
path = strrep(path,'test_regression_sparse1.m', 'testValues');
if ~(exist(path, 'dir') == 7)
mkdir(path)
end
path = strcat(path, '/testRegression_sparse1');
save(path, 'Eft_fic', 'Eft_pic', 'Eft_var', 'Eft_dtc', 'Eft_cs');
% Set back initial random stream
setrandstream(prevstream);
drawnow;clear;close all
% Compare test values to real values.
function testPredictionsCS
values.real = load('realValuesRegression_sparse1', 'Eft_cs');
values.test = load(strrep(which('test_regression_sparse1.m'), 'test_regression_sparse1.m', 'testValues/testRegression_sparse1'), 'Eft_cs');
assertElementsAlmostEqual((values.real.Eft_cs), (values.test.Eft_cs), 'absolute', 0.1);
function testPredictionsFIC
values.real = load('realValuesRegression_sparse1', 'Eft_fic');
values.test = load(strrep(which('test_regression_sparse1.m'), 'test_regression_sparse1.m', 'testValues/testRegression_sparse1'), 'Eft_fic');
assertElementsAlmostEqual((values.real.Eft_fic), (values.test.Eft_fic), 'absolute', 0.1);
function testPredictionsPIC
values.real = load('realValuesRegression_sparse1', 'Eft_pic');
values.test = load(strrep(which('test_regression_sparse1.m'), 'test_regression_sparse1.m', 'testValues/testRegression_sparse1'), 'Eft_pic');
assertElementsAlmostEqual((values.real.Eft_pic), (values.test.Eft_pic), 'absolute', 0.1);
function testPredictionsVAR
values.real = load('realValuesRegression_sparse1', 'Eft_var');
values.test = load(strrep(which('test_regression_sparse1.m'), 'test_regression_sparse1.m', 'testValues/testRegression_sparse1'), 'Eft_var');
assertElementsAlmostEqual((values.real.Eft_var), (values.test.Eft_var), 'absolute', 0.1);
function testPredictionsDTC
values.real = load('realValuesRegression_sparse1', 'Eft_dtc');
values.test = load(strrep(which('test_regression_sparse1.m'), 'test_regression_sparse1.m', 'testValues/testRegression_sparse1'), 'Eft_dtc');
assertElementsAlmostEqual((values.real.Eft_dtc), (values.test.Eft_dtc), 'absolute', 0.1);
|
github
|
lcnhappe/happe-master
|
test_multiclass.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/xunit/test_multiclass.m
| 1,348 |
utf_8
|
6de4245c71740a6340e2cd4852cad0e7
|
function test_suite = test_multiclass
% Run specific demo and save values for comparison.
%
% See also
% TEST_ALL, DEMO_MULTICLASS
initTestSuite;
function testDemo
% Set the random number stream so that a test failure isn't due to randomness.
% Run demo & save test values.
prevstream=setrandstream(0);
disp('Running: demo_multiclass')
demo_multiclass
Eft=Eft(1:100,1:3);
Varft=Varft(1:3,1:3,1:100);
Covft=Covft(1:3,1:3,1:100);
path = which('test_multiclass.m');
path = strrep(path,'test_multiclass.m', 'testValues');
if ~(exist(path, 'dir') == 7)
mkdir(path)
end
path = strcat(path, '/testMulticlass');
save(path,'Eft','Varft','Covft');
% Set back initial random stream
setrandstream(prevstream);
drawnow;clear;close all
% Compare test values to real values.
function testPredictions
values.real = load('realValuesMulticlass.mat');
values.test = load(strrep(which('test_multiclass.m'), 'test_multiclass.m', 'testValues/testMulticlass.mat'));
assertElementsAlmostEqual(mean(values.real.Eft), mean(values.test.Eft), 'relative', 0.01);
assertElementsAlmostEqual(mean(values.real.Varft), mean(values.test.Varft), 'relative', 0.01);
assertElementsAlmostEqual(mean(values.real.Covft), mean(values.test.Covft), 'relative', 0.01);
|
github
|
lcnhappe/happe-master
|
test_regression_additive2.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/xunit/test_regression_additive2.m
| 1,200 |
utf_8
|
5cfacab33d356c758648eb98795b4b25
|
function test_suite = test_regression_additive2
% Run specific demo and save values for comparison.
%
% See also
% TEST_ALL, DEMO_REGRESSION_ADDITIVE2
initTestSuite;
function testDemo
% Set the random number stream so that a test failure isn't due to randomness.
% Run demo & save test values.
prevstream=setrandstream(0);
disp('Running: demo_regression_additive2')
demo_regression_additive2
path = which('test_regression_additive2.m');
path = strrep(path,'test_regression_additive2.m', 'testValues');
if ~(exist(path, 'dir') == 7)
mkdir(path)
end
path = strcat(path, '/testRegression_additive2');
save(path, 'Eft_map');
% Set back initial random stream
setrandstream(prevstream);
drawnow;clear;close all
% Compare test values to real values.
function testNeuralNetworkCFPrediction
values.real = load('realValuesRegression_additive2.mat','Eft_map');
values.test = load(strrep(which('test_regression_additive2.m'), 'test_regression_additive2.m', 'testValues/testRegression_additive2.mat'),'Eft_map');
assertElementsAlmostEqual(mean(values.real.Eft_map), mean(values.test.Eft_map), 'relative', 0.05);
|
github
|
lcnhappe/happe-master
|
test_regression_meanf.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/xunit/test_regression_meanf.m
| 1,003 |
utf_8
|
1ad49d761a9849fba70028e4a0deae3d
|
function test_suite = test_regression_meanf
% Run specific demo and save values for comparison.
%
% See also
% TEST_ALL, DEMO_REGRESSION_MEANF
initTestSuite;
function testDemo
% Set the random number stream so that a test failure isn't due to randomness.
% Run demo & save test values.
prevstream=setrandstream(0);
disp('Running: demo_regression_meanf')
demo_regression_meanf
path = which('test_regression_meanf.m');
path = strrep(path,'test_regression_meanf.m', 'testValues');
if ~(exist(path, 'dir') == 7)
mkdir(path)
end
path = strcat(path, '/testRegression_meanf');
save(path, 'Eft');
% Set back initial random stream
setrandstream(prevstream);
drawnow;clear;close all
function testPredictions
values.real = load('realValuesRegression_meanf.mat', 'Eft');
values.test = load(strrep(which('test_regression_meanf.m'), 'test_regression_meanf.m', 'testValues/testRegression_meanf.mat'), 'Eft');
assertElementsAlmostEqual(mean(values.real.Eft), mean(values.test.Eft), 'relative', 0.10);
|
github
|
lcnhappe/happe-master
|
test_regression_ppcs.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/xunit/test_regression_ppcs.m
| 1,373 |
utf_8
|
7fda1165ed379fa8accab66db375fd9d
|
function test_suite = test_regression_ppcs
% Run specific demo and save values for comparison.
%
% See also
% TEST_ALL, DEMO_REGRESSION_PPCS
initTestSuite;
function testDemo
% Set the random number stream so that a failure isn't due to randomness.
% Run the demo & save test values.
prevstream=setrandstream(0);
disp('Running: demo_regression_ppcs')
demo_regression_ppcs
K = K(1:50, 1:50);
Ef = Ef(1:100);
path = which('test_regression_ppcs.m');
path = strrep(path,'test_regression_ppcs.m', 'testValues');
if ~(exist(path, 'dir') == 7)
mkdir(path)
end
path = strcat(path, '/testRegression_ppcs');
save(path, 'K', 'Ef')
% Set back initial random stream
setrandstream(prevstream);
drawnow;clear;close all
% Compare test values to real values.
function testCovarianceMatrix
values.real = load('realValuesRegression_ppcs.mat', 'K');
values.test = load(strrep(which('test_regression_ppcs.m'), 'test_regression_ppcs.m', 'testValues/testRegression_ppcs.mat'), 'K');
assertElementsAlmostEqual(mean(full(values.real.K)), mean(full(values.test.K)), 'relative', 0.1)
function testPrediction
values.real = load('realValuesRegression_ppcs.mat', 'Ef');
values.test = load(strrep(which('test_regression_ppcs.m'), 'test_regression_ppcs.m', 'testValues/testRegression_ppcs.mat'), 'Ef');
assertElementsAlmostEqual(mean(values.real.Ef), mean(values.test.Ef), 'relative', 0.1);
|
github
|
lcnhappe/happe-master
|
test_spatial1.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/xunit/test_spatial1.m
| 1,460 |
utf_8
|
51de1c938a75607200183816de226e34
|
function test_suite = test_spatial1
% Run specific demo and save values for comparison.
%
% See also
% TEST_ALL, DEMO_SPATIAL1
initTestSuite;
function testDemo
% Set the random number stream so that a failure isn't due to randomness.
% Run the demo & save test values.
prevstream=setrandstream(0);
disp('Running: demo_spatial1')
demo_spatial1
Ef = Ef(1:100);
Varf = Varf(1:100);
path = which('test_spatial1.m');
path = strrep(path,'test_spatial1.m', 'testValues');
if ~(exist(path, 'dir') == 7)
mkdir(path)
end
path = strcat(path, '/testSpatial1');
save(path, 'Elth', 'Elth2', 'Ef', 'Varf');
% Set back initial random stream
setrandstream(prevstream);
% Compare test values to real values.
function testEstimatesIA
values.real = load('realValuesSpatial1.mat', 'Elth', 'Elth2');
values.test = load(strrep(which('test_spatial1.m'), 'test_spatial1.m', 'testValues/testSpatial1.mat'), 'Elth', 'Elth2');
assertElementsAlmostEqual(values.real.Elth, values.test.Elth, 'relative', 0.1);
assertElementsAlmostEqual(values.real.Elth2, values.test.Elth2, 'relative', 0.1);
function testPredictionIA
values.real = load('realValuesSpatial1.mat', 'Ef', 'Varf');
values.test = load(strrep(which('test_spatial1.m'), 'test_spatial1.m', 'testValues/testSpatial1.mat'), 'Ef', 'Varf');
assertElementsAlmostEqual(mean(values.real.Ef), mean(values.test.Ef), 'relative', 0.1);
assertElementsAlmostEqual(mean(values.real.Varf), mean(values.test.Varf), 'relative', 0.1);
|
github
|
lcnhappe/happe-master
|
test_periodic.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/xunit/test_periodic.m
| 1,906 |
utf_8
|
d63046190abe59ab1b11667e35546492
|
function test_suite = test_periodic
% Run specific demo and save values for comparison.
%
% See also
% TEST_ALL, DEMO_PERIODIC
initTestSuite;
function testDemo
% Set random number stream so that a test failure isn't due to randomness.
% Run demo & save test values.
prevstream=setrandstream(0);
disp('Running: demo_periodic')
demo_periodic
path = which('test_periodic.m');
path = strrep(path,'test_periodic.m', 'testValues');
if ~(exist(path, 'dir') == 7)
mkdir(path)
end
path = strcat(path, '/testPeriodic');
save(path, 'Eft_full1', 'Varft_full1', 'Eft_full2', 'Varft_full2', ...
'Eft_full', 'Varft_full');
% Set back initial random stream
setrandstream(prevstream);
drawnow;clear;close all
% Compare test values to real values.
function testPredictionsMaunaLoa
values.real = load('realValuesPeriodic.mat', 'Eft_full1', 'Eft_full2','Varft_full1','Varft_full2');
values.test = load(strrep(which('test_periodic.m'), 'test_periodic.m', 'testValues/testPeriodic.mat'), 'Eft_full1', 'Eft_full2','Varft_full1','Varft_full2');
assertElementsAlmostEqual(mean(values.real.Eft_full1), mean(values.test.Eft_full1), 'relative', 0.05);
assertElementsAlmostEqual(mean(values.real.Eft_full2), mean(values.test.Eft_full2), 'relative', 0.05);
assertElementsAlmostEqual(mean(values.real.Varft_full1), mean(values.test.Varft_full1), 'relative', 0.05);
assertElementsAlmostEqual(mean(values.real.Varft_full2), mean(values.test.Varft_full2), 'relative', 0.05);
function testPredictionsDrowning
values.real = load('realValuesPeriodic.mat', 'Eft_full', 'Varft_full');
values.test = load(strrep(which('test_periodic.m'), 'test_periodic.m', 'testValues/testPeriodic.mat'), 'Eft_full', 'Varft_full');
assertElementsAlmostEqual(mean(values.real.Eft_full), mean(values.test.Eft_full), 'relative', 0.05);
assertElementsAlmostEqual(mean(values.real.Varft_full), mean(values.test.Varft_full), 'relative', 0.05);
|
github
|
lcnhappe/happe-master
|
test_regression_additive1.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/xunit/test_regression_additive1.m
| 2,344 |
utf_8
|
346daba327586ff75f7c9b113cd9224b
|
function test_suite = test_regression_additive1
% Run specific demo and save values for comparison.
%
% See also
% TEST_ALL, DEMO_REGRESSION_ADDITIVE1
initTestSuite;
function testDemo
% Set random number stream so that a test failure isn't due to randomness.
% Run demo & save test values.
prevstream=setrandstream(0);
disp('Running: demo_regression_additive1')
demo_regression_additive1
path = which('test_regression_additive1.m');
path = strrep(path,'test_regression_additive1.m', 'testValues');
if ~(exist(path, 'dir') == 7)
mkdir(path)
end
path = strcat(path, '/testRegression_additive1');
save(path, 'Eft_fic', 'Varft_fic', 'Eft_pic', 'Varft_pic', ...
'Eft_csfic', 'Varft_csfic');
% Set back initial random stream
setrandstream(prevstream);
drawnow;clear;close all
% Compare test values to real values.
function testPredictionsFIC
values.real = load('realValuesRegression_additive1.mat', 'Eft_fic', 'Varft_fic');
values.test = load(strrep(which('test_regression_additive1.m'), 'test_regression_additive1.m', 'testValues/testRegression_additive1.mat'), 'Eft_fic', 'Varft_fic');
assertElementsAlmostEqual((values.real.Eft_fic), (values.test.Eft_fic), 'absolute', 0.1);
assertElementsAlmostEqual(mean(values.real.Varft_fic), mean(values.test.Varft_fic), 'absolute', 0.1);
function testPredictionsPIC
values.real = load('realValuesRegression_additive1.mat', 'Eft_pic', 'Varft_pic');
values.test = load(strrep(which('test_regression_additive1.m'), 'test_regression_additive1.m', 'testValues/testRegression_additive1.mat'), 'Eft_pic', 'Varft_pic');
assertElementsAlmostEqual((values.real.Eft_pic), (values.test.Eft_pic), 'absolute', 0.1);
assertElementsAlmostEqual((values.real.Varft_pic), (values.test.Varft_pic), 'absolute', 0.1);
function testPredictionsSparse
values.real = load('realValuesRegression_additive1.mat', 'Eft_csfic', 'Varft_csfic');
values.test = load(strrep(which('test_regression_additive1.m'), 'test_regression_additive1.m', 'testValues/testRegression_additive1.mat'), 'Eft_csfic', 'Varft_csfic');
assertElementsAlmostEqual((values.real.Eft_csfic), (values.test.Eft_csfic), 'absolute', 0.1);
assertElementsAlmostEqual((values.real.Varft_csfic), (values.test.Varft_csfic), 'absolute', 0.1);
|
github
|
lcnhappe/happe-master
|
test_survival_weibull.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/xunit/test_survival_weibull.m
| 1,382 |
utf_8
|
3b33524a51abd8435e5757f2ce935c4e
|
function test_suite = test_survival_weibull
% Run specific demo and save values for comparison.
%
% See also
% TEST_ALL, DEMO_SURVIVAL_WEIBULL
% Copyright (c) 2011-2012 Ville Tolvanen
initTestSuite;
function testDemo
% Set random number stream so that a failure isn't due to randomness.
% Run demo & save test values.
prevstream=setrandstream(0);
disp('Running: demo_survival_weibull')
demo_survival_weibull;
path = which('test_survival_weibull.m');
path = strrep(path,'test_survival_weibull.m', 'testValues');
if ~(exist(path, 'dir') == 7)
mkdir(path)
end
path = strcat(path, '/testSurvival_weibull');
save(path, 'Ef1', 'Ef2', 'Varf1', 'Varf2');
% Set back initial random stream
setrandstream(prevstream);
drawnow;clear;close all
% Compare test values to real values.
function testPredictionsWeibull
values.real = load('realValuesSurvival_weibull', 'Ef1', 'Varf1', 'Ef2', 'Varf2');
values.test = load(strrep(which('test_survival_weibull.m'), 'test_survival_weibull.m', 'testValues/testSurvival_weibull'), 'Ef1', 'Varf1', 'Ef2', 'Varf2');
assertElementsAlmostEqual(values.real.Ef1, values.test.Ef1, 'relative', 0.10);
assertElementsAlmostEqual(values.real.Ef2, values.test.Ef2, 'relative', 0.10);
assertElementsAlmostEqual(values.real.Varf1, values.test.Varf1, 'relative', 0.10);
assertElementsAlmostEqual(values.real.Varf2, values.test.Varf2, 'relative', 0.10);
|
github
|
lcnhappe/happe-master
|
test_zinegbin.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/xunit/test_zinegbin.m
| 1,120 |
utf_8
|
d8a5a32418b9e85ce3b6a3de126a3c46
|
function test_suite = test_zinegbin
% Run specific demo and save values for comparison.
%
% See also
% TEST_ALL, DEMO_ZINEGBIN
% Copyright (c) 2011-2012 Ville Tolvanen
initTestSuite;
function testDemo
% Set random number stream so that a failure isn't due to randomness.
% Run demo & save test values.
prevstream=setrandstream(0);
disp('Running: demo_zinegbin')
demo_zinegbin;
path = which('test_zinegbin.m');
path = strrep(path,'test_zinegbin.m', 'testValues');
if ~(exist(path, 'dir') == 7)
mkdir(path)
end
path = strcat(path, '/testZinegbin');
Ef=Ef(1:100); Varf=diag(Varf(1:100,1:100));
save(path, 'Ef', 'Varf');
% Set back initial random stream
setrandstream(prevstream);
drawnow;clear;close all
% Compare test values to real values.
function testPredictionsZinegbin
values.real = load('realValuesZinegbin', 'Ef', 'Varf');
values.test = load(strrep(which('test_zinegbin.m'), 'test_zinegbin.m', 'testValues/testZinegbin'), 'Ef', 'Varf');
assertElementsAlmostEqual(values.real.Ef, values.test.Ef, 'relative', 0.10);
assertElementsAlmostEqual(values.real.Varf, values.test.Varf, 'relative', 0.10);
|
github
|
lcnhappe/happe-master
|
test_regression_sparse2.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/xunit/test_regression_sparse2.m
| 1,820 |
utf_8
|
c3167a5e33d483573669205845efb0b6
|
function test_suite = test_regression_sparse2
% Run specific demo and save values for comparison.
%
% See also
% TEST_ALL, DEMO_REGRESSION_SPARSE2
initTestSuite;
function testDemo
% Set random number stream so that a failure isn't due to randomness.
% Run demo & save test values.
prevstream=setrandstream(0);
disp('Running: demo_regression_sparse2')
demo_regression_sparse2
Eft_full = Eft_full(1:100);
Eft_var = Eft_var(1:100);
Varft_full = Varft_full(1:100);
Varft_var = Varft_var(1:100);
path = which('test_regression_sparse2.m');
path = strrep(path,'test_regression_sparse2.m', 'testValues');
if ~(exist(path, 'dir') == 7)
mkdir(path)
end
path = strcat(path, '/testRegression_sparse2');
save(path, 'Eft_full', 'Eft_var', 'Varft_full', 'Varft_var');
% Set back initial random stream
setrandstream(prevstream);
drawnow;clear;close all
% Compare test values to real values.
function testPredictionsFull
values.real = load('realValuesRegression_sparse2.mat', 'Eft_full', 'Varft_full');
values.test = load(strrep(which('test_regression_sparse2.m'), 'test_regression_sparse2.m', 'testValues/testRegression_sparse2.mat'),'Eft_full', 'Varft_full');
assertElementsAlmostEqual(mean(values.real.Eft_full), mean(values.test.Eft_full), 'relative', 0.1);
assertElementsAlmostEqual(mean(values.real.Varft_full), mean(values.test.Varft_full), 'relative', 0.1);
function testPredictionsVar
values.real = load('realValuesRegression_sparse2.mat', 'Eft_var', 'Varft_var');
values.test = load(strrep(which('test_regression_sparse2.m'), 'test_regression_sparse2.m', 'testValues/testRegression_sparse2.mat'), 'Eft_var', 'Varft_var');
assertElementsAlmostEqual((values.real.Eft_var), (values.test.Eft_var), 'relative', 0.1);
assertElementsAlmostEqual((values.real.Varft_var), (values.test.Varft_var), 'relative', 0.1);
|
github
|
lcnhappe/happe-master
|
test_spatial2.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/xunit/test_spatial2.m
| 1,396 |
utf_8
|
82431e99b626dd45f3691e828f9f209b
|
function test_suite = test_spatial2
% Run specific demo and save values for comparison.
%
% See also
% TEST_ALL, DEMO_SPATIAL2
initTestSuite;
function testDemo
% Set random number stream so that a failure isn't due to randomness.
% Run demo & save test values.
prevstream=setrandstream(0);
disp('Running: demo_spatial2')
demo_spatial2
Ef = Ef(1:100);
Varf = Varf(1:100);
C = C(1:50, 1:50);
path = which('test_spatial2.m');
path = strrep(path,'test_spatial2.m', 'testValues');
if ~(exist(path, 'dir') == 7)
mkdir(path)
end
path = strcat(path, '/testSpatial2');
save(path, 'Ef', 'Varf', 'C');
% Set back initial random stream
setrandstream(prevstream);
drawnow;clear;close all
% Compare test values to real values.
function testPredictionsEP
values.real = load('realValuesSpatial2.mat', 'Ef', 'Varf');
values.test = load(strrep(which('test_spatial2.m'), 'test_spatial2.m', 'testValues/testSpatial2.mat'), 'Ef', 'Varf');
assertElementsAlmostEqual(mean(values.test.Ef), mean(values.real.Ef), 'relative', 0.1);
assertElementsAlmostEqual(mean(values.test.Varf), mean(values.real.Varf), 'relative', 0.1);
function testCovarianceMatrix
values.real = load('realValuesSpatial2.mat', 'C');
values.test = load(strrep(which('test_spatial2.m'), 'test_spatial2.m', 'testValues/testSpatial2.mat'), 'C');
assertElementsAlmostEqual(mean(values.real.C), mean(values.test.C), 'relative', 0.1);
|
github
|
lcnhappe/happe-master
|
test_regression_hier.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/xunit/test_regression_hier.m
| 992 |
utf_8
|
ed6062874abff9cbf8ae1925856aff6d
|
function test_suite = test_regression_hier
% Run specific demo and save values for comparison.
%
% See also
% TEST_ALL, DEMO_REGRESSION_HIER
initTestSuite;
% Set random number stream so that a test failure isn't due to randomness.
% Run demo & save test values.
function testDemo
prevstream=setrandstream(0);
disp('Running: demo_regression_hier')
demo_regression_hier
path = which('test_regression_hier.m');
path = strrep(path,'test_regression_hier.m', 'testValues');
if ~(exist(path, 'dir') == 7)
mkdir(path)
end
path = strcat(path, '/testRegression_hier');
save(path, 'Eff');
% Set back initial random stream
setrandstream(prevstream);
drawnow;clear;close all
function testPredictionMissingData
values.real = load('realValuesRegression_hier', 'Eff');
values.test = load(strrep(which('test_regression_hier.m'), 'test_regression_hier.m', 'testValues/testRegression_hier'), 'Eff');
assertVectorsAlmostEqual(mean(values.real.Eff), mean(values.test.Eff), 'relative', 0.01);
|
github
|
lcnhappe/happe-master
|
test_neuralnetcov.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/xunit/test_neuralnetcov.m
| 1,432 |
utf_8
|
cf9e6362388bd59b2da5c1760ddf809c
|
function test_suite = test_neuralnetcov
% Run specific demo and save values for comparison.
%
% See also
% TEST_ALL, DEMO_NEURALNETCOV
initTestSuite;
function testDemo
% Set random number stream so that a failure isn't due to randomness.
% Run demo & save test values.
prevstream=setrandstream(0);
disp('Running: demo_neuralnetcov')
demo_neuralnetcov
path = which('test_neuralnetcov.m');
path = strrep(path,'test_neuralnetcov.m', 'testValues');
if ~(exist(path, 'dir') == 7)
mkdir(path)
end
path = strcat(path, '/testNeuralnetcov');
save(path, 'Eft_map', 'Varft_map', 'Eft_map2', 'Varft_map2');
% Set back initial random stream
setrandstream(prevstream);
drawnow;clear;close all
% Compare test values to real values.
function testPredictions
values.real = load('realValuesNeuralnetcov.mat','Eft_map', 'Eft_map2','Varft_map','Varft_map2');
values.test = load(strrep(which('test_neuralnetcov.m'), 'test_neuralnetcov.m', 'testValues/testNeuralnetcov.mat'), 'Eft_map', 'Eft_map2','Varft_map','Varft_map2');
assertElementsAlmostEqual(mean(values.real.Eft_map), mean(values.test.Eft_map), 'relative', 0.05);
assertElementsAlmostEqual(mean(values.real.Eft_map2), mean(values.test.Eft_map2), 'relative', 0.05);
assertElementsAlmostEqual(mean(values.real.Varft_map), mean(values.test.Varft_map), 'relative', 0.05);
assertElementsAlmostEqual(mean(values.real.Varft_map2), mean(values.test.Varft_map2), 'relative', 0.05);
|
github
|
lcnhappe/happe-master
|
test_multinom.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/xunit/test_multinom.m
| 1,082 |
utf_8
|
f0950b45a324a1f3ca22c5691fac21c3
|
function test_suite = test_multinom
% Run specific demo and save values for comparison.
%
% See also
% TEST_ALL, DEMO_MULTINOM
% Copyright (c) 2011-2012 Ville Tolvanen
initTestSuite;
function testDemo
% Set random number stream so that a failure isn't due to randomness.
% Run demo & save test values.
prevstream=setrandstream(0);
disp('Running: demo_multinom')
demo_multinom;
path = which('test_multinom.m');
path = strrep(path,'test_multinom.m', 'testValues');
if ~(exist(path, 'dir') == 7)
mkdir(path)
end
path = strcat(path, '/testMultinom');
save(path, 'Eft', 'pyt2');
% Set back initial random stream
setrandstream(prevstream);
drawnow;clear;close all
% Compare test values to real values.
function testPredictionsMultinom
values.real = load('realValuesMultinom', 'Eft', 'pyt2');
values.test = load(strrep(which('test_multinom.m'), 'test_multinom.m', 'testValues/testMultinom'), 'Eft', 'pyt2');
assertElementsAlmostEqual(values.real.Eft, values.test.Eft, 'absolute', 0.10);
assertElementsAlmostEqual(values.real.pyt2, values.test.pyt2, 'absolute', 0.10);
|
github
|
lcnhappe/happe-master
|
test_regression_robust.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/xunit/test_regression_robust.m
| 1,567 |
utf_8
|
941d7778d6b1aeaf4b1a7b70a1639d28
|
function test_suite = test_regression_robust
% Run specific demo and save values for comparison.
%
% See also
% TEST_ALL, DEMO_REGRESSION_ROBUST
initTestSuite;
function testDemo
% Set random number stream so that a failure isn't due to randomness.
% Run demo & save test values.
prevstream=setrandstream(0);
disp('Running: demo_regression_robust')
demo_regression_robust
path = which('test_regression_robust.m');
path = strrep(path,'test_regression_robust.m', 'testValues');
if ~(exist(path, 'dir') == 7)
mkdir(path)
end
path = strcat(path, '/testRegression_robust');
w=gp_pak(rr);
save(path, 'Eft', 'Varft', 'w')
% Set back initial random stream
setrandstream(prevstream);
drawnow;clear;close all
% Compare test values to real values.
function testPredictionEP
values.real = load('realValuesRegression_robust', 'Eft', 'Varft');
values.test = load(strrep(which('test_regression_robust.m'), 'test_regression_robust.m', 'testValues/testRegression_robust'), 'Eft', 'Varft');
assertElementsAlmostEqual(mean(values.real.Eft), mean(values.test.Eft), 'relative', 0.05);
assertElementsAlmostEqual(mean(values.real.Varft), mean(values.test.Varft), 'relative', 0.05);
function testMCMCSamples
values.real = load('realValuesRegression_robust', 'w');
values.test = load(strrep(which('test_regression_robust.m'), 'test_regression_robust.m', 'testValues/testRegression_robust'), 'w');
assertElementsAlmostEqual(mean(values.real.w), mean(values.test.w), 'relative', 0.01);
|
github
|
lcnhappe/happe-master
|
test_survival_coxph.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/xunit/test_survival_coxph.m
| 1,358 |
utf_8
|
98ec11c9517f7656fb584f0cf36596e3
|
function test_suite = test_survival_coxph
% Run specific demo and save values for comparison.
%
% See also
% TEST_ALL, DEMO_SURVIVAL_COXPH
% Copyright (c) 2011-2012 Ville Tolvanen
initTestSuite;
function testDemo
% Set random number stream so that a failure isn't due to randomness.
% Run demo & save test values.
prevstream=setrandstream(0);
disp('Running: demo_survival_coxph')
demo_survival_coxph;
path = which('test_survival_coxph.m');
path = strrep(path,'test_survival_coxph.m', 'testValues');
if ~(exist(path, 'dir') == 7)
mkdir(path)
end
path = strcat(path, '/testSurvival_coxph');
save(path, 'Ef1', 'Ef2', 'Varf1', 'Varf2');
% Set back initial random stream
setrandstream(prevstream);
drawnow;clear;close all
% Compare test values to real values.
function testPredictionsCoxph
values.real = load('realValuesSurvival_coxph', 'Ef1', 'Varf1', 'Ef2', 'Varf2');
values.test = load(strrep(which('test_survival_coxph.m'), 'test_survival_coxph.m', 'testValues/testSurvival_coxph'), 'Ef1', 'Varf1', 'Ef2', 'Varf2');
assertElementsAlmostEqual(values.real.Ef1, values.test.Ef1, 'absolute', 0.10);
assertElementsAlmostEqual(values.real.Ef2, values.test.Ef2, 'absolute', 0.10);
assertElementsAlmostEqual(values.real.Varf1, values.test.Varf1, 'absolute', 0.10);
assertElementsAlmostEqual(values.real.Varf2, values.test.Varf2, 'absolute', 0.10);
|
github
|
lcnhappe/happe-master
|
fminlbfgs.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/optim/fminlbfgs.m
| 35,278 |
utf_8
|
bbb5921f455647a8404163243c3fff60
|
function [x,fval,exitflag,output,grad]=fminlbfgs(funfcn,x_init,optim)
%FMINLBFGS finds a local minimum of a function of several variables.
%   This optimizer was developed for image registration
%   methods with large numbers of unknown variables.
%
% Description
% Optimization methods supported:
% - Quasi Newton Broyden-Fletcher-Goldfarb-Shanno (BFGS)
% - Limited memory BFGS (L-BFGS)
% - Steepest Gradient Descent optimization.
%
% [X,FVAL,EXITFLAG,OUTPUT,GRAD] = FMINLBFGS(FUN,X0,OPTIONS)
%
% Inputs,
% FUN Function handle or string which is minimized,
%             returning an error value and optionally the
%             error gradient.
% X0 Initial values of unknowns can be a scalar, vector
% or matrix
% OPTIONS Structure with optimizer options, made by a struct or
% optimset. (optimset does not support all input options)
% Outputs,
% X The found location (values) which minimize the function.
% FVAL The minimum found
% EXITFLAG Gives value, which explains why the minimizer stopped
% OUTPUT Structure with all important output values and parameters
% GRAD The gradient at this location
%
% Extended description of inputs/outputs
% OPTIONS,
% GoalsExactAchieve - If set to 0, a line search method is
%                          used which needs only a few function calls
%                          to do a good line search. When set to 1, a
% normal line search method with Wolfe
% conditions is used (default).
% GradConstr - Set this variable to 'on' if gradient
% calls are cpu-expensive. If 'off' more
% gradient calls are used and less
% function calls (default).
% HessUpdate - If set to 'bfgs', Broyden-Fletcher-Goldfarb-Shanno
%                  optimization is used (default); when the
%                  number of unknowns is larger than 3000
%                  the function will switch to limited-
%                  memory BFGS, or if you set it to
%                  'lbfgs'. When set to 'steepdesc',
%                  steepest descent optimization is used.
% StoreN - Number of iterations used to approximate
%              the Hessian in L-BFGS; 20 is the default. A
%              lower value may work better with non-
%              smooth functions, because then the
%              Hessian is only valid for a specific
%              position. A higher value is recommended
%              with quadratic functions.
% GradObj - Set to 'on' if gradient available otherwise
% finite difference is used.
% Display - Level of display. 'off' displays no output;
% 'plot' displays all linesearch results
% in figures. 'iter' displays output at
% each iteration; 'final' displays just
% the final output; 'notify' displays
% output only if the function does not
% converge;
% TolX - Termination tolerance on x, default 1e-6.
% TolFun - Termination tolerance on the function value,
% default 1e-6.
% MaxIter - Maximum number of iterations allowed, default 400.
% MaxFunEvals - Maximum number of function evaluations allowed,
%                   default 100 times the number of unknowns.
% DiffMaxChange - Maximum stepsize used for finite difference
% gradients.
% DiffMinChange - Minimum stepsize used for finite difference
% gradients.
% OutputFcn - User-defined function that an optimization
% function calls at each iteration.
% rho - Wolfe condition on gradient (c1 on Wikipedia),
% default 0.01.
% sigma - Wolfe condition on gradient (c2 on Wikipedia),
% default 0.9.
% tau1 - Bracket expansion if stepsize becomes larger,
% default 3.
% tau2 - Left bracket reduction used in section phase,
% default 0.1.
% tau3 - Right bracket reduction used in section phase,
% default 0.5.
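%
%   Example of setting options (an illustrative sketch, not part of the
%   original help): since OPTIONS is a plain struct, fields that OPTIMSET
%   does not know about can be added directly:
%     options = optimset('GradObj','on','Display','iter');
%     options.HessUpdate = 'lbfgs'; % force limited-memory BFGS
%     options.StoreN = 10;          % assumed smaller L-BFGS history
%     x = fminlbfgs(@myfun, x0, options);
%   (here myfun and x0 stand for the user's objective and start point)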
% FUN,
% The speed of this optimizer can be improved by also
% providing the gradient at X. Write the FUN function as
% follows function [f,g]=FUN(X)
% f , value calculation at X;
% if ( nargout > 1 )
% g , gradient calculation at X;
% end
% EXITFLAG,
% Possible values of EXITFLAG, and the corresponding exit
% conditions are
% 1, 'Change in the objective function value was less than TolFun.';
% 2, 'Change in x was smaller than the specified tolerance TolX.';
% 3, 'Magnitude of gradient smaller than the specified tolerance';
% 4, 'Boundary fminimum reached.';
% 0, 'Number of iterations exceeded options.MaxIter or
% number of function evaluations exceeded
%          options.MaxFunEvals.';
% -1, 'Algorithm was terminated by the output function.';
% -2, 'Line search cannot find an acceptable point along the
% current search direction';
%
% Examples
% options = optimset('GradObj','on');
% X = fminlbfgs(@myfun,2,options)
%
% % where myfun is a MATLAB function such as:
% function [f,g] = myfun(x)
% f = sin(x) + 3;
% if ( nargout > 1 ), g = cos(x); end
%
% See also OPTIMSET, FMINSEARCH, FMINBND, FMINCON, FMINUNC, @, INLINE.
%
% Function is written by D.Kroon University of Twente (Updated Nov. 2010)
% 2010-10-29 Aki Vehtari : GradConstr is 'off' by default.
% 2011-9-28 Ville Tolvanen : Reduce step size until function returns finite
% value
% 2012-10-10 Aki Vehtari : Improved robustness if function returns NaN or Inf
% Spell check and some beautification
%Copyright (c) 2009, Dirk-Jan Kroon
%All rights reserved.
%
%Redistribution and use in source and binary forms, with or without
%modification, are permitted provided that the following conditions are
%met:
%
% * Redistributions of source code must retain the above copyright
% notice, this list of conditions and the following disclaimer.
% * Redistributions in binary form must reproduce the above copyright
% notice, this list of conditions and the following disclaimer in
% the documentation and/or other materials provided with the distribution
%
%THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
%AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
%IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
%ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
%LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
%CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
%SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
%INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
%CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
%ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
%POSSIBILITY OF SUCH DAMAGE.
% Read Optimization Parameters
defaultopt = struct('Display','final',...
'HessUpdate','bfgs',...
'GoalsExactAchieve',1,...
'GradConstr','off', ...
'TolX',1e-6,...
'TolFun',1e-6,...
'GradObj','off',...
'MaxIter',400,...
'MaxFunEvals',100*numel(x_init)-1,...
'DiffMaxChange',1e-1,...
'DiffMinChange',1e-8,...
'OutputFcn',[], ...
'rho',0.0100,...
'sigma',0.900,...
'tau1',3,...
'tau2', 0.1,...
'tau3', 0.5,...
'StoreN',20);
if (~exist('optim','var'))
optim=defaultopt;
else
f = fieldnames(defaultopt);
for i=1:length(f),
if (~isfield(optim,f{i})||(isempty(optim.(f{i})))), optim.(f{i})=defaultopt.(f{i}); end
end
end
% Initialize the data structure
data.fval=0;
data.gradient=0;
data.fOld=[];
data.xsizes=size(x_init);
data.numberOfVariables = numel(x_init);
data.xInitial = x_init(:);
data.alpha=1;
data.xOld=data.xInitial;
data.iteration=0;
data.funcCount=0;
data.gradCount=0;
data.exitflag=[];
data.nStored=0;
data.timeTotal=tic;
data.timeExtern=0;
% Switch to L-BFGS in case of more than 3000 unknown variables
if(optim.HessUpdate(1)=='b')
if(data.numberOfVariables<3000),
optim.HessUpdate='bfgs';
else
optim.HessUpdate='lbfgs';
end
end
if(optim.HessUpdate(1)=='l')
success=false;
while(~success)
try
data.deltaX=zeros(data.numberOfVariables,optim.StoreN);
data.deltaG=zeros(data.numberOfVariables,optim.StoreN);
data.saveD=zeros(data.numberOfVariables,optim.StoreN);
success=true;
catch ME
warning('fminlbfgs:memory','Decreasing StoreN value because of insufficient memory');
success=false;
data.deltaX=[]; data.deltaG=[]; data.saveD=[];
optim.StoreN=optim.StoreN-1;
if(optim.StoreN<1)
rethrow(ME);
end
end
end
end
exitflag=[];
% Display column headers
if(strcmp(optim.Display,'iter'))
disp(' Iteration Func-count Grad-count f(x) Step-size');
end
% Calculate the initial error and gradient
data.initialStepLength=1;
[data,fval,grad]=gradient_function(data.xInitial,funfcn, data, optim);
data.gradient=grad;
data.dir = -data.gradient;
data.fInitial = fval;
data.fPrimeInitial= data.gradient'*data.dir(:);
data.fOld=data.fInitial;
data.xOld=data.xInitial;
data.gOld=data.gradient;
gNorm = norm(data.gradient,Inf); % Norm of gradient
data.initialStepLength = min(1/gNorm,5);
% Show the current iteration
if(strcmp(optim.Display,'iter'))
s=sprintf(' %5.0f %5.0f %5.0f %13.6g ',data.iteration,data.funcCount,data.gradCount,data.fInitial); disp(s);
end
% Hessian initialization
if(optim.HessUpdate(1)=='b')
data.Hessian=eye(data.numberOfVariables);
end
% Call output function
if(call_output_function(data,optim,'init')), exitflag=-1; end
% Start Minimizing
while(true)
% Update number of iterations
data.iteration=data.iteration+1;
% Set current lineSearch parameters
data.TolFunLnS = eps(max(1,abs(data.fInitial )));
data.fminimum = data.fInitial - 1e16*(1+abs(data.fInitial));
% Make arrays to store linesearch results
data.storefx=[]; data.storepx=[]; data.storex=[]; data.storegx=[];
% If display option is 'plot', then start a new figure
if(optim.Display(1)=='p'), figure, hold on; end
% Find a good step size in the direction of the gradient: Linesearch
if(optim.GoalsExactAchieve==1)
data=linesearch(funfcn, data,optim);
else
data=linesearch_simple(funfcn, data, optim);
end
% Make linesearch plot
if(optim.Display(1)=='p');
plot(data.storex,data.storefx,'r*');
plot(data.storex,data.storefx,'b');
alpha_test= linspace(min(data.storex(:))/3, max(data.storex(:))*1.3, 10);
falpha_test=zeros(1,length(alpha_test));
for i=1:length(alpha_test)
[data,falpha_test(i)]=gradient_function(data.xInitial(:)+alpha_test(i)*data.dir(:),funfcn, data, optim);
end
plot(alpha_test,falpha_test,'g');
plot(data.alpha,data.f_alpha,'go','MarkerSize',8);
end
% Check if exitflag is set
if(~isempty(data.exitflag)),
exitflag=data.exitflag;
data.xInitial=data.xOld;
data.fInitial=data.fOld;
data.gradient=data.gOld;
break,
end;
% Update x with the alpha step
data.xInitial = data.xInitial + data.alpha*data.dir;
% Set the current error and gradient
data.fInitial = data.f_alpha;
data.gradient = data.grad;
% Set initial step-length to 1
data.initialStepLength = 1;
gNorm = norm(data.gradient,Inf); % Norm of gradient
% Set exit flags
if(gNorm <optim.TolFun), exitflag=1; end
if(max(abs(data.xOld-data.xInitial)) <optim.TolX), exitflag=2; end
if(data.iteration>=optim.MaxIter), exitflag=0; end
% Check if exitflag is set
if(~isempty(exitflag)), break, end;
% Update the inverse Hessian matrix
if(optim.HessUpdate(1)~='s')
% Do the Quasi-Newton Hessian update.
data = updateQuasiNewtonMatrix_LBFGS(data,optim);
else
data.dir = -data.gradient;
end
% Derivative of direction
data.fPrimeInitial= data.gradient'*data.dir(:);
% Call output function
if(call_output_function(data,optim,'iter')), exitflag=-1; end
% Show the current iteration
if(strcmp(optim.Display(1),'i')||strcmp(optim.Display(1),'p'))
s=sprintf(' %5.0f %5.0f %5.0f %13.6g %13.6g',data.iteration,data.funcCount,data.gradCount,data.fInitial,data.alpha); disp(s);
end
% Keep the variables for next iteration
data.fOld=data.fInitial;
data.xOld=data.xInitial;
data.gOld=data.gradient;
end
% Set output parameters
fval=data.fInitial;
grad=data.gradient;
x = data.xInitial;
% Reshape x to original shape
x=reshape(x,data.xsizes);
% Call output function
if(call_output_function(data,optim,'done')), exitflag=-1; end
% Make exit output structure
if(optim.HessUpdate(1)=='b'), output.algorithm='Broyden-Fletcher-Goldfarb-Shanno (BFGS)';
elseif(optim.HessUpdate(1)=='l'), output.algorithm='limited memory BFGS (L-BFGS)';
else output.algorithm='Steepest Gradient Descent';
end
output.message=getexitmessage(exitflag);
output.iteration = data.iteration;
output.funccount = data.funcCount;
output.fval = data.fInitial;
output.stepsize = data.alpha;
output.directionalderivative = data.fPrimeInitial;
output.gradient = reshape(data.gradient, data.xsizes);
output.searchdirection = data.dir;
output.timeTotal=toc(data.timeTotal);
output.timeExtern=data.timeExtern;
output.timeIntern=output.timeTotal-output.timeExtern;
% Display final results
if(~strcmp(optim.Display,'off'))
disp(' Optimizer Results')
disp([' Algorithm Used: ' output.algorithm]);
disp([' Exit message : ' output.message]);
disp([' Iterations : ' int2str(data.iteration)]);
disp([' Function Count : ' int2str(data.funcCount)]);
disp([' Minimum found : ' num2str(fval)]);
disp([' Intern Time : ' num2str(output.timeIntern) ' seconds']);
disp([' Total Time : ' num2str(output.timeTotal) ' seconds']);
end
function message=getexitmessage(exitflag)
switch(exitflag)
case 1, message='Change in the objective function value was less than TolFun.';
case 2, message='Change in x was smaller than the specified tolerance TolX.';
case 3, message='Magnitude of gradient smaller than the specified tolerance';
case 4, message='Boundary fminimum reached.';
case 0, message='Number of iterations exceeded options.MaxIter or number of function evaluations exceeded options.MaxFunEvals.';
case -1, message='Algorithm was terminated by the output function.';
case -2, message='Line search cannot find an acceptable point along the current search direction';
otherwise, message='Undefined exit code';
end
function stopt=call_output_function(data,optim,where)
stopt=false;
if(~isempty(optim.OutputFcn))
output.iteration = data.iteration;
output.funccount = data.funcCount;
output.fval = data.fInitial;
output.stepsize = data.alpha;
output.directionalderivative = data.fPrimeInitial;
output.gradient = reshape(data.gradient, data.xsizes);
output.searchdirection = data.dir;
stopt=feval(optim.OutputFcn,reshape(data.xInitial,data.xsizes),output,where);
end
function data=linesearch_simple(funfcn, data, optim)
% Find a bracket of acceptable points
data = bracketingPhase_simple(funfcn, data, optim);
if (data.bracket_exitflag == 2)
% BracketingPhase found a bracket containing acceptable points;
% now find acceptable point within bracket
data = sectioningPhase_simple(funfcn, data, optim);
data.exitflag = data.section_exitflag;
else
% Already acceptable point found or MaxFunEvals reached
data.exitflag = data.bracket_exitflag;
end
function data = bracketingPhase_simple(funfcn, data,optim)
% Number of iterations
itw=0;
% Point with smaller value, initial
data.beta=0;
data.f_beta=data.fInitial;
data.fPrime_beta=data.fPrimeInitial;
% Initial step is equal to alpha of previous step.
alpha = data.initialStepLength;
% Going uphill?
hill=false;
% Search for brackets
while(true)
% Calculate the error registration gradient
if isequal(optim.GradConstr,'on')
[data,f_alpha]=gradient_function(data.xInitial(:)+alpha*data.dir(:),funfcn, data, optim);
while isnan(f_alpha) || isinf(f_alpha)
% Go to smaller stepsize
alpha=alpha*optim.tau3;
% Set hill variable
hill=true;
[data,f_alpha]=gradient_function(data.xInitial(:)+alpha*data.dir(:),funfcn, data,optim);
end
fPrime_alpha=nan;
grad=nan;
else
[data,f_alpha, grad]=gradient_function(data.xInitial(:)+alpha*data.dir(:),funfcn, data,optim);
while isnan(f_alpha) || isinf(f_alpha) || any(isnan(grad)) || any(isinf(grad))
% Go to smaller stepsize
alpha=alpha*optim.tau3;
% Set hill variable
hill=true;
[data,f_alpha, grad]=gradient_function(data.xInitial(:)+alpha*data.dir(:),funfcn, data,optim);
end
fPrime_alpha = grad'*data.dir(:);
end
% Store values linesearch
data.storefx=[data.storefx f_alpha];
data.storepx=[data.storepx fPrime_alpha];
data.storex=[data.storex alpha];
data.storegx=[data.storegx grad(:)];
% Update step value
if(data.f_beta<f_alpha),
% Go to smaller stepsize
alpha=alpha*optim.tau3;
% Set hill variable
hill=true;
else
% Save current minimum point
data.beta=alpha; data.f_beta=f_alpha; data.fPrime_beta=fPrime_alpha; data.grad=grad;
if(~hill)
alpha=alpha*optim.tau1;
end
end
% Update number of loop iterations
itw=itw+1;
if(itw>(log(optim.TolFun)/log(optim.tau3))),
% No new optimum found, linesearch failed.
data.bracket_exitflag=-2; break;
end
if(data.beta>0&&hill)
% Get the brackets around minimum point
% Pick bracket A from stored trials
[t,i]=sort(data.storex,'ascend');
storefx=data.storefx(i);storepx=data.storepx(i); storex=data.storex(i);
[t,i]=find(storex>data.beta,1);
if(isempty(i)), [t,i]=find(storex==data.beta,1); end
alpha=storex(i); f_alpha=storefx(i); fPrime_alpha=storepx(i);
% Pick bracket B from stored trials
[t,i]=sort(data.storex,'descend');
storefx=data.storefx(i);storepx=data.storepx(i); storex=data.storex(i);
[t,i]=find(storex<data.beta,1);
if(isempty(i)), [t,i]=find(storex==data.beta,1); end
beta=storex(i); f_beta=storefx(i); fPrime_beta=storepx(i);
% Calculate derivatives if not already calculated
if isequal(optim.GradConstr,'on')
gstep=data.initialStepLength/1e6;
if(gstep>optim.DiffMaxChange), gstep=optim.DiffMaxChange; end
if(gstep<optim.DiffMinChange), gstep=optim.DiffMinChange; end
[data,f_alpha2]=gradient_function(data.xInitial(:)+(alpha+gstep)*data.dir(:),funfcn, data, optim);
[data,f_beta2]=gradient_function(data.xInitial(:)+(beta+gstep)*data.dir(:),funfcn, data, optim);
fPrime_alpha=(f_alpha2-f_alpha)/gstep;
fPrime_beta=(f_beta2-f_beta)/gstep;
end
% Set the brackets A and B
data.a=alpha; data.f_a=f_alpha; data.fPrime_a=fPrime_alpha;
data.b=beta; data.f_b=f_beta; data.fPrime_b=fPrime_beta;
% Finished bracketing phase
data.bracket_exitflag = 2; return
end
% Reached max function evaluations
if(data.funcCount>=optim.MaxFunEvals), data.bracket_exitflag=0; return; end
end
function data = sectioningPhase_simple(funfcn, data, optim)
% Get the brackets
brcktEndpntA=data.a; brcktEndpntB=data.b;
% Calculate minimum between brackets
[alpha,f_alpha_estimated] = pickAlphaWithinInterval(brcktEndpntA,brcktEndpntB,data.a,data.b,data.f_a,data.fPrime_a,data.f_b,data.fPrime_b,optim);
if(isfield(data,'beta')&&(data.f_beta<f_alpha_estimated)), alpha=data.beta; end
[t,i]=find(data.storex==alpha,1);
if((~isempty(i))&&(~isnan(data.storegx(i))))
f_alpha=data.storefx(i); grad=data.storegx(:,i);
else
% Calculate the error and gradient for the next minimizer iteration
[data,f_alpha, grad]=gradient_function(data.xInitial(:)+alpha*data.dir(:),funfcn, data,optim);
if(isfield(data,'beta')&&(data.f_beta<f_alpha)),
alpha=data.beta;
if((~isempty(i))&&(~isnan(data.storegx(i))))
f_alpha=data.storefx(i); grad=data.storegx(:,i);
else
[data,f_alpha, grad]=gradient_function(data.xInitial(:)+alpha*data.dir(:),funfcn, data,optim);
end
end
end
% Store values linesearch
data.storefx=[data.storefx f_alpha]; data.storex=[data.storex alpha];
fPrime_alpha = grad'*data.dir(:);
data.alpha=alpha;
data.fPrime_alpha= fPrime_alpha;
data.f_alpha= f_alpha;
data.grad=grad;
% Set the exit flag to success
data.section_exitflag=[];
function data=linesearch(funfcn, data, optim)
% Find a bracket of acceptable points
data = bracketingPhase(funfcn, data,optim);
%if abs(data.a-data.b)<eps
% data = bracketingPhase_simple(funfcn, data,optim);
%end
if (data.bracket_exitflag == 2)
% BracketingPhase found a bracket containing acceptable points;
% now find acceptable point within bracket
data = sectioningPhase(funfcn, data, optim);
data.exitflag = data.section_exitflag;
else
% Already acceptable point found or MaxFunEvals reached
data.exitflag = data.bracket_exitflag;
end
function data = sectioningPhase(funfcn, data, optim)
%
% sectioningPhase finds an acceptable point alpha within a given bracket [a,b]
% containing acceptable points. Notice that funcCount counts the total number of
% function evaluations including those of the bracketing phase.
while(true)
% Pick alpha in reduced bracket
brcktEndpntA = data.a + min(optim.tau2,optim.sigma)*(data.b - data.a);
brcktEndpntB = data.b - optim.tau3*(data.b - data.a);
% Find global minimizer in bracket [brcktEndpntA,brcktEndpntB] of 3rd-degree
% polynomial that interpolates f() and f'() at "a" and at "b".
alpha = pickAlphaWithinInterval(brcktEndpntA,brcktEndpntB,data.a,data.b,data.f_a,data.fPrime_a,data.f_b,data.fPrime_b,optim);
% No acceptable point could be found
if (abs( (alpha - data.a)*data.fPrime_a ) <= data.TolFunLnS), data.section_exitflag = -2; return; end
% Calculate value (and gradient if no extra time cost) of current alpha
if ~isequal(optim.GradConstr,'on')
[data,f_alpha, grad]=gradient_function(data.xInitial(:)+alpha*data.dir(:),funfcn, data, optim);
fPrime_alpha = grad'*data.dir(:);
else
gstep=data.initialStepLength/1e6;
if(gstep>optim.DiffMaxChange), gstep=optim.DiffMaxChange; end
if(gstep<optim.DiffMinChange), gstep=optim.DiffMinChange; end
[data,f_alpha]=gradient_function(data.xInitial(:)+alpha*data.dir(:),funfcn, data,optim);
[data,f_alpha2]=gradient_function(data.xInitial(:)+(alpha+gstep)*data.dir(:),funfcn, data, optim);
fPrime_alpha=(f_alpha2-f_alpha)/gstep;
end
% Store values linesearch
data.storefx=[data.storefx f_alpha]; data.storex=[data.storex alpha];
% Store current bracket position of A
aPrev = data.a;
f_aPrev = data.f_a;
fPrime_aPrev = data.fPrime_a;
% Update the current brackets
if ((f_alpha > data.fInitial + alpha*optim.rho*data.fPrimeInitial) || (f_alpha >= data.f_a))
% Update bracket B to current alpha
data.b = alpha; data.f_b = f_alpha; data.fPrime_b = fPrime_alpha;
else
% Wolfe conditions, if true then acceptable point found
if (abs(fPrime_alpha) <= -optim.sigma*data.fPrimeInitial),
if isequal(optim.GradConstr,'on')
% Gradient was not yet calculated because of time costs
[data,f_alpha, grad]=gradient_function(data.xInitial(:)+alpha*data.dir(:),funfcn, data, optim);
fPrime_alpha = grad'*data.dir(:);
end
% Store the found alpha values
data.alpha=alpha; data.fPrime_alpha= fPrime_alpha; data.f_alpha= f_alpha;
data.grad=grad;
data.section_exitflag = []; return,
end
% Update bracket A
data.a = alpha; data.f_a = f_alpha; data.fPrime_a = fPrime_alpha;
if (data.b - data.a)*fPrime_alpha >= 0
% B becomes old bracket A;
data.b = aPrev; data.f_b = f_aPrev; data.fPrime_b = fPrime_aPrev;
end
end
% No acceptable point could be found
if (abs(data.b-data.a) < eps)
if f_alpha<data.fInitial;
% however, point with a smaller function value found
data.alpha=alpha; data.fPrime_alpha= fPrime_alpha; data.f_alpha= f_alpha;
data.grad=grad;
data.section_exitflag = []; return
else
% No acceptable point could be found
data.section_exitflag = -2; return
end
end
% maxFunEvals reached
if(data.funcCount >optim.MaxFunEvals), data.section_exitflag = -1; return, end
end
function data = bracketingPhase(funfcn, data, optim)
% bracketingPhase finds a bracket [a,b] that contains acceptable
% points; a bracket is the same as a closed interval, except that a
% > b is allowed.
%
% The outputs f_a and fPrime_a are the values of the function and
% the derivative evaluated at the bracket endpoint 'a'. Similar
% notation applies to the endpoint 'b'.
% Parameters of bracket A
data.a = [];
data.f_a = [];
data.fPrime_a = [];
% Parameters of bracket B
data.b = [];
data.f_b = [];
data.fPrime_b = [];
% First trial alpha is user-supplied
% f_alpha will contain f(alpha) for all trial points alpha
% fPrime_alpha will contain f'(alpha) for all trial points alpha
alpha = data.initialStepLength;
f_alpha = data.fInitial;
fPrime_alpha = data.fPrimeInitial;
% Set maximum value of alpha (determined by fminimum)
alphaMax = (data.fminimum - data.fInitial)/(optim.rho*data.fPrimeInitial);
alphaPrev = 0;
here_be_dragons=false;
while(true)
% Evaluate f(alpha) and f'(alpha)
fPrev = f_alpha;
fPrimePrev = fPrime_alpha;
% Calculate value (and gradient if no extra time cost) of current alpha
if ~isequal(optim.GradConstr,'on')
[data,f_alpha, grad]=gradient_function(data.xInitial(:)+alpha*data.dir(:),funfcn, data, optim);
while isnan(f_alpha) || isinf(f_alpha) || any(isnan(grad)) || any(isinf(grad))
% NaN or Inf encountered, switch to safe mode
here_be_dragons=true;
alphaMax=alpha;
alpha = alphaPrev+0.25*(alpha-alphaPrev);
[data,f_alpha, grad]=gradient_function(data.xInitial(:)+alpha*data.dir(:),funfcn, data, optim);
end
fPrime_alpha = grad'*data.dir(:);
else
gstep=data.initialStepLength/1e6;
if(gstep>optim.DiffMaxChange), gstep=optim.DiffMaxChange; end
if(gstep<optim.DiffMinChange), gstep=optim.DiffMinChange; end
[data,f_alpha]=gradient_function(data.xInitial(:)+alpha*data.dir(:),funfcn, data, optim);
while isnan(f_alpha) || isinf(f_alpha)
% NaN or Inf encountered, switch to safe mode
here_be_dragons=true;
alphaMax=alpha;
alpha = alphaPrev+0.25*(alpha-alphaPrev);
[data,f_alpha]=gradient_function(data.xInitial(:)+alpha*data.dir(:),funfcn, data, optim);
end
[data,f_alpha2]=gradient_function(data.xInitial(:)+(alpha+gstep)*data.dir(:),funfcn, data, optim);
fPrime_alpha=(f_alpha2-f_alpha)/gstep;
end
% Store values linesearch
data.storefx=[data.storefx f_alpha]; data.storex=[data.storex alpha];
% Terminate if f < fminimum
if (f_alpha <= data.fminimum), data.bracket_exitflag = 4; return; end
% Bracket located - case 0 (near NaN or Inf switch to safe solution)
if here_be_dragons && (f_alpha >= fPrev)
% a smaller function value was found on previous step
data.a = 0; data.f_a = data.fInitial; data.fPrime_a = data.fPrimeInitial;
data.b = alpha; data.f_b = f_alpha; data.fPrime_b = fPrime_alpha;
% Finished bracketing phase
data.bracket_exitflag = 2; return
end
% Bracket located - case 1 (Wolfe conditions)
if (f_alpha > (data.fInitial + alpha*optim.rho*data.fPrimeInitial)) || (f_alpha >= fPrev)
% Set the bracket values
data.a = alphaPrev; data.f_a = fPrev; data.fPrime_a = fPrimePrev;
data.b = alpha; data.f_b = f_alpha; data.fPrime_b = fPrime_alpha;
% Finished bracketing phase
data.bracket_exitflag = 2; return
end
% Acceptable step-length found
if (abs(fPrime_alpha) <= -optim.sigma*data.fPrimeInitial),
if isequal(optim.GradConstr,'on')
% Gradient was not yet calculated because of time costs
[data,f_alpha, grad]=gradient_function(data.xInitial(:)+alpha*data.dir(:),funfcn, data, optim);
fPrime_alpha = grad'*data.dir(:);
end
% Store the found alpha values
data.alpha=alpha;
data.fPrime_alpha= fPrime_alpha; data.f_alpha= f_alpha; data.grad=grad;
% Finished bracketing phase, and no need to call sectioning phase
data.bracket_exitflag = []; return
end
% Bracket located - case 2
if (fPrime_alpha >= 0)
% Set the bracket values
data.a = alpha; data.f_a = f_alpha; data.fPrime_a = fPrime_alpha;
data.b = alphaPrev; data.f_b = fPrev; data.fPrime_b = fPrimePrev;
% Finished bracketing phase
data.bracket_exitflag = 2; return
end
% Update alpha
if (2*alpha - alphaPrev < alphaMax )
brcktEndpntA = 2*alpha-alphaPrev;
brcktEndpntB = min(alphaMax,alpha+optim.tau1*(alpha-alphaPrev));
% Find global minimizer in bracket [brcktEndpntA,brcktEndpntB] of 3rd-degree polynomial
% that interpolates f() and f'() at alphaPrev and at alpha
alphaNew = pickAlphaWithinInterval(brcktEndpntA,brcktEndpntB,alphaPrev,alpha,fPrev, ...
fPrimePrev,f_alpha,fPrime_alpha,optim);
alphaPrev = alpha;
alpha = alphaNew;
else
alpha = alphaMax;
end
% maxFunEvals reached
if(data.funcCount >optim.MaxFunEvals), data.bracket_exitflag = -1; return, end
end
function [alpha,f_alpha]= pickAlphaWithinInterval(brcktEndpntA,brcktEndpntB,alpha1,alpha2,f1,fPrime1,f2,fPrime2,optim)
% finds a global minimizer alpha within the bracket [brcktEndpntA,brcktEndpntB] of the cubic polynomial
% that interpolates f() and f'() at alpha1 and alpha2. Here f(alpha1) = f1, f'(alpha1) = fPrime1,
% f(alpha2) = f2, f'(alpha2) = fPrime2.
% determines the coefficients of the cubic polynomial with c(alpha1) = f1,
% c'(alpha1) = fPrime1, c(alpha2) = f2, c'(alpha2) = fPrime2.
coeff = [(fPrime1+fPrime2)*(alpha2-alpha1)-2*(f2-f1) ...
3*(f2-f1)-(2*fPrime1+fPrime2)*(alpha2-alpha1) (alpha2-alpha1)*fPrime1 f1];
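% In the scaled variable z = (alpha-alpha1)/(alpha2-alpha1) with h = alpha2-alpha1,
% this cubic c(z) satisfies c(0)=f1, c'(0)=h*fPrime1, c(1)=f2, c'(1)=h*fPrime2,
% so its minimizer in z maps back to the alpha domain at the end of this function.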
% Convert bounds to the z-space
lowerBound = (brcktEndpntA - alpha1)/(alpha2 - alpha1);
upperBound = (brcktEndpntB - alpha1)/(alpha2 - alpha1);
% Swap if lower bound is higher than the upper bound
if (lowerBound > upperBound), t=upperBound; upperBound=lowerBound; lowerBound=t; end
% Find minima and maxima from the roots of the derivative of the polynomial.
sPoints = roots([3*coeff(1) 2*coeff(2) coeff(3)]);
% Remove imaginary and points outside range
sPoints(imag(sPoints)~=0)=[];
sPoints(sPoints<lowerBound)=[]; sPoints(sPoints>upperBound)=[];
% Make vector with all possible solutions
sPoints=[lowerBound sPoints(:)' upperBound];
% Select the global minimum point
[f_alpha,index]=min(polyval(coeff,sPoints)); z=sPoints(index);
% Add the offset and scale back from [0..1] to the alpha domain
alpha = alpha1 + z*(alpha2 - alpha1);
% Show polynomial search
if(optim.Display(1)=='p');
vPoints=polyval(coeff,sPoints);
plot(sPoints*(alpha2 - alpha1)+alpha1,vPoints,'co');
plot([sPoints(1) sPoints(end)]*(alpha2 - alpha1)+alpha1,[vPoints(1) vPoints(end)],'c*');
xPoints=linspace(lowerBound/3, upperBound*1.3, 50);
vPoints=polyval(coeff,xPoints);
plot(xPoints*(alpha2 - alpha1)+alpha1,vPoints,'c');
end
function [data,fval,grad]=gradient_function(x,funfcn, data, optim)
% Call the error function for error (and gradient)
if ( nargout <3 )
timem=tic;
fval=funfcn(reshape(x,data.xsizes));
data.timeExtern=data.timeExtern+toc(timem);
data.funcCount=data.funcCount+1;
else
if(strcmp(optim.GradObj,'on'))
timem=tic;
[fval, grad]=feval(funfcn,reshape(x,data.xsizes));
data.timeExtern=data.timeExtern+toc(timem);
data.funcCount=data.funcCount+1;
data.gradCount=data.gradCount+1;
else
% Calculate gradient with forward difference if not provided by the function
grad=zeros(length(x),1);
fval=funfcn(reshape(x,data.xsizes));
gstep=data.initialStepLength/1e6;
if(gstep>optim.DiffMaxChange), gstep=optim.DiffMaxChange; end
if(gstep<optim.DiffMinChange), gstep=optim.DiffMinChange; end
for i=1:length(x),
x_temp=x; x_temp(i)=x_temp(i)+gstep;
timem=tic;
[fval_g]=feval(funfcn,reshape(x_temp,data.xsizes)); data.funcCount=data.funcCount+1;
data.timeExtern=data.timeExtern+toc(timem);
grad(i)=(fval_g-fval)/gstep;
end
end
grad=grad(:);
end
function data = updateQuasiNewtonMatrix_LBFGS(data,optim)
% Updates the quasi-Newton matrix that approximates the inverse of the Hessian.
% Two methods are supported, BFGS and L-BFGS; in L-BFGS the Hessian is never
% explicitly constructed or stored.
% Calculate position, and gradient difference between the
% iterations
deltaX=data.alpha* data.dir;
deltaG=data.gradient-data.gOld;
if ((deltaX'*deltaG) >= sqrt(eps)*max( eps,norm(deltaX)*norm(deltaG) ))
if(optim.HessUpdate(1)=='b')
% Default BFGS as described by Nocedal
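% Inverse-Hessian update with s = deltaX, y = deltaG, rho = 1/(y'*s):
%   H <- (I - rho*s*y') * H * (I - rho*y*s') + rho*s*s'
% (Vk below equals I - rho*y*s', so Vk'*Hessian*Vk gives the first term.)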
p_k = 1 / (deltaG'*deltaX);
Vk = eye(data.numberOfVariables) - p_k*deltaG*deltaX';
% Set Hessian
data.Hessian = Vk'*data.Hessian *Vk + p_k * deltaX*deltaX';
% Set new Direction
data.dir = -data.Hessian*data.gradient;
else
% L-BFGS with scaling as described by Nocedal
% Update a list with the history of deltaX and deltaG
data.deltaX(:,2:optim.StoreN)=data.deltaX(:,1:optim.StoreN-1); data.deltaX(:,1)=deltaX;
data.deltaG(:,2:optim.StoreN)=data.deltaG(:,1:optim.StoreN-1); data.deltaG(:,1)=deltaG;
data.nStored=data.nStored+1; if(data.nStored>optim.StoreN), data.nStored=optim.StoreN; end
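% Two-loop recursion (Nocedal): computes r = Hessian*gradient directly from
% the stored (deltaX,deltaG) history, so the inverse Hessian is never formed.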
% Initialize variables
a=zeros(1,data.nStored);
p=zeros(1,data.nStored);
q = data.gradient;
for i=1:data.nStored
p(i)= 1 / (data.deltaG(:,i)'*data.deltaX(:,i));
a(i) = p(i)* data.deltaX(:,i)' * q;
q = q - a(i) * data.deltaG(:,i);
end
% Scaling of initial Hessian (identity matrix)
p_k = data.deltaG(:,1)'*data.deltaX(:,1) / sum(data.deltaG(:,1).^2);
% Make r = - Hessian * gradient
r = p_k * q;
for i=data.nStored:-1:1,
b = p(i) * data.deltaG(:,i)' * r;
r = r + data.deltaX(:,i)*(a(i)-b);
end
% Set new direction
data.dir = -r;
end
end
|
github
|
lcnhappe/happe-master
|
prior_gaussian.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/dist/prior_gaussian.m
| 3,537 |
UNKNOWN
|
7a422794084fa6f6e913f9712c2db4c9
|
function p = prior_gaussian(varargin)
%PRIOR_GAUSSIAN Gaussian prior structure
%
% Description
% P = PRIOR_GAUSSIAN('PARAM1', VALUE1, 'PARAM2', VALUE2, ...)
% creates Gaussian prior structure in which the named
% parameters have the specified values. Any unspecified
% parameters are set to default values.
%
% P = PRIOR_GAUSSIAN(P, 'PARAM1', VALUE1, 'PARAM2', VALUE2, ...)
%    modifies a prior structure, with the named parameters altered
%    to the specified values.
%
% Parameters for Gaussian prior [default]
% mu - location [0]
% s2 - scale squared (variance) [1]
% mu_prior - prior for mu [prior_fixed]
% s2_prior - prior for s2 [prior_fixed]
%
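%    Example (an illustrative sketch, not from the original help):
%      p = prior_gaussian('s2', 10);
%    creates a Gaussian prior with mean 0 (default) and variance 10.
%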
% See also
% PRIOR_*
% Copyright (c) 2000-2001,2010 Aki Vehtari
% Copyright (c) 2010 Jaakko Riihimäki
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'PRIOR_GAUSSIAN';
ip.addOptional('p', [], @isstruct);
ip.addParamValue('mu',0, @(x) isscalar(x) && isreal(x));
ip.addParamValue('mu_prior',[], @(x) isstruct(x) || isempty(x));
ip.addParamValue('s2',1, @(x) isscalar(x) && x>0);
ip.addParamValue('s2_prior',[], @(x) isstruct(x) || isempty(x));
ip.parse(varargin{:});
p=ip.Results.p;
if isempty(p)
init=true;
p.type = 'Gaussian';
else
if ~isfield(p,'type') || ~isequal(p.type,'Gaussian')
error('First argument does not seem to be a valid prior structure')
end
init=false;
end
% Initialize parameters
if init || ~ismember('mu',ip.UsingDefaults)
p.mu = ip.Results.mu;
end
if init || ~ismember('s2',ip.UsingDefaults)
p.s2 = ip.Results.s2;
end
% Initialize prior structure
if init
p.p=[];
end
if init || ~ismember('mu_prior',ip.UsingDefaults)
p.p.mu=ip.Results.mu_prior;
end
if init || ~ismember('s2_prior',ip.UsingDefaults)
p.p.s2=ip.Results.s2_prior;
end
if init
% set functions
p.fh.pak = @prior_gaussian_pak;
p.fh.unpak = @prior_gaussian_unpak;
p.fh.lp = @prior_gaussian_lp;
p.fh.lpg = @prior_gaussian_lpg;
p.fh.recappend = @prior_gaussian_recappend;
end
end
function [w, s] = prior_gaussian_pak(p)
w=[];
s={};
if ~isempty(p.p.mu)
w = p.mu;
s=[s; 'Gaussian.mu'];
end
if ~isempty(p.p.s2)
w = [w log(p.s2)];
s=[s; 'log(Gaussian.s2)'];
end
end
function [p, w] = prior_gaussian_unpak(p, w)
if ~isempty(p.p.mu)
i1=1;
p.mu = w(i1);
w = w(i1+1:end);
end
if ~isempty(p.p.s2)
i1=1;
p.s2 = exp(w(i1));
w = w(i1+1:end);
end
end
function lp = prior_gaussian_lp(x, p)
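% Gaussian log density: log N(x|mu,s2) = -log(2*pi)/2 - log(s2)/2
% - (x-mu)^2/(2*s2), summed over the elements of x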
lp = 0.5*sum(-log(2*pi) -log(p.s2)- 1./p.s2 .* sum((x-p.mu).^2,1));
if ~isempty(p.p.mu)
lp = lp + p.p.mu.fh.lp(p.mu, p.p.mu);
end
if ~isempty(p.p.s2)
lp = lp + p.p.s2.fh.lp(p.s2, p.p.s2) + log(p.s2);
end
end
function lpg = prior_gaussian_lpg(x, p)
lpg = (1./p.s2).*(p.mu-x);
if ~isempty(p.p.mu)
lpgmu = sum((1./p.s2).*(x-p.mu)) + p.p.mu.fh.lpg(p.mu, p.p.mu);
lpg = [lpg lpgmu];
end
if ~isempty(p.p.s2)
lpgs2 = (sum(-0.5*(1./p.s2-1./p.s2.^2.*(x-p.mu).^2 )) + p.p.s2.fh.lpg(p.s2, p.p.s2)).*p.s2 + 1;
lpg = [lpg lpgs2];
end
end
function rec = prior_gaussian_recappend(rec, ri, p)
% The parameters are not sampled in any case.
rec = rec;
if ~isempty(p.p.mu)
rec.mu(ri,:) = p.mu;
end
if ~isempty(p.p.s2)
rec.s2(ri,:) = p.s2;
end
end
|
github
|
lcnhappe/happe-master
|
kernelp.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/dist/kernelp.m
| 1,374 |
utf_8
|
3784b6e72d83468627af9252dd902b7c
|
function [p,xx,sh]=kernelp(x,xx)
%KERNELP 1D Kernel density estimation of data, with automatic kernel width
%
%  [P,XX]=KERNELP(X,XX) returns density estimates P at points XX,
%  given data X and, optionally, evaluation points XX. The density
%  estimate is based on a simple Gaussian kernel density estimate
%  where all kernels have equal width; this width is selected by
%  optimising a plug-in partial predictive density. Works well with
%  reasonably sized X.
%
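%  Example (an illustrative sketch with simulated data):
%    x = randn(500,1);
%    [p,xx] = kernelp(x);
%    plot(xx,p)
%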
% Copyright (C) 2001-2003,2010 Aki Vehtari
%
% This software is distributed under the GNU General Public
% Licence (version 3 or later); please refer to the file
% Licence.txt, included with the software, for details.
if nargin < 1
error('Too few arguments');
end
[n,m]=size(x);
if n>1 && m>1
error('X must be a vector');
end
x=x(:);
[n,m]=size(x);
xa=min(x);xb=max(x);xab=xb-xa;
if nargin < 2
nn=200;
xa=xa-xab/20;xb=xb+xab/20;
xx=linspace(xa,xb,nn);
else
[mm,nn]=size(xx);
if nn>1 && mm>1
error('XX must be a vector');
end
end
xx=xx(:);
m=length(x)/2;
rp=randperm(n);
xd=bsxfun(@minus,x(rp(1:m)),x(rp(m+1:end))');
sh=fminbnd(@(s) err(s,xd),xab/n*4,xab,optimset('TolX',xab/n*4));
p=mean(normpdf(bsxfun(@minus,x(rp(1:m)),xx'),0,sh));
function e=err(s,xd)
e=-sum(log(sum(normpdf(xd,0,s))));
function y = normpdf(x,mu,sigma)
y = -0.5 * ((x-mu)./sigma).^2 -log(sigma) -log(2*pi)/2;
y=exp(y);
|
github
|
lcnhappe/happe-master
|
normtrand.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/dist/normtrand.m
| 1,912 |
utf_8
|
8420418c944aa575ac4f7577538f089d
|
function result = normtrand(mu,sigma2,left,right)
%NORMTRAND random draws from a normal truncated to (left,right) interval
% ------------------------------------------------------
% USAGE: y = normtrand(mu,sigma2,left,right)
% where: mu = mean (nobs x 1)
% sigma2 = variance (nobs x 1)
% left = left truncation points (nobs x 1)
% right = right truncation points (nobs x 1)
% ------------------------------------------------------
% RETURNS: y = (nobs x 1) vector
% ------------------------------------------------------
% NOTES: use y = normtrand(mu,sigma2,left,mu+5*sigma2)
% to produce a left-truncated draw
% use y = normtrand(mu,sigma2,mu-5*sigma2,right)
% to produce a right-truncated draw
% ------------------------------------------------------
% SEE ALSO: normltrand (left truncated draws), normrtrand (right truncated)
%
% adapted from the Bayes Toolbox by
% James P. LeSage, Dept of Economics
% University of Toledo
% 2801 W. Bancroft St,
% Toledo, OH 43606
% [email protected]
% Anyone is free to use these routines, no attribution (or blame)
% need be placed on the author/authors.
% For information on the Bayes Toolbox see:
% Ordinal Data Modeling by Valen Johnson and James Albert
% Springer-Verlag, New York, 1999.
% 2009-01-08 Aki Vehtari - Fixed Naming
if nargin ~= 4
error('normtrand: wrong # of input arguments');
end
std = sqrt(sigma2);
% Calculate bounds on probabilities
lowerProb = Phi((left-mu)./std);
upperProb = Phi((right-mu)./std);
% Draw uniform from within (lowerProb,upperProb)
u = lowerProb+(upperProb-lowerProb).*rand(size(mu));
% Find needed quantiles
result = mu + Phiinv(u).*std;
function val=Phiinv(x)
% Computes the standard normal quantile function of the vector x, 0<x<1.
val=sqrt(2)*erfinv(2*x-1);
function y = Phi(x)
% Phi computes the standard normal distribution function value at x
y = .5*(1+erf(x/sqrt(2)));
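A minimal usage sketch (illustrative, not part of the original file): five draws from N(0,1) truncated to the interval (0,2).
n = 5;
y = normtrand(zeros(n,1), ones(n,1), zeros(n,1), 2*ones(n,1));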
|
github
|
lcnhappe/happe-master
|
prior_sqinvunif.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/dist/prior_sqinvunif.m
| 1,662 |
utf_8
|
91f8f43d06a07b161cdd806273eed21f
|
function p = prior_sqinvunif(varargin)
%PRIOR_SQINVUNIF Uniform prior structure for the square inverse of the parameter
%
% Description
% P = PRIOR_SQINVUNIF creates uniform prior structure for the
% square inverse of the parameter.
%
% See also
% PRIOR_*
% Copyright (c) 2009 Jarno Vanhatalo
% Copyright (c) 2010,2012 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'PRIOR_SQINVUNIFORM';
ip.addOptional('p', [], @isstruct);
ip.parse(varargin{:});
p=ip.Results.p;
if isempty(p)
init=true;
p.type = 'SqInv-Uniform';
else
if ~isfield(p,'type') || ~isequal(p.type,'SqInv-Uniform')
error('First argument does not seem to be a valid prior structure')
end
init=false;
end
if init
% set functions
p.fh.pak = @prior_sqinvunif_pak;
p.fh.unpak = @prior_sqinvunif_unpak;
p.fh.lp = @prior_sqinvunif_lp;
p.fh.lpg = @prior_sqinvunif_lpg;
p.fh.recappend = @prior_sqinvunif_recappend;
end
end
function [w, s] = prior_sqinvunif_pak(p, w)
w=[];
s={};
end
function [p, w] = prior_sqinvunif_unpak(p, w)
w = w;
p = p;
end
function lp = prior_sqinvunif_lp(x, p)
lJ = -log(x)*3 + log(2); % log(|J|) of transformation t=x^-2, |J| = |dt/dx| = 2/x^3
lp = sum(lJ);
end
function lpg = prior_sqinvunif_lpg(x, p)
lJg = -3./x; % gradient of log(|J|) of transformation
lpg = lJg;
end
function rec = prior_sqinvunif_recappend(rec, ri, p)
% The parameters are not sampled in any case.
rec = rec;
end
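A note on the change-of-variables pattern above (an explanatory addition, not part of the original file): a flat prior on the transformed quantity t = x^(-2) induces on x a density proportional to |dt/dx|. Since dt/dx = -2*x^(-3),

   log|J| = log(2) - 3*log(x)    and    d(log|J|)/dx = -3/x,

which are exactly the lJ term in prior_sqinvunif_lp and the lJg term in prior_sqinvunif_lpg. The same transformation / log-Jacobian / Jacobian-gradient pattern recurs in all the transformed priors in this directory.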
|
github
|
lcnhappe/happe-master
|
prior_sqinvsinvchi2.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/dist/prior_sqinvsinvchi2.m
| 4,247 |
UNKNOWN
|
199059cd0dc523cf6b8df74a02823f7a
|
function p = prior_sqinvsinvchi2(varargin)
%PRIOR_SQINVSINVCHI2 Scaled-Inv-Chi^2 prior structure
%
% Description
% P = PRIOR_SQINVSINVCHI2('PARAM1', VALUE1, 'PARAM2', VALUE2, ...)
% creates Scaled-Inv-Chi^2 prior structure for square inverse of
% the parameter in which the named parameters have the specified
% values. Any unspecified parameters are set to default values.
%
% P = PRIOR_SQINVSINVCHI2(P, 'PARAM1', VALUE1, 'PARAM2', VALUE2, ...)
% modify a prior structure with the named parameters altered
% with the specified values.
%
% Parameterisation is done as in Bayesian Data Analysis,
% second edition, Gelman et al. 2004.
%
% Parameters for Scaled-Inv-Chi^2 [default]
% s2 - scale squared (variance) [1]
% nu - degrees of freedom [4]
% s2_prior - prior for s2 [prior_fixed]
% nu_prior - prior for nu [prior_fixed]
%
% See also
% PRIOR_*
% Copyright (c) 2000-2001,2010,2012 Aki Vehtari
% Copyright (c) 2010 Jaakko Riihimäki
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'PRIOR_SQINVSINVCHI2';
ip.addOptional('p', [], @isstruct);
ip.addParamValue('s2',1, @(x) isscalar(x) && x>0);
ip.addParamValue('s2_prior',[], @(x) isstruct(x) || isempty(x));
ip.addParamValue('nu',4, @(x) isscalar(x) && x>0);
ip.addParamValue('nu_prior',[], @(x) isstruct(x) || isempty(x));
ip.parse(varargin{:});
p=ip.Results.p;
if isempty(p)
init=true;
p.type = 'SqInv-S-Inv-Chi2';
else
if ~isfield(p,'type') || ~isequal(p.type,'SqInv-S-Inv-Chi2')
error('First argument does not seem to be a valid prior structure')
end
init=false;
end
% Initialize parameters
if init || ~ismember('s2',ip.UsingDefaults)
p.s2 = ip.Results.s2;
end
if init || ~ismember('nu',ip.UsingDefaults)
p.nu = ip.Results.nu;
end
% Initialize prior structure
if init
p.p=[];
end
if init || ~ismember('s2_prior',ip.UsingDefaults)
p.p.s2=ip.Results.s2_prior;
end
if init || ~ismember('nu_prior',ip.UsingDefaults)
p.p.nu=ip.Results.nu_prior;
end
if init
% set functions
p.fh.pak = @prior_sqinvsinvchi2_pak;
p.fh.unpak = @prior_sqinvsinvchi2_unpak;
p.fh.lp = @prior_sqinvsinvchi2_lp;
p.fh.lpg = @prior_sqinvsinvchi2_lpg;
p.fh.recappend = @prior_sqinvsinvchi2_recappend;
end
end
function [w, s] = prior_sqinvsinvchi2_pak(p)
w=[];
s={};
if ~isempty(p.p.s2)
w = log(p.s2);
s=[s; 'log(SqInv-Sinvchi2.s2)'];
end
if ~isempty(p.p.nu)
w = [w log(p.nu)];
s=[s; 'log(SqInv-Sinvchi2.nu)'];
end
end
function [p, w] = prior_sqinvsinvchi2_unpak(p, w)
if ~isempty(p.p.s2)
i1=1;
p.s2 = exp(w(i1));
w = w(i1+1:end);
end
if ~isempty(p.p.nu)
i1=1;
p.nu = exp(w(i1));
w = w(i1+1:end);
end
end
function lp = prior_sqinvsinvchi2_lp(x, p)
lJ = -log(x)*3 + log(2); % log(|J|) of transformation t=x^-2, |J| = |dt/dx| = 2/x^3
xt = x.^-2; % transformation
lp = -sum((p.nu./2+1) .* log(xt) + (p.s2.*p.nu./2./xt) + (p.nu/2) .* log(2./(p.s2.*p.nu)) + gammaln(p.nu/2)) + sum(lJ);
if ~isempty(p.p.s2)
lp = lp + p.p.s2.fh.lp(p.s2, p.p.s2) + log(p.s2);
end
if ~isempty(p.p.nu)
lp = lp + p.p.nu.fh.lp(p.nu, p.p.nu) + log(p.nu);
end
end
function lpg = prior_sqinvsinvchi2_lpg(x, p)
lJg = -3./x; % gradient of log(|J|) of transformation
xt = x.^-2; % transformation
xtg = -2./x.^3; % derivative of transformation
lpg = xtg.*(-(p.nu/2+1)./xt +p.nu.*p.s2./(2*xt.^2)) + lJg;
if ~isempty(p.p.s2)
lpgs2 = (-sum(p.nu/2.*(1./xt-1./p.s2)) + p.p.s2.fh.lpg(p.s2, p.p.s2)).*p.s2 + 1;
lpg = [lpg lpgs2];
end
if ~isempty(p.p.nu)
lpgnu = (-sum(0.5*(log(xt) + p.s2./xt + log(2./p.s2./p.nu) - 1 + digamma1(p.nu/2))) + p.p.nu.fh.lpg(p.nu, p.p.nu)).*p.nu + 1;
lpg = [lpg lpgnu];
end
end
function rec = prior_sqinvsinvchi2_recappend(rec, ri, p)
% The parameters are not sampled in any case.
rec = rec;
if ~isempty(p.p.s2)
rec.s2(ri,:) = p.s2;
end
if ~isempty(p.p.nu)
rec.nu(ri,:) = p.nu;
end
end
|
github
|
lcnhappe/happe-master
|
prior_loggaussian.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/dist/prior_loggaussian.m
| 3,863 |
UNKNOWN
|
2d891ad38e859e59dfe99eab9a5a56a7
|
function p = prior_loggaussian(varargin)
%PRIOR_LOGGAUSSIAN Log-Gaussian prior structure
%
% Description
% P = PRIOR_LOGGAUSSIAN('PARAM1', VALUE1, 'PARAM2', VALUE2, ...)
% creates Log-Gaussian prior structure in which the named
% parameters have the specified values. Any unspecified
% parameters are set to default values.
%
% P = PRIOR_LOGGAUSSIAN(P, 'PARAM1', VALUE1, 'PARAM2', VALUE2, ...)
% modify a prior structure with the named parameters altered
% with the specified values.
%
% Parameters for Log-Gaussian prior [default]
% mu - location [0]
% s2 - scale squared (variance) [1]
% mu_prior - prior for mu [prior_fixed]
% s2_prior - prior for s2 [prior_fixed]
%
% See also
% PRIOR_*
% Copyright (c) 2000-2001,2010 Aki Vehtari
% Copyright (c) 2010 Jaakko Riihimäki
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'PRIOR_LOGGAUSSIAN';
ip.addOptional('p', [], @isstruct);
ip.addParamValue('mu',0, @(x) isscalar(x));
ip.addParamValue('mu_prior',[], @(x) isstruct(x) || isempty(x));
ip.addParamValue('s2',1, @(x) isscalar(x) && x>0);
ip.addParamValue('s2_prior',[], @(x) isstruct(x) || isempty(x));
ip.parse(varargin{:});
p=ip.Results.p;
if isempty(p)
init=true;
p.type = 'Log-Gaussian';
else
if ~isfield(p,'type') || ~isequal(p.type,'Log-Gaussian')
error('First argument does not seem to be a valid prior structure')
end
init=false;
end
% Initialize parameters
if init || ~ismember('mu',ip.UsingDefaults)
p.mu = ip.Results.mu;
end
if init || ~ismember('s2',ip.UsingDefaults)
p.s2 = ip.Results.s2;
end
% Initialize prior structure
if init
p.p=[];
end
if init || ~ismember('mu_prior',ip.UsingDefaults)
p.p.mu=ip.Results.mu_prior;
end
if init || ~ismember('s2_prior',ip.UsingDefaults)
p.p.s2=ip.Results.s2_prior;
end
if init
% set functions
p.fh.pak = @prior_loggaussian_pak;
p.fh.unpak = @prior_loggaussian_unpak;
p.fh.lp = @prior_loggaussian_lp;
p.fh.lpg = @prior_loggaussian_lpg;
p.fh.recappend = @prior_loggaussian_recappend;
end
end
function [w, s] = prior_loggaussian_pak(p)
w=[];
s={};
if ~isempty(p.p.mu)
w = p.mu;
s=[s; 'Log-Gaussian.mu'];
end
if ~isempty(p.p.s2)
w = [w log(p.s2)];
s=[s; 'log(Log-Gaussian.s2)'];
end
end
function [p, w] = prior_loggaussian_unpak(p, w)
if ~isempty(p.p.mu)
i1=1;
p.mu = w(i1);
w = w(i1+1:end);
end
if ~isempty(p.p.s2)
i1=1;
p.s2 = exp(w(i1));
w = w(i1+1:end);
end
end
function lp = prior_loggaussian_lp(x, p)
lJ = -log(x); % =log(1/x)=log(|J|) of transformation
xt = log(x); % transformed x
lp = 0.5*sum(-log(2*pi) -log(p.s2) -1./p.s2 .* sum((xt-p.mu).^2,1)) + sum(lJ);
if ~isempty(p.p.mu)
lp = lp + p.p.mu.fh.lp(p.mu, p.p.mu);
end
if ~isempty(p.p.s2)
lp = lp + p.p.s2.fh.lp(p.s2, p.p.s2) + log(p.s2);
end
end
function lpg = prior_loggaussian_lpg(x, p)
lJg = -1./x; % gradient of log(|J|) of transformation
xt = log(x); % transformed x
xtg = 1./x; % derivative of transformation
lpg = xtg.*(1./p.s2).*(p.mu-xt) + lJg;
if ~isempty(p.p.mu)
lpgmu = sum((1./p.s2).*(xt-p.mu)) + p.p.mu.fh.lpg(p.mu, p.p.mu);
lpg = [lpg lpgmu];
end
if ~isempty(p.p.s2)
lpgs2 = (sum(-0.5*(1./p.s2-1./p.s2.^2.*(xt-p.mu).^2 )) + p.p.s2.fh.lpg(p.s2, p.p.s2)).*p.s2 + 1;
lpg = [lpg lpgs2];
end
end
function rec = prior_loggaussian_recappend(rec, ri, p)
% The parameters are not sampled in any case.
rec = rec;
if ~isempty(p.p.mu)
rec.mu(ri,:) = p.mu;
end
if ~isempty(p.p.s2)
rec.s2(ri,:) = p.s2;
end
end
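If the Statistics Toolbox is available, the log density above can be cross-checked against LOGNPDF (an illustrative sketch, not part of the original file; note that lognpdf(x,mu,sigma) takes the standard deviation of log(x), hence sqrt(s2)):
plg = prior_loggaussian('mu',0.5,'s2',2);     % hypothetical example values
x = 1.7;
abs(plg.fh.lp(x,plg) - log(lognpdf(x,plg.mu,sqrt(plg.s2))))   % approx. 0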
|
github
|
lcnhappe/happe-master
|
prior_invgamma.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/dist/prior_invgamma.m
| 3,626 |
UNKNOWN
|
462df219d9ed058f9b45af8042c3da3d
|
function p = prior_invgamma(varargin)
%PRIOR_INVGAMMA Inverse-gamma prior structure
%
% Description
% P = PRIOR_INVGAMMA('PARAM1', VALUE1, 'PARAM2', VALUE2, ...)
% creates Inverse-Gamma prior structure in which the named parameters
% have the specified values. Any unspecified parameters are set
% to default values.
%
% P = PRIOR_INVGAMMA(P, 'PARAM1', VALUE1, 'PARAM2', VALUE2, ...)
% modify a prior structure with the named parameters altered
% with the specified values.
%
% Parameterisation is done as in Bayesian Data Analysis,
% second edition, Gelman et al. 2004.
%
% Parameters for Inverse-Gamma prior [default]
% sh - shape [4]
% s - scale [1]
% sh_prior - prior for sh [prior_fixed]
% s_prior - prior for s [prior_fixed]
%
% See also
% PRIOR_*
% Copyright (c) 2000-2001,2010 Aki Vehtari
% Copyright (c) 2010 Jaakko Riihimäki
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'PRIOR_INVGAMMA';
ip.addOptional('p', [], @isstruct);
ip.addParamValue('sh',4, @(x) isscalar(x) && x>0);
ip.addParamValue('sh_prior',[], @(x) isstruct(x) || isempty(x));
ip.addParamValue('s',1, @(x) isscalar(x) && x>0);
ip.addParamValue('s_prior',[], @(x) isstruct(x) || isempty(x));
ip.parse(varargin{:});
p=ip.Results.p;
if isempty(p)
init=true;
p.type = 'Inv-Gamma';
else
if ~isfield(p,'type') || ~isequal(p.type,'Inv-Gamma')
error('First argument does not seem to be a valid prior structure')
end
init=false;
end
% Initialize parameters
if init || ~ismember('sh',ip.UsingDefaults)
p.sh = ip.Results.sh;
end
if init || ~ismember('s',ip.UsingDefaults)
p.s = ip.Results.s;
end
% Initialize prior structure
if init
p.p=[];
end
if init || ~ismember('sh_prior',ip.UsingDefaults)
p.p.sh=ip.Results.sh_prior;
end
if init || ~ismember('s_prior',ip.UsingDefaults)
p.p.s=ip.Results.s_prior;
end
if init
% set functions
p.fh.pak = @prior_invgamma_pak;
p.fh.unpak = @prior_invgamma_unpak;
p.fh.lp = @prior_invgamma_lp;
p.fh.lpg = @prior_invgamma_lpg;
p.fh.recappend = @prior_invgamma_recappend;
end
end
function [w, s] = prior_invgamma_pak(p)
w=[];
s={};
if ~isempty(p.p.sh)
w = log(p.sh);
s=[s; 'log(Invgamma.sh)'];
end
if ~isempty(p.p.s)
w = [w log(p.s)];
s=[s; 'log(Invgamma.s)'];
end
end
function [p, w] = prior_invgamma_unpak(p, w)
if ~isempty(p.p.sh)
i1=1;
p.sh = exp(w(i1));
w = w(i1+1:end);
end
if ~isempty(p.p.s)
i1=1;
p.s = exp(w(i1));
w = w(i1+1:end);
end
end
function lp = prior_invgamma_lp(x, p)
lp = sum(-p.s./x - (p.sh+1).*log(x) +p.sh.*log(p.s) - gammaln(p.sh));
if ~isempty(p.p.sh)
lp = lp + p.p.sh.fh.lp(p.sh, p.p.sh) + log(p.sh);
end
if ~isempty(p.p.s)
lp = lp + p.p.s.fh.lp(p.s, p.p.s) + log(p.s);
end
end
function lpg = prior_invgamma_lpg(x, p)
lpg = -(p.sh+1)./x + p.s./x.^2;
if ~isempty(p.p.sh)
lpgsh = (sum(-digamma1(p.sh) + log(p.s) - log(x)) + p.p.sh.fh.lpg(p.sh, p.p.sh)).*p.sh + 1;
lpg = [lpg lpgsh];
end
if ~isempty(p.p.s)
lpgs = (sum(p.sh./p.s - 1./x) + p.p.s.fh.lpg(p.s, p.p.s)).*p.s + 1;
lpg = [lpg lpgs];
end
end
function rec = prior_invgamma_recappend(rec, ri, p)
% The parameters are not sampled in any case.
rec = rec;
if ~isempty(p.p.sh)
rec.sh(ri,:) = p.sh;
end
if ~isempty(p.p.s)
rec.s(ri,:) = p.s;
end
end
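A property worth noting (an illustrative check, not part of the original file): if x has this inverse-gamma density with shape sh and scale s, then 1/x is Gamma with shape sh and inverse scale s, so the two log densities differ exactly by the Jacobian term -2*log(x). The sketch assumes prior_gamma.m (which appears later in this directory) is on the path:
pig = prior_invgamma('sh',3,'s',2);    % hypothetical example values
pg  = prior_gamma('sh',3,'is',2);
x = 0.8;
abs(pig.fh.lp(x,pig) - (pg.fh.lp(1/x,pg) - 2*log(x)))   % approx. 0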
|
github
|
lcnhappe/happe-master
|
prior_t.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/dist/prior_t.m
| 5,110 |
UNKNOWN
|
ebeba7f5ea490b60a90e128905d025e4
|
function p = prior_t(varargin)
%PRIOR_T Student-t prior structure
%
% Description
% P = PRIOR_T('PARAM1', VALUE1, 'PARAM2', VALUE2, ...)
% creates Student's t-distribution prior structure in which the
% named parameters have the specified values. Any unspecified
% parameters are set to default values.
%
% P = PRIOR_T(P, 'PARAM1', VALUE1, 'PARAM2', VALUE2, ...)
% modify a prior structure with the named parameters altered
% with the specified values.
%
% Parameterisation is done as in Bayesian Data Analysis,
% second edition, Gelman et al. 2004.
%
% Parameters for Student-t prior [default]
% mu - location [0]
% s2 - scale [1]
% nu - degrees of freedom [4]
% mu_prior - prior for mu [prior_fixed]
% s2_prior - prior for s2 [prior_fixed]
% nu_prior - prior for nu [prior_fixed]
%
% See also
% PRIOR_*
% Copyright (c) 2000-2001,2010 Aki Vehtari
% Copyright (c) 2009 Jarno Vanhatalo
% Copyright (c) 2010 Jaakko Riihimäki
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'PRIOR_T';
ip.addOptional('p', [], @isstruct);
ip.addParamValue('mu',0, @(x) isscalar(x));
ip.addParamValue('mu_prior',[], @(x) isstruct(x) || isempty(x));
ip.addParamValue('s2',1, @(x) isscalar(x) && x>0);
ip.addParamValue('s2_prior',[], @(x) isstruct(x) || isempty(x));
ip.addParamValue('nu',4, @(x) isscalar(x) && x>0);
ip.addParamValue('nu_prior',[], @(x) isstruct(x) || isempty(x));
ip.parse(varargin{:});
p=ip.Results.p;
if isempty(p)
init=true;
p.type = 't';
else
if ~isfield(p,'type') || ~isequal(p.type,'t')
error('First argument does not seem to be a valid prior structure')
end
init=false;
end
% Initialize parameters
if init || ~ismember('mu',ip.UsingDefaults)
p.mu = ip.Results.mu;
end
if init || ~ismember('s2',ip.UsingDefaults)
p.s2 = ip.Results.s2;
end
if init || ~ismember('nu',ip.UsingDefaults)
p.nu = ip.Results.nu;
end
% Initialize prior structure
if init
p.p=[];
end
if init || ~ismember('mu_prior',ip.UsingDefaults)
p.p.mu=ip.Results.mu_prior;
end
if init || ~ismember('s2_prior',ip.UsingDefaults)
p.p.s2=ip.Results.s2_prior;
end
if init || ~ismember('nu_prior',ip.UsingDefaults)
p.p.nu=ip.Results.nu_prior;
end
if init
% set functions
p.fh.pak = @prior_t_pak;
p.fh.unpak = @prior_t_unpak;
p.fh.lp = @prior_t_lp;
p.fh.lpg = @prior_t_lpg;
p.fh.recappend = @prior_t_recappend;
end
end
function [w, s] = prior_t_pak(p)
% This is a mandatory subfunction used for example
% in energy and gradient computations.
w=[];
s={};
if ~isempty(p.p.mu)
w = p.mu;
s=[s; 't.mu'];
end
if ~isempty(p.p.s2)
w = [w log(p.s2)];
s=[s; 'log(t.s2)'];
end
if ~isempty(p.p.nu)
w = [w log(p.nu)];
s=[s; 'log(t.nu)'];
end
end
function [p, w] = prior_t_unpak(p, w)
% This is a mandatory subfunction used for example
% in energy and gradient computations.
if ~isempty(p.p.mu)
i1=1;
p.mu = w(i1);
w = w(i1+1:end);
end
if ~isempty(p.p.s2)
i1=1;
p.s2 = exp(w(i1));
w = w(i1+1:end);
end
if ~isempty(p.p.nu)
i1=1;
p.nu = exp(w(i1));
w = w(i1+1:end);
end
end
function lp = prior_t_lp(x, p)
% This is a mandatory subfunction used for example
% in energy computations.
lp=sum(gammaln((p.nu+1)./2) -gammaln(p.nu./2) -0.5*log(p.nu.*pi.*p.s2) -(p.nu+1)./2.*log(1+(x-p.mu).^2./p.nu./p.s2));
if ~isempty(p.p.mu)
lp = lp + p.p.mu.fh.lp(p.mu, p.p.mu);
end
if ~isempty(p.p.s2)
lp = lp + p.p.s2.fh.lp(p.s2, p.p.s2) +log(p.s2);
end
if ~isempty(p.p.nu)
lp = lp + p.p.nu.fh.lp(p.nu, p.p.nu) +log(p.nu);
end
end
function lpg = prior_t_lpg(x, p)
% This is a mandatory subfunction used for example
% in gradient computations.
%lpg=(p.nu+1)./p.nu .* (x-p.mu)./p.s2 ./ (1 + (x-p.mu).^2./p.nu./p.s2);
lpg=-(p.nu+1).* (x-p.mu) ./ (p.nu.*p.s2 + (x-p.mu).^2);
if ~isempty(p.p.mu)
lpgmu = sum( (p.nu+1).* (x-p.mu) ./ (p.nu.*p.s2 + (x-p.mu).^2) ) + p.p.mu.fh.lpg(p.mu, p.p.mu);
lpg = [lpg lpgmu];
end
if ~isempty(p.p.s2)
lpgs2 = (sum( -1./(2.*p.s2) +((p.nu + 1).*(p.mu - x).^2)./(2.*p.s2.*((p.mu-x).^2 + p.nu.*p.s2))) + p.p.s2.fh.lpg(p.s2, p.p.s2)).*p.s2 + 1;
lpg = [lpg lpgs2];
end
if ~isempty(p.p.nu)
lpgnu = (0.5*sum( digamma1((p.nu+1)./2)-digamma1(p.nu./2)-1./p.nu-log(1+(x-p.mu).^2./p.nu./p.s2)+(p.nu+1)./(1+(x-p.mu).^2./p.nu./p.s2).*(x-p.mu).^2./p.s2./p.nu.^2) + p.p.nu.fh.lpg(p.nu, p.p.nu)).*p.nu + 1;
lpg = [lpg lpgnu];
end
end
function rec = prior_t_recappend(rec, ri, p)
% This subfunction is needed when using MCMC sampling (gp_mc).
% The parameters are not sampled in any case.
rec = rec;
if ~isempty(p.p.mu)
rec.mu(ri,:) = p.mu;
end
if ~isempty(p.p.s2)
rec.s2(ri,:) = p.s2;
end
if ~isempty(p.p.nu)
rec.nu(ri,:) = p.nu;
end
end
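An illustrative sketch of hyperprior chaining and the pak/unpak round trip (not part of the original file; prior_sinvchi2.m appears later in this directory):
pt = prior_t('s2',1,'nu',4, 's2_prior',prior_sinvchi2('s2',1,'nu',4));
[w,s] = pt.fh.pak(pt);      % w = log(s2) = 0, s = {'log(t.s2)'}
pt2 = pt.fh.unpak(pt, w);   % round trip: pt2.s2 equals pt.s2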
|
github
|
lcnhappe/happe-master
|
prior_logt.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/dist/prior_logt.m
| 4,935 |
UNKNOWN
|
3155c4ca02354ee7d652c9e8a1006a43
|
function p = prior_logt(varargin)
%PRIOR_LOGT Student-t prior structure for the log of the parameter
%
% Description
% P = PRIOR_LOGT('PARAM1', VALUE1, 'PARAM2', VALUE2, ...)
% creates for the log of the parameter Student's t-distribution
% prior structure in which the named parameters have the
% specified values. Any unspecified parameters are set to
% default values.
%
% P = PRIOR_LOGT(P, 'PARAM1', VALUE1, 'PARAM2', VALUE2, ...)
% modify a prior structure with the named parameters altered
% with the specified values.
%
% Parameters for Student-t prior [default]
% mu - location [0]
% s2 - scale [1]
% nu - degrees of freedom [4]
% mu_prior - prior for mu [prior_fixed]
% s2_prior - prior for s2 [prior_fixed]
% nu_prior - prior for nu [prior_fixed]
%
% See also
% PRIOR_t, PRIOR_*
% Copyright (c) 2000-2001,2010 Aki Vehtari
% Copyright (c) 2009 Jarno Vanhatalo
% Copyright (c) 2010 Jaakko Riihimäki
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'PRIOR_LOGT';
ip.addOptional('p', [], @isstruct);
ip.addParamValue('mu',0, @(x) isscalar(x));
ip.addParamValue('mu_prior',[], @(x) isstruct(x) || isempty(x));
ip.addParamValue('s2',1, @(x) isscalar(x) && x>0);
ip.addParamValue('s2_prior',[], @(x) isstruct(x) || isempty(x));
ip.addParamValue('nu',4, @(x) isscalar(x) && x>0);
ip.addParamValue('nu_prior',[], @(x) isstruct(x) || isempty(x));
ip.parse(varargin{:});
p=ip.Results.p;
if isempty(p)
init=true;
p.type = 'Log-t';
else
if ~isfield(p,'type') || ~isequal(p.type,'Log-t')
error('First argument does not seem to be a valid prior structure')
end
init=false;
end
% Initialize parameters
if init || ~ismember('mu',ip.UsingDefaults)
p.mu = ip.Results.mu;
end
if init || ~ismember('s2',ip.UsingDefaults)
p.s2 = ip.Results.s2;
end
if init || ~ismember('nu',ip.UsingDefaults)
p.nu = ip.Results.nu;
end
% Initialize prior structure
if init
p.p=[];
end
if init || ~ismember('mu_prior',ip.UsingDefaults)
p.p.mu=ip.Results.mu_prior;
end
if init || ~ismember('s2_prior',ip.UsingDefaults)
p.p.s2=ip.Results.s2_prior;
end
if init || ~ismember('nu_prior',ip.UsingDefaults)
p.p.nu=ip.Results.nu_prior;
end
if init
% set functions
p.fh.pak = @prior_logt_pak;
p.fh.unpak = @prior_logt_unpak;
p.fh.lp = @prior_logt_lp;
p.fh.lpg = @prior_logt_lpg;
p.fh.recappend = @prior_logt_recappend;
end
end
function [w, s] = prior_logt_pak(p)
w=[];
s={};
if ~isempty(p.p.mu)
w = p.mu;
s=[s; 'Log-Student-t.mu'];
end
if ~isempty(p.p.s2)
w = [w log(p.s2)];
s=[s; 'log(Log-Student-t.s2)'];
end
if ~isempty(p.p.nu)
w = [w log(p.nu)];
s=[s; 'log(Log-Student-t.nu)'];
end
end
function [p, w] = prior_logt_unpak(p, w)
if ~isempty(p.p.mu)
i1=1;
p.mu = w(i1);
w = w(i1+1:end);
end
if ~isempty(p.p.s2)
i1=1;
p.s2 = exp(w(i1));
w = w(i1+1:end);
end
if ~isempty(p.p.nu)
i1=1;
p.nu = exp(w(i1));
w = w(i1+1:end);
end
end
function lp = prior_logt_lp(x, p)
lJ = -log(x); % =log(1/x)=log(|J|) of transformation
xt = log(x); % transformed x
lp = sum(gammaln((p.nu+1)./2) - gammaln(p.nu./2) - 0.5*log(p.nu.*pi.*p.s2) - (p.nu+1)./2.*log(1+(xt-p.mu).^2./p.nu./p.s2) + lJ);
if ~isempty(p.p.mu)
lp = lp + p.p.mu.fh.lp(p.mu, p.p.mu);
end
if ~isempty(p.p.s2)
lp = lp + p.p.s2.fh.lp(p.s2, p.p.s2) + log(p.s2);
end
if ~isempty(p.p.nu)
lp = lp + p.p.nu.fh.lp(p.nu, p.p.nu) + log(p.nu);
end
end
function lpg = prior_logt_lpg(x, p)
lJg = -1./x; % gradient of log(|J|) of transformation
xt = log(x); % transformed x
xtg = 1./x; % derivative of transformation
lpg = xtg.*(-(p.nu+1).*(xt-p.mu)./(p.nu.*p.s2 + (xt-p.mu).^2)) + lJg;
if ~isempty(p.p.mu)
lpgmu = sum((p.nu+1).*(xt-p.mu)./(p.nu.*p.s2 + (xt-p.mu).^2)) + p.p.mu.fh.lpg(p.mu, p.p.mu);
lpg = [lpg lpgmu];
end
if ~isempty(p.p.s2)
lpgs2 = (sum(-1./(2.*p.s2)+((p.nu + 1).*(p.mu - xt).^2)./(2.*p.s2.*((p.mu-xt).^2 + p.nu.*p.s2))) + p.p.s2.fh.lpg(p.s2, p.p.s2)).*p.s2 + 1;
lpg = [lpg lpgs2];
end
if ~isempty(p.p.nu)
lpgnu = (0.5*sum(digamma1((p.nu+1)./2)-digamma1(p.nu./2)-1./p.nu-log(1+(xt-p.mu).^2./p.nu./p.s2)+(p.nu+1)./(1+(xt-p.mu).^2./p.nu./p.s2).*(xt-p.mu).^2./p.s2./p.nu.^2) + p.p.nu.fh.lpg(p.nu, p.p.nu)).*p.nu + 1;
lpg = [lpg lpgnu];
end
end
function rec = prior_logt_recappend(rec, ri, p)
% The parameters are not sampled in any case.
rec = rec;
if ~isempty(p.p.mu)
rec.mu(ri,:) = p.mu;
end
if ~isempty(p.p.s2)
rec.s2(ri,:) = p.s2;
end
if ~isempty(p.p.nu)
rec.nu(ri,:) = p.nu;
end
end
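By construction, the Log-t log density of x is the Student-t log density of log(x) plus the Jacobian term -log(x); an illustrative consistency check (not part of the original file, using prior_t.m from earlier in this directory):
pt  = prior_t('mu',0,'s2',1,'nu',4);
plt = prior_logt('mu',0,'s2',1,'nu',4);
x = 2.5;
abs(plt.fh.lp(x,plt) - (pt.fh.lp(log(x),pt) - log(x)))   % approx. 0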
|
github
|
lcnhappe/happe-master
|
prior_invt.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/dist/prior_invt.m
| 5,044 |
UNKNOWN
|
4d2a44a16c36b25eda045042b14610c0
|
function p = prior_invt(varargin)
%PRIOR_INVT Student-t prior structure for the inverse of the parameter
%
% Description
% P = PRIOR_INVT('PARAM1', VALUE1, 'PARAM2', VALUE2, ...)
% creates for the inverse of the parameter Student's
% t-distribution prior structure in which the named parameters
% have the specified values. Any unspecified parameters are set
% to default values.
%
% P = PRIOR_INVT(P, 'PARAM1', VALUE1, 'PARAM2', VALUE2, ...)
% modify a prior structure with the named parameters altered
% with the specified values.
%
% Parameterisation is done as in Bayesian Data Analysis,
% second edition, Gelman et al. 2004.
%
% Parameters for Student-t prior [default]
% mu - location [0]
% s2 - scale [1]
% nu - degrees of freedom [4]
% mu_prior - prior for mu [prior_fixed]
% s2_prior - prior for s2 [prior_fixed]
% nu_prior - prior for nu [prior_fixed]
%
% See also
% PRIOR_T, PRIOR_*
% Copyright (c) 2000-2001,2010,2012 Aki Vehtari
% Copyright (c) 2009 Jarno Vanhatalo
% Copyright (c) 2010 Jaakko Riihimäki
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'PRIOR_INVT';
ip.addOptional('p', [], @isstruct);
ip.addParamValue('mu',0, @(x) isscalar(x));
ip.addParamValue('mu_prior',[], @(x) isstruct(x) || isempty(x));
ip.addParamValue('s2',1, @(x) isscalar(x) && x>0);
ip.addParamValue('s2_prior',[], @(x) isstruct(x) || isempty(x));
ip.addParamValue('nu',4, @(x) isscalar(x) && x>0);
ip.addParamValue('nu_prior',[], @(x) isstruct(x) || isempty(x));
ip.parse(varargin{:});
p=ip.Results.p;
if isempty(p)
init=true;
p.type = 'Inv-t';
else
if ~isfield(p,'type') || ~isequal(p.type,'Inv-t')
error('First argument does not seem to be a valid prior structure')
end
init=false;
end
% Initialize parameters
if init || ~ismember('mu',ip.UsingDefaults)
p.mu = ip.Results.mu;
end
if init || ~ismember('s2',ip.UsingDefaults)
p.s2 = ip.Results.s2;
end
if init || ~ismember('nu',ip.UsingDefaults)
p.nu = ip.Results.nu;
end
% Initialize prior structure
if init
p.p=[];
end
if init || ~ismember('mu_prior',ip.UsingDefaults)
p.p.mu=ip.Results.mu_prior;
end
if init || ~ismember('s2_prior',ip.UsingDefaults)
p.p.s2=ip.Results.s2_prior;
end
if init || ~ismember('nu_prior',ip.UsingDefaults)
p.p.nu=ip.Results.nu_prior;
end
if init
% set functions
p.fh.pak = @prior_invt_pak;
p.fh.unpak = @prior_invt_unpak;
p.fh.lp = @prior_invt_lp;
p.fh.lpg = @prior_invt_lpg;
p.fh.recappend = @prior_invt_recappend;
end
end
function [w, s] = prior_invt_pak(p)
w=[];
s={};
if ~isempty(p.p.mu)
w = p.mu;
s=[s; 'Inv-t.mu'];
end
if ~isempty(p.p.s2)
w = [w log(p.s2)];
s=[s; 'log(Inv-t.s2)'];
end
if ~isempty(p.p.nu)
w = [w log(p.nu)];
s=[s; 'log(Inv-t.nu)'];
end
end
function [p, w] = prior_invt_unpak(p, w)
if ~isempty(p.p.mu)
i1=1;
p.mu = w(i1);
w = w(i1+1:end);
end
if ~isempty(p.p.s2)
i1=1;
p.s2 = exp(w(i1));
w = w(i1+1:end);
end
if ~isempty(p.p.nu)
i1=1;
p.nu = exp(w(i1));
w = w(i1+1:end);
end
end
function lp = prior_invt_lp(x, p)
lJ = -log(x)*2; % log(1/x^2) log(|J|) of transformation
xt = 1./x; % transformation
lp = sum(gammaln((p.nu+1)./2) -gammaln(p.nu./2) -0.5*log(p.nu.*pi.*p.s2) -(p.nu+1)./2.*log(1+(xt-p.mu).^2./p.nu./p.s2) + lJ);
if ~isempty(p.p.mu)
lp = lp + p.p.mu.fh.lp(p.mu, p.p.mu);
end
if ~isempty(p.p.s2)
lp = lp + p.p.s2.fh.lp(p.s2, p.p.s2) +log(p.s2);
end
if ~isempty(p.p.nu)
lp = lp + p.p.nu.fh.lp(p.nu, p.p.nu) +log(p.nu);
end
end
function lpg = prior_invt_lpg(x, p)
lJg = -2./x; % gradient of log(|J|) of transformation
xt = 1./x; % transformation
xtg = -1./x.^2; % derivative of transformation
lpg = xtg.*(-(p.nu+1).* (xt-p.mu) ./ (p.nu.*p.s2 + (xt-p.mu).^2)) + lJg;
if ~isempty(p.p.mu)
lpgmu = sum( (p.nu+1).* (xt-p.mu) ./ (p.nu.*p.s2 + (xt-p.mu).^2) ) + p.p.mu.fh.lpg(p.mu, p.p.mu);
lpg = [lpg lpgmu];
end
if ~isempty(p.p.s2)
lpgs2 = (sum( -1./(2.*p.s2) +((p.nu + 1).*(p.mu - xt).^2)./(2.*p.s2.*((p.mu-xt).^2 + p.nu.*p.s2))) + p.p.s2.fh.lpg(p.s2, p.p.s2)).*p.s2 + 1;
lpg = [lpg lpgs2];
end
if ~isempty(p.p.nu)
lpgnu = (0.5*sum( digamma1((p.nu+1)./2)-digamma1(p.nu./2)-1./p.nu-log(1+(xt-p.mu).^2./p.nu./p.s2)+(p.nu+1)./(1+(xt-p.mu).^2./p.nu./p.s2).*(xt-p.mu).^2./p.s2./p.nu.^2) + p.p.nu.fh.lpg(p.nu, p.p.nu)).*p.nu + 1;
lpg = [lpg lpgnu];
end
end
function rec = prior_invt_recappend(rec, ri, p)
% The parameters are not sampled in any case.
rec = rec;
if ~isempty(p.p.mu)
rec.mu(ri,:) = p.mu;
end
if ~isempty(p.p.s2)
rec.s2(ri,:) = p.s2;
end
if ~isempty(p.p.nu)
rec.nu(ri,:) = p.nu;
end
end
|
github
|
lcnhappe/happe-master
|
prior_unif.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/dist/prior_unif.m
| 1,362 |
utf_8
|
ee7f2a78b02bbd0395bb5893178d1109
|
function p = prior_unif(varargin)
%PRIOR_UNIF Uniform prior structure
%
% Description
% P = PRIOR_UNIF creates uniform prior structure.
%
% See also
% PRIOR_*
% Copyright (c) 2009 Jarno Vanhatalo
% Copyright (c) 2010 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'PRIOR_UNIFORM';
ip.addOptional('p', [], @isstruct);
ip.parse(varargin{:});
p=ip.Results.p;
if isempty(p)
init=true;
p.type = 'Uniform';
else
if ~isfield(p,'type') || ~isequal(p.type,'Uniform')
error('First argument does not seem to be a valid prior structure')
end
init=false;
end
if init
% set functions
p.fh.pak = @prior_unif_pak;
p.fh.unpak = @prior_unif_unpak;
p.fh.lp = @prior_unif_lp;
p.fh.lpg = @prior_unif_lpg;
p.fh.recappend = @prior_unif_recappend;
end
end
function [w, s] = prior_unif_pak(p, w)
w=[];
s={};
end
function [p, w] = prior_unif_unpak(p, w)
w = w;
p = p;
end
function lp = prior_unif_lp(x, p)
lp = 0;
end
function lpg = prior_unif_lpg(x, p)
lpg = zeros(size(x));
end
function rec = prior_unif_recappend(rec, ri, p)
% The parameters are not sampled in any case.
rec = rec;
end
|
github
|
lcnhappe/happe-master
|
prior_gamma.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/dist/prior_gamma.m
| 3,585 |
UNKNOWN
|
652a3a22f8107cf4544a0a14fe522194
|
function p = prior_gamma(varargin)
%PRIOR_GAMMA Gamma prior structure
%
% Description
% P = PRIOR_GAMMA('PARAM1', VALUE1, 'PARAM2', VALUE2, ...)
% creates Gamma prior structure in which the named parameters
% have the specified values. Any unspecified parameters are set
% to default values.
%
% P = PRIOR_GAMMA(P, 'PARAM1', VALUE1, 'PARAM2', VALUE2, ...)
% modify a prior structure with the named parameters altered
% with the specified values.
%
% Parameterisation is done as in Bayesian Data Analysis,
% second edition, Gelman et al. 2004.
%
% Parameters for Gamma prior [default]
% sh - shape [4]
% is - inverse scale [1]
% sh_prior - prior for sh [prior_fixed]
% is_prior - prior for is [prior_fixed]
%
% See also
% PRIOR_*
% Copyright (c) 2000-2001,2010 Aki Vehtari
% Copyright (c) 2010 Jaakko Riihimäki
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'PRIOR_GAMMA';
ip.addOptional('p', [], @isstruct);
ip.addParamValue('sh',4, @(x) isscalar(x) && x>0);
ip.addParamValue('sh_prior',[], @(x) isstruct(x) || isempty(x));
ip.addParamValue('is',1, @(x) isscalar(x) && x>0);
ip.addParamValue('is_prior',[], @(x) isstruct(x) || isempty(x));
ip.parse(varargin{:});
p=ip.Results.p;
if isempty(p)
init=true;
p.type = 'Gamma';
else
if ~isfield(p,'type') || ~isequal(p.type,'Gamma')
error('First argument does not seem to be a valid prior structure')
end
init=false;
end
% Initialize parameters
if init || ~ismember('sh',ip.UsingDefaults)
p.sh = ip.Results.sh;
end
if init || ~ismember('is',ip.UsingDefaults)
p.is = ip.Results.is;
end
% Initialize prior structure
if init
p.p=[];
end
if init || ~ismember('sh_prior',ip.UsingDefaults)
p.p.sh=ip.Results.sh_prior;
end
if init || ~ismember('is_prior',ip.UsingDefaults)
p.p.is=ip.Results.is_prior;
end
if init
% set functions
p.fh.pak = @prior_gamma_pak;
p.fh.unpak = @prior_gamma_unpak;
p.fh.lp = @prior_gamma_lp;
p.fh.lpg = @prior_gamma_lpg;
p.fh.recappend = @prior_gamma_recappend;
end
end
function [w, s] = prior_gamma_pak(p)
w=[];
s={};
if ~isempty(p.p.sh)
w = log(p.sh);
s=[s; 'log(Gamma.sh)'];
end
if ~isempty(p.p.is)
w = [w log(p.is)];
s=[s; 'log(Gamma.is)'];
end
end
function [p, w] = prior_gamma_unpak(p, w)
if ~isempty(p.p.sh)
i1=1;
p.sh = exp(w(i1));
w = w(i1+1:end);
end
if ~isempty(p.p.is)
i1=1;
p.is = exp(w(i1));
w = w(i1+1:end);
end
end
function lp = prior_gamma_lp(x, p)
lp = sum(-p.is.*x + (p.sh-1).*log(x) +p.sh.*log(p.is) -gammaln(p.sh));
if ~isempty(p.p.sh)
lp = lp + p.p.sh.fh.lp(p.sh, p.p.sh) + log(p.sh);
end
if ~isempty(p.p.is)
lp = lp + p.p.is.fh.lp(p.is, p.p.is) + log(p.is);
end
end
function lpg = prior_gamma_lpg(x, p)
lpg = (p.sh-1)./x - p.is;
if ~isempty(p.p.sh)
lpgsh = (sum(-digamma1(p.sh) + log(p.is) + log(x)) + p.p.sh.fh.lpg(p.sh, p.p.sh)).*p.sh + 1;
lpg = [lpg lpgsh];
end
if ~isempty(p.p.is)
lpgis = (sum(p.sh./p.is - x) + p.p.is.fh.lpg(p.is, p.p.is)).*p.is + 1;
lpg = [lpg lpgis];
end
end
function rec = prior_gamma_recappend(rec, ri, p)
% The parameters are not sampled in any case.
rec = rec;
if ~isempty(p.p.sh)
rec.sh(ri,:) = p.sh;
end
if ~isempty(p.p.is)
rec.is(ri,:) = p.is;
end
end
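If the Statistics Toolbox is available, the log density above can be cross-checked against GAMPDF (an illustrative sketch, not part of the original file; gampdf(x,a,b) is parameterised by the scale b, i.e. 1/is):
pg = prior_gamma('sh',4,'is',2);    % hypothetical example values
x = 1.5;
abs(pg.fh.lp(x,pg) - log(gampdf(x,pg.sh,1/pg.is)))   % approx. 0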
|
github
|
lcnhappe/happe-master
|
prior_loglogunif.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/dist/prior_loglogunif.m
| 1,586 |
utf_8
|
9e47770e4cbf89b1f959bac5ca3e8630
|
function p = prior_loglogunif(varargin)
%PRIOR_LOGLOGUNIF Uniform prior structure for the log-log of the parameter
%
% Description
% P = PRIOR_LOGLOGUNIF creates uniform prior structure for the
% log-log of the parameter.
%
% See also
% PRIOR_*
% Copyright (c) 2009 Jarno Vanhatalo
% Copyright (c) 2010 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'PRIOR_LOGLOGUNIFORM';
ip.addOptional('p', [], @isstruct);
ip.parse(varargin{:});
p=ip.Results.p;
if isempty(p)
init=true;
p.type = 'Loglog-Uniform';
else
if ~isfield(p,'type') || ~isequal(p.type,'Loglog-Uniform')
error('First argument does not seem to be a valid prior structure')
end
init=false;
end
if init
% set functions
p.fh.pak = @prior_loglogunif_pak;
p.fh.unpak = @prior_loglogunif_unpak;
p.fh.lp = @prior_loglogunif_lp;
p.fh.lpg = @prior_loglogunif_lpg;
p.fh.recappend = @prior_loglogunif_recappend;
end
end
function [w, s] = prior_loglogunif_pak(p, w)
w=[];
s={};
end
function [p, w] = prior_loglogunif_unpak(p, w)
w = w;
p = p;
end
function lp = prior_loglogunif_lp(x, p)
lp = -sum(log(log(x)) + log(x)); % = log( 1./log(x) * 1./x)
end
function lpg = prior_loglogunif_lpg(x, p)
lpg = -1./log(x)./x - 1./x;
end
function rec = prior_loglogunif_recappend(rec, ri, p)
% The parameters are not sampled in any case.
rec = rec;
end
|
github
|
lcnhappe/happe-master
|
prior_sqrtinvt.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/dist/prior_sqrtinvt.m
| 5,086 |
UNKNOWN
|
359760ce484043dc8cbe8620784307a0
|
function p = prior_sqrtinvt(varargin)
%PRIOR_SQRTINVT Student-t prior structure for the square root of inverse of the parameter
%
% Description
% P = PRIOR_SQRTINVT('PARAM1', VALUE1, 'PARAM2', VALUE2, ...)
% creates for square root of inverse of the parameter Student's
% t-distribution prior structure in which the named parameters
% have the specified values. Any unspecified parameters are set
% to default values.
%
% P = PRIOR_SQRTINVT(P, 'PARAM1', VALUE1, 'PARAM2', VALUE2, ...)
% modify a prior structure with the named parameters altered
% with the specified values.
%
% Parameters for Student-t prior [default]
% mu - location [0]
% s2 - scale [1]
% nu - degrees of freedom [4]
% mu_prior - prior for mu [prior_fixed]
% s2_prior - prior for s2 [prior_fixed]
% nu_prior - prior for nu [prior_fixed]
%
% See also
% PRIOR_*
% Copyright (c) 2000-2001,2010 Aki Vehtari
% Copyright (c) 2009 Jarno Vanhatalo
% Copyright (c) 2010 Jaakko Riihimäki
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'PRIOR_SQRTINVT';
ip.addOptional('p', [], @isstruct);
ip.addParamValue('mu',0, @(x) isscalar(x));
ip.addParamValue('mu_prior',[], @(x) isstruct(x) || isempty(x));
ip.addParamValue('s2',1, @(x) isscalar(x) && x>0);
ip.addParamValue('s2_prior',[], @(x) isstruct(x) || isempty(x));
ip.addParamValue('nu',4, @(x) isscalar(x) && x>0);
ip.addParamValue('nu_prior',[], @(x) isstruct(x) || isempty(x));
ip.parse(varargin{:});
p=ip.Results.p;
if isempty(p)
init=true;
p.type = 'SqrtInv-t';
else
if ~isfield(p,'type') || ~isequal(p.type,'SqrtInv-t')
error('First argument does not seem to be a valid prior structure')
end
init=false;
end
% Initialize parameters
if init || ~ismember('mu',ip.UsingDefaults)
p.mu = ip.Results.mu;
end
if init || ~ismember('s2',ip.UsingDefaults)
p.s2 = ip.Results.s2;
end
if init || ~ismember('nu',ip.UsingDefaults)
p.nu = ip.Results.nu;
end
% Initialize prior structure
if init
p.p=[];
end
if init || ~ismember('mu_prior',ip.UsingDefaults)
p.p.mu=ip.Results.mu_prior;
end
if init || ~ismember('s2_prior',ip.UsingDefaults)
p.p.s2=ip.Results.s2_prior;
end
if init || ~ismember('nu_prior',ip.UsingDefaults)
p.p.nu=ip.Results.nu_prior;
end
if init
% set functions
p.fh.pak = @prior_sqrtinvt_pak;
p.fh.unpak = @prior_sqrtinvt_unpak;
p.fh.lp = @prior_sqrtinvt_lp;
p.fh.lpg = @prior_sqrtinvt_lpg;
p.fh.recappend = @prior_sqrtinvt_recappend;
end
end
function [w, s] = prior_sqrtinvt_pak(p)
w=[];
s={};
if ~isempty(p.p.mu)
w = p.mu;
s=[s; 'SqrtInv-Student-t.mu'];
end
if ~isempty(p.p.s2)
w = [w log(p.s2)];
s=[s; 'log(SqrtInv-Student-t.s2)'];
end
if ~isempty(p.p.nu)
w = [w log(p.nu)];
s=[s; 'log(SqrtInv-Student-t.nu)'];
end
end
function [p, w] = prior_sqrtinvt_unpak(p, w)
if ~isempty(p.p.mu)
i1=1;
p.mu = w(i1);
w = w(i1+1:end);
end
if ~isempty(p.p.s2)
i1=1;
p.s2 = exp(w(i1));
w = w(i1+1:end);
end
if ~isempty(p.p.nu)
i1=1;
p.nu = exp(w(i1));
w = w(i1+1:end);
end
end
function lp = prior_sqrtinvt_lp(x, p)
lJ = -log(2*x.^(3/2)); % log(1/(2*x^(3/2))) log(|J|) of transformation
xt = 1./sqrt(x); % transformation
lp = sum(gammaln((p.nu+1)./2) - gammaln(p.nu./2) - 0.5*log(p.nu.*pi.*p.s2) - (p.nu+1)./2.*log(1+(xt-p.mu).^2./p.nu./p.s2) + lJ);
if ~isempty(p.p.mu)
lp = lp + p.p.mu.fh.lp(p.mu, p.p.mu);
end
if ~isempty(p.p.s2)
lp = lp + p.p.s2.fh.lp(p.s2, p.p.s2) + log(p.s2);
end
if ~isempty(p.p.nu)
lp = lp + p.p.nu.fh.lp(p.nu, p.p.nu) + log(p.nu);
end
end
function lpg = prior_sqrtinvt_lpg(x, p)
lJg = -3./(2*x); % gradient of log(|J|) of transformation
xt = 1./sqrt(x); % transformation
xtg = -1./(2*x.^(3/2)); % derivative of transformation
lpg = xtg.*(-(p.nu+1).*(xt-p.mu)./(p.nu.*p.s2 + (xt-p.mu).^2)) + lJg;
if ~isempty(p.p.mu)
lpgmu = sum((p.nu+1).*(xt-p.mu)./(p.nu.*p.s2 + (xt-p.mu).^2)) + p.p.mu.fh.lpg(p.mu, p.p.mu);
lpg = [lpg lpgmu];
end
if ~isempty(p.p.s2)
lpgs2 = (sum(-1./(2.*p.s2)+((p.nu + 1).*(p.mu - xt).^2)./(2.*p.s2.*((p.mu-xt).^2 + p.nu.*p.s2))) + p.p.s2.fh.lpg(p.s2, p.p.s2)).*p.s2 + 1;
lpg = [lpg lpgs2];
end
if ~isempty(p.p.nu)
lpgnu = (0.5*sum(digamma1((p.nu+1)./2)-digamma1(p.nu./2)-1./p.nu-log(1+(xt-p.mu).^2./p.nu./p.s2)+(p.nu+1)./(1+(xt-p.mu).^2./p.nu./p.s2).*(xt-p.mu).^2./p.s2./p.nu.^2) + p.p.nu.fh.lpg(p.nu, p.p.nu)).*p.nu + 1;
lpg = [lpg lpgnu];
end
end
function rec = prior_sqrtinvt_recappend(rec, ri, p)
% The parameters are not sampled in any case.
rec = rec;
if ~isempty(p.p.mu)
rec.mu(ri,:) = p.mu;
end
if ~isempty(p.p.s2)
rec.s2(ri,:) = p.s2;
end
if ~isempty(p.p.nu)
rec.nu(ri,:) = p.nu;
end
end
|
github
|
lcnhappe/happe-master
|
prior_sqinvgamma.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/dist/prior_sqinvgamma.m
| 4,063 |
UNKNOWN
|
e40fd6ee9ae4d88aaff26182c69a17e9
|
function p = prior_sqinvgamma(varargin)
%PRIOR_SQINVGAMMA Gamma prior structure for square inverse of the parameter
%
% Description
% P = PRIOR_SQINVGAMMA('PARAM1', VALUE1, 'PARAM2', VALUE2, ...)
% creates Gamma prior structure for square inverse of the
% parameter in which the named parameters have the specified
% values. Any unspecified parameters are set to default values.
%
% P = PRIOR_SQINVGAMMA(P, 'PARAM1', VALUE1, 'PARAM2', VALUE2, ...)
% modify a prior structure with the named parameters altered
% with the specified values.
%
% Parameterisation is done as in Bayesian Data Analysis,
% second edition, Gelman et al. 2004.
%
% Parameters for Gamma prior [default]
% sh - shape [4]
% is - inverse scale [1]
% sh_prior - prior for sh [prior_fixed]
% is_prior - prior for is [prior_fixed]
%
% See also
% PRIOR_*
% Copyright (c) 2000-2001,2010,2012 Aki Vehtari
% Copyright (c) 2010 Jaakko Riihimäki
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'PRIOR_SQINVGAMMA';
ip.addOptional('p', [], @isstruct);
ip.addParamValue('sh',4, @(x) isscalar(x) && x>0);
ip.addParamValue('sh_prior',[], @(x) isstruct(x) || isempty(x));
ip.addParamValue('is',1, @(x) isscalar(x) && x>0);
ip.addParamValue('is_prior',[], @(x) isstruct(x) || isempty(x));
ip.parse(varargin{:});
p=ip.Results.p;
if isempty(p)
init=true;
p.type = 'SqInv-Gamma';
else
if ~isfield(p,'type') || ~isequal(p.type,'SqInv-Gamma')
error('First argument does not seem to be a valid prior structure')
end
init=false;
end
% Initialize parameters
if init || ~ismember('sh',ip.UsingDefaults)
p.sh = ip.Results.sh;
end
if init || ~ismember('is',ip.UsingDefaults)
p.is = ip.Results.is;
end
% Initialize prior structure
if init
p.p=[];
end
if init || ~ismember('sh_prior',ip.UsingDefaults)
p.p.sh=ip.Results.sh_prior;
end
if init || ~ismember('is_prior',ip.UsingDefaults)
p.p.is=ip.Results.is_prior;
end
if init
% set functions
p.fh.pak = @prior_sqinvgamma_pak;
p.fh.unpak = @prior_sqinvgamma_unpak;
p.fh.lp = @prior_sqinvgamma_lp;
p.fh.lpg = @prior_sqinvgamma_lpg;
p.fh.recappend = @prior_sqinvgamma_recappend;
end
end
function [w, s] = prior_sqinvgamma_pak(p)
w=[];
s={};
if ~isempty(p.p.sh)
w = log(p.sh);
s=[s; 'log(SqInv-Gamma.sh)'];
end
if ~isempty(p.p.is)
w = [w log(p.is)];
s=[s; 'log(SqInv-Gamma.is)'];
end
end
function [p, w] = prior_sqinvgamma_unpak(p, w)
if ~isempty(p.p.sh)
i1=1;
p.sh = exp(w(i1));
w = w(i1+1:end);
end
if ~isempty(p.p.is)
i1=1;
p.is = exp(w(i1));
w = w(i1+1:end);
end
end
function lp = prior_sqinvgamma_lp(x, p)
lJ = -log(x)*3 + log(2); % log(|J|) of transformation t=x^-2, |J| = |dt/dx| = 2/x^3
xt = x.^-2; % transformation
lp = sum(-p.is.*xt + (p.sh-1).*log(xt) +p.sh.*log(p.is) -gammaln(p.sh) +lJ);
if ~isempty(p.p.sh)
lp = lp + p.p.sh.fh.lp(p.sh, p.p.sh) + log(p.sh);
end
if ~isempty(p.p.is)
lp = lp + p.p.is.fh.lp(p.is, p.p.is) + log(p.is);
end
end
function lpg = prior_sqinvgamma_lpg(x, p)
lJg = -3./x; % gradient of log(|J|) of transformation
xt = x.^-2; % transformation
xtg = -2./x.^3; % derivative of transformation
lpg = xtg.*((p.sh-1)./xt - p.is) + lJg;
if ~isempty(p.p.sh)
lpgsh = (sum(-digamma1(p.sh) + log(p.is) + log(xt)) + p.p.sh.fh.lpg(p.sh, p.p.sh)).*p.sh + 1;
lpg = [lpg lpgsh];
end
if ~isempty(p.p.is)
lpgis = (sum(p.sh./p.is - xt) + p.p.is.fh.lpg(p.is, p.p.is)).*p.is + 1;
lpg = [lpg lpgis];
end
end
function rec = prior_sqinvgamma_recappend(rec, ri, p)
% The parameters are not sampled in any case.
rec = rec;
if ~isempty(p.p.sh)
rec.sh(ri,:) = p.sh;
end
if ~isempty(p.p.is)
rec.is(ri,:) = p.is;
end
end
|
github
|
lcnhappe/happe-master
|
prior_laplace.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/dist/prior_laplace.m
| 3,459 |
UNKNOWN
|
47b15af8fb5fcf7caa1bbad31be2af42
|
function p = prior_laplace(varargin)
%PRIOR_LAPLACE Laplace (double exponential) prior structure
%
% Description
% P = PRIOR_LAPLACE('PARAM1', VALUE1, 'PARAM2', VALUE2, ...)
% creates Laplace prior structure in which the named parameters
% have the specified values. Any unspecified parameters are set
% to default values.
%
% P = PRIOR_LAPLACE(P, 'PARAM1', VALUE1, 'PARAM2', VALUE2, ...)
% modify a prior structure with the named parameters altered
% with the specified values.
%
% Parameters for Laplace prior [default]
% mu - location [0]
% s - scale [1]
% mu_prior - prior for mu [prior_fixed]
% s_prior - prior for s [prior_fixed]
%
% See also
% PRIOR_*
% Copyright (c) 2000-2001,2010 Aki Vehtari
% Copyright (c) 2010 Jaakko Riihimäki
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'PRIOR_LAPLACE';
ip.addOptional('p', [], @isstruct);
ip.addParamValue('mu',0, @(x) isscalar(x));
ip.addParamValue('mu_prior',[], @(x) isstruct(x) || isempty(x));
ip.addParamValue('s',1, @(x) isscalar(x) && x>0);
ip.addParamValue('s_prior',[], @(x) isstruct(x) || isempty(x));
ip.parse(varargin{:});
p=ip.Results.p;
if isempty(p)
init=true;
p.type = 'Laplace';
else
if ~isfield(p,'type') || ~isequal(p.type,'Laplace')
error('First argument does not seem to be a valid prior structure')
end
init=false;
end
% Initialize parameters
if init || ~ismember('mu',ip.UsingDefaults)
p.mu = ip.Results.mu;
end
if init || ~ismember('s',ip.UsingDefaults)
p.s = ip.Results.s;
end
% Initialize prior structure
if init
p.p=[];
end
if init || ~ismember('mu_prior',ip.UsingDefaults)
p.p.mu=ip.Results.mu_prior;
end
if init || ~ismember('s_prior',ip.UsingDefaults)
p.p.s=ip.Results.s_prior;
end
if init
% set functions
p.fh.pak = @prior_laplace_pak;
p.fh.unpak = @prior_laplace_unpak;
p.fh.lp = @prior_laplace_lp;
p.fh.lpg = @prior_laplace_lpg;
p.fh.recappend = @prior_laplace_recappend;
end
end
function [w, s] = prior_laplace_pak(p)
w=[];
s={};
if ~isempty(p.p.mu)
w = p.mu;
s=[s; 'Laplace.mu'];
end
if ~isempty(p.p.s)
w = [w log(p.s)];
s=[s; 'log(Laplace.s)'];
end
end
function [p, w] = prior_laplace_unpak(p, w)
if ~isempty(p.p.mu)
i1=1;
p.mu = w(i1);
w = w(i1+1:end);
end
if ~isempty(p.p.s)
i1=1;
p.s = exp(w(i1));
w = w(i1+1:end);
end
end
function lp = prior_laplace_lp(x, p)
lp = sum(-log(2*p.s) - 1./p.s.* abs(x-p.mu));
if ~isempty(p.p.mu)
lp = lp + p.p.mu.fh.lp(p.mu, p.p.mu);
end
if ~isempty(p.p.s)
lp = lp + p.p.s.fh.lp(p.s, p.p.s) + log(p.s);
end
end
function lpg = prior_laplace_lpg(x, p)
lpg = -sign(x-p.mu)./p.s;
if ~isempty(p.p.mu)
lpgmu = sum(sign(x-p.mu)./p.s) + p.p.mu.fh.lpg(p.mu, p.p.mu);
lpg = [lpg lpgmu];
end
if ~isempty(p.p.s)
lpgs = (sum(-1./p.s +1./p.s.^2.*abs(x-p.mu)) + p.p.s.fh.lpg(p.s, p.p.s)).*p.s + 1;
lpg = [lpg lpgs];
end
end
function rec = prior_laplace_recappend(rec, ri, p)
% The parameters are not sampled in any case.
rec = rec;
if ~isempty(p.p.mu)
rec.mu(ri,:) = p.mu;
end
if ~isempty(p.p.s)
rec.s(ri,:) = p.s;
end
end
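A small illustrative sketch of the resulting tent-shaped (piecewise-linear) log density (not part of the original file):
pl = prior_laplace('mu',0,'s',0.5);     % hypothetical example values
xs = linspace(-3,3,121);
lps = arrayfun(@(x) pl.fh.lp(x,pl), xs);
plot(xs,lps)                            % equals -log(2*s) - abs(xs)/s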
|
github
|
lcnhappe/happe-master
|
prior_invunif.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/dist/prior_invunif.m
| 1,615 |
utf_8
|
e974f05998d83ecfb5e7bd348f6d8a8d
|
function p = prior_invunif(varargin)
%PRIOR_INVUNIF Uniform prior structure for the inverse of the parameter
%
% Description
% P = PRIOR_INVUNIF creates uniform prior structure for the
% inverse of the parameter.
%
% See also
% PRIOR_*
% Copyright (c) 2009 Jarno Vanhatalo
% Copyright (c) 2010,2012 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'PRIOR_INVUNIFORM';
ip.addOptional('p', [], @isstruct);
ip.parse(varargin{:});
p=ip.Results.p;
if isempty(p)
init=true;
p.type = 'Inv-Uniform';
else
if ~isfield(p,'type') || ~isequal(p.type,'Inv-Uniform')
error('First argument does not seem to be a valid prior structure')
end
init=false;
end
if init
% set functions
p.fh.pak = @prior_invunif_pak;
p.fh.unpak = @prior_invunif_unpak;
p.fh.lp = @prior_invunif_lp;
p.fh.lpg = @prior_invunif_lpg;
p.fh.recappend = @prior_invunif_recappend;
end
end
function [w, s] = prior_invunif_pak(p, w)
w=[];
s={};
end
function [p, w] = prior_invunif_unpak(p, w)
w = w;
p = p;
end
function lp = prior_invunif_lp(x, p)
lJ=-log(x)*2; % log(1/x^2) log(|J|) of transformation
lp = sum(0 +lJ);
end
function lpg = prior_invunif_lpg(x, p)
lJg=-2./x; % gradient of log(|J|) of transformation
lpg = zeros(size(x)) + lJg;
end
function rec = prior_invunif_recappend(rec, ri, p)
% The parameters are not sampled in any case.
rec = rec;
end
|
github
|
lcnhappe/happe-master
|
prior_sinvchi2.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/dist/prior_sinvchi2.m
| 3,787 |
UNKNOWN
|
bb36cbc475243c31f51e061847442b18
|
function p = prior_sinvchi2(varargin)
%PRIOR_SINVCHI2 Scaled-Inv-Chi^2 prior structure
%
% Description
% P = PRIOR_SINVCHI2('PARAM1', VALUE1, 'PARAM2', VALUE2, ...)
% creates Scaled-Inv-Chi^2 prior structure in which the named
% parameters have the specified values. Any unspecified
% parameters are set to default values.
%
% P = PRIOR_SINVCHI2(P, 'PARAM1', VALUE1, 'PARAM2', VALUE2, ...)
% modify a prior structure with the named parameters altered
% with the specified values.
%
% Parameterisation is done as in Bayesian Data Analysis,
% second edition, Gelman et al. 2004.
%
% Parameters for Scaled-Inv-Chi^2 [default]
% s2 - scale squared (variance) [1]
% nu - degrees of freedom [4]
% s2_prior - prior for s2 [prior_fixed]
% nu_prior - prior for nu [prior_fixed]
%
% See also
% PRIOR_*
% Copyright (c) 2000-2001,2010 Aki Vehtari
% Copyright (c) 2010 Jaakko Riihimäki
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'PRIOR_SINVCHI2';
ip.addOptional('p', [], @isstruct);
ip.addParamValue('s2',1, @(x) isscalar(x) && x>0);
ip.addParamValue('s2_prior',[], @(x) isstruct(x) || isempty(x));
ip.addParamValue('nu',4, @(x) isscalar(x) && x>0);
ip.addParamValue('nu_prior',[], @(x) isstruct(x) || isempty(x));
ip.parse(varargin{:});
p=ip.Results.p;
if isempty(p)
init=true;
p.type = 'S-Inv-Chi2';
else
if ~isfield(p,'type') || ~isequal(p.type,'S-Inv-Chi2')
error('First argument does not seem to be a valid prior structure')
end
init=false;
end
% Initialize parameters
if init || ~ismember('s2',ip.UsingDefaults)
p.s2 = ip.Results.s2;
end
if init || ~ismember('nu',ip.UsingDefaults)
p.nu = ip.Results.nu;
end
% Initialize prior structure
if init
p.p=[];
end
if init || ~ismember('s2_prior',ip.UsingDefaults)
p.p.s2=ip.Results.s2_prior;
end
if init || ~ismember('nu_prior',ip.UsingDefaults)
p.p.nu=ip.Results.nu_prior;
end
if init
% set functions
p.fh.pak = @prior_sinvchi2_pak;
p.fh.unpak = @prior_sinvchi2_unpak;
p.fh.lp = @prior_sinvchi2_lp;
p.fh.lpg = @prior_sinvchi2_lpg;
p.fh.recappend = @prior_sinvchi2_recappend;
end
end
function [w, s] = prior_sinvchi2_pak(p)
w=[];
s={};
if ~isempty(p.p.s2)
w = log(p.s2);
s=[s; 'log(Sinvchi2.s2)'];
end
if ~isempty(p.p.nu)
w = [w log(p.nu)];
s=[s; 'log(Sinvchi2.nu)'];
end
end
function [p, w] = prior_sinvchi2_unpak(p, w)
if ~isempty(p.p.s2)
i1=1;
p.s2 = exp(w(i1));
w = w(i1+1:end);
end
if ~isempty(p.p.nu)
i1=1;
p.nu = exp(w(i1));
w = w(i1+1:end);
end
end
function lp = prior_sinvchi2_lp(x, p)
lp = -sum((p.nu./2+1) .* log(x) + (p.s2.*p.nu./2./x) + (p.nu/2) .* log(2./(p.s2.*p.nu)) + gammaln(p.nu/2)) ;
if ~isempty(p.p.s2)
lp = lp + p.p.s2.fh.lp(p.s2, p.p.s2) + log(p.s2);
end
if ~isempty(p.p.nu)
lp = lp + p.p.nu.fh.lp(p.nu, p.p.nu) + log(p.nu);
end
end
function lpg = prior_sinvchi2_lpg(x, p)
lpg = -(p.nu/2+1)./x +p.nu.*p.s2./(2*x.^2);
if ~isempty(p.p.s2)
lpgs2 = (-sum(p.nu/2.*(1./x-1./p.s2)) + p.p.s2.fh.lpg(p.s2, p.p.s2)).*p.s2 + 1;
lpg = [lpg lpgs2];
end
if ~isempty(p.p.nu)
lpgnu = (-sum(0.5*(log(x) + p.s2./x + log(2./p.s2./p.nu) - 1 + digamma1(p.nu/2))) + p.p.nu.fh.lpg(p.nu, p.p.nu)).*p.nu + 1;
lpg = [lpg lpgnu];
end
end
function rec = prior_sinvchi2_recappend(rec, ri, p)
% The parameters are not sampled in any case.
rec = rec;
if ~isempty(p.p.s2)
rec.s2(ri,:) = p.s2;
end
if ~isempty(p.p.nu)
rec.nu(ri,:) = p.nu;
end
end
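Scaled-Inv-Chi^2(nu,s2) is the same distribution as Inv-Gamma(nu/2, nu*s2/2); an illustrative equivalence check (not part of the original file, using prior_invgamma.m from earlier in this directory):
p1 = prior_sinvchi2('s2',1.3,'nu',5);      % hypothetical example values
p2 = prior_invgamma('sh',5/2,'s',5*1.3/2);
x = 0.7;
abs(p1.fh.lp(x,p1) - p2.fh.lp(x,p2))       % approx. 0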
|
github
|
lcnhappe/happe-master
|
prior_sqrtt.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/dist/prior_sqrtt.m
| 5,015 |
UNKNOWN
|
face07c0efdd74ed6d833079c1bfac18
|
function p = prior_sqrtt(varargin)
%PRIOR_SQRTT Student-t prior structure for the square root of the parameter
%
% Description
% P = PRIOR_SQRTT('PARAM1', VALUE1, 'PARAM2', VALUE2, ...)
% creates for the square root of the parameter Student's
% t-distribution prior structure in which the named parameters
% have the specified values. Any unspecified parameters are set
% to default values.
%
% P = PRIOR_SQRTT(P, 'PARAM1', VALUE1, 'PARAM2', VALUE2, ...)
% modify a prior structure with the named parameters altered
% with the specified values.
%
% Parameters for Student-t prior [default]
% mu - location [0]
% s2 - scale [1]
% nu - degrees of freedom [4]
% mu_prior - prior for mu [prior_fixed]
% s2_prior - prior for s2 [prior_fixed]
% nu_prior - prior for nu [prior_fixed]
%
% See also
% PRIOR_t, PRIOR_*
% Copyright (c) 2000-2001,2010 Aki Vehtari
% Copyright (c) 2009 Jarno Vanhatalo
% Copyright (c) 2010 Jaakko Riihimäki
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'PRIOR_SQRTT';
ip.addOptional('p', [], @isstruct);
ip.addParamValue('mu',0, @(x) isscalar(x));
ip.addParamValue('mu_prior',[], @(x) isstruct(x) || isempty(x));
ip.addParamValue('s2',1, @(x) isscalar(x) && x>0);
ip.addParamValue('s2_prior',[], @(x) isstruct(x) || isempty(x));
ip.addParamValue('nu',4, @(x) isscalar(x) && x>0);
ip.addParamValue('nu_prior',[], @(x) isstruct(x) || isempty(x));
ip.parse(varargin{:});
p=ip.Results.p;
if isempty(p)
init=true;
p.type = 'Sqrt-t';
else
if ~isfield(p,'type') || ~isequal(p.type,'Sqrt-t')
error('First argument does not seem to be a valid prior structure')
end
init=false;
end
% Initialize parameters
if init || ~ismember('mu',ip.UsingDefaults)
p.mu = ip.Results.mu;
end
if init || ~ismember('s2',ip.UsingDefaults)
p.s2 = ip.Results.s2;
end
if init || ~ismember('nu',ip.UsingDefaults)
p.nu = ip.Results.nu;
end
% Initialize prior structure
if init
p.p=[];
end
if init || ~ismember('mu_prior',ip.UsingDefaults)
p.p.mu=ip.Results.mu_prior;
end
if init || ~ismember('s2_prior',ip.UsingDefaults)
p.p.s2=ip.Results.s2_prior;
end
if init || ~ismember('nu_prior',ip.UsingDefaults)
p.p.nu=ip.Results.nu_prior;
end
if init
% set functions
p.fh.pak = @prior_sqrtt_pak;
p.fh.unpak = @prior_sqrtt_unpak;
p.fh.lp = @prior_sqrtt_lp;
p.fh.lpg = @prior_sqrtt_lpg;
p.fh.recappend = @prior_sqrtt_recappend;
end
end
function [w, s] = prior_sqrtt_pak(p)
w=[];
s={};
if ~isempty(p.p.mu)
w = p.mu;
s=[s; 'Sqrt-Student-t.mu'];
end
if ~isempty(p.p.s2)
w = [w log(p.s2)];
s=[s; 'log(Sqrt-Student-t.s2)'];
end
if ~isempty(p.p.nu)
w = [w log(p.nu)];
s=[s; 'log(Sqrt-Student-t.nu)'];
end
end
function [p, w] = prior_sqrtt_unpak(p, w)
if ~isempty(p.p.mu)
i1=1;
p.mu = w(i1);
w = w(i1+1:end);
end
if ~isempty(p.p.s2)
i1=1;
p.s2 = exp(w(i1));
w = w(i1+1:end);
end
if ~isempty(p.p.nu)
i1=1;
p.nu = exp(w(i1));
w = w(i1+1:end);
end
end
function lp = prior_sqrtt_lp(x, p)
lJ = -log(2*sqrt(x)); % log(1/(2*x^(1/2))) log(|J|) of transformation
xt = sqrt(x); % transformation
lp = sum(gammaln((p.nu+1)./2) - gammaln(p.nu./2) - 0.5*log(p.nu.*pi.*p.s2) - (p.nu+1)./2.*log(1+(xt-p.mu).^2./p.nu./p.s2) + lJ);
if ~isempty(p.p.mu)
lp = lp + p.p.mu.fh.lp(p.mu, p.p.mu);
end
if ~isempty(p.p.s2)
lp = lp + p.p.s2.fh.lp(p.s2, p.p.s2) + log(p.s2);
end
if ~isempty(p.p.nu)
lp = lp + p.p.nu.fh.lp(p.nu, p.p.nu) + log(p.nu);
end
end
function lpg = prior_sqrtt_lpg(x, p)
lJg = -1./(2*x); % gradient of log(|J|) of transformation
xt = sqrt(x); % transformation
xtg = 1./(2*sqrt(x)); % derivative of transformation
lpg = xtg.*(-(p.nu+1).*(xt-p.mu)./(p.nu.*p.s2 + (xt-p.mu).^2)) + lJg;
if ~isempty(p.p.mu)
lpgmu = sum((p.nu+1).*(xt-p.mu)./(p.nu.*p.s2 + (xt-p.mu).^2)) + p.p.mu.fh.lpg(p.mu, p.p.mu);
lpg = [lpg lpgmu];
end
if ~isempty(p.p.s2)
lpgs2 = (sum(-1./(2.*p.s2)+((p.nu + 1).*(p.mu - xt).^2)./(2.*p.s2.*((p.mu-xt).^2 + p.nu.*p.s2))) + p.p.s2.fh.lpg(p.s2, p.p.s2)).*p.s2 + 1;
lpg = [lpg lpgs2];
end
if ~isempty(p.p.nu)
lpgnu = (0.5*sum(digamma1((p.nu+1)./2)-digamma1(p.nu./2)-1./p.nu-log(1+(xt-p.mu).^2./p.nu./p.s2)+(p.nu+1)./(1+(xt-p.mu).^2./p.nu./p.s2).*(xt-p.mu).^2./p.s2./p.nu.^2) + p.p.nu.fh.lpg(p.nu, p.p.nu)).*p.nu + 1;
lpg = [lpg lpgnu];
end
end
function rec = prior_sqrtt_recappend(rec, ri, p)
% The parameters are not sampled in any case.
rec = rec;
if ~isempty(p.p.mu)
rec.mu(ri,:) = p.mu;
end
if ~isempty(p.p.s2)
rec.s2(ri,:) = p.s2;
end
if ~isempty(p.p.nu)
rec.nu(ri,:) = p.nu;
end
end
|
github
|
lcnhappe/happe-master
|
gradcheck.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/diag/gradcheck.m
| 2,489 |
utf_8
|
1e4a8a8b0e776eb3b76a4be1c60b649e
|
function delta = gradcheck(w, func, grad, varargin)
%GRADCHECK Checks a user-defined gradient function using finite differences.
%
% Description
% This function is intended as a utility to check whether a gradient
% calculation has been correctly implemented for a given function.
% GRADCHECK(W, FUNC, GRAD) checks how accurate the gradient GRAD of
% a function FUNC is at a parameter vector X. A central difference
% formula with step size 1.0e-6 is used, and the results for both
% gradient function and finite difference approximation are printed.
%
% GRADCHECK(W, FUNC, GRAD, P1, P2, ...) allows additional arguments to
% be passed to FUNC and GRAD.
%
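% Example (a hedged sketch; the quadratic energy and its analytic
% gradient are made up for illustration):
%
%   f = @(w) sum(w.^2);    % energy function
%   g = @(w) 2*w;          % its analytic gradient
%   delta = gradcheck(randn(1,5), f, g);
%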
% Copyright (c) Christopher M Bishop, Ian T Nabney (1996, 1997)
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
% Reasonable value for step size
epsilon = 1.0e-6;
func = fcnchk(func, length(varargin));
grad = fcnchk(grad, length(varargin));
% Compute the finite-difference approximation for each parameter in turn
nparams = length(w);
deltaf = zeros(1, nparams);
step = zeros(1, nparams);
for i = 1:nparams
% Move a small way in the ith coordinate of w
step(i) = 1.0;
fplus = feval('linef', epsilon, func, w, step, varargin{:});
fminus = feval('linef', -epsilon, func, w, step, varargin{:});
% Use central difference formula for approximation
deltaf(i) = 0.5*(fplus - fminus)/epsilon;
step(i) = 0.0;
end
gradient = feval(grad, w, varargin{:});
fprintf(1, 'Checking gradient ...\n\n');
fprintf(1, ' analytic diffs delta\n\n');
disp([gradient', deltaf', gradient' - deltaf'])
delta = gradient' - deltaf';
function y = linef(lambda, fn, x, d, varargin)
%LINEF Calculate function value along a line.
%
% Description
% LINEF(LAMBDA, FN, X, D) calculates the value of the function FN at
% the point X+LAMBDA*D. Here X is a row vector and LAMBDA is a scalar.
%
% LINEF(LAMBDA, FN, X, D, P1, P2, ...) allows additional arguments to
% be passed to FN(). This function is used for convenience in some of
% the optimization routines.
%
% See also
% GRADCHECK, LINEMIN
%
% Copyright (c) Christopher M Bishop, Ian T Nabney (1996, 1997)
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
% Check function string
fn = fcnchk(fn, length(varargin));
y = feval(fn, x+lambda.*d, varargin{:});
|
github
|
lcnhappe/happe-master
|
geyer_icse.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/diag/geyer_icse.m
| 1,895 |
utf_8
|
d1069007aefb5a41440b9e5e98d12a94
|
function [t,t1] = geyer_icse(x,maxlag)
% GEYER_ICSE - Compute autocorrelation time tau using Geyer's
% initial convex sequence estimator
%
% T = GEYER_ICSE(X) returns the autocorrelation time tau.
% T = GEYER_ICSE(X,MAXLAG) returns the autocorrelation time tau using
% lags up to MAXLAG. Default MAXLAG = N-1, where N is the number of samples.
%
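% Example (a hedged sketch; the AR(1) chain is illustrative and the
% helper ACORR is assumed to be on the path):
%
%   x = filter(1, [1 -0.9], randn(5000,1)); % autocorrelated chain
%   tau = geyer_icse(x);                    % autocorrelation time
%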
% References:
% [1] C. J. Geyer, (1992). "Practical Markov Chain Monte Carlo",
% Statistical Science, 7(4):473-511
% Copyright (C) 2002 Aki Vehtari
%
% This software is distributed under the GNU General Public
% Licence (version 3 or later); please refer to the file
% Licence.txt, included with the software, for details.
% compute autocorrelation
if nargin > 1
cc=acorr(x,maxlag);
else
cc=acorr(x);
end
[n,m]=size(cc);
% acorr returns values starting from lag 1, so add lag 0 here
cc=[ones(1,m);cc];
n=n+1;
% now make n even
if mod(n,2)
n=n-1;
cc(end,:)=[];
end
% loop through variables
t=zeros(1,m);
t1=zeros(1,m);
opt=optimset('LargeScale','off','display','off');
for i1=1:m
c=cc(:,i1);
c=sum(reshape(c,2,n/2),1);
ci=find(c<0);
if isempty(ci)
warning(sprintf('Initial positive sequence could not be found for variable %d, using maxlag value',i1));
ci=n/2;
else
ci=ci(1)-1; % initial positive
end
c=[c(1:ci) 0]; % initial positive sequence
t1(i1)=-1+2*sum(c); % initial positive sequence estimator
if ci>2
ca=fmincon(@se,c,[],[],[],[],0*c,c,@sc,opt,c); % monotone convex sequence
else
ca=c;
end
t(i1)=-1+2*sum(ca); % monotone convex sequence estimator
end
function e = se(x,xx)
% SE - Error in monotone convex sequence estimator
e=sum((xx-x).^2);
function [c,ceq] = sc(x,xx)
% SC - Constraint in monotone convex sequence estimator
ceq=0*x;
c=ceq;
d=diff(x);
dd=-diff(d);
d(d<0)=0;d=d.^2;
dd(dd<0)=0;dd=dd.^2;
c(1:end-1)=d;c(2:end)=c(2:end)+d;
c(1:end-2)=dd;c(2:end-1)=c(2:end-1)+dd;c(3:end)=c(3:end)+dd;
|
github
|
lcnhappe/happe-master
|
ksstat.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/diag/ksstat.m
| 3,201 |
utf_8
|
cb6899b300a0a54054487e9374ea9e89
|
function [snks, snkss] = ksstat(varargin)
%KSSTAT Kolmogorov-Smirnov statistics
%
% ks = KSSTAT(X) or
% ks = KSSTAT(X1,X2,...,XJ)
% returns Kolmogorov-Smirnov statistics in form sqrt(N)*K
% where N is the number of samples. X is an NxMxJ matrix which
% contains J MCMC simulations of length N, each with
% dimension M. MCMC-simulations can be given as separate
% arguments X1,X2,... which should have the same length.
%
% When comparing more than two simulations, maximum of all the
% comparisons for each dimension is returned (Brooks et al,
% 2003).
%
% Function returns ks-values in vector KS of length M.
% An approximation of the 95% quantile for the limiting
% distribution of sqrt(N)*K with N>=100 is 1.36. ks-values
% can be compared against this value (Robert & Casella, 2004).
% In case of comparing several chains, maximum of all the
% comparisons can be compared to simulated distribution of
% the maximum of all comparisons obtained using independent
% random numbers (e.g. using randn(size(X))).
%
% If only one simulation is given, the statistic is calculated
% between the first and last thirds of the chain.
%
% Note that for this test samples have to be approximately
% independent. Use thinning or batching for Markov chains.
%
% Example:
% How to estimate the limiting value when comparing several
% chains stored in (thinned) variable R. Simulate 100 times
% independent samples. kss contains then 100 simulations of the
% maximum of all the comparisons for independent samples.
% Compare actual value for R to 95%-percentile of kss.
%
% kss=zeros(1,100);
% for i1=1:100
% kss(i1)=ksstat(randn(size(R)));
% end
% ks95=prctile(kss,95);
%
% References:
% Robert, C. P, and Casella, G. (2004) Monte Carlo Statistical
% Methods. Springer. p. 468-470.
% Brooks, S. P., Giudici, P., and Philippe, A. (2003)
% "Nonparametric Convergence Assessment for MCMC Model
% Selection". Journal of Computational & Graphical Statistics,
% 12(1):1-22.
%
% See also
% PSRF, GEYER_IMSE, THIN
% Copyright (C) 2001-2005 Aki Vehtari
%
% This software is distributed under the GNU General Public
% Licence (version 3 or later); please refer to the file
% Licence.txt, included with the software, for details.
% In case of one argument split to two halves (first and last thirds)
if nargin==1
X = varargin{1};
if size(X,3)==1
n = floor(size(X,1)/3);
x = zeros([n size(X,2) 2]);
x(:,:,1) = X(1:n,:);
x(:,:,2) = X((end-n+1):end,:);
X = x;
end
else
X = zeros([size(varargin{1}) nargin]);
for i1=1:nargin
X(:,:,i1) = varargin{i1};
end
end
if (size(X,1)<1)
error('X has zero rows');
end
[n1,n2,n3]=size(X);
%if n1<=100
% warning('Too few samples for reliable analysis');
%end
snks = zeros(1,n2);
snkss = zeros(sum(1:(n3-1)),n2);
for j1=1:size(X,2)
ii=0;
for i1=1:n3-1
for i2=i1+1:n3
ii=ii+1;
snkss(ii,j1)=ksc(X(:,j1,i1),X(:,j1,i2));
end
end
snks(j1)=max(snkss(:,j1));
end
function snks = ksc(x1,x2)
n=numel(x1);
edg=sort([x1; x2]);
c1=histc(x1,edg);
c2=histc(x2,edg);
K=max(abs(cumsum(c1)-cumsum(c2))/n);
snks=sqrt(n)*K;
|
github
|
lcnhappe/happe-master
|
hmc2.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/mc/hmc2.m
| 9,845 |
utf_8
|
4a06c9a759a73a727cd280c3d43cf40f
|
function [samples, energies, diagn] = hmc2(f, x, opt, gradf, varargin)
%HMC2 Hybrid Monte Carlo sampling.
%
% Description
% SAMPLES = HMC2(F, X, OPTIONS, GRADF) uses a hybrid Monte Carlo
% algorithm to sample from the distribution P ~ EXP(-F), where F is the
% first argument to HMC2. The Markov chain starts at the point X, and
% the function GRADF is the gradient of the `energy' function F.
%
% HMC2(F, X, OPTIONS, GRADF, P1, P2, ...) allows additional arguments to
% be passed to F() and GRADF().
%
% [SAMPLES, ENERGIES, DIAGN] = HMC2(F, X, OPTIONS, GRADF) also returns a
% log of the energy values (i.e. negative log probabilities) for the
% samples in ENERGIES and DIAGN, a structure containing diagnostic
% information (position, momentum and acceptance threshold) for each
% step of the chain in DIAGN.POS, DIAGN.MOM and DIAGN.ACC respectively.
% All candidate states (including rejected ones) are stored in
% DIAGN.POS. The DIAGN structure contains fields:
%
% pos
% the position vectors of the dynamic process
% mom
% the momentum vectors of the dynamic process
% acc
% the acceptance thresholds
% rej
% the number of rejections
% stps
% the step size vectors
%
% S = HMC2('STATE') returns a state structure that contains
% the used random stream and its state and the momentum of
% the dynamic process. These are contained in fields stream,
% streamstate and mom respectively. The momentum state is
% only used for a persistent momentum update.
%
% HMC2('STATE', S) resets the state to S. If S is an integer,
% then it is used as a seed for the random stream and the
% momentum variable is randomised. If S is a structure
% returned by HMC2('STATE') then it resets the random stream
% to exactly the same state.
%
% See HMC2_OPT for the optional parameters in the OPTIONS structure.
%
% See also
% HMC2_OPT, METROP
%
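% Example (a hedged sketch; the Gaussian energy and gradient are
% illustrative, and HMC2_OPT is assumed to return default options):
%
%   f = @(x) 0.5*sum(x.^2);   % energy of a standard normal
%   g = @(x) x;               % gradient of the energy
%   opt = hmc2_opt;
%   opt.nsamples = 100;
%   s = hmc2(f, randn(1,2), opt, g);
%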
% Copyright (c) 1996-1998 Christopher M Bishop, Ian T Nabney
% Copyright (c) 1998-2000,2010 Aki Vehtari
% The portion of the HMC algorithm involving "windows" is derived
% from the C code for this function included in the Software for
% Flexible Bayesian Modeling written by Radford Neal
% <http://www.cs.toronto.edu/~radford/fbm.software.html>.
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
% Global variable to store the state of the momentum variables: set by
% set_state and used to initialize the momentum when persistence is used.
global HMC_MOM
%Return state or set state to given state.
if nargin <= 2
if ~strcmp(f, 'state')
error('Unknown argument to hmc2');
end
switch nargin
case 1
samples = get_state(f);
return;
case 2
set_state(f, x);
return;
end
end
% Set empty options to default values
opt=hmc2_opt(opt);
% Reference to structures is much slower, so...
opt_nsamples=opt.nsamples;
opt_window=opt.window;
opt_steps=opt.steps;
opt_display = opt.display;
opt_persistence = opt.persistence;
if opt_persistence
alpha = opt.decay;
salpha = sqrt(1-alpha*alpha);
end
% Stepsizes, varargin gives the opt.stepsf arguments (net, x ,y)
% where x is input data and y is a target data.
if ~isempty(opt.stepsf)
epsilon = feval(opt.stepsf,varargin{:}).*opt.stepadj;
else
epsilon = opt.stepadj;
end
% Force x to be a row vector
x = x(:)';
nparams = length(x);
%Set up strings for evaluating potential function and its gradient.
f = fcnchk(f, length(varargin));
gradf = fcnchk(gradf, length(varargin));
% Check the gradient evaluation.
if (opt.checkgrad)
% Check gradients
feval('gradcheck', x, f, gradf, varargin{:});
end
% Matrix of returned samples.
samples = zeros(opt_nsamples, nparams);
% Return energies?
if nargout >= 2
en_save = 1;
energies = zeros(opt_nsamples, 1);
else
en_save = 0;
end
% Return diagnostics?
if nargout >= 3
diagnostics = 1;
diagn_pos = zeros(opt_nsamples, nparams);
diagn_mom = zeros(opt_nsamples, nparams);
diagn_acc = zeros(opt_nsamples, 1);
else
diagnostics = 0;
end
if (~opt_persistence | isempty(HMC_MOM) | nparams ~= length(HMC_MOM))
% Initialise momenta at random
p = randn(1, nparams);
else
% Initialise momenta from stored state
p = HMC_MOM;
end
% Evaluate starting energy.
E = feval(f, x, varargin{:});
% Main loop.
nreject = 0; %number of rejected samples
window_offset=0; %window offset initialised to zero
k = - opt.nomit + 1; %nomit samples are omitted, so we store
while k <= opt_nsamples %samples from k>0
% Store starting position and momenta
xold = x;
pold = p;
% Recalculate Hamiltonian as momenta have changed
Eold = E;
Hold = E + 0.5*(p*p');
% Decide on window offset, if windowed HMC is used
if opt_window>1
window_offset=fix(opt_window*rand(1));
end
have_rej = 0;
have_acc = 0;
n = window_offset;
dir = -1; %the default value for dir (=direction)
%assumes that windowing is used
while (dir==-1 | n~=opt_steps)
%if windowing is not used or we have allready taken
%window_offset steps backwards...
if (dir==-1 & n==0)
% Restore, next state should be original start state.
if window_offset > 0
x = xold;
p = pold;
n = window_offset;
end
%set dir for forward steps
E = Eold;
H = Hold;
dir = 1;
stps = dir;
else
if (n*dir+1<opt_window | n>(opt_steps-opt_window))
% State in the accept and/or reject window.
stps = dir;
else
% State not in the accept and/or reject window.
stps = opt_steps-2*(opt_window-1);
end
% First half-step of leapfrog.
p = p - dir*0.5*epsilon.*feval(gradf, x, varargin{:});
x = x + dir*epsilon.*p;
% Full leapfrog steps.
for m = 1:(abs(stps)-1)
p = p - dir*epsilon.*feval(gradf, x, varargin{:});
x = x + dir*epsilon.*p;
end
% Final half-step of leapfrog.
p = p - dir*0.5*epsilon.*feval(gradf, x, varargin{:});
E = feval(f, x, varargin{:});
H = E + 0.5*(p*p');
n=n+stps;
end
if (opt_window~=opt_steps+1 & n<opt_window)
% Account for state in reject window. Reject window can be
% ignored if windows consist of the entire trajectory.
if ~have_rej
rej_free_energy = H;
else
rej_free_energy = -addlogs(-rej_free_energy, -H);
end
if (~have_rej | rand(1) < exp(rej_free_energy-H))
E_rej=E;
x_rej=x;
p_rej=p;
have_rej = 1;
end
end
if (n>(opt_steps-opt_window))
% Account for state in the accept window.
if ~have_acc
acc_free_energy = H;
else
acc_free_energy = -addlogs(-acc_free_energy, -H);
end
if (~have_acc | rand(1) < exp(acc_free_energy-H))
E_acc=E;
x_acc=x;
p_acc=p;
have_acc = 1;
end
end
end
% Acceptance threshold.
a = exp(rej_free_energy - acc_free_energy);
if (diagnostics & k > 0)
diagn_pos(k,:) = x_acc;
diagn_mom(k,:) = p_acc;
diagn_acc(k,:) = a;
end
if (opt_display > 1)
fprintf(1, 'New position is\n');
disp(x);
end
% Take new state from the appropriate window.
if a > rand(1)
% Accept
E=E_acc;
x=x_acc;
p=-p_acc; % Reverse momenta
if (opt_display > 0)
fprintf(1, 'Finished step %4d Threshold: %g\n', k, a);
end
else
% Reject
if k > 0
nreject = nreject + 1;
end
E=E_rej;
x=x_rej;
p=p_rej;
if (opt_display > 0)
fprintf(1, ' Sample rejected %4d. Threshold: %g\n', k, a);
end
end
if k > 0
% Store sample
samples(k,:) = x;
if en_save
% Store energy
energies(k) = E;
end
end
% Set momenta for next iteration
if opt_persistence
% Reverse momenta
p = -p;
% Adjust momenta by a small random amount
p = alpha.*p + salpha.*randn(1, nparams);
else
% Replace all momenta
p = randn(1, nparams);
end
k = k + 1;
end
if (opt_display > 0)
fprintf(1, '\nFraction of samples rejected: %g\n', ...
nreject/(opt_nsamples));
end
% Store diagnostics
if diagnostics
diagn.pos = diagn_pos; %positions matrix
diagn.mom = diagn_mom; %momentum matrix
diagn.acc = diagn_acc; %acceptance threshold matrix
diagn.rej = nreject/(opt_nsamples); %rejection rate
diagn.stps = epsilon; %stepsize vector
end
% Store final momentum value in global so that it can be retrieved later
if opt_persistence
HMC_MOM = p;
else
HMC_MOM = [];
end
return
function state = get_state(f)
%GET_STATE Return complete state of sampler
% (including momentum)
global HMC_MOM
state.stream=setrandstream();
state.streamstate = state.stream.State;
state.mom = HMC_MOM;
return
function set_state(f, x)
%SET_STATE Set complete state of sample
%
% Description
% Set complete state of sampler (including momentum)
% or just set randn and rand with integer argument.
global HMC_MOM
if isnumeric(x)
setrandstream(x);
else
if ~isstruct(x)
error('Second argument to hmc must be number or state structure');
end
if (~isfield(x, 'stream') | ~isfield(x, 'streamstate') ...
| ~isfield(x, 'mom'))
error('Second argument to hmc must contain correct fields')
end
setrandstream(x.stream);
x.State=x.streamstate;
HMC_MOM = x.mom;
end
return
function c=addlogs(a,b)
%ADDLOGS(A,B) Add numbers represented by their logarithms.
%
% Description
% Add numbers represented by their logarithms.
% Computes log(exp(a)+exp(b)) in such a fashion that it
% works even when a and b have large magnitude.
if a>b
c = a + log(1+exp(b-a));
else
c = b + log(1+exp(a-b));
end
|
github
|
lcnhappe/happe-master
|
hmc_nuts.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/mc/hmc_nuts.m
| 10,716 |
utf_8
|
01197102ae8e357143d8a5cc213d39b4
|
function [samples, logp, diagn] = hmc_nuts(f, theta0, opt)
%HMC_NUTS No-U-Turn Sampler (NUTS)
%
% Description
% [SAMPLES, LOGP, DIAGN] = HMC_NUTS(f, theta0, opt)
% Implements the No-U-Turn Sampler (NUTS), specifically,
% algorithm 6 from the NUTS paper (Hoffman & Gelman, 2011). Runs
% opt.Madapt steps of burn-in, during which it adapts the step
% size parameter epsilon, then starts generating samples to
% return.
%
% f(theta) should be a function that returns the log probability and its
% gradient evaluated at theta. I.e., you should be able to call
% [logp grad] = f(theta).
%
% opt.epsilon is a step size parameter.
% opt.M is the number of samples to generate.
% opt.Madapt is the number of steps of burn-in/how long to run
% the dual averaging algorithm to fit the step size
% epsilon. Note that there is no need to provide
% opt.epsilon if doing adaptation.
% theta0 is a 1-by-D vector with the desired initial setting
% of the parameters (passed as the second argument).
% opt.delta should be between 0 and 1, and is a target HMC
% acceptance probability. Defaults to 0.8 if
% unspecified.
%
%
% The returned variable "samples" is an (M+Madapt)-by-D matrix
% of samples generated by NUTS, including burn-in samples.
%
% Note that when used from gp_mc, opt.M and opt.Madapt are both 0 or
% 1 (hmc_nuts returns only one sample to gp_mc). Number of epsilon
% adaptations should be set in hmc options structure hmc_opt.nadapt, in
% gp_mc(... ,'hmc_opt', hmc_opt).
%
% The returned structure diagn includes step-size vector
% epsilon, number of rejected samples and dual averaging
% parameters, so it is possible to continue adapting the step-size
% parameter.
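%
%  Example (a hedged sketch; f is an illustrative standard-normal
%  log density returning its gradient as well, and the option
%  values are made up):
%
%    f = @(theta) deal(-0.5*sum(theta.^2), -theta);
%    opt.M = 500; opt.Madapt = 100;
%    samples = hmc_nuts(f, zeros(1, 2), opt);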
% Copyright (c) 2011, Matthew D. Hoffman
% Copyright (c) 2012, Ville Tolvanen
% All rights reserved.
%
% Redistribution and use in source and binary forms, with or
% without modification, are permitted provided that the following
% conditions are met:
%
% Redistributions of source code must retain the above copyright
% notice, this list of conditions and the following disclaimer.
%
% Redistributions in binary form must reproduce the above copyright
% notice, this list of conditions and the following disclaimer in
% the documentation and/or other materials provided with the
% distribution.
%
% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
% CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
% INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
% MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
% DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
% CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
% SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
% LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
% USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
% AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
% LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
% ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
% POSSIBILITY OF SUCH DAMAGE.
global nfevals;
nfevals = 0;
if ~isfield(opt, 'delta')
delta = 0.8;
else
delta = opt.delta;
end
if ~isfield(opt, 'M')
M = 1;
else
M = opt.M;
end
if ~isfield(opt, 'Madapt')
Madapt = 0;
else
Madapt = opt.Madapt;
end
diagn.rej = 0;
assert(size(theta0, 1) == 1);
D = length(theta0);
samples = zeros(M+Madapt, D);
[logp grad] = f(theta0);
samples(1, :) = theta0;
% Parameters to the dual averaging algorithm.
gamma = 0.05;
t0 = 10;
kappa = 0.75;
% Initialize dual averaging algorithm.
epsilonbar = 1;
Hbar = 0;
if isfield(opt, 'epsilon') && ~isempty(opt.epsilon)
epsilon = opt.epsilon(end);
% Hbar & epsilonbar are needed when doing adaptation of step-length
if isfield(opt, 'Hbar') && ~isempty(opt.Hbar)
Hbar = opt.Hbar;
end
if isfield(opt, 'epsilonbar') && ~isempty(opt.epsilonbar)
epsilonbar=opt.epsilonbar;
else
epsilonbar=opt.epsilon;
end
mu = log(10*opt.epsilon(1));
else
% Choose a reasonable first epsilon by a simple heuristic.
epsilon = find_reasonable_epsilon(theta0, grad, logp, f);
mu = log(10*epsilon);
opt.epsilon = epsilon;
opt.epsilonbar = epsilonbar;
opt.Hbar = Hbar;
end
for m = 2:M+Madapt+1,
% m
% Resample momenta.
r0 = randn(1, D);
% Joint log-probability of theta and momenta r.
joint = logp - 0.5 * (r0 * r0');
% Resample u ~ uniform([0, exp(joint)]).
% Equivalent to (log(u) - joint) ~ exponential(1).
logu = joint - exprnd(1);
% Initialize tree.
thetaminus = samples(m-1, :);
thetaplus = samples(m-1, :);
rminus = r0;
rplus = r0;
gradminus = grad;
gradplus = grad;
% Initial height j = 0.
j = 0;
% If all else fails, the next sample is the previous sample.
samples(m, :) = samples(m-1, :);
% Initially the only valid point is the initial point.
n = 1;
rej = 0;
% Main loop---keep going until the criterion s == 0.
s = 1;
while (s == 1)
% Choose a direction. -1=backwards, 1=forwards.
v = 2*(rand() < 0.5)-1;
% Double the size of the tree.
if (v == -1)
[thetaminus, rminus, gradminus, tmp, tmp, tmp, thetaprime, gradprime, logpprime, nprime, sprime, alpha, nalpha] = ...
build_tree(thetaminus, rminus, gradminus, logu, v, j, epsilon, f, joint);
else
[tmp, tmp, tmp, thetaplus, rplus, gradplus, thetaprime, gradprime, logpprime, nprime, sprime, alpha, nalpha] = ...
build_tree(thetaplus, rplus, gradplus, logu, v, j, epsilon, f, joint);
end
% Use Metropolis-Hastings to decide whether or not to move to a
% point from the half-tree we just generated.
if ((sprime == 1) && (rand() < nprime/n))
samples(m, :) = thetaprime;
logp = logpprime;
grad = gradprime;
else
rej = rej + 1;
end
% Update number of valid points we've seen.
n = n + nprime;
% Decide if it's time to stop.
s = sprime && stop_criterion(thetaminus, thetaplus, rminus, rplus);
% Increment depth.
j = j + 1;
end
% Do adaptation of epsilon if we're still doing burn-in.
eta = 1 / (length(opt.epsilon) + t0);
Hbar = (1 - eta) * Hbar + eta * (delta - alpha / nalpha);
if (m <= Madapt+1)
epsilon = exp(mu - sqrt(m-1)/gamma * Hbar);
eta = (length(opt.epsilon))^-kappa;
epsilonbar = exp((1 - eta) * log(epsilonbar) + eta * log(epsilon));
else
epsilon = epsilonbar;
end
opt.epsilon(end+1) = epsilon;
opt.epsilonbar = epsilonbar;
opt.Hbar = Hbar;
diagn.rej = diagn.rej + rej;
end
diagn.opt = opt;
end
function [thetaprime, rprime, gradprime, logpprime] = leapfrog(theta, r, grad, epsilon, f)
rprime = r + 0.5 * epsilon * grad;
thetaprime = theta + epsilon * rprime;
[logpprime, gradprime] = f(thetaprime);
rprime = rprime + 0.5 * epsilon * gradprime;
global nfevals;
nfevals = nfevals + 1;
end
function criterion = stop_criterion(thetaminus, thetaplus, rminus, rplus)
thetavec = thetaplus - thetaminus;
criterion = (thetavec * rminus' >= 0) && (thetavec * rplus' >= 0);
end
% The main recursion.
function [thetaminus, rminus, gradminus, thetaplus, rplus, gradplus, thetaprime, gradprime, logpprime, nprime, sprime, alphaprime, nalphaprime] = ...
build_tree(theta, r, grad, logu, v, j, epsilon, f, joint0)
if (j == 0)
% Base case: Take a single leapfrog step in the direction v.
[thetaprime, rprime, gradprime, logpprime] = leapfrog(theta, r, grad, v*epsilon, f);
joint = logpprime - 0.5 * (rprime * rprime');
% Is the new point in the slice?
nprime = logu < joint;
% Is the simulation wildly inaccurate?
sprime = logu - 1000 < joint;
% Set the return values---minus=plus for all things here, since the
% "tree" is of depth 0.
thetaminus = thetaprime;
thetaplus = thetaprime;
rminus = rprime;
rplus = rprime;
gradminus = gradprime;
gradplus = gradprime;
% Compute the acceptance probability.
alphaprime = min(1, exp(logpprime - 0.5 * (rprime * rprime') - joint0));
nalphaprime = 1;
else
% Recursion: Implicitly build the height j-1 left and right subtrees.
[thetaminus, rminus, gradminus, thetaplus, rplus, gradplus, thetaprime, gradprime, logpprime, nprime, sprime, alphaprime, nalphaprime] = ...
build_tree(theta, r, grad, logu, v, j-1, epsilon, f, joint0);
% No need to keep going if the stopping criteria were met in the first
% subtree.
if (sprime == 1)
if (v == -1)
[thetaminus, rminus, gradminus, tmp, tmp, tmp, thetaprime2, gradprime2, logpprime2, nprime2, sprime2, alphaprime2, nalphaprime2] = ...
build_tree(thetaminus, rminus, gradminus, logu, v, j-1, epsilon, f, joint0);
else
[tmp, tmp, tmp, thetaplus, rplus, gradplus, thetaprime2, gradprime2, logpprime2, nprime2, sprime2, alphaprime2, nalphaprime2] = ...
build_tree(thetaplus, rplus, gradplus, logu, v, j-1, epsilon, f, joint0);
end
% Choose which subtree to propagate a sample up from.
if (rand() < nprime2 / (nprime + nprime2))
thetaprime = thetaprime2;
gradprime = gradprime2;
logpprime = logpprime2;
end
% Update the number of valid points.
nprime = nprime + nprime2;
% Update the stopping criterion.
sprime = sprime && sprime2 && stop_criterion(thetaminus, thetaplus, rminus, rplus);
% Update the acceptance probability statistics.
alphaprime = alphaprime + alphaprime2;
nalphaprime = nalphaprime + nalphaprime2;
end
end
end
function epsilon = find_reasonable_epsilon(theta0, grad0, logp0, f)
epsilon = 0.1;
r0 = randn(1, length(theta0));
% Figure out what direction we should be moving epsilon.
[tmp, rprime, tmp, logpprime] = leapfrog(theta0, r0, grad0, epsilon, f);
acceptprob = exp(logpprime - logp0 - 0.5 * (rprime * rprime' - r0 * r0'));
% Here we presume that the energy function returns NaN if the energy cannot
% be evaluated at the suggested hyperparameters, so that we need a smaller epsilon
if isnan(acceptprob)
acceptprob=0;
end
a = 2 * (acceptprob > 0.5) - 1;
% Keep moving epsilon in that direction until acceptprob crosses 0.5.
while (acceptprob^a > 2^(-a))
epsilon = epsilon * 2^a;
[tmp, rprime, tmp, logpprime] = leapfrog(theta0, r0, grad0, epsilon, f);
acceptprob = exp(logpprime - logp0 - 0.5 * (rprime * rprime' - r0 * r0'));
end
end
|
github
|
lcnhappe/happe-master
|
sls.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/mc/sls.m
| 18,996 |
utf_8
|
909e1e12015c1cde6f731f39015ae059
|
function [samples,energies,diagn] = sls(f, x, opt, gradf, varargin)
%SLS Markov Chain Monte Carlo sampling using Slice Sampling
%
% Description
% SAMPLES = SLS(F, X, OPTIONS) uses slice sampling to sample
% from the distribution P ~ EXP(-F), where F is the first
% argument to SLS. Markov chain starts from point X and the
% sampling from multivariate distribution is implemented by
% sampling each variable at a time either using overrelaxation
% or not. See SLS_OPT for details. A simple multivariate scheme
% using hyperrectangles is utilized when the method is defined as 'multi'.
%
% SAMPLES = SLS(F, X, OPTIONS, [], P1, P2, ...) allows additional
% arguments to be passed to F(). The fourth argument is ignored,
% but included for compatibility with HMC and the optimisers.
%
% [SAMPLES, ENERGIES, DIAGN] = SLS(F, X, OPTIONS) Returns some additional
% diagnostics for the values in SAMPLES and ENERGIES.
%
% See SLS_OPT for the optional parameters in the OPTIONS structure.
%
% See also
% METROP2, HMC2, SLS_OPT
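%
% Example (a hedged sketch; SLS_OPT is assumed to return default
% options and the quadratic energy is illustrative):
%
%   opt = sls_opt;
%   opt.nsamples = 200;
%   s = sls(@(x) 0.5*sum(x.^2), randn(1,2), opt);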
% Based on "Slice Sampling" by Radford M. Neal in "The Annals of Statistics"
% 2003, Vol. 31, No. 3, 705-767, (c) Institute of Mathematical Statistics, 2003
% Thompson & Neal (2010) Covariance-Adaptive Slice Sampling. Technical
% Report No. 1002, Department of Statistics, University of Toronto
%
% Copyright (c) Toni Auranen, 2003-2006
% Copyright (c) Ville Tolvanen, 2012
% This software is distributed under the GNU General Public
% Licence (version 3 or later); please refer to the file
% Licence.txt, included with the software, for details.
% Version 1.01, 21/1/2004, TA
% Version 1.02, 28/1/2004, TA
% - fixed the limit-checks for stepping_out and doubling
% Version 1.02b, 26/2/2004, TA
% - changed some display-settings
% Version 1.03, 9/3/2004, TA
% - changed some display-settings
% - changed the variable initialization
% - optimized the number of fevals
% Version 1.04, 24/3/2004, TA
% - overrelaxation separately for each variable in multivariate case
% - added maxiter parameter for shrinkage
% - added some argument checks
% Version 1.04b, 29/3/2004, TA
% - minor fixes
% Version 1.05, 7/4/2004, TA
% - minor bug fixes
% - added nomit-option
% Version 1.06, 22/4/2004, TA
% - added unimodality shortcut for stepping-out and doubling
% - optimized the number of fevals in doubling
% Version 1.06b, 27/4/2004, TA
% - fixed some bugs
% Version 1.7, 15/3/2005, TA
% - added the hyperrectangle multivariate sampling
% Start timing and construct a function handle from the function name string
% (Timing is off, and function handles are left for the user)
%t = cputime;
%f = str2func(f);
% Set empty options to default values
opt = sls_opt(opt);
%if opt.display, disp(opt); end
if opt.display == 1
opt.display = 2; % verbose
elseif opt.display == 2
opt.display = 1; % all
end
% Forces x to be a row vector
x = x(:)';
% Set up some variables
nparams = length(x);
samples = zeros(opt.nsamples,nparams);
if nargout >= 2
save_energies = 1;
energies = zeros(opt.nsamples,1);
else
save_energies = 0;
end
if nargout >= 3
save_diagnostics = 1;
else
save_diagnostics = 0;
end
if nparams == 1
multivariate = 0;
if strcmp(opt.method,'multi')
opt.method = 'stepping';
end
end
if nparams > 1
multivariate = 1;
end
rej = 0;
rej_step = 0;
rej_old = 0;
x_0 = x;
umodal = opt.unimodal;
nomit = opt.nomit;
nsamples = opt.nsamples;
display_info = opt.display;
method = opt.method;
overrelaxation = opt.overrelaxation;
overrelaxation_info = ~isempty(find(overrelaxation));
w = opt.wsize;
maxiter = opt.maxiter;
m = opt.mlimit;
p = opt.plimit;
a = opt.alimit;
mmin = opt.mmlimits(1,:);
mmax = opt.mmlimits(2,:);
if multivariate
if length(w) == 1
w = w.*ones(1,nparams);
end
if length(m) == 1
m = m.*ones(1,nparams);
end
if length(p) == 1
p = p.*ones(1,nparams);
end
if length(overrelaxation) == 1
overrelaxation = overrelaxation.*ones(1,nparams);
end
if length(a) == 1
a = a.*ones(1,nparams);
end
if length(mmin) == 1
mmin = mmin.*ones(1,nparams);
end
if length(mmax) == 1
mmax = mmax.*ones(1,nparams);
end
end
if overrelaxation_info
nparams_or = length(find(overrelaxation));
end
if ~isempty(find(w<=0))
error('Parameter ''wsize'' must be positive.');
end
if (strcmp(method,'stepping') || strcmp(method,'doubling')) && isempty(find(mmax-mmin>2*w))
error('Check parameter ''mmlimits''. The interval is too small in comparison to parameter ''wsize''.');
end
if strcmp(method,'stepping') && ~isempty(find(m<1))
error('Parameter ''mlimit'' must be >0.');
end
if overrelaxation_info && ~isempty(find(a<1))
error('Parameter ''alimit'' must be >0.');
end
if strcmp(method,'doubling') && ~isempty(find(p<1))
error('Parameter ''plimit'' must be >0.');
end
ind_umodal = 0;
j = 0;
y_new = -f(x_0,varargin{:});
% The main loop of slice sampling
for i = 1-nomit:1:nsamples
switch method
% Slice covariance matching from Thompson & Neal (2010)
case 'covmatch'
theta = 1;
np = length(x_0);
M = y_new;
ee = exprnd(1);
ytilde0 = M-ee;
x_0 = x_0';
sigma = opt.sigma;
R = 1./sigma*eye(np);
F = R;
cbarstar = 0;
while 1
z = mvnrnd(zeros(1,np), eye(np))';
c = x_0 + F\z;
cbarstar = cbarstar + F'*(F*c);
cbar = R\(R'\cbarstar);
z = mvnrnd(zeros(1,np), eye(np))';
x_prop = cbar + R\z;
y_new = -f(x_prop',varargin{:});
if y_new > ytilde0
% Accept proposal
break;
end
G = -gradf(x_prop', varargin{:})';
gr = G/norm(G);
delta = norm(x_prop - c);
u = x_prop + delta*gr;
lu = -f(u', varargin{:});
kappa = -2/delta^2*(lu-y_new-delta*norm(G));
lxu = 0.5*norm(G)^2/kappa + y_new;
M = max(M, lxu);
sigma2 = 2/3*(M-ytilde0)/kappa;
alpha = max(0, 1/sigma2 - (1+theta)*gr'*(R'*(R*gr)));
F = chol(theta*(R'*R) + alpha*(gr*gr'));
R = chol((1+theta)*(R'*R) + alpha*(gr*gr'));
end
% Save sampling step and set up the new 'old' sample
x_0 = x_prop;
if i > 0
samples(i,:) = x_prop;
end
% Save energies
if save_energies && i > 0
energies(i) = -y_new;
end
% Shrinking-Rank method from Thompson & Neal (2010)
case 'shrnk'
ytr = -log(rand) - y_new;
k = 0;
sigma(1) = opt.sigma;
J = [];
np = length(x_0);
while 1
k = k+1;
c(k,:) = P(J, mvnrnd(x_0, sigma(k).^2*eye(np)));
sigma2 = 1./(sum(1./sigma.^2));
mu = sigma2*(sum(bsxfun(@times, 1./sigma'.^2, bsxfun(@minus, c,x_0)),1));
x_prop = x_0 + P(J, mvnrnd(mu, sqrt(sigma2)*eye(np)));
y_new = f(x_prop, varargin{:});
if y_new < ytr
% Accept proposal
break;
end
gradient = gradf(x_prop, varargin{:});
gstar = P(J, gradient);
if size(J,2) < size(x,2)-1 && gstar*gradient'/(norm(gstar)*norm(gradient)) > cos(pi/3)
J = [J gstar'/norm(gstar)];
sigma(k+1) = sigma(k);
else
sigma(k+1) = 0.95*sigma(k);
end
end
% Save sampling step and set up the new 'old' sample
x_0 = x_prop;
if i > 0
samples(i,:) = x_prop;
end
% Save energies
if save_energies && i > 0
energies(i) = y_new;
end
% Multivariate rectangle sampling step
case 'multi'
x_new = x_0;
y = y_new + log(rand(1));
if isinf(y)
x_new = mmin + (mmax-mmin).*rand(1,length(x_new));
y_new = -f(x_new,varargin{:});
else
L = max(x_0 - w.*rand(1,length(x_0)),mmin);
R = min(L + w,mmax);
x_new = L + rand(1,length(x_new)).*(R-L);
y_new = -f(x_new,varargin{:});
while y >= y_new
L(x_new < x_0) = x_new(x_new < x_0);
R(x_new >= x_0) = x_new(x_new >= x_0);
x_new = L + rand(1,length(x_new)).*(R-L);
y_new = -f(x_new,varargin{:});
end % while
end % isinf(y)
% Save sampling step and set up the new 'old' sample
x_0 = x_new;
if i > 0
samples(i,:) = x_new;
end
% Save energies
if save_energies && i > 0
energies(i) = -y_new;
end
% Display energy information
if display_info == 1
fprintf('Finished multi-step %4d Energy: %g\n',i,-y_new);
end
case 'multimm'
x_new = x_0;
y = y_new + log(rand(1));
if isinf(y)
x_new = mmin + (mmax-mmin).*rand(1,length(x_new));
y_new = -f(x_new,varargin{:});
else
L = mmin;
R = mmax;
x_new = L + rand(1,length(x_new)).*(R-L);
y_new = -f(x_new,varargin{:});
while y >= y_new
L(x_new < x_0) = x_new(x_new < x_0);
R(x_new >= x_0) = x_new(x_new >= x_0);
x_new = L + rand(1,length(x_new)).*(R-L);
y_new = -f(x_new,varargin{:});
end % while
end % isinf(y)
% Save sampling step and set up the new 'old' sample
x_0 = x_new;
if i > 0
samples(i,:) = x_new;
end
% Save energies
if save_energies && i > 0
energies(i) = -y_new;
end
% Display energy information
if display_info == 1
fprintf('Finished multimm-step %4d Energy: %g\n',i,-y_new);
end
% Other sampling steps
otherwise
ind_umodal = ind_umodal + 1;
x_new = x_0;
for j = 1:nparams
y = y_new + log(rand(1));
if isinf(y)
x_new(j) = mmin(j) + (mmax(j)-mmin(j)).*rand;
y_new = -f(x_new,varargin{:});
else
L = x_new;
R = x_new;
switch method
case 'stepping'
[L, R] = stepping_out(f,y,x_new,L,R,w,m,j,mmin,mmax,display_info,umodal,varargin{:});
case 'doubling'
[L, R] = doubling(f,y,x_new,L,R,w,p,j,mmin,mmax,display_info,umodal,varargin{:});
case 'minmax'
L(j) = mmin(j);
R(j) = mmax(j);
end % switch
if overrelaxation(j)
[x_new, y_new, rej_step, rej_old] = bisection(f,y,x_new,L,R,w,a,rej_step,j,umodal,varargin{:});
else
[x_new, y_new] = shrinkage(f,y,x_new,w,L,R,method,j,maxiter,umodal,varargin{:});
end % if overrelaxation
if umodal % adjust the slice if the distribution is known to be unimodal
w(j) = (w(j)*ind_umodal + abs(x_0(j)-x_new(j)))/(ind_umodal+1);
end % if umodal
end % if isinf(y)
end % j:nparams
if overrelaxation_info & multivariate
rej = rej + rej_step/nparams_or;
elseif overrelaxation_info & ~multivariate
rej = rej + rej_step;
end
% Save sampling step and set up the new 'old' sample
x_0 = x_new;
if i > 0
samples(i,:) = x_new;
end
% Save energies
if save_energies && i > 0
energies(i) = -y_new;
end
% Display information and keep track of rejections (overrelaxation)
if display_info == 1
if ~multivariate && overrelaxation_info && rej_old
fprintf(' Sample %4d rejected (overrelaxation).\n',i);
rej_old = 0;
rej_step = 0;
elseif multivariate && overrelaxation_info
fprintf('Finished step %4d (RR: %1.1f, %d/%d) Energy: %g\n',i,100*rej_step/nparams_or,nparams_or,nparams,-y_new);
rej_step = 0;
rej_old = 0;
else
fprintf('Finished step %4d Energy: %g\n',i,-y_new);
end
else
rej_old = 0;
rej_step = 0;
end
end % switch
end % i:nsamples
% Save diagnostics
if save_diagnostics
diagn.opt = opt;
end
% Display rejection information after slice sampling is complete (overrelaxation)
if overrelaxation_info && nparams == 1 && display_info == 1
fprintf('\nRejected samples due to overrelaxation (percentage): %1.1f\n',100*rej/nsamples);
elseif overrelaxation_info && nparams > 1 && display_info == 1
fprintf('\nAverage rejections per step due to overrelaxation (percentage): %1.1f\n',100*rej/nsamples);
end
% Display the elapsed time
%if display_info == 1
% if (cputime-t)/60 < 4
% fprintf('\nElapsed cputime (seconds): %1.1f\n\n',cputime-t);
% else
% fprintf('\nElapsed cputime (minutes): %1.1f\n\n',(cputime-t)/60);
% end
%end
%disp(w);
%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%
function [x_new, y_new, rej, rej_old] = bisection(f,y,x_0,L,R,w,a,rej,j,um,varargin);
%function [x_new, y_new, rej, rej_old] = bisection(f,y,x_0,L,R,w,a,rej,j,um,varargin);
%
% Bisection for overrelaxation (stepping-out needs to be used)
x_new = x_0;
M = (L + R) / 2;
l = L;
r = R;
q = w(j);
s = a(j);
if (R(j) - L(j)) < 1.1*w(j)
while 1
M(j) = (l(j) + r(j))/2;
if s == 0 || y < -f(M,varargin{:})
break;
end
if x_0(j) > M(j)
l(j) = M(j);
else
r(j) = M(j);
end
s = s - 1;
q = q / 2;
end % while
end % if
ll = l;
rr = r;
while s > 0
s = s - 1;
q = q / 2;
tmp_ll = ll;
tmp_ll(j) = tmp_ll(j) + q;
tmp_rr = rr;
tmp_rr(j) = tmp_rr(j) - q;
if y >= -f((tmp_ll),varargin{:})
ll(j) = ll(j) + q;
end
if y >= -f((tmp_rr),varargin{:})
rr(j) = rr(j) - q;
end
end % while
x_new(j) = ll(j) + rr(j) - x_0(j);
y_new = -f(x_new,varargin{:});
if x_new(j) < l(j) || x_new(j) > r(j) || y >= y_new
x_new(j) = x_0(j);
rej = rej + 1;
rej_old = 1;
y_new = y;
else
rej_old = 0;
end
%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%
function [x_new, y_new] = shrinkage(f,y,x_0,w,L,R,method,j,maxiter,um,varargin);
%function [x_new, y_new] = shrinkage(f,y,x_0,w,L,R,method,j,maxiter,um,varargin);
%
% Shrinkage with acceptance-check for doubling scheme
% - acceptance-check is skipped if the distribution is defined
% to be unimodal by the user
iter = 0;
x_new = x_0;
l = L(j);
r = R(j);
while 1
x_new(j) = l + (r-l).*rand;
if strcmp(method,'doubling')
y_new = -f(x_new,varargin{:});
if y < y_new && (um || accept(f,y,x_0,x_new,w,L,R,j,varargin{:}))
break;
end
else
y_new = -f(x_new,varargin{:});
if y < y_new
break;
end
end % if strcmp
if x_new(j) < x_0(j)
l = x_new(j);
else
r = x_new(j);
end % if
iter = iter + 1;
if iter > maxiter
fprintf('Maximum number (%d) of iterations reached for parameter %d during shrinkage.\n',maxiter,j);
if strcmp(method,'minmax')
error('Check function F, decrease the interval ''mmlimits'' or increase the value of ''maxiter''.');
else
error('Check function F or increase the value of ''maxiter''.');
end
end
end % while
%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%
function [L,R] = stepping_out(f,y,x_0,L,R,w,m,j,mmin,mmax,di,um,varargin);
%function [L,R] = stepping_out(f,y,x_0,L,R,w,m,j,mmin,mmax,di,um,varargin);
%
% Stepping-out procedure
if um % if the user defines the distribution to be unimodal
L(j) = x_0(j) - w(j).*rand;
if L(j) < mmin(j)
L(j) = mmin(j);
if di
fprintf('Underflow! (L:%d)\n',j);
end
end
R(j) = L(j) + w(j);
if R(j) > mmax(j)
R(j) = mmax(j);
if di
fprintf('Overflow! (R:%d)\n',j);
end
end
while y < -f(L,varargin{:})
L(j) = L(j) - w(j);
if L(j) < mmin(j)
L(j) = mmin(j);
if di
fprintf('Underflow! (L:%d)\n',j);
end
break;
end
end
while y < -f(R,varargin{:})
R(j) = R(j) + w(j);
if R(j) > mmax(j)
R(j) = mmax(j);
if di
fprintf('Overflow! (R:%d)\n',j);
end
break;
end
end
else % if the distribution is not defined to be unimodal
L(j) = x_0(j) - w(j).*rand;
J = floor(m(j).*rand);
if L(j) < mmin(j)
L(j) = mmin(j);
if di
fprintf('Underflow! (L:%d)\n',j);
end
J = 0;
end
R(j) = L(j) + w(j);
K = (m(j)-1) - J;
if R(j) > mmax(j)
R(j) = mmax(j);
if di
fprintf('Overflow! (R:%d)\n',j);
end
K = 0;
end
while J > 0 && y < -f(L,varargin{:})
L(j) = L(j) - w(j);
if L(j) < mmin(j)
L(j) = mmin(j);
if di
fprintf('Underflow! (L:%d)\n',j);
end
break;
end
J = J - 1;
end
while K > 0 && y < -f(R,varargin{:})
R(j) = R(j) + w(j);
if R(j) > mmax(j)
R(j) = mmax(j);
if di
fprintf('Overflow! (R:%d)\n',j);
end
break;
end
K = K - 1;
end
end
%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%
function [L,R] = doubling(f,y,x_0,L,R,w,p,j,mmin,mmax,di,um,varargin);
%function [L,R] = doubling(f,y,x_0,L,R,w,p,j,mmin,mmax,di,um,varargin);
%
% Doubling scheme for slice sampling
if um % if the user defines the distribution to be unimodal
L(j) = x_0(j) - w(j).*rand;
if L(j) < mmin(j)
L(j) = mmin(j);
if di
fprintf('Underflow! (L:%d)\n',j);
end
Ao = 1;
else
Ao = 0;
end
R(j) = L(j) + w(j);
if R(j) > mmax(j)
R(j) = mmax(j);
if di
fprintf('Overflow! (R:%d)\n',j);
end
Bo = 1;
else
Bo = 0;
end
AL = -f(L,varargin{:});
AR = -f(R,varargin{:});
while (Ao == 0 && y < AL) || (Bo == 0 && y < AR)
if rand < 1/2
L(j) = L(j) - (R(j)-L(j));
if L(j) < mmin(j)
L(j) = mmin(j);
if di
fprintf('Underflow! (L:%d)\n',j);
end
Ao = 1;
else
Ao = 0;
end
AL = -f(L,varargin{:});
else
R(j) = R(j) + (R(j)-L(j));
if R(j) > mmax(j)
R(j) = mmax(j);
if di
fprintf('Overflow! (R:%d)\n',j);
end
Bo = 1;
else
Bo = 0;
end
AR = -f(R,varargin{:});
end
end % while
else % if the distribution is not defined to be unimodal
L(j) = x_0(j) - w(j).*rand;
if L(j) < mmin(j)
L(j) = mmin(j);
if di
fprintf('Underflow! (L:%d)\n',j);
end
end
R(j) = L(j) + w(j);
if R(j) > mmax(j)
R(j) = mmax(j);
if di
fprintf('Overflow! (R:%d)\n',j);
end
end
K = p(j);
AL = -f(L,varargin{:});
AR = -f(R,varargin{:});
while K > 0 && (y < AL || y < AR)
if rand < 1/2
L(j) = L(j) - (R(j)-L(j));
if L(j) < mmin(j)
L(j) = mmin(j);
if di
fprintf('Underflow! (L:%d)\n',j);
end
end
AL = -f(L,varargin{:});
else
R(j) = R(j) + (R(j)-L(j));
if R(j) > mmax(j)
R(j) = mmax(j);
if di
fprintf('Overflow! (R:%d)\n',j);
end
end
AR = -f(R,varargin{:});
end
K = K - 1;
end % while
end
%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%
function out = accept(f,y,x_0,x_new,w,L,R,j,varargin)
%function out = accept(f,y,x_0,x_new,w,L,R,j,varargin)
%
% Acceptance check for doubling scheme
out = [];
l = L;
r = R;
d = 0;
while r(j)-l(j) > 1.1*w(j)
m = (l(j)+r(j))/2;
if (x_0(j) < m && x_new(j) >= m) || (x_0(j) >= m && x_new(j) < m)
d = 1;
end
if x_new(j) < m
r(j) = m;
else
l(j) = m;
end
if d && y >= -f(l,varargin{:}) && y >= -f(r,varargin{:})
out = 0;
break;
end
end % while
if isempty(out)
out = 1;
end;
function p = P(J,v)
if size(J,2) ~= 0
p = v' - J*(J'*v');
p = p';
else
p = v;
end
|
github
|
lcnhappe/happe-master
|
metrop2.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/mc/metrop2.m
| 4,703 |
utf_8
|
08184b230c18c07d461096023d9c021a
|
function [samples, energies, diagn] = metrop2(f, x, opt, gradf, varargin)
%METROP2 Markov Chain Monte Carlo sampling with Metropolis algorithm.
%
% Description
% SAMPLES = METROP2(F, X, OPT) uses the Metropolis algorithm to
% sample from the distribution P ~ EXP(-F), where F is the first
% argument to METROP2. The Markov chain starts at the point X and each
% candidate state is picked from a Gaussian proposal distribution and
% accepted or rejected according to the Metropolis criterion.
%
% SAMPLES = METROP2(F, X, OPT, [], P1, P2, ...) allows additional
% arguments to be passed to F(). The fourth argument is ignored, but
% is included for compatibility with HMC and the optimizers.
%
% [SAMPLES, ENERGIES, DIAGN] = METROP2(F, X, OPT) also returns a log
% of the energy values (i.e. negative log probabilities) for the
% samples in ENERGIES and DIAGN, a structure containing diagnostic
% information (position and acceptance threshold) for each step of the
% chain in DIAGN.POS and DIAGN.ACC respectively. All candidate states
% (including rejected ones) are stored in DIAGN.POS.
%
% S = METROP2('STATE') returns a state structure that contains the state
% of the two random number generators RAND and RANDN. These are
% contained in fields randstate, randnstate.
%
% METROP2('STATE', S) resets the state to S. If S is an integer, then
% it is passed to RAND and RANDN. If S is a structure returned by
% METROP2('STATE') then it resets the generator to exactly the same
% state.
%
% See METROP2_OPT for the optional parameters in the OPTIONS
% structure.
%
% See also
% HMC, METROP2_OPT
%
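% Example (a hedged sketch; METROP2_OPT is assumed to return default
% options, and the energy and option values are illustrative):
%
%   opt = metrop2_opt;
%   opt.nsamples = 500;
%   opt.stddev = 0.5;
%   s = metrop2(@(x) 0.5*sum(x.^2), zeros(1,2), opt);
%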
% Copyright (c) Christopher M Bishop, Ian T Nabney (1996, 1997)
% Copyright (c) 1998-2000 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
% Global variable to store state of momentum variables: set by set_state
% Used to initialize variable if set
global HMC_MOM
if nargin <= 2
if ~strcmp(f, 'state')
error('Unknown argument to metrop2');
end
switch nargin
case 1
samples = get_state(f);
return;
case 2
set_state(f, x);
return;
end
end
% Set empty options to default values
opt=metrop2_opt(opt);
% Reference to structures is much slower, so...
opt_nsamples=opt.nsamples;
opt_display =opt.display;
stddev = opt.stddev;
% Set up string for evaluating potential function.
%f = fcnchk(f, length(varargin));
nparams = length(x);
samples = zeros(opt_nsamples, nparams); % Matrix of returned samples.
if nargout >= 2
en_save = 1;
energies = zeros(opt_nsamples, 1);
else
en_save = 0;
end
if nargout >= 3
diagnostics = 1;
diagn_pos = zeros(opt_nsamples, nparams);
diagn_acc = zeros(opt_nsamples, 1);
else
diagnostics = 0;
end
% Main loop.
k = - opt.nomit + 1;
Eold = f(x, varargin{:}); % Evaluate starting energy.
nreject = 0; % Initialise count of rejected states.
while k <= opt_nsamples
xold = x;
% Sample a new point from the proposal distribution
x = xold + randn(1, nparams)*stddev;
% Now apply Metropolis algorithm.
Enew = f(x, varargin{:}); % Evaluate new energy.
a = exp(Eold - Enew); % Acceptance threshold.
if (diagnostics & k > 0)
diagn_pos(k,:) = x;
diagn_acc(k,:) = a;
end
if (opt_display > 1)
fprintf(1, 'New position is\n');
disp(x);
end
if a > rand(1) % Accept the new state.
Eold = Enew;
if (opt_display > 0)
fprintf(1, 'Finished step %4d Threshold: %g\n', k, a);
end
else % Reject the new state
if k > 0
nreject = nreject + 1;
end
x = xold; % Reset position
if (opt_display > 0)
fprintf(1, ' Sample rejected %4d. Threshold: %g\n', k, a);
end
end
if k > 0
samples(k,:) = x; % Store sample.
if en_save
energies(k) = Eold; % Store energy.
end
end
k = k + 1;
end
if (opt_display > 0)
fprintf(1, '\nFraction of samples rejected: %g\n', ...
nreject/(opt_nsamples));
end
if diagnostics
diagn.pos = diagn_pos;
diagn.acc = diagn_acc;
end
% Return complete state of the sampler.
function state = get_state(f)
state.randstate = rand('state');
state.randnstate = randn('state');
return
% Set state of sampler, either from full state, or with an integer
function set_state(f, x)
if isnumeric(x)
rand('state', x);
randn('state', x);
else
if ~isstruct(x)
error('Second argument to metrop2 must be number or state structure');
end
if (~isfield(x, 'randstate') | ~isfield(x, 'randnstate'))
error('Second argument to metrop2 must contain correct fields')
end
rand('state', x.randstate);
randn('state', x.randnstate);
end
return
|
github
|
lcnhappe/happe-master
|
gp_optim.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gp_optim.m
| 5,760 |
utf_8
|
853821530091611d1616a58b4951a2c7
|
function [gp, varargout] = gp_optim(gp, x, y, varargin)
%GP_OPTIM Optimize parameters of a Gaussian process
%
% Description
% GP = GP_OPTIM(GP, X, Y, OPTIONS) optimises the parameters of a
% GP structure given matrix X of training inputs and vector
% Y of training targets.
%
% [GP, OUTPUT1, OUTPUT2, ...] = GP_OPTIM(GP, X, Y, OPTIONS)
% optionally returns outputs of the optimization function.
%
% OPTIONS is optional parameter-value pair
% z - optional observed quantity in triplet (x_i,y_i,z_i)
% Some likelihoods may use this. For example, in case of
% Poisson likelihood we have z_i=E_i, that is, expected
% value for ith case.
% optimf - function handle for an optimization function, which is
% assumed to have similar input and output arguments
% as usual fmin*-functions. Default is @fminscg.
% opt - options structure for the minimization function.
% Use optimset to set these options. By default options
% 'GradObj' is 'on', 'LargeScale' is 'off'.
% loss - 'e' to minimize the marginal posterior energy (default) or
% 'loo' to minimize the negative leave-one-out lpd
% 'kfcv' to minimize the negative k-fold-cv lpd
% 'waic' to minimize the WAIC loss
% only 'e' and 'loo' with Gaussian likelihood have gradients
% k - number of folds in kfcv
%
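%  Example (a hedged sketch; LIK_GAUSSIAN, GPCF_SEXP and GP_SET are
%  other GPstuff functions, and x, y are assumed training data):
%    gp = gp_set('lik', lik_gaussian(), 'cf', gpcf_sexp());
%    opt = optimset('TolFun', 1e-3, 'TolX', 1e-3);
%    gp = gp_optim(gp, x, y, 'opt', opt);
%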
% See also
% GP_SET, GP_E, GP_G, GP_EG, FMINSCG, FMINLBFGS, OPTIMSET, DEMO_REGRESSION*
%
% Copyright (c) 2010-2012 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'GP_OPTIM';
ip.addRequired('gp',@isstruct);
ip.addRequired('x', @(x) ~isempty(x) && isreal(x) && all(isfinite(x(:))))
ip.addRequired('y', @(x) ~isempty(x) && isreal(x) && all(isfinite(x(:))))
ip.addParamValue('z', [], @(x) isreal(x) && all(isfinite(x(:))))
ip.addParamValue('optimf', @fminscg, @(x) isa(x,'function_handle'))
ip.addParamValue('opt', [], @isstruct)
ip.addParamValue('loss', 'e', @(x) ismember(lower(x),{'e', 'loo', 'kfcv', 'waic', 'waicv', 'waicg'}))
ip.addParamValue('k', 10, @(x) isreal(x) && isscalar(x) && isfinite(x) && x>0)
ip.parse(gp, x, y, varargin{:});
z=ip.Results.z;
optimf=ip.Results.optimf;
opt=ip.Results.opt;
loss=ip.Results.loss;
k=ip.Results.k;
if isempty(gp_pak(gp))
% nothing to optimize
return
end
switch lower(loss)
case 'e'
fh_eg=@(ww) gp_eg(ww, gp, x, y, 'z', z);
optdefault=struct('GradObj','on','LargeScale','off');
case 'loo'
fh_eg=@(ww) gp_looeg(ww, gp, x, y, 'z', z);
if isfield(gp.lik.fh,'trcov') || isequal(gp.latent_method, 'EP')
optdefault=struct('GradObj','on','LargeScale','off');
else
% Laplace-LOO does not yet have gradients
optdefault=struct('Algorithm','interior-point');
if ismember('optimf',ip.UsingDefaults)
optimf=@fmincon;
end
end
case 'kfcv'
% kfcv does not yet have gradients
fh_eg=@(ww) gp_kfcve(ww, gp, x, y, 'z', z, 'k', k);
optdefault=struct('Algorithm','interior-point');
if ismember('optimf',ip.UsingDefaults)
optimf=@fmincon;
end
case {'waic' 'waicv'}
% waic does not yet have gradients
fh_eg=@(ww) -gp_waic(gp_unpak(gp,ww), x, y, 'z', z);
optdefault=struct('Algorithm','interior-point');
if ismember('optimf',ip.UsingDefaults)
optimf=@fmincon;
end
case 'waicg'
% waic does not yet have gradients
fh_eg=@(ww) -gp_waic(gp_unpak(gp,ww), x, y, 'z', z, 'method', 'G');
optdefault=struct('Algorithm','interior-point');
if ismember('optimf',ip.UsingDefaults)
optimf=@fmincon;
end
end
opt=setOpt(optdefault,opt);
w=gp_pak(gp);
if isequal(lower(loss),'e') || (isequal(lower(loss),'loo') && (isfield(gp.lik.fh,'trcov') || isequal(gp.latent_method, 'EP')))
switch nargout
case 6
[w,fval,exitflag,output,grad,hessian] = optimf(fh_eg, w, opt);
varargout={fval,exitflag,output,grad,hessian};
case 5
[w,fval,exitflag,output,grad] = optimf(fh_eg, w, opt);
varargout={fval,exitflag,output,grad};
case 4
[w,fval,exitflag,output] = optimf(fh_eg, w, opt);
varargout={fval,exitflag,output};
case 3
[w,fval,exitflag] = optimf(fh_eg, w, opt);
varargout={fval,exitflag};
case 2
[w,fval] = optimf(fh_eg, w, opt);
varargout={fval};
case 1
w = optimf(fh_eg, w, opt);
varargout={};
end
else
lb=repmat(-8,size(w));
ub=repmat(10,size(w));
switch nargout
case 6
[w,fval,exitflag,output,grad,hessian] = optimf(fh_eg, w, [], [], [], [], lb, ub, [], opt);
varargout={fval,exitflag,output,grad,hessian};
case 5
[w,fval,exitflag,output,grad] = optimf(fh_eg, w, [], [], [], [], lb, ub, [], opt);
varargout={fval,exitflag,output,grad};
case 4
[w,fval,exitflag,output] = optimf(fh_eg, w, [], [], [], [], lb, ub, [], opt);
varargout={fval,exitflag,output};
case 3
[w,fval,exitflag] = optimf(fh_eg, w, [], [], [], [], lb, ub, [], opt);
varargout={fval,exitflag};
case 2
[w,fval] = optimf(fh_eg, w, [], [], [], [], lb, ub, [], opt);
varargout={fval};
case 1
w = optimf(fh_eg, w, [], [], [], [], lb, ub, [], opt);
varargout={};
end
end
gp=gp_unpak(gp,w);
end
function opt=setOpt(optdefault, opt)
% Set default options
opttmp=optimset(optdefault,opt);
% Set some additional options for @fminscg
if isfield(opt,'lambda')
opttmp.lambda=opt.lambda;
end
if isfield(opt,'lambdalim')
opttmp.lambdalim=opt.lambdalim;
end
opt=opttmp;
end
|
github
|
lcnhappe/happe-master
|
gp_mc.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gp_mc.m
| 22,688 |
utf_8
|
42a2a7d17e9adb7a658a7b89b4390d19
|
function [record, gp, opt] = gp_mc(gp, x, y, varargin)
%GP_MC Markov chain Monte Carlo sampling for Gaussian process models
%
% Description
% [RECORD, GP, OPT] = GP_MC(GP, X, Y, OPTIONS) Takes the Gaussian
% process structure GP, inputs X and outputs Y. Returns record
% structure RECORD with parameter samples, the Gaussian process GP
% at current state of the sampler and an options structure OPT
% containing all the options in OPTIONS and information of the
% current state of the sampler (e.g. the random number seed)
%
% OPTIONS is optional parameter-value pair
% z - Optional observed quantity in triplet (x_i,y_i,z_i).
% Some likelihoods may use this. For example, in
% case of Poisson likelihood we have z_i=E_i,
% that is, expected value for ith case.
% repeat - Number of iterations between successive sample saves
% (that is every repeat'th sample is stored). Default 1.
% nsamples - Number of samples to be returned
% display - Defines if sampling information is printed, 1=yes, 0=no.
% Default 1. If >1, only every nth iteration is displayed.
% hmc_opt - Options structure for HMC sampler (see hmc2_opt).
% When this is given the covariance function and
% likelihood parameters are sampled with hmc2
% (respecting infer_params option). If optional
% argument hmc_opt.nuts = 1, No-U-Turn HMC is used
% instead. With NUTS, only mandatory parameter is
% number of adaptation steps hmc_opt.nadapt of step-size
% parameter. For additional info, see hmc_nuts.
% sls_opt - Options structure for slice sampler (see sls_opt).
% When this is given the covariance function and
% likelihood parameters are sampled with sls
% (respecting infer_params option).
% latent_opt - Options structure for latent variable sampler. When this
% is given the latent variables are sampled with
% function stored in the gp.fh.mc field in the
% GP structure. See gp_set.
% lik_hmc_opt - Options structure for HMC sampler (see hmc2_opt).
% When this is given the parameters of the
% likelihood are sampled with hmc2. This can be
% used to have different hmc options for
% covariance and likelihood parameters.
% lik_sls_opt - Options structure for slice sampler (see sls_opt).
% When this is given the parameters of the
% likelihood are sampled with sls. This can be
% used to have different sls options for
% covariance and likelihood parameters.
% lik_gibbs_opt
% - Options structure for Gibbs sampler. Some likelihood
% function parameters need to be sampled with
% Gibbs sampling (such as lik_smt). The Gibbs
% sampler is implemented in the respective lik_*
% file.
% persistence_reset
% - Reset the momentum parameter in HMC sampler after
% every repeat'th iteration, default 0.
% record - An old record structure from where the sampling is
% continued
%
% The GP_MC function makes nsamples*repeat iterations and stores
% every repeat'th sample. At each iteration it samples first the
% latent variables (if 'latent_opt' option is given), then the
%    covariance and likelihood parameters (if 'hmc_opt' or
%    'sls_opt' option is given, respecting the infer_params
%    option), and lastly the likelihood parameters (if
%    'lik_hmc_opt', 'lik_sls_opt' or 'lik_gibbs_opt' option is given).
%
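%    Example (a hedged usage sketch; assumes a GPstuff model built
%    with GP_SET as in the demos, and the GPstuff helper THIN for
%    burn-in removal and thinning):
%
%      gp = gp_set('lik', lik_probit(), 'cf', gpcf_sexp(), ...
%                  'latent_method', 'MCMC');
%      [rfull, gp, opt] = gp_mc(gp, x, y, 'nsamples', 220, 'repeat', 2);
%      rgp = thin(rfull, 21, 2);   % drop burn-in, keep every 2nd sample
%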
% See also:
% DEMO_CLASSIFIC1, DEMO_ROBUSTREGRESSION
% Copyright (c) 1998-2000,2010 Aki Vehtari
% Copyright (c) 2007-2010 Jarno Vanhatalo
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
%#function gp_e gp_g
ip=inputParser;
ip.FunctionName = 'GP_MC';
ip.addRequired('gp',@isstruct);
ip.addRequired('x', @(x) ~isempty(x) && isreal(x) && all(isfinite(x(:))))
ip.addRequired('y', @(x) ~isempty(x) && isreal(x) && all(isfinite(x(:))))
ip.addParamValue('z', [], @(x) isreal(x) && all(isfinite(x(:))))
ip.addParamValue('nsamples', 1, @(x) isreal(x) && all(isfinite(x(:))))
ip.addParamValue('repeat', 1, @(x) isreal(x) && all(isfinite(x(:))))
ip.addParamValue('display', 1, @(x) isreal(x) && all(isfinite(x(:))))
ip.addParamValue('record',[], @(x) isstruct(x) || isempty(x));
ip.addParamValue('hmc_opt', [], @(x) isstruct(x) || isempty(x));
ip.addParamValue('sls_opt', [], @(x) isstruct(x) || isempty(x));
ip.addParamValue('ssls_opt', [], @(x) isstruct(x) || isempty(x));
ip.addParamValue('latent_opt', [], @(x) isstruct(x) || isempty(x));
ip.addParamValue('lik_hmc_opt', [], @(x) isstruct(x) || isempty(x));
ip.addParamValue('lik_sls_opt', [], @(x) isstruct(x) || isempty(x));
ip.addParamValue('lik_gibbs_opt', [], @(x) isstruct(x) || isempty(x));
ip.addParamValue('persistence_reset', 0, @(x) ~isempty(x) && isreal(x));
ip.parse(gp, x, y, varargin{:});
z=ip.Results.z;
opt.nsamples=ip.Results.nsamples;
opt.repeat=ip.Results.repeat;
opt.display=ip.Results.display;
record=ip.Results.record;
opt.hmc_opt = ip.Results.hmc_opt;
opt.ssls_opt = ip.Results.ssls_opt;
opt.sls_opt = ip.Results.sls_opt;
opt.latent_opt = ip.Results.latent_opt;
opt.lik_hmc_opt = ip.Results.lik_hmc_opt;
opt.lik_sls_opt = ip.Results.lik_sls_opt;
opt.lik_gibbs_opt = ip.Results.lik_gibbs_opt;
opt.persistence_reset = ip.Results.persistence_reset;
% if isfield(gp.lik, 'nondiagW');
% switch gp.lik.type
% case {'LGP', 'LGPC'}
% error('gp2_mc not implemented for this type of likelihood');
% case {'Softmax', 'Multinom'}
% [n,nout] = size(y);
% otherwise
% n = size(y,1);
% nout=length(gp.comp_cf);
% end
% end
% Default samplers and some checking
if isfield(gp,'latent_method') && isequal(gp.latent_method,'MCMC')
% If no options structures, use SSLS as a default sampler for hyperparameters
% and ESLS for latent values
if isempty(opt.hmc_opt) && isempty(opt.ssls_opt) && isempty(opt.sls_opt) && ...
isempty(opt.latent_opt) && isempty(opt.lik_hmc_opt) && isempty(opt.lik_sls_opt) && ...
isempty(opt.lik_gibbs_opt)
opt.ssls_opt.latent_opt.repeat = 20;
if opt.display>0
fprintf(' Using SSLS sampler for hyperparameters and ESLS for latent values\n')
end
end
% Set latent values
if (~isfield(gp,'latentValues') || isempty(gp.latentValues)) ...
&& ~isfield(gp.lik.fh,'trcov')
if (~isfield(gp.lik, 'nondiagW') || ismember(gp.lik.type, {'Softmax', 'Multinom', ...
'LGP', 'LGPC'}))
gp.latentValues=zeros(size(y));
else
if ~isfield(gp, 'comp_cf') || isempty(gp.comp_cf)
error('Define multiple covariance functions for latent processes using gp.comp_cf (see gp_set)');
end
if isfield(gp.lik,'xtime')
ntime = size(gp.lik.xtime,1);
gp.latentValues=zeros(size(y,1)+ntime,1);
else
gp.latentValues=zeros(size(y,1)*length(gp.comp_cf),1);
end
end
end
else
% latent method is not MCMC
% If no options structures, use SLS as a default sampler for parameters
if ~isempty(opt.ssls_opt)
warning('Latent method is not MCMC. ssls_opt ignored')
opt.ssls_opt=[];
end
if ~isempty(opt.latent_opt)
warning('Latent method is not MCMC. latent_opt ignored')
opt.latent_opt=[];
end
if isempty(opt.hmc_opt) && isempty(opt.sls_opt) && ...
isempty(opt.lik_hmc_opt) && isempty(opt.lik_sls_opt) && ...
isempty(opt.lik_gibbs_opt)
opt.sls_opt.nomit = 0;
opt.sls_opt.display = 0;
opt.sls_opt.method = 'minmax';
opt.sls_opt.wsize = 10;
opt.sls_opt.plimit = 5;
opt.sls_opt.unimodal = 0;
opt.sls_opt.mmlimits = [-10; 10];
if opt.display>0
if isfield(gp,'latent_method')
fprintf(' Using SLS sampler for hyperparameters and %s for latent values\n',gp.latent_method)
else
fprintf(' Using SLS sampler for hyperparameters\n')
end
end
end
end
% Initialize record
if isempty(record)
% No old record
record=recappend();
else
ri=size(record.etr,1);
end
% Set the states of samplers
if ~isempty(opt.latent_opt)
f=gp.latentValues;
if isfield(opt.latent_opt, 'rstate')
if ~isempty(opt.latent_opt.rstate)
latent_rstate = opt.latent_opt.rstate;
else
hmc2('state', sum(100*clock))
latent_rstate=hmc2('state');
end
else
hmc2('state', sum(100*clock))
latent_rstate=hmc2('state');
end
else
f=y;
end
if ~isempty(opt.hmc_opt)
if isfield(opt.hmc_opt, 'nuts') && opt.hmc_opt.nuts
% Number of step-size adaptation steps in hmc_nuts
if ~isfield(opt.hmc_opt, 'nadapt')
opt.hmc_opt.nadapt = 20;
end
end
if isfield(opt.hmc_opt, 'rstate')
if ~isempty(opt.hmc_opt.rstate)
hmc_rstate = opt.hmc_opt.rstate;
else
hmc2('state', sum(100*clock))
hmc_rstate=hmc2('state');
end
else
hmc2('state', sum(100*clock))
hmc_rstate=hmc2('state');
end
end
if ~isempty(opt.ssls_opt)
f=gp.latentValues;
end
if ~isempty(opt.lik_hmc_opt)
if isfield(opt.lik_hmc_opt, 'rstate')
if ~isempty(opt.lik_hmc_opt.rstate)
lik_hmc_rstate = opt.lik_hmc_opt.rstate;
else
hmc2('state', sum(100*clock))
lik_hmc_rstate=hmc2('state');
end
else
hmc2('state', sum(100*clock))
lik_hmc_rstate=hmc2('state');
end
end
% Print labels for sampling information
if opt.display
fprintf(' cycle etr ');
if ~isempty(opt.hmc_opt)
fprintf('hrej              ') % rejection rate of hyperparameter HMC sampling
end
if ~isempty(opt.sls_opt)
fprintf('slsrej ');
end
if ~isempty(opt.lik_hmc_opt)
fprintf('likel.rej ');
end
if ~isempty(opt.latent_opt)
if isequal(gp.fh.mc, @esls)
fprintf('lslsn') % No rejection rate for esls, print first accepted value
else
fprintf('lrej ') % rejection rate of latent value sampling
end
if isfield(opt.latent_opt, 'sample_latent_scale')
fprintf(' lvScale ')
end
end
fprintf('\n');
end
% --- Start sampling ------------
for k=1:opt.nsamples
if opt.persistence_reset
if ~isempty(opt.hmc_opt)
hmc_rstate.mom = [];
end
if ~isempty(opt.latent_opt)
if isfield(opt.latent_opt, 'rstate')
opt.latent_opt.rstate.mom = [];
end
end
if ~isempty(opt.lik_hmc_opt)
lik_hmc_rstate.mom = [];
end
end
hmcrej = 0;
lik_hmcrej = 0;
lrej=0;
indrej=0;
for l=1:opt.repeat
% --- Sample latent Values -------------
if ~isempty(opt.latent_opt)
[f, energ, diagnl] = gp.fh.mc(f, opt.latent_opt, gp, x, y, z);
gp.latentValues = f(:);
f = f(:);
if ~isequal(gp.fh.mc, @esls)
lrej=lrej+diagnl.rej/opt.repeat;
else
lrej = diagnl.rej;
end
if isfield(diagnl, 'opt')
opt.latent_opt = diagnl.opt;
end
end
% --- Sample parameters with HMC -------------
if ~isempty(opt.hmc_opt)
if isfield(opt.hmc_opt, 'nuts') && opt.hmc_opt.nuts
% Use NUTS hmc
w = gp_pak(gp);
lp = @(w) deal(-gpmc_e(w,gp,x,y,f,z), -gpmc_g(w,gp,x,y,f,z));
if k<opt.hmc_opt.nadapt
% Take one sample while adjusting step length
opt.hmc_opt.Madapt = 1;
opt.hmc_opt.M = 0;
else
% Take one sample without adjusting step length
opt.hmc_opt.Madapt = 0;
opt.hmc_opt.M = 1;
end
[w, energies, diagnh] = hmc_nuts(lp, w, opt.hmc_opt);
opt.hmc_opt = diagnh.opt;
hmcrej=hmcrej+diagnh.rej/opt.repeat;
w=w(end,:);
gp = gp_unpak(gp, w);
else
if isfield(opt.hmc_opt,'infer_params')
infer_params = gp.infer_params;
gp.infer_params = opt.hmc_opt.infer_params;
end
w = gp_pak(gp);
% Set the state
hmc2('state',hmc_rstate);
% sample (latent values f and z are passed on so that likelihood parameters can be sampled as well)
[w, energies, diagnh] = hmc2(@gpmc_e, w, opt.hmc_opt, @gpmc_g, gp, x, y, f, z);
% Save the current state
hmc_rstate=hmc2('state');
hmcrej=hmcrej+diagnh.rej/opt.repeat;
if isfield(diagnh, 'opt')
opt.hmc_opt = diagnh.opt;
end
opt.hmc_opt.rstate = hmc_rstate;
w=w(end,:);
gp = gp_unpak(gp, w);
if isfield(opt.hmc_opt,'infer_params')
gp.infer_params = infer_params;
end
end
end
% --- Sample parameters with SLS -------------
if ~isempty(opt.sls_opt)
if isfield(opt.sls_opt,'infer_params')
infer_params = gp.infer_params;
gp.infer_params = opt.sls_opt.infer_params;
end
w = gp_pak(gp);
[w, energies, diagns] = sls(@gpmc_e, w, opt.sls_opt, @gpmc_g, gp, x, y, f, z);
if isfield(diagns, 'opt')
opt.sls_opt = diagns.opt;
end
w=w(end,:);
gp = gp_unpak(gp, w);
if isfield(opt.sls_opt,'infer_params')
gp.infer_params = infer_params;
end
end
% Sample parameters & latent values with SSLS
if ~isempty(opt.ssls_opt)
if isfield(opt.ssls_opt,'infer_params')
infer_params = gp.infer_params;
gp.infer_params = opt.ssls_opt.infer_params;
end
w = gp_pak(gp);
[w, f, diagns] = surrogate_sls(f, w, opt.ssls_opt, gp, x, y, z);
gp.latentValues = f;
if isfield(diagns, 'opt')
opt.ssls_opt = diagns.opt;
end
w=w(end,:);
gp = gp_unpak(gp, w);
if isfield(opt.ssls_opt,'infer_params')
gp.infer_params = infer_params;
end
end
% --- Sample the likelihood parameters with Gibbs -------------
if ~isempty(strfind(gp.infer_params, 'likelihood')) && ...
isfield(gp.lik,'gibbs') && isequal(gp.lik.gibbs,'on')
[gp.lik, f] = gp.lik.fh.gibbs(gp, gp.lik, x, f);
end
% --- Sample the likelihood parameters with HMC -------------
if ~isempty(strfind(gp.infer_params, 'likelihood')) && ...
~isempty(opt.lik_hmc_opt)
infer_params = gp.infer_params;
gp.infer_params = 'likelihood';
w = gp_pak(gp);
fe = @(w, lik) (-lik.fh.ll(feval(lik.fh.unpak,lik,w),y,f,z)-lik.fh.lp(feval(lik.fh.unpak,lik,w)));
fg = @(w, lik) (-lik.fh.llg(feval(lik.fh.unpak,lik,w),y,f,'param',z)-lik.fh.lpg(feval(lik.fh.unpak,lik,w)));
% Set the state
hmc2('state',lik_hmc_rstate);
[w, energies, diagnh] = hmc2(fe, w, opt.lik_hmc_opt, fg, gp.lik);
% Save the current state
lik_hmc_rstate=hmc2('state');
lik_hmcrej=lik_hmcrej+diagnh.rej/opt.repeat;
if isfield(diagnh, 'opt')
opt.lik_hmc_opt = diagnh.opt;
end
opt.lik_hmc_opt.rstate = lik_hmc_rstate;
w=w(end,:);
gp = gp_unpak(gp, w);
gp.infer_params = infer_params;
end
% --- Sample the likelihood parameters with SLS -------------
if ~isempty(strfind(gp.infer_params, 'likelihood')) && ...
~isempty(opt.lik_sls_opt)
w = gp_pak(gp, 'likelihood');
fe = @(w, lik) (-lik.fh.ll(feval(lik.fh.unpak,lik,w),y,f,z) -lik.fh.lp(feval(lik.fh.unpak,lik,w)));
[w, energies, diagns] = sls(fe, w, opt.lik_sls_opt, [], gp.lik);
if isfield(diagns, 'opt')
opt.lik_sls_opt = diagns.opt;
end
w=w(end,:);
gp = gp_unpak(gp, w, 'likelihood');
end
end % ----- for l=1:opt.repeat ---------
% --- Set record -------
ri=ri+1;
record=recappend(record);
% Display some sampling statistics (TODO: this could be done more neatly)
if opt.display && rem(ri,opt.display)==0
fprintf(' %4d %.3f ',ri, record.etr(ri,1));
if ~isempty(opt.hmc_opt)
fprintf(' %.1e ',record.hmcrejects(ri));
end
if ~isempty(opt.sls_opt)
fprintf('sls ');
end
if ~isempty(opt.lik_hmc_opt)
fprintf(' %.1e ',record.lik_hmcrejects(ri));
end
if ~isempty(opt.latent_opt)
fprintf('%.1e',record.lrejects(ri));
fprintf(' ');
if isfield(diagnl, 'lvs')
fprintf('%.6f', diagnl.lvs);
end
end
fprintf('\n');
end
end
%------------------------
function record = recappend(record)
% RECAPPEND - Record append
%          Description
%          RECORD = RECAPPEND() initializes a new record structure.
%          RECORD = RECAPPEND(RECORD) appends the current state of the
%          sampler (parameters, latent values, energies and rejection
%          rates, taken from the enclosing workspace) to the old
%          record RECORD and returns the updated structure.
ncf = length(gp.cf);
if nargin == 0 % Initialize record structure
record.type = gp.type;
record.lik = gp.lik;
if isfield(gp,'latent_method')
record.latent_method = gp.latent_method;
end
if isfield(gp, 'comp_cf')
record.comp_cf = gp.comp_cf;
end
% If a sparse approximation is used, save the inducing inputs
switch gp.type
case 'FIC'
record.X_u = [];
case {'PIC' 'PIC_BLOCK'}
record.X_u = [];
record.tr_index = gp.tr_index;
case 'CS+FIC'
record.X_u = [];
otherwise
% Do nothing
end
if isfield(gp,'latentValues')
record.latentValues = [];
record.lrejects = 0;
end
record.jitterSigma2 = [];
if isfield(gp, 'site_tau')
record.site_tau = [];
record.site_nu = [];
record.Ef = [];
record.Varf = [];
record.p1 = [];
end
% Initialize the records of covariance functions
for i=1:ncf
cf = gp.cf{i};
record.cf{i} = cf.fh.recappend([], gp.cf{i});
% Initialize metric structure
if isfield(cf,'metric')
record.cf{i}.metric = cf.metric.fh.recappend(cf.metric, 1);
end
end
% Initialize the record for likelihood
lik = gp.lik;
record.lik = lik.fh.recappend([], gp.lik);
% Set the meanfunctions into record if they exist
if isfield(gp, 'meanf')
record.meanf = gp.meanf;
end
if isfield(gp, 'comp_cf')
record.comp_cf = gp.comp_cf;
end
if isfield(gp,'p')
record.p = gp.p;
end
if isfield(gp,'latent_method')
record.latent_method = gp.latent_method;
end
if isfield(gp,'latent_opt')
record.latent_opt = gp.latent_opt;
end
if isfield(gp,'fh')
record.fh=gp.fh;
end
record.infer_params = gp.infer_params;
record.e = [];
record.edata = [];
record.eprior = [];
record.etr = [];
record.hmcrejects = 0;
ri = 1;
lrej = 0;
indrej = 0;
hmcrej=0;
lik_hmcrej=0;
end
% Set the record for every covariance function
for i=1:ncf
gpcf = gp.cf{i};
record.cf{i} = gpcf.fh.recappend(record.cf{i}, ri, gpcf);
% Record metric structure
if isfield(gpcf,'metric')
record.cf{i}.metric = record.cf{i}.metric.fh.recappend(record.cf{i}.metric, ri, gpcf.metric);
end
end
% Set the record for likelihood
lik = gp.lik;
record.lik = lik.fh.recappend(record.lik, ri, lik);
% Set jitterSigma2 to record
if ~isempty(gp.jitterSigma2)
record.jitterSigma2(ri,:) = gp.jitterSigma2;
end
% Set the latent values to record structure
if isfield(gp, 'latentValues')
record.latentValues(ri,:)=gp.latentValues(:)';
end
% Set the inducing inputs in the record structure
switch gp.type
case {'FIC', 'PIC', 'PIC_BLOCK', 'CS+FIC'}
record.X_u(ri,:) = gp.X_u(:)';
end
% Record training error and rejects
if isfield(gp,'latentValues')
elik = gp.lik.fh.ll(gp.lik, y, gp.latentValues, z);
[record.e(ri,:),record.edata(ri,:),record.eprior(ri,:)] = gp_e(gp_pak(gp), gp, x, gp.latentValues);
record.etr(ri,:) = record.e(ri,:) - elik;
% Set rejects
record.lrejects(ri,1)=lrej;
else
[record.e(ri,:),record.edata(ri,:),record.eprior(ri,:)] = gp_e(gp_pak(gp), gp, x, y, 'z', z);
record.etr(ri,:) = record.e(ri,:);
end
if ~isempty(opt.hmc_opt)
record.hmcrejects(ri,1)=hmcrej;
end
if ~isempty(opt.lik_hmc_opt)
record.lik_hmcrejects(ri,1)=lik_hmcrej;
end
% If inputs are sampled set the record which are on at this moment
if isfield(gp,'inputii')
record.inputii(ri,:)=gp.inputii;
end
if isfield(gp, 'meanf')
nmf = numel(gp.meanf);
for i=1:nmf
gpmf = gp.meanf{i};
record.meanf{i} = gpmf.fh.recappend(record.meanf{i}, ri, gpmf);
end
end
end
function e = gpmc_e(w, gp, x, y, f, z)
e=0;
if ~isempty(strfind(gp.infer_params, 'covariance'))
e=e+gp_e(w, gp, x, f, 'z', z);
end
if ~isempty(strfind(gp.infer_params, 'likelihood')) ...
&& ~isfield(gp.lik.fh,'trcov') ...
&& isfield(gp.lik.fh,'lp') && ~isequal(y,f)
% Evaluate the contribution to the error from non-Gaussian likelihood
% if latent method is MCMC
gp=gp_unpak(gp,w);
lik=gp.lik;
e=e-lik.fh.ll(lik,y,f,z)-lik.fh.lp(lik);
end
end
function g = gpmc_g(w, gp, x, y, f, z)
g=[];
if ~isempty(strfind(gp.infer_params, 'covariance'))
g=[g gp_g(w, gp, x, f, 'z', z)];
end
if ~isempty(strfind(gp.infer_params, 'likelihood')) ...
&& ~isfield(gp.lik.fh,'trcov') ...
&& isfield(gp.lik.fh,'lp') && ~isequal(y,f)
% Evaluate the contribution to the gradient from non-Gaussian likelihood
% if latent method is MCMC
gp=gp_unpak(gp,w);
lik=gp.lik;
g=[g -lik.fh.llg(lik,y,f,'param',z)-lik.fh.lpg(lik)];
end
end
end
| github | lcnhappe/happe-master | gpla_e.m | .m | happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpla_e.m | 90674 | UNKNOWN | 7f5dc0edae06d00e07815ed9c70bd1e7 |
function [e, edata, eprior, f, L, a, La2, p] = gpla_e(w, gp, varargin)
%GPLA_E Do Laplace approximation and return marginal log posterior estimate
%
% Description
% E = GPLA_E(W, GP, X, Y, OPTIONS) takes a GP structure GP
% together with a matrix X of input vectors and a matrix Y of
% target vectors, and finds the Laplace approximation for the
% conditional posterior p(Y | X, th), where th is the
% parameters. Returns the energy at th (see below). Each
% row of X corresponds to one input vector and each row of Y
% corresponds to one target vector.
%
% [E, EDATA, EPRIOR] = GPLA_E(W, GP, X, Y, OPTIONS) returns also
% the data and prior components of the total energy.
%
% The energy is minus log posterior cost function for th:
% E = EDATA + EPRIOR
% = - log p(Y|X, th) - log p(th),
% where th represents the parameters (lengthScale,
% magnSigma2...), X is inputs and Y is observations.
%
% OPTIONS is optional parameter-value pair
% z - optional observed quantity in triplet (x_i,y_i,z_i)
% Some likelihoods may use this. For example, in case of
% Poisson likelihood we have z_i=E_i, that is, expected
% value for ith case.
%
% See also
% GP_SET, GP_E, GPLA_G, GPLA_PRED
%
% Description 2
% Additional properties meant only for internal use.
%
% GP = GPLA_E('init', GP) takes a GP structure GP and
% initializes required fields for the Laplace approximation.
%
% GP = GPLA_E('clearcache', GP) takes a GP structure GP and clears the
% internal cache stored in the nested function workspace.
%
% [e, edata, eprior, f, L, a, La2, p] = GPLA_E(w, gp, x, y, varargin)
%    returns many useful quantities produced by the Laplace algorithm.
%
% The Newton's method is implemented as described in Rasmussen
% and Williams (2006).
%
% The stabilized Newton's method is implemented as suggested by
% Hannes Nickisch (personal communication).
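%
%  Example (a hedged sketch; GP_SET normally wires GPLA_E in when
%  latent_method is 'Laplace', so direct calls are rarely needed):
%
%    gp = gp_set('lik', lik_poisson(), 'cf', gpcf_sexp(), ...
%                'latent_method', 'Laplace');
%    w  = gp_pak(gp);
%    [e, edata, eprior] = gpla_e(w, gp, x, y, 'z', ye);  % ye = expected counts
%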
% Copyright (c) 2007-2010 Jarno Vanhatalo
% Copyright (c) 2010 Aki Vehtari
% Copyright (c) 2010 Pasi Jylänki
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
% parse inputs
ip=inputParser;
ip.FunctionName = 'GPLA_E';
% the validator must test its own argument x and accept both of the
% string commands ('init' and 'clearcache') handled below
ip.addRequired('w', @(x) ...
isempty(x) || ...
(ischar(x) && ismember(x, {'init' 'clearcache'})) || ...
(isvector(x) && isreal(x) && all(isfinite(x))) ...
|| all(isnan(x)));
ip.addRequired('gp',@isstruct);
ip.addOptional('x', @(x) isnumeric(x) && isreal(x) && all(isfinite(x(:))))
ip.addOptional('y', @(x) isnumeric(x) && isreal(x) && all(isfinite(x(:))))
ip.addParamValue('z', [], @(x) isnumeric(x) && isreal(x) && all(isfinite(x(:))))
ip.parse(w, gp, varargin{:});
x=ip.Results.x;
y=ip.Results.y;
z=ip.Results.z;
if strcmp(w, 'init')
% Initialize cache
ch = [];
% set function handle to the nested function laplace_algorithm
% this way each gp has its own persistent memory for the Laplace approximation
gp.fh.ne = @laplace_algorithm;
% set other function handles
gp.fh.e=@gpla_e;
gp.fh.g=@gpla_g;
gp.fh.pred=@gpla_pred;
gp.fh.jpred=@gpla_jpred;
gp.fh.looe=@gpla_looe;
gp.fh.loog=@gpla_loog;
gp.fh.loopred=@gpla_loopred;
e = gp;
% remove clutter from the nested workspace
clear w gp varargin ip x y z
elseif strcmp(w, 'clearcache')
% clear the cache
gp.fh.ne('clearcache');
else
% call laplace_algorithm using the function handle to the nested function
% this way each gp has its own persistent memory for Laplace
[e, edata, eprior, f, L, a, La2, p] = gp.fh.ne(w, gp, x, y, z);
end
function [e, edata, eprior, f, L, a, La2, p] = laplace_algorithm(w, gp, x, y, z)
if strcmp(w, 'clearcache')
ch=[];
return
end
% code for the Laplace algorithm
% check whether saved values can be used
if isempty(z)
datahash=hash_sha512([x y]);
else
datahash=hash_sha512([x y z]);
end
if ~isempty(ch) && all(size(w)==size(ch.w)) && all(abs(w-ch.w)<1e-8) && ...
isequal(datahash,ch.datahash)
% The covariance function parameters or data haven't changed so we
% can return the energy and the site parameters that are
% saved in the cache
e = ch.e;
edata = ch.edata;
eprior = ch.eprior;
f = ch.f;
L = ch.L;
La2 = ch.La2;
a = ch.a;
p = ch.p;
else
% The parameters or data have changed since
% the last call for gpla_e. In this case we need to
% re-evaluate the Laplace approximation
gp=gp_unpak(gp, w);
ncf = length(gp.cf);
n = size(x,1);
p = [];
maxiter = gp.latent_opt.maxiter;
tol = gp.latent_opt.tol;
% Initialize latent values
% zero seems to be a robust choice (Jarno)
% with mean functions, initialize to mean function values
if ~isfield(gp,'meanf')
f = zeros(size(y));
else
[H,b_m,B_m]=mean_prep(gp,x,[]);
f = H'*b_m;
end
% =================================================
% First Evaluate the data contribution to the error
switch gp.type
% ============================================================
% FULL
% ============================================================
case 'FULL'
if ~isfield(gp.lik, 'nondiagW')
K = gp_trcov(gp, x);
if isfield(gp,'meanf')
K=K+H'*B_m*H;
end
% If K is sparse, permute all the inputs so that evaluations are more efficient
if issparse(K) % Check if compact support covariance is used
p = analyze(K);
y = y(p);
K = K(p,p);
if ~isempty(z)
z = z(p,:);
end
end
switch gp.latent_opt.optim_method
% --------------------------------------------------------------------------------
% find the posterior mode of latent variables by Newton method
case 'newton'
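% Each pass of the loop below is one Newton step (Rasmussen &
% Williams, 2006, Algorithm 3.1): with B = I + sW*K*sW and
% b = W.*f + dlp, the update is a = b - sW.*(B\(sW.*(K*b))) and
% f = K*a; the inner loop halves the step until the objective
% improves.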
a = f;
if isfield(gp,'meanf')
a = a-H'*b_m;
end
W = -gp.lik.fh.llg2(gp.lik, y, f, 'latent', z);
dlp = gp.lik.fh.llg(gp.lik, y, f, 'latent', z);
lp_new = gp.lik.fh.ll(gp.lik, y, f, z);
lp_old = -Inf;
if issparse(K)
speyen=speye(n);
end
iter=0;
while abs(lp_new - lp_old) > tol && iter < maxiter
iter = iter + 1;
lp_old = lp_new; a_old = a;
sW = sqrt(W);
if issparse(K)
sW = sparse(1:n, 1:n, sW, n, n);
[L,notpositivedefinite] = ldlchol(speyen+sW*K*sW );
else
%L = chol(eye(n)+sW*sW'.*K); % L'*L=B=eye(n)+sW*K*sW
L=bsxfun(@times,bsxfun(@times,sW,K),sW');
L(1:n+1:end)=L(1:n+1:end)+1;
[L, notpositivedefinite] = chol(L);
end
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
if ~isfield(gp,'meanf')
b = W.*f+dlp;
else
b = W.*f+K\(H'*b_m)+dlp;
end
if issparse(K)
a = b - sW*ldlsolve(L,sW*(K*b));
else
a = b - sW.*(L\(L'\(sW.*(K*b))));
end
if any(isnan(a))
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
f = K*a;
lp = gp.lik.fh.ll(gp.lik, y, f, z);
if ~isfield(gp,'meanf')
lp_new = -a'*f/2 + lp;
else
lp_new = -(f-H'*b_m)'*(a-K\(H'*b_m))/2 + lp; %f^=f-H'*b_m,
end
i = 0;
while i < 10 && (lp_new < lp_old || isnan(sum(f)))
% reduce step size by half
a = (a_old+a)/2;
f = K*a;
lp = gp.lik.fh.ll(gp.lik, y, f, z);
if ~isfield(gp,'meanf')
lp_new = -a'*f/2 + lp;
else
lp_new = -(f-H'*b_m)'*(a-K\(H'*b_m))/2 + lp;
end
i = i+1;
end
W = -gp.lik.fh.llg2(gp.lik, y, f, 'latent', z);
dlp = gp.lik.fh.llg(gp.lik, y, f, 'latent', z);
end
% --------------------------------------------------------------------------------
% find the posterior mode of latent variables by stabilized Newton method.
% This is implemented as suggested by Hannes Nickisch (personal communication)
case 'stabilized-newton'
% Gaussian initialization
% sigma=gp.lik.sigma;
% W = ones(n,1)./sigma.^2;
% sW = sqrt(W);
% %B = eye(n) + siV*siV'.*K;
% L=bsxfun(@times,bsxfun(@times,sW,K),sW');
% L(1:n+1:end)=L(1:n+1:end)+1;
% L = chol(L,'lower');
% a=sW.*(L'\(L\(sW.*y)));
% f = K*a;
% initialize to observations
%f=y;
switch gp.lik.type
% should be handled inside lik_*
case 'Student-t'
nu=gp.lik.nu;
sigma2=gp.lik.sigma2;
Wmax=(nu+1)/nu/sigma2;
case 'Negbinztr'
r=gp.lik.disper;
Wmax=1./((1+r)./(1*r));
otherwise
Wmax=100;
end
Wlim=0;
W = -gp.lik.fh.llg2(gp.lik, y, f, 'latent', z);
dlp = gp.lik.fh.llg(gp.lik, y, f, 'latent', z);
lp = -(f'*(K\f))/2 +gp.lik.fh.ll(gp.lik, y, f, z);
lp_old = -Inf;
f_old = f+1;
ge = Inf; %max(abs(a-dlp));
if issparse(K)
speyen=speye(n);
end
iter=0;
% begin Newton's iterations
while (lp - lp_old > tol || max(abs(f-f_old)) > tol) && iter < maxiter
iter=iter+1;
W = -gp.lik.fh.llg2(gp.lik, y, f, 'latent', z);
dlp = gp.lik.fh.llg(gp.lik, y, f, 'latent', z);
W(W<Wlim)=Wlim;
sW = sqrt(W);
if issparse(K)
sW = sparse(1:n, 1:n, sW, n, n);
[L, notpositivedefinite] = ldlchol(speyen+sW*K*sW );
else
%L = chol(eye(n)+sW*sW'.*K); % L'*L=B=eye(n)+sW*K*sW
L=bsxfun(@times,bsxfun(@times,sW,K),sW');
L(1:n+1:end)=L(1:n+1:end)+1;
[L, notpositivedefinite] = chol(L);
end
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
b = W.*f+dlp;
if issparse(K)
a = b - sW*ldlsolve(L,sW*(K*b));
else
a = b - sW.*(L\(L'\(sW.*(K*b))));
end
f_new = K*a;
lp_new = -(a'*f_new)/2 + gp.lik.fh.ll(gp.lik, y, f_new, z);
ge_new=max(abs(a-dlp));
d=lp_new-lp;
if (d<-1e-6 || (abs(d)<1e-6 && ge_new>ge) ) && Wlim<Wmax*0.5
%fprintf('%3d, p(f)=%.12f, max|a-g|=%.12f, %.3f \n',i1,lp,ge,Wlim)
Wlim=Wlim+Wmax*0.05; %Wmax*0.01
else
Wlim=0;
ge=ge_new;
lp_old = lp;
lp = lp_new;
f_old = f;
f = f_new;
%fprintf('%3d, p(f)=%.12f, max|a-g|=%.12f, %.3f \n',i1,lp,ge,Wlim)
end
if Wlim>Wmax
%fprintf('\n%3d, p(f)=%.12f, max|a-g|=%.12f, %.3f \n',i1,lp,ge,Wlim)
break
end
end
% --------------------------------------------------------------------------------
% find the posterior mode of latent variables by fminunc
case 'fminunc_large'
if issparse(K)
[LD,notpositivedefinite] = ldlchol(K);
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
fhm = @(W, f, varargin) (ldlsolve(LD,f) + repmat(W,1,size(f,2)).*f); % W*f; %
else
[LD,notpositivedefinite] = chol(K);
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
fhm = @(W, f, varargin) (LD\(LD'\f) + repmat(W,1,size(f,2)).*f); % W*f; %
end
defopts=struct('GradObj','on','Hessian','on','HessMult', fhm,'TolX', tol,'TolFun', tol,'LargeScale', 'on','Display', 'off');
if ~isfield(gp.latent_opt, 'fminunc_opt')
opt = optimset(defopts);
else
opt = optimset(defopts,gp.latent_opt.fminunc_opt);
end
if issparse(K)
fe = @(f, varargin) (0.5*f*(ldlsolve(LD,f')) - gp.lik.fh.ll(gp.lik, y, f', z));
fg = @(f, varargin) (ldlsolve(LD,f') - gp.lik.fh.llg(gp.lik, y, f', 'latent', z))';
fh = @(f, varargin) (-gp.lik.fh.llg2(gp.lik, y, f', 'latent', z)); %inv(K) + diag(g2(f', gp.lik)) ; %
else
fe = @(f, varargin) (0.5*f*(LD\(LD'\f')) - gp.lik.fh.ll(gp.lik, y, f', z));
fg = @(f, varargin) (LD\(LD'\f') - gp.lik.fh.llg(gp.lik, y, f', 'latent', z))';
fh = @(f, varargin) (-gp.lik.fh.llg2(gp.lik, y, f', 'latent', z)); %inv(K) + diag(g2(f', gp.lik)) ; %
end
mydeal = @(varargin)varargin{1:nargout};
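% mydeal returns as many of its inputs as the caller requests, so a
% single anonymous handle can serve fminunc the energy, gradient and
% Hessian computed by fe, fg and fh above.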
[f,fval,exitflag,output] = fminunc(@(ww) mydeal(fe(ww), fg(ww), fh(ww)), f', opt);
f = f';
if issparse(K)
a = ldlsolve(LD,f);
else
a = LD\(LD'\f);
end
% --------------------------------------------------------------------------------
% find the posterior mode of latent variables with likelihood specific algorithm
% For example, with Student-t likelihood this mean EM-algorithm which is coded in the
% lik_t file.
case 'lik_specific'
[f, a] = gp.lik.fh.optimizef(gp, y, K);
if isnan(f)
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
otherwise
error('gpla_e: Unknown optimization method ! ')
end
% evaluate the approximate log marginal likelihood
W = -gp.lik.fh.llg2(gp.lik, y, f, 'latent', z);
if ~isfield(gp,'meanf')
logZ = 0.5 *f'*a - gp.lik.fh.ll(gp.lik, y, f, z);
else
logZ = 0.5 *((f-H'*b_m)'*(a-K\(H'*b_m))) - gp.lik.fh.ll(gp.lik, y, f, z);
end
if min(W) >= 0
% This is the usual case where likelihood is log concave
% for example, Poisson and probit
if issparse(K)
W = sparse(1:n,1:n, -gp.lik.fh.llg2(gp.lik, y, f, 'latent', z), n,n);
sqrtW = sqrt(W);
B = sparse(1:n,1:n,1,n,n) + sqrtW*K*sqrtW;
[L, notpositivedefinite] = ldlchol(B);
% Note that here we use LDL cholesky
edata = logZ + 0.5.*sum(log(diag(L))); % 0.5*log(det(eye(size(K)) + K*W)) ; %
else
sW = sqrt(W);
L=bsxfun(@times,bsxfun(@times,sW,K),sW');
L(1:n+1:end)=L(1:n+1:end)+1;
[L, notpositivedefinite] = chol(L, 'lower');
edata = logZ + sum(log(diag(L))); % 0.5*log(det(eye(size(K)) + K*W)) ; %
end
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
else
% We may end up here if the likelihood is not log concave
% For example Student-t likelihood.
[W2,I] = sort(W, 1, 'descend');
if issparse(K)
error(['gpla_e: Unfortunately the compact support covariance (CS) functions do not work if'...
'the second gradient of negative likelihood is negative. This happens for example '...
'with Student-t likelihood. Please use non-CS functions instead (e.g. gpcf_sexp) ']);
end
[L, notpositivedefinite] = chol(K);
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
L1 = L;
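% The loop below folds the site precisions W(i) into the Cholesky
% factor one rank-one cholupdate at a time, in decreasing order of
% W; the determinant term 0.5*log(det(I + K*W)) is then recovered
% from the change in the Cholesky diagonals (see edata below).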
for jj=1:size(K,1)
i = I(jj);
ll = sum(L(:,i).^2);
l = L'*L(:,i);
upfact = W(i)./(1 + W(i).*ll);
% Check that Cholesky factorization will remain positive definite
if 1./ll + W(i) < 0 %1 + W(i).*ll <= 0 | abs(upfact) > abs(1./ll) %upfact > 1./ll
warning('gpla_e: 1./Sigma(i,i) + W(i) < 0')
if ~isfield(gp.lik.fh,'upfact')
% log-concave likelihood, this should not happen
% let's just return NaN
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
% non-log-concave likelihood, this may happen
% let's try to do something about it
ind = 1:i-1;
if isempty(z)
mu = K(i,ind)*gp.lik.fh.llg(gp.lik, y(I(ind)), f(I(ind)), 'latent', z);
upfact = gp.lik.fh.upfact(gp, y(I(i)), mu, ll);
else
mu = K(i,ind)*gp.lik.fh.llg(gp.lik, y(I(ind)), f(I(ind)), 'latent', z(I(ind)));
upfact = gp.lik.fh.upfact(gp, y(I(i)), mu, ll, z(I(i)));
end
end
if upfact > 0
[L,notpositivedefinite] = cholupdate(L, l.*sqrt(upfact), '-');
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
else
L = cholupdate(L, l.*sqrt(-upfact));
end
end
edata = logZ + sum(log(diag(L1))) - sum(log(diag(L)));
end
La2 = W;
else
% Likelihoods with non-diagonal Hessian
[n,nout] = size(y);
if isfield(gp, 'comp_cf') % own covariance for each output component
multicf = true;
if length(gp.comp_cf) ~= nout && nout > 1
error('GPLA_E: the number of component vectors in gp.comp_cf must be the same as the number of outputs.')
end
else
multicf = false;
end
p=[];
switch gp.lik.type
case {'LGP', 'LGPC'}
nl=n;
% Initialize latent values
% zero seems to be a robust choice (Jarno)
% with mean functions, initialize to mean function values
if ~isfield(gp,'meanf')
f = zeros(sum(nl),1);
else
[H,b_m,B_m]=mean_prep(gp,x,[]);
Hb_m=H'*b_m;
f = Hb_m;
end
if isfield(gp.latent_opt, 'kron') && gp.latent_opt.kron==1
gptmp=gp; gptmp.jitterSigma2=0;
% Use Kronecker product kron(Ka,Kb) instead of K
Ka = gp_trcov(gptmp, unique(x(:,1)));
% fix the magnitude sigma to 1 for Kb matrix
wtmp=gp_pak(gptmp); wtmp(1)=0; gptmp=gp_unpak(gptmp,wtmp);
Kb = gp_trcov(gptmp, unique(x(:,2)));
clear gptmp
n1=size(Ka,1);
n2=size(Kb,1);
[Va,Da]=eig(Ka); [Vb,Db]=eig(Kb);
% eigenvalues of K matrix
Dtmp=kron(diag(Da),diag(Db));
[sDtmp,istmp]=sort(Dtmp,'descend');
% Form the low-rank approximation. Exclude eigenvalues
% smaller than gp.latent_opt.eig_tol or take
% gp.latent_opt.eig_prct*n eigenvalues at most.
nlr=min([sum(sDtmp>gp.latent_opt.eig_tol) round(gp.latent_opt.eig_prct*n)]);
sDtmp=sDtmp+gp.jitterSigma2;
itmp1=meshgrid(1:n1,1:n2);
itmp2=meshgrid(1:n2,1:n1)';
ind=[itmp1(:) itmp2(:)];
% included eigenvalues
Dlr=sDtmp(1:nlr);
% included eigenvectors
Vlr=zeros(n,nlr);
for i1=1:nlr
Vlr(:,i1)=kron(Va(:,ind(istmp(i1),1)),Vb(:,ind(istmp(i1),2)));
end
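% (the eigenpairs of kron(Ka,Kb) are Kronecker products of the
% eigenpairs of Ka and Kb, which is what the loop above uses to
% assemble the nlr leading eigenvectors without ever forming K)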
%L=[];
% diag(K)-diag(Vlr*diag(Dlr)*Vlr')
Lb=gp_trvar(gp,x)-sum(bsxfun(@times,Vlr.*Vlr,Dlr'),2);
if isfield(gp,'meanf')
Dt=[Dlr; diag(B_m)];
Vt=[Vlr H'];
else
Dt=Dlr;
Vt=Vlr;
end
Dtsq=sqrt(Dt);
elseif isfield(gp.latent_opt, 'fft') && gp.latent_opt.fft==1
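% Idea of the FFT speed-up: on a regular grid a stationary
% covariance yields a (block-)Toeplitz K; embedding its first
% row in a circulant matrix lets products K*v be evaluated as
% ifft(fft(Kcirc).*fft(v)) in O(n log n) without forming K.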
% unique values from covariance matrix
K1 = gp_cov(gp, x(1,:), x);
K1(1)=K1(1)+gp.jitterSigma2;
if size(x,2)==1
% form circulant matrix to avoid border effects
Kcirc=[K1 0 K1(end:-1:2)];
fftKcirc = fft(Kcirc);
elseif size(x,2)==2
n1=gp.latent_opt.gridn(1);
n2=gp.latent_opt.gridn(2);
Ktmp=reshape(K1,n2,n1);
% form circulant matrix to avoid border effects
Ktmp=[Ktmp; zeros(1,n1); flipud(Ktmp(2:end,:))];
fftKcirc=fft2([Ktmp zeros(2*n2,1) fliplr(Ktmp(:,2:end))]);
else
error('FFT speed-up implemented only for 1D and 2D cases.')
end
else
K = gp_trcov(gp, x);
end
% Mean function contribution to K
if isfield(gp,'meanf')
if isfield(gp.latent_opt, 'kron') && gp.latent_opt.kron==1
% only zero mean function implemented for Kronecker
% approximation
iKHb_m=zeros(n,1);
elseif isfield(gp.latent_opt, 'fft') && gp.latent_opt.fft==1
% only zero mean function implemented for FFT speed-up
iKHb_m=zeros(n,1);
else
K=K+H'*B_m*H;
ws=warning('off','MATLAB:singularMatrix');
iKHb_m=K\Hb_m;
warning(ws);
end
end
% Main Newton algorithm
tol = 1e-12;
a = f;
if isfield(gp,'meanf')
a = a-Hb_m;
end
% a vector to form the second gradient
g2 = gp.lik.fh.llg2(gp.lik, y, f, 'latent', z);
g2sq=sqrt(g2);
ny=sum(y); % total number of observations
dlp = gp.lik.fh.llg(gp.lik, y, f, 'latent', z);
lp_new = gp.lik.fh.ll(gp.lik, y, f, z);
lp_old = -Inf;
iter=0;
while abs(lp_new - lp_old) > tol && iter < maxiter
iter = iter + 1;
lp_old = lp_new; a_old = a;
if ~isfield(gp,'meanf')
if strcmpi(gp.lik.type,'LGPC')
n1=gp.lik.gridn(1); n2=gp.lik.gridn(2);
b=zeros(n,1);
ny2=sum(reshape(y,fliplr(gp.lik.gridn)));
for k1=1:n1
b((1:n2)+(k1-1)*n2) = ny2(k1)*(g2((1:n2)+(k1-1)*n2).*f((1:n2)+(k1-1)*n2)-g2((1:n2)+(k1-1)*n2)*(g2((1:n2)+(k1-1)*n2)'*f((1:n2)+(k1-1)*n2)))+dlp((1:n2)+(k1-1)*n2);
end
else
b = ny*(g2.*f-g2*(g2'*f))+dlp;
%b = W.*f+dlp;
end
else
if strcmpi(gp.lik.type,'LGPC')
n1=gp.lik.gridn(1); n2=gp.lik.gridn(2);
b=zeros(n,1);
ny2=sum(reshape(y,fliplr(gp.lik.gridn)));
for k1=1:n1
b((1:n2)+(k1-1)*n2) = ny2(k1)*(g2((1:n2)+(k1-1)*n2).*f((1:n2)+(k1-1)*n2)-g2((1:n2)+(k1-1)*n2)*(g2((1:n2)+(k1-1)*n2)'*f((1:n2)+(k1-1)*n2)))+iKHb_m((1:n2)+(k1-1)*n2)+dlp((1:n2)+(k1-1)*n2);
end
else
b = ny*(g2.*f-g2*(g2'*f))+iKHb_m+dlp;
%b = W.*f+K\(H'*b_m)+dlp;
end
end
if isfield(gp.latent_opt, 'kron') && gp.latent_opt.kron==1
% Use Kronecker product structure in matrix vector
% multiplications
%-
% q=Kb*reshape(b,n2,n1)*Ka;
% Kg=q(:);
% Kg=Kg+gp.jitterSigma2*b;
%-
% OR use reduced-rank approximation for K
%-
Kg=Lb.*b+Vlr*(Dlr.*(Vlr'*b));
%-
if isfield(gp,'meanf')
Kg=Kg+H'*(B_m*(H*b));
end
% % Use Kronecker product structure
%-
% v=sqrt(ny)*(g2sq.*Kg-(g2*(g2'*Kg))./g2sq);
% % fast matrix vector multiplication with
% % Kronecker product for matrix inversion
% if isfield(gp,'meanf')
% [iSg,~]=pcg(@(z) mvm_kron(g2,ny,Ka,Kb,H,B_m,gp.jitterSigma2,z), v, gp.latent_opt.pcg_tol);
% else
% [iSg,~]=pcg(@(z) mvm_kron(g2,ny,Ka,Kb,[],[],gp.jitterSigma2,z), v, gp.latent_opt.pcg_tol);
% end
% a=b-sqrt(ny)*(g2sq.*iSg - g2*(g2'*(iSg./g2sq)));
%-
% use reduced-rank approximation for K
%-
Zt=1./(1+ny*g2.*Lb);
Ztsq=sqrt(Zt);
Ltmp=bsxfun(@times,Ztsq.*sqrt(ny).*g2sq,bsxfun(@times,Vt,sqrt(Dt)'));
Ltmp=Ltmp'*Ltmp;
Ltmp(1:(size(Dt,1)+1):end)=Ltmp(1:(size(Dt,1)+1):end)+1;
[L,notpositivedefinite] = chol(Ltmp,'lower');
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
EKg=ny*g2.*(Zt.*Kg)-sqrt(ny)*g2sq.*(Zt.*(sqrt(ny)*g2sq.*(Vt*(Dtsq.*(L'\(L\(Dtsq.*(Vt'*(sqrt(ny)*g2sq.*(Zt.*(sqrt(ny)*g2sq.*Kg)))))))))));
E1=ny*g2.*(Zt.*ones(n,1))-sqrt(ny)*g2sq.*(Zt.*(sqrt(ny)*g2sq.*(Vt*(Dtsq.*(L'\(L\(Dtsq.*(Vt'*(sqrt(ny)*g2sq.*(Zt.*(sqrt(ny)*g2sq.*ones(n,1))))))))))));
a=b-(EKg-E1*((E1'*Kg)./(ones(1,n)*E1)));
%-
elseif isfield(gp.latent_opt, 'fft') && gp.latent_opt.fft==1
% use FFT speed-up in matrix vector multiplications
if size(x,2)==1
gge=zeros(2*n,1);
gge(1:n)=b;
q=ifft(fftKcirc.*fft(gge'));
Kg=q(1:n)';
elseif size(x,2)==2
gge=zeros(2*n2,2*n1);
gge(1:n2,1:n1)=reshape(b,n2,n1);
q=ifft2(fftKcirc.*fft2(gge));
q=q(1:n2,1:n1);
Kg=q(:);
else
error('FFT speed-up implemented only for 1D and 2D cases.')
end
if isfield(gp,'meanf')
Kg=Kg+H'*(B_m*(H*b));
end
v=sqrt(ny)*(g2sq.*Kg-(g2*(g2'*Kg))./g2sq);
if isfield(gp,'meanf')
% fast matrix vector multiplication with fft for matrix inversion
[iSg,~]=pcg(@(z) mvm_fft(g2,ny,fftKcirc,H,B_m,z), v, gp.latent_opt.pcg_tol);
else
[iSg,~]=pcg(@(z) mvm_fft(g2,ny,fftKcirc,[],[],z), v, gp.latent_opt.pcg_tol);
end
a=b-sqrt(ny)*(g2sq.*iSg - g2*(g2'*(iSg./g2sq)));
else
if strcmpi(gp.lik.type,'LGPC')
R=zeros(n);
RKR=K;
for k1=1:n1
R((1:n2)+(k1-1)*n2,(1:n2)+(k1-1)*n2)=sqrt(ny2(k1))*(diag(g2sq((1:n2)+(k1-1)*n2))-g2((1:n2)+(k1-1)*n2)*g2sq((1:n2)+(k1-1)*n2)');
RKR(:,(1:n2)+(k1-1)*n2)=RKR(:,(1:n2)+(k1-1)*n2)*R((1:n2)+(k1-1)*n2,(1:n2)+(k1-1)*n2);
end
for k1=1:n1
RKR((1:n2)+(k1-1)*n2,:)=R((1:n2)+(k1-1)*n2,(1:n2)+(k1-1)*n2)'*RKR((1:n2)+(k1-1)*n2,:);
end
%RKR=R'*K*R;
RKR(1:(n+1):end)=RKR(1:(n+1):end)+1;
[L,notpositivedefinite] = chol(RKR,'lower');
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
Kb=K*b;
RCb=R'*Kb;
iRCb=L'\(L\RCb);
a=b-R*iRCb;
else
%R=-g2*g2sq'; R(1:(n+1):end)=R(1:(n+1):end)+g2sq';
KR=bsxfun(@times,K,g2sq')-(K*g2)*g2sq';
RKR=ny*(bsxfun(@times,g2sq,KR)-g2sq*(g2'*KR));
RKR(1:(n+1):end)=RKR(1:(n+1):end)+1;
[L,notpositivedefinite] = chol(RKR,'lower');
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
Kb=K*b;
RCb=g2sq.*Kb-g2sq*(g2'*Kb);
iRCb=L'\(L\RCb);
a=b-ny*(g2sq.*iRCb-g2*(g2sq'*iRCb));
end
end
if isfield(gp.latent_opt, 'kron') && gp.latent_opt.kron==1
% % Use Kronecker product structure
%-
% f2=Kb*reshape(a,n2,n1)*Ka;
% f=f2(:);
% f=f+gp.jitterSigma2*a;
%-
% use reduced-rank approximation for K
%-
f=Lb.*a+Vlr*(Dlr.*(Vlr'*a));
%-
if isfield(gp,'meanf')
f=f+H'*(B_m*(H*a));
end
elseif isfield(gp.latent_opt, 'fft') && gp.latent_opt.fft==1
if size(x,2)==1
a2=zeros(2*n,1);
a2(1:n)=a;
f2=ifft(fftKcirc.*fft(a2'));
f=f2(1:n)';
if isfield(gp,'meanf')
f=f+H'*(B_m*(H*a));
end
elseif size(x,2)==2
a2=zeros(2*n2,2*n1);
a2(1:n2,1:n1)=reshape(a,n2,n1);
f2=ifft2(fftKcirc.*fft2(a2));
f2=f2(1:n2,1:n1);
f=f2(:);
if isfield(gp,'meanf')
f=f+H'*(B_m*(H*a));
end
else
error('FFT speed-up implemented only for 1D and 2D cases.')
end
else
f = K*a;
end
lp = gp.lik.fh.ll(gp.lik, y, f, z);
if ~isfield(gp,'meanf')
lp_new = -a'*f/2 + lp;
else
%lp_new = -(f-H'*b_m)'*(a-K\(H'*b_m))/2 + lp; %f^=f-H'*b_m,
lp_new = -(f-Hb_m)'*(a-iKHb_m)/2 + lp; %f^=f-Hb_m,
end
i = 0;
while i < 10 && lp_new < lp_old && ~isnan(sum(f))
% reduce step size by half
a = (a_old+a)/2;
if isfield(gp.latent_opt, 'kron') && gp.latent_opt.kron==1
% % Use Kronecker product structure
%-
% f2=Kb*reshape(a,n2,n1)*Ka;
% f=f2(:);
% f=f+gp.jitterSigma2*a;
%-
% use reduced-rank approximation for K
%-
f=Lb.*a+Vlr*(Dlr.*(Vlr'*a));
%-
if isfield(gp,'meanf')
f=f+H'*(B_m*(H*a));
end
elseif isfield(gp.latent_opt, 'fft') && gp.latent_opt.fft==1
if size(x,2)==1
a2=zeros(2*n,1);
a2(1:n)=a;
f2=ifft(fftKcirc.*fft(a2'));
f=f2(1:n)';
if isfield(gp,'meanf')
f=f+H'*(B_m*(H*a));
end
elseif size(x,2)==2
a2=zeros(2*n2,2*n1);
a2(1:n2,1:n1)=reshape(a,n2,n1);
f2=ifft2(fftKcirc.*fft2(a2));
f2=f2(1:n2,1:n1);
f=f2(:);
if isfield(gp,'meanf')
f=f+H'*(B_m*(H*a));
end
else
error('FFT speed-up implemented only for 1D and 2D cases.')
end
else
f = K*a;
end
lp = gp.lik.fh.ll(gp.lik, y, f, z);
if ~isfield(gp,'meanf')
lp_new = -a'*f/2 + lp;
else
%lp_new = -(f-H'*b_m)'*(a-K\(H'*b_m))/2 + lp;
lp_new = -(f-Hb_m)'*(a-iKHb_m)/2 + lp;
end
i = i+1;
end
g2 = gp.lik.fh.llg2(gp.lik, y, f, 'latent', z);
g2sq=sqrt(g2);
dlp = gp.lik.fh.llg(gp.lik, y, f, 'latent', z);
end
% evaluate the approximate log marginal likelihood
g2 = gp.lik.fh.llg2(gp.lik, y, f, 'latent', z);
g2sq=sqrt(g2);
if ~isfield(gp,'meanf')
logZ = 0.5 *f'*a - gp.lik.fh.ll(gp.lik, y, f, z);
else
% logZ = 0.5 *((f-H'*b_m)'*(a-K\(H'*b_m))) - gp.lik.fh.ll(gp.lik, y, f, z);
logZ = 0.5 *((f-Hb_m)'*(a-iKHb_m)) - gp.lik.fh.ll(gp.lik, y, f, z);
end
if isfield(gp.latent_opt, 'kron') && gp.latent_opt.kron==1
% % Use Kronecker product structure
%-
% tmp=bsxfun(@times,Lb.^(-1/2),bsxfun(@times,Vt,sqrt(Dt)'));
% tmp=tmp'*tmp;
% tmp(1:size(tmp,1)+1:end)=tmp(1:size(tmp,1)+1:end)+1;
% logZa=sum(log(diag(chol(tmp,'lower'))));
%
% Lbt=ny*(g2)+1./Lb;
%
% St=[diag(1./Dt)+Vt'*bsxfun(@times,1./Lb,Vt) zeros(size(Dt,1),1); ...
% zeros(1,size(Dt,1)) 1];
% Pt=[bsxfun(@times,1./Lb,Vt) sqrt(ny)*g2];
%
% logZb=sum(log(diag(chol(St,'lower'))));
%
% Ptt=bsxfun(@times,1./sqrt(Lbt),Pt);
% logZc=sum(log(diag(chol(St-Ptt'*Ptt,'lower'))));
%
% edata = logZ + logZa - logZb + logZc + 0.5*sum(log(Lb)) + 0.5*sum(log(Lbt));
%-
% use reduced-rank approximation for K
%-
Zt=1./(1+ny*g2.*Lb);
Ztsq=sqrt(Zt);
Ltmp=bsxfun(@times,Ztsq.*sqrt(ny).*g2sq,bsxfun(@times,Vt,sqrt(Dt)'));
Ltmp=Ltmp'*Ltmp;
Ltmp(1:(size(Dt,1)+1):end)=Ltmp(1:(size(Dt,1)+1):end)+1;
L=chol(Ltmp,'lower');
LTtmp=L\( Dtsq.*(Vt'*( (g2sq.*sqrt(ny)).*((1./(1+ny*g2.*Lb)).* (sqrt(ny)*g2sq) ) )) );
edata = logZ + sum(log(diag(L)))+0.5*sum(log(1+ny*g2.*Lb)) ...
-0.5*log(ny) + 0.5*log(sum(((g2*ny)./(ny*g2.*Lb+1)))-LTtmp'*LTtmp);
%-
elseif isfield(gp.latent_opt, 'fft') && gp.latent_opt.fft==1
K = gp_trcov(gp, x);
if isfield(gp,'meanf')
K=K+H'*B_m*H;
end
% exact determinant
KR=bsxfun(@times,K,g2sq')-(K*g2)*g2sq';
RKR=ny*(bsxfun(@times,g2sq,KR)-g2sq*(g2'*KR));
RKR(1:(n+1):end)=RKR(1:(n+1):end)+1;
[L,notpositivedefinite] = chol(RKR,'lower');
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
edata = logZ + sum(log(diag(L)));
% % determinant approximated using only the largest eigenvalues
% opts.issym = 1;
% Deig=eigs(@(z) mvm_fft(g2, ny, fftKcirc, H, B_m, z),n,round(n*0.05),'lm',opts);
% edata = logZ + 0.5*sum(log(Deig));
% L=[];
else
if strcmpi(gp.lik.type,'LGPC')
R=zeros(n);
RKR=K;
for k1=1:n1
R((1:n2)+(k1-1)*n2,(1:n2)+(k1-1)*n2)=sqrt(ny2(k1))*(diag(g2sq((1:n2)+(k1-1)*n2))-g2((1:n2)+(k1-1)*n2)*g2sq((1:n2)+(k1-1)*n2)');
RKR(:,(1:n2)+(k1-1)*n2)=RKR(:,(1:n2)+(k1-1)*n2)*R((1:n2)+(k1-1)*n2,(1:n2)+(k1-1)*n2);
end
for k1=1:n1
RKR((1:n2)+(k1-1)*n2,:)=R((1:n2)+(k1-1)*n2,(1:n2)+(k1-1)*n2)'*RKR((1:n2)+(k1-1)*n2,:);
end
%RKR=R'*K*R;
else
KR=bsxfun(@times,K,g2sq')-(K*g2)*g2sq';
RKR=ny*(bsxfun(@times,g2sq,KR)-g2sq*(g2'*KR));
end
RKR(1:(n+1):end)=RKR(1:(n+1):end)+1;
[L,notpositivedefinite] = chol(RKR,'lower');
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
edata = logZ + sum(log(diag(L)));
end
M=[];
E=[];
case {'Softmax', 'Multinom'}
% Initialize latent values
% zero seems to be a robust choice (Jarno)
f = zeros(size(y(:)));
K = zeros(n,n,nout);
if multicf
for i1=1:nout
K(:,:,i1) = gp_trcov(gp, x, gp.comp_cf{i1});
end
else
Ktmp=gp_trcov(gp, x);
for i1=1:nout
K(:,:,i1) = Ktmp;
end
end
% Main newton algorithm, see Rasmussen & Williams (2006),
% p. 50
tol = 1e-12;
a = f;
f2=reshape(f,n,nout);
% lp_new = log(p(y|f))
lp_new = gp.lik.fh.ll(gp.lik, y, f2, z);
lp_old = -Inf;
c=zeros(n*nout,1);
ERMMRc=zeros(n*nout,1);
E=zeros(n,n,nout);
L=zeros(n,n,nout);
RER = zeros(n,n,nout);
while lp_new - lp_old > tol
lp_old = lp_new; a_old = a;
% llg = d(log(p(y|f)))/df
llg = gp.lik.fh.llg(gp.lik, y, f2, 'latent', z);
% Second derivatives
[pi2_vec, pi2_mat] = gp.lik.fh.llg2(gp.lik, y, f2, 'latent', z);
% W = -diag(pi2_vec) + pi2_mat*pi2_mat'
pi2 = reshape(pi2_vec,size(y));
R = repmat(1./pi2_vec,1,n).*pi2_mat;
for i1=1:nout
Dc=sqrt(pi2(:,i1));
Lc=(Dc*Dc').*K(:,:,i1);
Lc(1:n+1:end)=Lc(1:n+1:end)+1;
[Lc,notpositivedefinite]=chol(Lc);
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
L(:,:,i1)=Lc;
Ec=Lc'\diag(Dc);
Ec=Ec'*Ec;
E(:,:,i1)=Ec;
RER(:,:,i1) = R((1:n)+(i1-1)*n,:)'*Ec*R((1:n)+(i1-1)*n,:);
end
[M, notpositivedefinite]=chol(sum(RER,3));
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
b = pi2_vec.*f - pi2_mat*(pi2_mat'*f) + llg;
for i1=1:nout
c((1:n)+(i1-1)*n)=E(:,:,i1)*(K(:,:,i1)*b((1:n)+(i1-1)*n));
end
RMMRc=R*(M\(M'\(R'*c)));
for i1=1:nout
ERMMRc((1:n)+(i1-1)*n) = E(:,:,i1)*RMMRc((1:n)+(i1-1)*n,:);
end
a=b-c+ERMMRc;
for i1=1:nout
f((1:n)+(i1-1)*n)=K(:,:,i1)*a((1:n)+(i1-1)*n);
end
f2=reshape(f,n,nout);
lp_new = -a'*f/2 + gp.lik.fh.ll(gp.lik, y, f2, z);
i = 0;
while i < 10 && (lp_new < lp_old || isnan(sum(f)))
% reduce step size by half
a = (a_old+a)/2;
for i1=1:nout
f((1:n)+(i1-1)*n)=K(:,:,i1)*a((1:n)+(i1-1)*n);
end
f2=reshape(f,n,nout);
lp_new = -a'*f/2 + gp.lik.fh.ll(gp.lik, y, f2, z);
i = i+1;
end
end
[pi2_vec, pi2_mat] = gp.lik.fh.llg2(gp.lik, y, f2, 'latent', z);
pi2 = reshape(pi2_vec,size(y));
zc=0;
Detn=0;
R = repmat(1./pi2_vec,1,n).*pi2_mat;
for i1=1:nout
Dc=sqrt( pi2(:,i1) );
Lc=(Dc*Dc').*K(:,:,i1);
Lc(1:n+1:end)=Lc(1:n+1:end)+1;
[Lc, notpositivedefinite]=chol(Lc);
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
L(:,:,i1)=Lc;
pi2i = pi2_mat((1:n)+(i1-1)*n,:);
pipi = pi2i'/diag(Dc);
Detn = Detn + pipi*(Lc\(Lc'\diag(Dc)))*K(:,:,i1)*pi2i;
zc = zc + sum(log(diag(Lc)));
Ec=Lc'\diag(Dc);
Ec=Ec'*Ec;
E(:,:,i1)=Ec;
RER(:,:,i1) = R((1:n)+(i1-1)*n,:)'*Ec*R((1:n)+(i1-1)*n,:);
end
[M, notpositivedefinite]=chol(sum(RER,3));
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
zc = zc + sum(log(diag(chol( eye(size(K(:,:,i1))) - Detn))));
logZ = a'*f/2 - gp.lik.fh.ll(gp.lik, y, f2, z) + zc;
edata = logZ;
otherwise
if ~isfield(gp, 'comp_cf') || isempty(gp.comp_cf)
error('Define multiple covariance functions for latent processes using gp.comp_cf (see gp_set)');
end
if isfield(gp.lik,'xtime')
xtime=gp.lik.xtime;
if isfield(gp.lik, 'stratificationVariables')
ebc_ind=gp.lik.stratificationVariables;
ux = unique(x(:,ebc_ind), 'rows');
gp.lik.n_u = size(ux,1);
for i1=1:size(ux,1)
gp.lik.stratind{i1}=(x(:,ebc_ind)==ux(i1));
end
[xtime1, xtime2] = meshgrid(ux, xtime);
xtime = [xtime2(:) xtime1(:)];
if isfield(gp.lik, 'removeStratificationVariables') && gp.lik.removeStratificationVariables
x(:,ebc_ind)=[];
end
end
ntime = size(xtime,1);
nl=[ntime n];
else
nl=repmat(n,1,length(gp.comp_cf));
end
nlp=length(nl); % number of latent processes
% Initialize latent values
% zero seems to be a robust choice (Jarno)
% with mean functions, initialize to mean function values
if ~isfield(gp,'meanf')
f = zeros(sum(nl),1);
if isequal(gp.lik.type, 'Inputdependentnoise')
% Inputdependent-noise needs initialization to mean
Kf = gp_trcov(gp,x,gp.comp_cf{1});
f(1:n) = Kf*((Kf+gp.lik.sigma2.*eye(n))\y);
end
else
[H,b_m,B_m]=mean_prep(gp,x,[]);
Hb_m=H'*b_m;
f = Hb_m;
end
% K is block-diagonal covariance matrix where blocks
% correspond to latent processes
K = zeros(sum(nl));
if isfield(gp.lik,'xtime')
K(1:ntime,1:ntime)=gp_trcov(gp, xtime, gp.comp_cf{1});
K((1:n)+ntime,(1:n)+ntime) = gp_trcov(gp, x, gp.comp_cf{2});
else
for i1=1:nlp
K((1:n)+(i1-1)*n,(1:n)+(i1-1)*n) = gp_trcov(gp, x, gp.comp_cf{i1});
end
end
% Mean function contribution to K
if isfield(gp,'meanf')
K=K+H'*B_m*H;
iKHb_m=K\Hb_m;
end
% Main Newton algorithm, see Rasmussen & Williams (2006),
% p. 46
tol = 1e-12;
a = f;
if isfield(gp,'meanf')
a = a-Hb_m;
end
% Second derivatives of log-likelihood
if isfield(gp.lik,'xtime')
[llg2diag, llg2mat] = gp.lik.fh.llg2(gp.lik, y, f, 'latent', z);
% W = [diag(Wdiag(1:ntime)) Wmat; Wmat' diag(Wdiag(ntime+1:end))]
Wdiag=-llg2diag; Wmat=-llg2mat;
W=[];
else
Wvec = -gp.lik.fh.llg2(gp.lik, y, f, 'latent', z);
% W = [diag(Wvec(1:n,1)) diag(Wvec(1:n,2)); diag(Wvec(n+1:end,1)) diag(Wvec(n+1:end,2))]
Wdiag=[Wvec(1:nl(1),1); Wvec(nl(1)+(1:nl(2)),2)];
end
% dlp = d(log(p(y|f)))/df
dlp = gp.lik.fh.llg(gp.lik, y, f, 'latent', z);
% lp_new = log(p(y|f))
lp_new = gp.lik.fh.ll(gp.lik, y, f, z);
lp_old = -Inf;
WK=zeros(sum(nl));
iter=0;
while (abs(lp_new - lp_old) > tol && iter < maxiter)
iter = iter + 1;
lp_old = lp_new; a_old = a;
% b = W*f + d(log(p(y|f)))/df
if isfield(gp.lik,'xtime')
b=Wdiag.*f+[Wmat*f((ntime+1):end); Wmat'*f(1:ntime)]+dlp;
else
b = sum(Wvec.*repmat(reshape(f,n,nlp),nlp,1),2)+dlp;
end
WK(1:nl(1),1:nl(1))=bsxfun(@times, Wdiag(1:nl(1)),K(1:nl(1),1:nl(1)));
WK(nl(1)+(1:nl(2)),nl(1)+(1:nl(2)))=bsxfun(@times, Wdiag(nl(1)+(1:nl(2))),K(nl(1)+(1:nl(2)),nl(1)+(1:nl(2))));
if isfield(gp.lik,'xtime')
WK(1:nl(1),nl(1)+(1:nl(2)))=Wmat*K(nl(1)+(1:nl(2)),nl(1)+(1:nl(2)));
WK(nl(1)+(1:nl(2)),1:nl(1))=Wmat'*K(1:nl(1),1:nl(1));
else
WK(1:nl(1),nl(1)+(1:nl(2)))=bsxfun(@times, Wvec(1:nl(1),2),K(nl(1)+(1:nl(2)),nl(1)+(1:nl(2))));
WK(nl(1)+(1:nl(2)),1:nl(1))=bsxfun(@times, Wvec(nl(1)+(1:nl(2)),1),K(1:nl(1),1:nl(1)));
end
% B = I + WK
B=WK;
B(1:sum(nl)+1:end) = B(1:sum(nl)+1:end) + 1;
[ll,uu]=lu(B);
% a = inv(I+WK)*(W*f + d(log(p(y|f)))/df)
a=uu\(ll\b);
% a=B\b;
f = K*a;
lp = gp.lik.fh.ll(gp.lik, y, f, z);
if ~isfield(gp,'meanf')
lp_new = -a'*f/2 + lp;
else
%lp_new = -(f-H'*b_m)'*(a-K\(H'*b_m))/2 + lp; %f^=f-H'*b_m,
lp_new = -(f-Hb_m)'*(a-iKHb_m)/2 + lp; %f^=f-Hb_m,
end
i = 0;
while i < 10 && lp_new < lp_old && ~isnan(sum(f))
% reduce step size by half
a = (a_old+a)/2;
f = K*a;
lp = gp.lik.fh.ll(gp.lik, y, f, z);
if ~isfield(gp,'meanf')
lp_new = -a'*f/2 + lp;
else
%lp_new = -(f-H'*b_m)'*(a-K\(H'*b_m))/2 + lp;
lp_new = -(f-Hb_m)'*(a-iKHb_m)/2 + lp;
end
i = i+1;
end
if isfield(gp.lik,'xtime')
[llg2diag, llg2mat] = gp.lik.fh.llg2(gp.lik, y, f, 'latent', z);
Wdiag=-llg2diag; Wmat=-llg2mat;
else
Wvec = -gp.lik.fh.llg2(gp.lik, y, f, 'latent', z);
Wdiag=[Wvec(1:nl(1),1); Wvec(nl(1)+(1:nl(2)),2)];
end
dlp = gp.lik.fh.llg(gp.lik, y, f, 'latent', z);
end
% evaluate the approximate log marginal likelihood
if isfield(gp.lik,'xtime')
[llg2diag, llg2mat] = gp.lik.fh.llg2(gp.lik, y, f, 'latent', z);
Wdiag=-llg2diag; Wmat=-llg2mat;
else
Wvec = -gp.lik.fh.llg2(gp.lik, y, f, 'latent', z);
Wdiag=[Wvec(1:nl(1),1); Wvec(nl(1)+(1:nl(2)),2)];
end
if ~isfield(gp,'meanf')
logZ = 0.5 *f'*a - gp.lik.fh.ll(gp.lik, y, f, z);
else
% logZ = 0.5 *((f-H'*b_m)'*(a-K\(H'*b_m))) - gp.lik.fh.ll(gp.lik, y, f, z);
logZ = 0.5 *((f-Hb_m)'*(a-iKHb_m)) - gp.lik.fh.ll(gp.lik, y, f, z);
end
WK(1:nl(1),1:nl(1))=bsxfun(@times, Wdiag(1:nl(1)),K(1:nl(1),1:nl(1)));
WK(nl(1)+(1:nl(2)),nl(1)+(1:nl(2)))=bsxfun(@times, Wdiag(nl(1)+(1:nl(2))),K(nl(1)+(1:nl(2)),nl(1)+(1:nl(2))));
if isfield(gp.lik,'xtime')
WK(1:ntime,ntime+(1:n))=Wmat*K(nl(1)+(1:nl(2)),nl(1)+(1:nl(2)));
WK(nl(1)+(1:nl(2)),1:nl(1))=Wmat'*K(1:nl(1),1:nl(1));
else
WK(1:nl(1),nl(1)+(1:nl(2)))=bsxfun(@times, Wvec(1:nl(1),2),K(nl(1)+(1:nl(2)),nl(1)+(1:nl(2))));
WK(nl(1)+(1:nl(2)),1:nl(1))=bsxfun(@times, Wvec(nl(1)+(1:nl(2)),1),K(1:nl(1),1:nl(1)));
end
% B = I + WK
B=WK;
B(1:sum(nl)+1:end) = B(1:sum(nl)+1:end) + 1;
[Ll,Lu]=lu(B);
edata = logZ + 0.5*det(Ll)*prod(sign(diag(Lu))).*sum(log(abs(diag(Lu))));
% Return help parameters for gradient and prediction
% calculations
L=B;
E=Ll;
M=Lu;
end
La2=E;
p=M;
end
% ============================================================
% FIC
% ============================================================
case 'FIC'
u = gp.X_u;
m = length(u);
% First evaluate needed covariance matrices
% v defines that parameter is a vector
[Kv_ff, Cv_ff] = gp_trvar(gp, x); % f x 1 vector
K_fu = gp_cov(gp, x, u); % f x u
K_uu = gp_trcov(gp, u);    % u x u, noiseless covariance K_uu
[Luu, notpositivedefinite] = chol(K_uu, 'lower');
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
% Evaluate the Lambda (La)
% Q_ff = K_fu*inv(K_uu)*K_fu'
% Here we need only the diag(Q_ff), which is evaluated below
B=Luu\(K_fu'); % u x f
Qv_ff=sum(B.^2)';
Lav = Cv_ff-Qv_ff; % f x 1, Vector of diagonal elements
iLaKfu = repmat(Lav,1,m).\K_fu; % f x u
A = K_uu+K_fu'*iLaKfu; A = (A+A')./2; % Ensure symmetry
[A, notpositivedefinite] = chol(A);
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
L = iLaKfu/A;
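% By the matrix inversion lemma, inv(diag(Lav) + Qff) equals
% diag(1./Lav) - L*L' with L as above, so the handles below can
% apply the inverse prior covariance using only n x m matrices.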
switch gp.latent_opt.optim_method
% --------------------------------------------------------------------------------
% find the posterior mode of latent variables by fminunc large scale method
case 'fminunc_large'
fhm = @(W, f, varargin) (f./repmat(Lav,1,size(f,2)) - L*(L'*f) + repmat(W,1,size(f,2)).*f); % hessian*f; %
defopts=struct('GradObj','on','Hessian','on','HessMult', fhm,'TolX', 1e-8,'TolFun', 1e-8,'LargeScale', 'on','Display', 'off');
if ~isfield(gp.latent_opt, 'fminunc_opt')
opt = optimset(defopts);
else
opt = optimset(defopts,gp.latent_opt.fminunc_opt);
end
fe = @(f, varargin) (0.5*f*(f'./repmat(Lav,1,size(f',2)) - L*(L'*f')) - gp.lik.fh.ll(gp.lik, y, f', z));
fg = @(f, varargin) (f'./repmat(Lav,1,size(f',2)) - L*(L'*f') - gp.lik.fh.llg(gp.lik, y, f', 'latent', z))';
fh = @(f, varargin) (-gp.lik.fh.llg2(gp.lik, y, f', 'latent', z));
mydeal = @(varargin)varargin{1:nargout};
[f,fval,exitflag,output] = fminunc(@(ww) mydeal(fe(ww), fg(ww), fh(ww)), f', opt);
f = f';
a = f./Lav - L*L'*f;
% --------------------------------------------------------------------------------
% find the posterior mode of latent variables by Newton method
case 'newton'
tol = 1e-12;
a = f;
W = -gp.lik.fh.llg2(gp.lik, y, f, 'latent', z);
dlp = gp.lik.fh.llg(gp.lik, y, f, 'latent', z);
lp_new = gp.lik.fh.ll(gp.lik, y, f, z);
lp_old = -Inf;
iter = 0;
while lp_new - lp_old > tol && iter < maxiter
iter = iter + 1;
lp_old = lp_new; a_old = a;
sW = sqrt(W);
Lah = 1 + sW.*Lav.*sW;
sWKfu = repmat(sW,1,m).*K_fu;
A = K_uu + sWKfu'*(repmat(Lah,1,m).\sWKfu); A = (A+A')./2;
Lb = (repmat(Lah,1,m).\sWKfu)/chol(A);
b = W.*f+dlp;
b2 = sW.*(Lav.*b + B'*(B*b));
a = b - sW.*(b2./Lah - Lb*(Lb'*b2));
f = Lav.*a + B'*(B*a);
W = -gp.lik.fh.llg2(gp.lik, y, f, 'latent', z);
dlp = gp.lik.fh.llg(gp.lik, y, f, 'latent', z);
lp = gp.lik.fh.ll(gp.lik, y, f, z);
lp_new = -a'*f/2 + lp;
i = 0;
while i < 10 && lp_new < lp_old && ~isnan(sum(f))
% reduce step size by half
a = (a_old+a)/2;
f = Lav.*a + B'*(B*a);
W = -gp.lik.fh.llg2(gp.lik, y, f, 'latent', z);
lp = gp.lik.fh.ll(gp.lik, y, f, z);
lp_new = -a'*f/2 + lp;
i = i+1;
end
end
% --------------------------------------------------------------------------------
% find the posterior mode of latent variables with likelihood specific algorithm
% For example, with Student-t likelihood this mean EM-algorithm which is coded in the
% lik_t file.
case 'lik_specific'
[f, a] = gp.lik.fh.optimizef(gp, y, K_uu, Lav, K_fu);
if isnan(f)
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
otherwise
error('gpla_e: Unknown optimization method ! ')
end
W = -gp.lik.fh.llg2(gp.lik, y, f, 'latent', z);
logZ = 0.5*f'*a - gp.lik.fh.ll(gp.lik, y, f, z);
if min(W) >= 0
sqrtW = sqrt(W);
Lah = 1 + sqrtW.*Lav.*sqrtW;
sWKfu = repmat(sqrtW,1,m).*K_fu;
A = K_uu + sWKfu'*(repmat(Lah,1,m).\sWKfu); A = (A+A')./2;
[A, notpositivedefinite] = chol(A);
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
edata = sum(log(Lah)) - 2*sum(log(diag(Luu))) + 2*sum(log(diag(A)));
edata = logZ + 0.5*edata;
else
% This is with full matrices. Needs to be rewritten.
K = diag(Lav) + B'*B;
% $$$ [W,I] = sort(W, 1, 'descend');
% $$$ K = K(I,I);
[W2,I] = sort(W, 1, 'descend');
[L, notpositivedefinite] = chol(K);
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
L1 = L;
for jj=1:size(K,1)
i = I(jj);
ll = sum(L(:,i).^2);
l = L'*L(:,i);
upfact = W(i)./(1 + W(i).*ll);
% Check that Cholesky factorization will remain positive definite
if 1 + W(i).*ll <= 0 || upfact > 1./ll
warning('gpla_e: 1 + W(i).*ll < 0')
ind = 1:i-1;
if isempty(z)
mu = K(i,ind)*gp.lik.fh.llg(gp.lik, y(I(ind)), f(I(ind)), 'latent', z);
else
mu = K(i,ind)*gp.lik.fh.llg(gp.lik, y(I(ind)), f(I(ind)), 'latent', z(I(ind)));
end
upfact = gp.lik.fh.upfact(gp, y(I(i)), mu, ll);
% $$$ W2 = -1./(ll+1e-3);
% $$$ upfact = W2./(1 + W2.*ll);
end
if upfact > 0
L = cholupdate(L, l.*sqrt(upfact), '-');
else
L = cholupdate(L, l.*sqrt(-upfact));
end
end
edata = logZ + sum(log(diag(L1))) - sum(log(diag(L))); % sum(log(diag(chol(K)))) + sum(log(diag(chol((inv(K) + W)))));
end
La2 = Lav;
% ============================================================
% PIC
% ============================================================
case {'PIC' 'PIC_BLOCK'}
ind = gp.tr_index;
u = gp.X_u;
m = length(u);
% First evaluate needed covariance matrices
% v defines that parameter is a vector
K_fu = gp_cov(gp, x, u); % f x u
K_uu = gp_trcov(gp, u);    % u x u, noiseless covariance K_uu
K_uu = (K_uu+K_uu')./2; % ensure the symmetry of K_uu
[Luu, notpositivedefinite] = chol(K_uu, 'lower');
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
% Evaluate the Lambda (La)
% Q_ff = K_fu*inv(K_uu)*K_fu'
% Here we need only the diag(Q_ff), which is evaluated below
B=Luu\(K_fu'); % u x f
% First some helper parameters
iLaKfu = zeros(size(K_fu)); % f x u
for i=1:length(ind)
Qbl_ff = B(:,ind{i})'*B(:,ind{i});
[Kbl_ff, Cbl_ff] = gp_trcov(gp, x(ind{i},:));
Labl{i} = Cbl_ff - Qbl_ff;
[LLabl{i}, notpositivedefinite] = chol(Labl{i});
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
iLaKfu(ind{i},:) = LLabl{i}\(LLabl{i}'\K_fu(ind{i},:));
end
A = K_uu+K_fu'*iLaKfu;
A = (A+A')./2; % Ensure symmetry
[A, notpositivedefinite] = chol(A);
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
L = iLaKfu/A;
% Begin optimization
switch gp.latent_opt.optim_method
% --------------------------------------------------------------------------------
% find the posterior mode of latent variables by fminunc large scale method
case 'fminunc_large'
fhm = @(W, f, varargin) (iKf(f) + repmat(W,1,size(f,2)).*f);
defopts=struct('GradObj','on','Hessian','on','HessMult', fhm,'TolX', 1e-8,'TolFun', 1e-8,'LargeScale', 'on','Display', 'off');
if ~isfield(gp.latent_opt, 'fminunc_opt')
opt = optimset(defopts);
else
opt = optimset(defopts,gp.latent_opt.fminunc_opt);
end
[f,fval,exitflag,output] = fminunc(@(ww) egh(ww), f', opt);
f = f';
a = iKf(f);
% find the mode by Newton's method
% --------------------------------------------------------------------------------
case 'newton'
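% A sketch of the update computed in this loop (the standard stabilized
% Newton step, cf. Rasmussen & Williams 2006, Alg. 3.1), assuming
% K = blkdiag(Labl{:}) + B'*B:
%   b = W.*f + dlp
%   a = b - sW .* (I + sW*K*sW)^{-1} * (sW .* (K*b))
% where the inverse is applied block-wise (factors LLah) plus a low-rank
% Woodbury correction through the m x m matrix A.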
tol = 1e-12;
a = f;
W = -gp.lik.fh.llg2(gp.lik, y, f, 'latent', z);
dlp = gp.lik.fh.llg(gp.lik, y, f, 'latent', z);
lp_new = gp.lik.fh.ll(gp.lik, y, f, z);
lp_old = -Inf;
iter = 0;
while lp_new - lp_old > tol && iter < maxiter
iter = iter + 1;
lp_old = lp_new; a_old = a;
sW = sqrt(W);
V = repmat(sW,1,m).*K_fu;
for i=1:length(ind)
Lah{i} = eye(size(Labl{i})) + diag(sW(ind{i}))*Labl{i}*diag(sW(ind{i}));
[LLah{i}, notpositivedefinite] = chol(Lah{i});
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
V2(ind{i},:) = LLah{i}\(LLah{i}'\V(ind{i},:));
end
A = K_uu + V'*V2; A = (A+A')./2;
[A, notpositivedefinite] = chol(A);
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
Lb = V2/A;
b = W.*f+dlp;
b2 = B'*(B*b);
bt = zeros(size(b2));
for i=1:length(ind)
b2(ind{i}) = sW(ind{i}).*(Labl{i}*b(ind{i}) + b2(ind{i}));
bt(ind{i}) = LLah{i}\(LLah{i}'\b2(ind{i}));
end
a = b - sW.*(bt - Lb*(Lb'*b2));
f = B'*(B*a);
for i=1:length(ind)
f(ind{i}) = Labl{i}*a(ind{i}) + f(ind{i}) ;
end
W = -gp.lik.fh.llg2(gp.lik, y, f, 'latent', z);
dlp = gp.lik.fh.llg(gp.lik, y, f, 'latent', z);
lp = gp.lik.fh.ll(gp.lik, y, f, z);
lp_new = -a'*f/2 + lp;
i = 0;
while i < 10 && (lp_new < lp_old || isnan(sum(f)))
% reduce step size by half
a = (a_old+a)/2;
f = B'*(B*a);
for j=1:length(ind)
f(ind{j}) = Labl{j}*a(ind{j}) + f(ind{j});
end
W = -gp.lik.fh.llg2(gp.lik, y, f, 'latent', z);
lp = gp.lik.fh.ll(gp.lik, y, f, z);
lp_new = -a'*f/2 + lp;
i = i+1;
end
end
otherwise
error('gpla_e: Unknown optimization method!')
end
W = -gp.lik.fh.llg2(gp.lik, y, f, 'latent', z);
sqrtW = sqrt(W);
logZ = 0.5*f'*a - gp.lik.fh.ll(gp.lik, y, f, z);
WKfu = repmat(sqrtW,1,m).*K_fu;
edata = 0;
for i=1:length(ind)
Lahat = eye(size(Labl{i})) + diag(sqrtW(ind{i}))*Labl{i}*diag(sqrtW(ind{i}));
[LLahat, notpositivedefinite] = chol(Lahat);
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
iLahatWKfu(ind{i},:) = LLahat\(LLahat'\WKfu(ind{i},:));
edata = edata + 2.*sum(log(diag(LLahat)));
end
A = K_uu + WKfu'*iLahatWKfu; A = (A+A')./2;
[A, notpositivedefinite] = chol(A);
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
edata = edata - 2*sum(log(diag(Luu))) + 2*sum(log(diag(A)));
edata = logZ + 0.5*edata;
La2 = Labl;
% ============================================================
% CS+FIC
% ============================================================
case 'CS+FIC'
u = gp.X_u;
m = length(u);
cf_orig = gp.cf;
cf1 = {};
cf2 = {};
j = 1;
k = 1;
for i = 1:ncf
if ~isfield(gp.cf{i},'cs')
cf1{j} = gp.cf{i};
j = j + 1;
else
cf2{k} = gp.cf{i};
k = k + 1;
end
end
gp.cf = cf1;
% First evaluate needed covariance matrices
% v defines that parameter is a vector
[Kv_ff, Cv_ff] = gp_trvar(gp, x); % f x 1 vector
K_fu = gp_cov(gp, x, u); % f x u
K_uu = gp_trcov(gp, u); % u x u, noiseless covariance K_uu
K_uu = (K_uu+K_uu')./2; % ensure the symmetry of K_uu
[Luu, notpositivedefinite] = chol(K_uu, 'lower');
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
% Evaluate the Lambda (La)
% Q_ff = K_fu*inv(K_uu)*K_fu'
B=Luu\(K_fu'); % u x f
Qv_ff=sum(B.^2)';
Lav = Cv_ff-Qv_ff; % f x 1, Vector of diagonal elements
gp.cf = cf2;
K_cs = gp_trcov(gp,x);
La = sparse(1:n,1:n,Lav,n,n) + K_cs;
gp.cf = cf_orig;
% Find fill reducing permutation and permute all the
% matrices
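% 'analyze' (from SuiteSparse, as shipped with GPstuff) returns a
% fill-reducing ordering so that the sparse LDL factorization (ldlchol)
% of the compactly supported covariance plus diagonal stays sparse;
% r is the inverse permutation used to restore the ordering afterwards.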
p = analyze(La);
r(p) = 1:n;
if ~isempty(z)
z = z(p,:);
end
f = f(p);
y = y(p);
La = La(p,p);
K_fu = K_fu(p,:);
B = B(:,p);
[VD, notpositivedefinite] = ldlchol(La);
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
iLaKfu = ldlsolve(VD,K_fu);
%iLaKfu = La\K_fu;
A = K_uu+K_fu'*iLaKfu; A = (A+A')./2; % Ensure symmetry
[A, notpositivedefinite] = chol(A);
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
L = iLaKfu/A;
% Begin optimization
switch gp.latent_opt.optim_method
% --------------------------------------------------------------------------------
% find the posterior mode of latent variables by fminunc large scale method
case 'fminunc_large'
fhm = @(W, f, varargin) (ldlsolve(VD,f) - L*(L'*f) + repmat(W,1,size(f,2)).*f); % Hessian*f; % La\f
defopts=struct('GradObj','on','Hessian','on','HessMult', fhm,'TolX', 1e-8,'TolFun', 1e-8,'LargeScale', 'on','Display', 'off');
if ~isfield(gp.latent_opt, 'fminunc_opt')
opt = optimset(defopts);
else
opt = optimset(defopts,gp.latent_opt.fminunc_opt);
end
[f,fval,exitflag,output] = fminunc(@(ww) egh(ww), f', opt);
f = f';
a = ldlsolve(VD,f) - L*(L'*f);
% --------------------------------------------------------------------------------
% find the posterior mode of latent variables by Newton method
case 'newton'
tol = 1e-8;
a = f;
W = -gp.lik.fh.llg2(gp.lik, y, f, 'latent', z);
dlp = gp.lik.fh.llg(gp.lik, y, f, 'latent', z);
lp_new = gp.lik.fh.ll(gp.lik, y, f, z);
lp_old = -Inf;
I = sparse(1:n,1:n,1,n,n);
iter = 0;
while lp_new - lp_old > tol && iter < maxiter
iter = iter + 1;
lp_old = lp_new; a_old = a;
sW = sqrt(W);
sqrtW = sparse(1:n,1:n,sW,n,n);
Lah = I + sqrtW*La*sqrtW;
[VDh, notpositivedefinite] = ldlchol(Lah);
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
V = repmat(sW,1,m).*K_fu;
Vt = ldlsolve(VDh,V);
A = K_uu + V'*Vt; A = (A+A')./2;
[A, notpositivedefinite] = chol(A);
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
Lb = Vt/A;
b = W.*f+dlp;
b2 = sW.*(La*b + B'*(B*b));
a = b - sW.*(ldlsolve(VDh,b2) - Lb*(Lb'*b2) );
f = La*a + B'*(B*a);
W = -gp.lik.fh.llg2(gp.lik, y, f, 'latent', z);
dlp = gp.lik.fh.llg(gp.lik, y, f, 'latent', z);
lp = gp.lik.fh.ll(gp.lik, y, f, z);
lp_new = -a'*f/2 + lp;
i = 0;
while i < 10 && lp_new < lp_old
a = (a_old+a)/2;
f = La*a + B'*(B*a);
W = -gp.lik.fh.llg2(gp.lik, y, f, 'latent', z);
lp = gp.lik.fh.ll(gp.lik, y, f, z);
lp_new = -a'*f/2 + lp;
i = i+1;
end
end
otherwise
error('gpla_e: Unknown optimization method!')
end
W = -gp.lik.fh.llg2(gp.lik, y, f, 'latent', z);
sqrtW = sqrt(W);
logZ = 0.5*f'*a - gp.lik.fh.ll(gp.lik, y, f, z);
WKfu = repmat(sqrtW,1,m).*K_fu;
sqrtW = sparse(1:n,1:n,sqrtW,n,n);
Lahat = sparse(1:n,1:n,1,n,n) + sqrtW*La*sqrtW;
[LDh, notpositivedefinite] = ldlchol(Lahat);
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
A = K_uu + WKfu'*ldlsolve(LDh,WKfu); A = (A+A')./2;
[A, notpositivedefinite] = chol(A);
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
edata = sum(log(diag(LDh))) - 2*sum(log(diag(Luu))) + 2*sum(log(diag(A)));
edata = logZ + 0.5*edata;
La2 = La;
% Reorder all the returned and stored values
a = a(r);
L = L(r,:);
La2 = La2(r,r);
y = y(r);
f = f(r);
W = W(r);
if ~isempty(z)
z = z(r,:);
end
% ============================================================
% DTC, SOR
% ============================================================
case {'DTC' 'VAR' 'SOR'}
u = gp.X_u;
m = length(u);
% First evaluate needed covariance matrices
% v defines that parameter is a vector
K_fu = gp_cov(gp, x, u); % f x u
K_uu = gp_trcov(gp, u); % u x u, noiseles covariance K_uu
[Luu, notpositivedefinite] = chol(K_uu, 'lower');
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
% Evaluate the Lambda (La)
% Q_ff = K_fu*inv(K_uu)*K_fu'
B=Luu\(K_fu'); % u x f
% Qv_ff=sum(B.^2)';
% Lav = zeros(size(Qv_ff));
% Lav = Cv_ff-Qv_ff; % f x 1, Vector of diagonal elements
La2 = [];
switch gp.latent_opt.optim_method
% --------------------------------------------------------------------------------
% find the posterior mode of latent variables by fminunc large scale method
case 'fminunc_large'
% fhm = @(W, f, varargin) (f./repmat(Lav,1,size(f,2)) - L*(L'*f) + repmat(W,1,size(f,2)).*f); % hessian*f; %
% defopts=struct('GradObj','on','Hessian','on','HessMult', fhm,'TolX', 1e-8,'TolFun', 1e-8,'LargeScale', 'on','Display', 'off');
% if ~isfield(gp.latent_opt, 'fminunc_opt')
% opt = optimset(defopts);
% else
% opt = optimset(defopts,gp.latent_opt.fminunc_opt);
% end
%
% fe = @(f, varargin) (0.5*f*(f'./repmat(Lav,1,size(f',2)) - L*(L'*f')) - gp.lik.fh.ll(gp.lik, y, f', z));
% fg = @(f, varargin) (f'./repmat(Lav,1,size(f',2)) - L*(L'*f') - gp.lik.fh.llg(gp.lik, y, f', 'latent', z))';
% fh = @(f, varargin) (-gp.lik.fh.llg2(gp.lik, y, f', 'latent', z));
% mydeal = @(varargin)varargin{1:nargout};
% [f,fval,exitflag,output] = fminunc(@(ww) mydeal(fe(ww), fg(ww), fh(ww)), f', opt);
% f = f';
%
% a = f./Lav - L*L'*f;
% --------------------------------------------------------------------------------
% find the posterior mode of latent variables by Newton method
case 'newton'
tol = 1e-12;
a = f;
W = -gp.lik.fh.llg2(gp.lik, y, f, 'latent', z);
dlp = gp.lik.fh.llg(gp.lik, y, f, 'latent', z);
lp_new = gp.lik.fh.ll(gp.lik, y, f, z);
lp_old = -Inf;
iter = 0;
while lp_new - lp_old > tol && iter < maxiter
iter = iter + 1;
lp_old = lp_new; a_old = a;
sW = sqrt(W);
sWKfu = repmat(sW,1,m).*K_fu;
A = K_uu + sWKfu'*sWKfu; A = (A+A')./2;
[A, notpositivedefinite]=chol(A);
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
Lb = sWKfu/A;
b = W.*f+dlp;
b2 = sW.*(B'*(B*b));
a = b - sW.*(b2 - Lb*(Lb'*b2));
f = B'*(B*a);
W = -gp.lik.fh.llg2(gp.lik, y, f, 'latent', z);
dlp = gp.lik.fh.llg(gp.lik, y, f, 'latent', z);
lp = gp.lik.fh.ll(gp.lik, y, f, z);
lp_new = -a'*f/2 + lp;
i = 0;
while i < 10 && lp_new < lp_old && ~isnan(sum(f))
% reduce step size by half
a = (a_old+a)/2;
f = B'*(B*a);
W = -gp.lik.fh.llg2(gp.lik, y, f, 'latent', z);
lp = gp.lik.fh.ll(gp.lik, y, f, z);
lp_new = -a'*f/2 + lp;
i = i+1;
end
end
% --------------------------------------------------------------------------------
% find the posterior mode of latent variables with likelihood specific algorithm
% For example, with the Student-t likelihood this means the EM algorithm,
% which is coded in the lik_t file.
case 'lik_specific'
[f, a] = gp.lik.fh.optimizef(gp, y, K_uu, zeros(n,1), K_fu);
if isnan(f)
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
otherwise
error('gpla_e: Unknown optimization method!')
end
W = -gp.lik.fh.llg2(gp.lik, y, f, 'latent', z);
logZ = 0.5*f'*a - gp.lik.fh.ll(gp.lik, y, f, z);
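% NB. 'if W >= 0' below is a vector condition in MATLAB; the branch is
% entered only when all elements of W are nonnegative.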
if W >= 0
sqrtW = sqrt(W);
% L = chol(eye(n) + diag(sqrtW)*(B'*B)*diag(sqrtW), 'lower');
% edata = logZ + sum(log(diag(L)));
sWKfu = bsxfun(@times, sqrtW, K_fu);
A = K_uu + sWKfu'*sWKfu; A = (A+A')./2;
[A, notpositivedefinite] = chol(A);
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
edata = -sum(log(diag(Luu))) + sum(log(diag(A)));
edata = logZ + edata;
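% For the 'VAR' type, the variational bound (cf. Titsias, 2009) adds the
% trace term 0.5*sum((Kv_ff-Qv_ff).*W) below; loosely, it penalizes the
% residual variance Kv_ff-Qv_ff that the inducing-point approximation
% leaves unexplained at the training inputs.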
if strcmp(gp.type,'VAR')
Kv_ff = gp_trvar(gp, x);
Qv_ff = sum(B.^2)';
edata = edata + 0.5*sum((Kv_ff-Qv_ff).*W);
La2=Kv_ff-Qv_ff;
end
else
% This is with full matrices. Needs to be rewritten.
% K = diag(Lav) + B'*B;
% % $$$ [W,I] = sort(W, 1, 'descend');
% % $$$ K = K(I,I);
% [W2,I] = sort(W, 1, 'descend');
%
% [L, notpositivedefinite] = chol(K);
% if notpositivedefinite
% [edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
% return
% end
% L1 = L;
% for jj=1:size(K,1)
% i = I(jj);
% ll = sum(L(:,i).^2);
% l = L'*L(:,i);
% upfact = W(i)./(1 + W(i).*ll);
%
% % Check that Cholesky factorization will remain positive definite
% if 1 + W(i).*ll <= 0 | upfact > 1./ll
% warning('gpla_e: 1 + W(i).*ll < 0')
%
% ind = 1:i-1;
% if isempty(z)
% mu = K(i,ind)*gp.lik.fh.llg(gp.lik, y(I(ind)), f(I(ind)), 'latent', z);
% else
% mu = K(i,ind)*gp.lik.fh.llg(gp.lik, y(I(ind)), f(I(ind)), 'latent', z(I(ind)));
% end
% upfact = gp.lik.fh.upfact(gp, y(I(i)), mu, ll);
%
% % $$$ W2 = -1./(ll+1e-3);
% % $$$ upfact = W2./(1 + W2.*ll);
% end
% if upfact > 0
% L = cholupdate(L, l.*sqrt(upfact), '-');
% else
% L = cholupdate(L, l.*sqrt(-upfact));
% end
% end
% edata = logZ + sum(log(diag(L1))) - sum(log(diag(L))); % sum(log(diag(chol(K)))) + sum(log(diag(chol((inv(K) + W)))));
% Without the commented-out code above, edata would be left undefined
% here, so fail with an informative error instead.
error('gpla_e: negative W is not supported for DTC/VAR/SOR.')
end
L=A;
% ============================================================
% SSGP
% ============================================================
case 'SSGP' % Sparse spectral sampling approximation for GP
% The approximation is proposed by M. Lazaro-Gredilla, J. Quinonero-Candela and A. Figueiras-Vidal
% in Microsoft Research technical report MSR-TR-2007-152 (November 2007)
% NOTE! This does not work at the moment.
% First evaluate needed covariance matrices
% v defines that parameter is a vector
[Phi, S] = gp_trcov(gp, x); % n x m matrix and nxn sparse matrix
Sv = diag(S);
m = size(Phi,2);
A = eye(m,m) + Phi'*(S\Phi);
[A, notpositivedefinite] = chol(A, 'lower');
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
L = (S\Phi)/A';
switch gp.latent_opt.optim_method
% find the mode by fminunc large scale method
case 'fminunc_large'
fhm = @(W, f, varargin) (f./repmat(Sv,1,size(f,2)) - L*(L'*f) + repmat(W,1,size(f,2)).*f); % Hessian*f; %
defopts=struct('GradObj','on','Hessian','on','HessMult', fhm,'TolX', 1e-8,'TolFun', 1e-8,'LargeScale', 'on','Display', 'off');
if ~isfield(gp.latent_opt, 'fminunc_opt')
opt=optimset(defopts);
else
opt = optimset(defopts,gp.latent_opt.fminunc_opt);
end
fe = @(f, varargin) (0.5*f*(f'./repmat(Sv,1,size(f',2)) - L*(L'*f')) - gp.lik.fh.ll(gp.lik, y, f', z));
fg = @(f, varargin) (f'./repmat(Sv,1,size(f',2)) - L*(L'*f') - gp.lik.fh.llg(gp.lik, y, f', 'latent', z))';
fh = @(f, varargin) (-gp.lik.fh.llg2(gp.lik, y, f', 'latent', z));
mydeal = @(varargin)varargin{1:nargout};
[f,fval,exitflag,output] = fminunc(@(ww) mydeal(fe(ww), fg(ww), fh(ww)), f', opt);
f = f';
W = -gp.lik.fh.llg2(gp.lik, y, f, 'latent', z);
sqrtW = sqrt(W);
b = L'*f;
logZ = 0.5*(f'*(f./Sv) - b'*b) - gp.lik.fh.ll(gp.lik, y, f, z);
case 'newton'
error('gpla_e: Newton''s method is not implemented for SSGP!\n')
end
WPhi = repmat(sqrtW,1,m).*Phi;
A = eye(m,m) + WPhi'./repmat((1+Sv.*W)',m,1)*WPhi; A = (A+A')./2;
[A, notpositivedefinite] = chol(A);
if notpositivedefinite
[edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite();
return
end
edata = sum(log(1+Sv.*W)) + 2*sum(log(diag(A)));
edata = logZ + 0.5*edata;
La2 = Sv;
otherwise
error('Unknown type of Gaussian process!')
end
% ======================================================================
% Evaluate the prior contribution to the error from covariance functions
% ======================================================================
eprior = 0;
for i1=1:ncf
gpcf = gp.cf{i1};
eprior = eprior - gpcf.fh.lp(gpcf);
end
% ======================================================================
% Evaluate the prior contribution to the error from likelihood function
% ======================================================================
if isfield(gp, 'lik') && isfield(gp.lik, 'p')
lik = gp.lik;
eprior = eprior - lik.fh.lp(lik);
end
e = edata + eprior;
% store values to the cache
ch.w = w;
ch.e = e;
ch.edata = edata;
ch.eprior = eprior;
ch.f = f;
ch.L = L;
% ch.W = W;
ch.n = size(x,1);
ch.La2 = La2;
ch.a = a;
ch.p=p;
ch.datahash=datahash;
end
% assert(isreal(edata))
% assert(isreal(eprior))
%
% ==============================================================
% Begin of the nested functions
% ==============================================================
%
function [e, g, h] = egh(f, varargin)
ikf = iKf(f');
e = 0.5*f*ikf - gp.lik.fh.ll(gp.lik, y, f', z);
g = (ikf - gp.lik.fh.llg(gp.lik, y, f', 'latent', z))';
h = -gp.lik.fh.llg2(gp.lik, y, f', 'latent', z);
end
function ikf = iKf(f, varargin)
switch gp.type
case {'PIC' 'PIC_BLOCK'}
iLaf = zeros(size(f));
for i=1:length(ind)
iLaf(ind{i},:) = LLabl{i}\(LLabl{i}'\f(ind{i},:));
end
ikf = iLaf - L*(L'*f);
case 'CS+FIC'
ikf = ldlsolve(VD,f) - L*(L'*f);
end
end
end
function [edata,e,eprior,f,L,a,La2,p,ch] = set_output_for_notpositivedefinite()
% Instead of stopping to chol error, return NaN
edata=NaN;
e=NaN;
eprior=NaN;
f=NaN;
L=NaN;
a=NaN;
La2=NaN;
p=NaN;
datahash = NaN;
w = NaN;
ch.e = e;
ch.edata = edata;
ch.eprior = eprior;
ch.f = f;
ch.L = L;
ch.La2 = La2;
ch.a = a;
ch.p=p;
ch.datahash=datahash;
ch.w = NaN;
end
end
| github | lcnhappe/happe-master | lik_qgp.m | .m | happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/lik_qgp.m | 19,707 | utf_8 | 85ad9907ffd15fdea5cb48eef340f8c1 |
function lik = lik_qgp(varargin)
%LIK_QGP Create a Quantile Gaussian Process likelihood (utility) structure
%
% Description
%    LIK = LIK_QGP('PARAM1',VALUE1,'PARAM2',VALUE2,...)
% creates a quantile gp likelihood structure in which the named
% parameters have the specified values. Any unspecified
% parameters are set to default values.
%
%    LIK = LIK_QGP(LIK,'PARAM1',VALUE1,'PARAM2',VALUE2,...)
%    modifies a likelihood function structure with the named
% parameters altered with the specified values.
%
% Parameters for QGP likelihood function [default]
% sigma2 - variance [0.1]
% sigma2_prior - prior for sigma2 [prior_logunif]
% quantile - Quantile of interest [0.5]
%
% Note! If the prior is 'prior_fixed' then the parameter in
% question is considered fixed and it is not handled in
% optimization, grid integration, MCMC etc.
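%
%    A minimal construction sketch (values illustrative only; EP is the
%    recommended latent method, see the note below):
%      cf  = gpcf_sexp();
%      lik = lik_qgp('quantile', 0.9, 'sigma2', 0.05);
%      gp  = gp_set('lik', lik, 'cf', cf, 'latent_method', 'EP');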
%
% The likelihood is defined as follows:
%    p(y|f, sigma2, tau) = prod_{i=1..n} tau*(1-tau)/sigma
%                          * exp( -(y_i-f_i)/sigma * (tau - I(y_i <= f_i)) )
%
%    where tau is the quantile of interest, sigma is the standard deviation
%    of the distribution and I(y <= f) = 1 if y <= f, 0 otherwise.
%
%    Note that, because of the form of the likelihood, the second-order
%    derivatives with respect to the latent values are 0. Because of this,
%    EP should be used instead of the Laplace approximation.
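%
%    For example, with tau = 0.5 the likelihood reduces to a Laplace
%    (double-exponential) density centred at f:
%      p(y|f) = 1/(4*sigma) * exp(-|y - f|/(2*sigma))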
%
% See also
% GP_SET, PRIOR_*, LIK_*
%
% References
% Boukouvalas et al. (2012). Direct Gaussian Process Quantile Regression
% Using Expectation Propagation. Appearing in Proceedings of the 29th
%    International Conference on Machine Learning, Edinburgh, Scotland, UK,
% 2012.
%
% Copyright (c) 2012 Ville Tolvanen
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'LIK_QGP';
ip.addOptional('lik', [], @isstruct);
ip.addParamValue('sigma2',0.1, @(x) isscalar(x) && x>0);
ip.addParamValue('sigma2_prior',prior_logunif(), @(x) isstruct(x) || isempty(x));
ip.addParamValue('quantile',0.5, @(x) isscalar(x) && x>0 && x<1);
ip.parse(varargin{:});
lik=ip.Results.lik;
if isempty(lik)
init=true;
lik.type = 'QGP';
else
if ~isfield(lik,'type') || ~isequal(lik.type,'QGP')
error('First argument does not seem to be a valid likelihood function structure')
end
init=false;
end
% Initialize parameters
if init || ~ismember('sigma2',ip.UsingDefaults)
lik.sigma2 = ip.Results.sigma2;
end
if init || ~ismember('quantile',ip.UsingDefaults)
lik.quantile = ip.Results.quantile;
end
% Initialize prior structure
if init
lik.p=[];
end
if init || ~ismember('sigma2_prior',ip.UsingDefaults)
lik.p.sigma2=ip.Results.sigma2_prior;
end
if init
% Set the function handles to the subfunctions
lik.fh.pak = @lik_qgp_pak;
lik.fh.unpak = @lik_qgp_unpak;
lik.fh.lp = @lik_qgp_lp;
lik.fh.lpg = @lik_qgp_lpg;
lik.fh.ll = @lik_qgp_ll;
lik.fh.llg = @lik_qgp_llg;
lik.fh.llg2 = @lik_qgp_llg2;
lik.fh.llg3 = @lik_qgp_llg3;
lik.fh.tiltedMoments = @lik_qgp_tiltedMoments;
lik.fh.siteDeriv = @lik_qgp_siteDeriv;
lik.fh.predy = @lik_qgp_predy;
lik.fh.invlink = @lik_qgp_invlink;
lik.fh.recappend = @lik_qgp_recappend;
end
end
function [w s] = lik_qgp_pak(lik)
%LIK_QGP_PAK Combine likelihood parameters into one vector.
%
% Description
% W = LIK_QGP_PAK(LIK) takes a likelihood structure LIK
% and combines the parameters into a single row vector W.
% This is a mandatory subfunction used for example in
% energy and gradient computations.
%
% w = [ log(lik.sigma2)
%             (hyperparameters of lik.sigma2)]'
%
% See also
% LIK_QGP_UNPAK
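%
% For example, with sigma2 = 0.1 and the default log-uniform prior (which
% has no hyperparameters of its own), pak returns w = log(0.1) and
% s = {'log(qgp.sigma2)'}; lik_qgp_unpak inverts this mapping.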
w = []; s = {};
if ~isempty(lik.p.sigma2)
w = [w log(lik.sigma2)];
s = [s; 'log(qgp.sigma2)'];
% Hyperparameters of sigma2
[wh sh] = lik.p.sigma2.fh.pak(lik.p.sigma2);
w = [w wh];
s = [s; sh];
end
end
function [lik, w] = lik_qgp_unpak(lik, w)
%LIK_QGP_UNPAK Extract likelihood parameters from the vector.
%
% Description
% W = LIK_QGP_UNPAK(W, LIK) takes a likelihood structure
% LIK and extracts the parameters from the vector W to the LIK
% structure. This is a mandatory subfunction used for example
% in energy and gradient computations.
%
% Assignment is inverse of
% w = [ log(lik.sigma2)
%             (hyperparameters of lik.sigma2)]'
%
% See also
% LIK_QGP_PAK
if ~isempty(lik.p.sigma2)
lik.sigma2 = exp(w(1));
w = w(2:end);
% Hyperparameters of sigma2
[p, w] = lik.p.sigma2.fh.unpak(lik.p.sigma2, w);
lik.p.sigma2 = p;
end
end
function lp = lik_qgp_lp(lik)
%LIK_QGP_LP Evaluate the log prior of likelihood parameters
%
% Description
% LP = LIK_QGP_LP(LIK) takes a likelihood structure LIK and
% returns log(p(th)), where th collects the parameters. This
% subfunction is needed when there are likelihood parameters.
%
% See also
% LIK_QGP_PAK, LIK_QGP_UNPAK, LIK_QGP_G, GP_E
lp = 0;
if ~isempty(lik.p.sigma2)
likp=lik.p;
lp = likp.sigma2.fh.lp(lik.sigma2, likp.sigma2) + log(lik.sigma2);
end
end
function lpg = lik_qgp_lpg(lik)
%LIK_QGP_LPG Evaluate gradient of the log prior with respect
% to the parameters.
%
% Description
% LPG = LIK_QGP_LPG(LIK) takes a QGP likelihood
% function structure LIK and returns LPG = d log (p(th))/dth,
% where th is the vector of parameters. This subfunction is
% needed when there are likelihood parameters.
%
% See also
% LIK_QGP_PAK, LIK_QGP_UNPAK, LIK_QGP_E, GP_G
lpg = [];
if ~isempty(lik.p.sigma2)
likp=lik.p;
lpgs = likp.sigma2.fh.lpg(lik.sigma2, likp.sigma2);
lpg = lpgs(1).*lik.sigma2 + 1;
if length(lpgs) > 1
lpg = [lpg lpgs(2:end)];
end
end
end
function ll = lik_qgp_ll(lik, y, f, z)
%LIK_QGP_LL Log likelihood
%
% Description
% LL = LIK_QGP_LL(LIK, Y, F, Z) takes a likelihood
% structure LIK, observations Y and latent values F.
% Returns the log likelihood, log p(y|f,z). This subfunction
% is needed when using Laplace approximation or MCMC for
% inference with non-Gaussian likelihoods. This subfunction
% is also used in information criteria (DIC, WAIC) computations.
%
% See also
% LIK_QGP_LLG, LIK_QGP_LLG3, LIK_QGP_LLG2, GPLA_E
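%
% Quick scalar check of the formula below (illustrative only): with
% tau = 0.5, sigma2 = 1, y = 1 and f = 0 the indicator (y<=f) is 0, so
% ll = log(0.25) - 1*0.5 = -1.8863 (to four decimals).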
tau=lik.quantile;
sigma=sqrt(lik.sigma2);
ll = sum(log(tau*(1-tau)/sigma) - (y-f)./sigma.*(tau-(y<=f)));
end
function llg = lik_qgp_llg(lik, y, f, param, z)
%LIK_QGP_LLG Gradient of the log likelihood
%
% Description
% LLG = LIK_QGP_LLG(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, observations Y and latent values F. Returns
% the gradient of the log likelihood with respect to PARAM.
% At the moment PARAM can be 'param' or 'latent'. This subfunction
% is needed when using Laplace approximation or MCMC for inference
% with non-Gaussian likelihoods.
%
% See also
% LIK_QGP_LL, LIK_QGP_LLG2, LIK_QGP_LLG3, GPLA_E
tau=lik.quantile;
sigma2=lik.sigma2;
switch param
case 'param'
llg = sum(-1/(2.*sigma2) + (y-f)./(2.*sigma2^(3/2)).*(tau-(y<=f)));
% correction for the log transformation
llg = llg.*lik.sigma2;
case 'latent'
llg = (tau-(y<=f))/sqrt(sigma2);
end
end
function llg2 = lik_qgp_llg2(lik, y, f, param, z)
%LIK_QGP_LLG2 Second gradients of the log likelihood
%
% Description
% LLG2 = LIK_QGP_LLG2(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, observations Y and latent values F. Returns
% the Hessian of the log likelihood with respect to PARAM.
% At the moment PARAM can be 'param' or 'latent'. LLG2 is
% a vector with diagonal elements of the Hessian matrix
% (off diagonals are zero). This subfunction is needed
% when using Laplace approximation or EP for inference
% with non-Gaussian likelihoods.
%
% See also
% LIK_QGP_LL, LIK_QGP_LLG, LIK_QGP_LLG3, GPLA_E
tau=lik.quantile;
sigma2=lik.sigma2;
switch param
case 'param'
llg2 = sum(1/(2*sigma2^2) - 3.*(tau-(y<=f)).*(y-f)./(4.*sigma2^(5/2)));
% correction due to the log transformation
llg2 = llg2.*lik.sigma2;
case 'latent'
llg2 = zeros(size(f));
case 'latent+param'
llg2 = -(tau-(y<=f))./(2*sigma2^(3/2));
% correction due to the log transformation
llg2 = llg2.*lik.sigma2;
end
end
function llg3 = lik_qgp_llg3(lik, y, f, param, z)
%LIK_QGP_LLG3 Third gradients of the log likelihood
%
% Description
% LLG3 = LIK_QGP_LLG3(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, observations Y and latent values F and
% returns the third gradients of the log likelihood with
% respect to PARAM. At the moment PARAM can be 'param' or
% 'latent'. LLG3 is a vector with third gradients. This
% subfunction is needed when using Laplace approximation for
% inference with non-Gaussian likelihoods.
%
% See also
% LIK_QGP_LL, LIK_QGP_LLG, LIK_QGP_LLG2, GPLA_E, GPLA_G
tau=lik.quantile;
sigma2=lik.sigma2;
switch param
case 'param'
llg3 = sum(-1/sigma2^3 + 15.*(tau-(y<=f)).*(y-f)./(8.*sigma2^(7/2)));
case 'latent'
llg3 = 0;
case 'latent2+param'
llg3 = 0;
% correction due to the log transformation
llg3 = llg3.*lik.sigma2;
end
end
function [logM_0, m_1, sigm2hati1] = lik_qgp_tiltedMoments(lik, y, i1, sigm2_i, myy_i, z)
%LIK_QGP_TILTEDMOMENTS Returns the marginal moments for EP algorithm
%
% Description
%   [LOGM_0, M_1, M_2] = LIK_QGP_TILTEDMOMENTS(LIK, Y, I, S2,
%   MYY, Z) takes a likelihood structure LIK, observations
%   Y, index I and cavity variance S2 and mean MYY. Returns
%   the log of the zeroth moment LOGM_0, mean M_1 and variance M_2 of the
% posterior marginal (see Rasmussen and Williams (2006):
% Gaussian processes for Machine Learning, page 55). This
% subfunction is needed when using EP for inference with
% non-Gaussian likelihoods.
%
% See also
% GPEP_E
yy = y(i1);
sigma2 = lik.sigma2;
tau=lik.quantile;
logM_0=zeros(size(yy));
m_1=zeros(size(yy));
sigm2hati1=zeros(size(yy));
for i=1:length(i1)
% get a function handle of an unnormalized tilted distribution
% (likelihood * cavity = Quantile-GP * Gaussian)
% and useful integration limits
[tf,minf,maxf]=init_qgp_norm(yy(i),myy_i(i),sigm2_i(i),sigma2,tau);
% Integrate with quadrature
RTOL = 1.e-6;
ATOL = 1.e-10;
[m_0, m_1(i), m_2] = quad_moments(tf, minf, maxf, RTOL, ATOL);
sigm2hati1(i) = m_2 - m_1(i).^2;
% If the second central moment is greater than or equal to the cavity
% variance, integrate more precisely. Theoretically, for a log-concave
% likelihood, sigm2hati1 < sigm2_i should hold.
if sigm2hati1(i) >= sigm2_i(i)
ATOL = ATOL.^2;
RTOL = RTOL.^2;
[m_0, m_1(i), m_2] = quad_moments(tf, minf, maxf, RTOL, ATOL);
sigm2hati1(i) = m_2 - m_1(i).^2;
if sigm2hati1(i) >= sigm2_i(i)
error('lik_qgp_tilted_moments: sigm2hati1 >= sigm2_i');
end
end
logM_0(i) = log(m_0);
end
end
function [g_i] = lik_qgp_siteDeriv(lik, y, i1, sigm2_i, myy_i, z)
%LIK_QGP_SITEDERIV Evaluate the expectation of the gradient
% of the log likelihood term with respect
% to the likelihood parameters for EP
%
%   Description
%   G_I = LIK_QGP_SITEDERIV(LIK, Y, I, S2, MYY, Z) takes a
% likelihood structure LIK, observations Y, index I
% and cavity variance S2 and mean MYY. Returns E_f
% [d log p(y_i|f_i) /d a], where a is the likelihood
% parameter and the expectation is over the marginal posterior.
% This term is needed when evaluating the gradients of
% the marginal likelihood estimate Z_EP with respect to
% the likelihood parameters (see Seeger (2008):
%   Expectation propagation for exponential families). This
% subfunction is needed when using EP for inference with
% non-Gaussian likelihoods and there are likelihood parameters.
%
% See also
% GPEP_G
yy = y(i1);
sigma2=lik.sigma2;
tau=lik.quantile;
% get a function handle of an unnormalized tilted distribution
% (likelihood * cavity = Quantile-GP * Gaussian)
% and useful integration limits
[tf,minf,maxf]=init_qgp_norm(yy,myy_i,sigm2_i,sigma2,tau);
% additionally get function handle for the derivative
td = @deriv;
% Integrate with quadgk
[m_0, fhncnt] = quadgk(tf, minf, maxf);
[g_i, fhncnt] = quadgk(@(f) td(f).*tf(f)./m_0, minf, maxf);
g_i = g_i.*sigma2;
function g = deriv(f)
g = -1/(2.*sigma2) + (yy-f)./(2.*sigma2^(3/2)).*(tau-(yy<=f));
end
end
function [lpy, Ey, Vary] = lik_qgp_predy(lik, Ef, Varf, yt, zt)
%LIK_QGP_PREDY Returns the predictive mean, variance and density of y
%
% Description
%   LPY = LIK_QGP_PREDY(LIK, EF, VARF, YT, ZT)
% Returns logarithm of the predictive density PY of YT, that is
% p(yt | zt) = \int p(yt | f, zt) p(f|y) df.
% This subfunction is needed when computing posterior predictive
% distributions for future observations.
%
% [LPY, EY, VARY] = LIK_QGP_PREDY(LIK, EF, VARF) takes a
% likelihood structure LIK, posterior mean EF and posterior
% Variance VARF of the latent variable and returns the
% posterior predictive mean EY and variance VARY of the
% observations related to the latent variables. This
% subfunction is needed when computing posterior predictive
% distributions for future observations.
%
%
% See also
% GPLA_PRED, GPEP_PRED, GPMC_PRED
sigma2=lik.sigma2;
tau=lik.quantile;
Ey=[];
Vary=[];
% Evaluate the posterior predictive densities of the given observations
lpy = zeros(length(yt),1);
for i1=1:length(yt)
% get a function handle of the likelihood times posterior
% (likelihood * posterior = Quantile-GP * Gaussian)
% and useful integration limits
[pdf,minf,maxf]=init_qgp_norm(...
yt(i1),Ef(i1),Varf(i1),sigma2, tau);
% integrate over the f to get posterior predictive distribution
lpy(i1) = log(quadgk(pdf, minf, maxf));
end
end
function [df,minf,maxf] = init_qgp_norm(yy,myy_i,sigm2_i,sigma2,tau)
%INIT_QGP_NORM
%
% Description
%    Returns a function handle to a function evaluating
%    Quantile-GP * Gaussian, which is used for evaluating
%    (likelihood * cavity) or (likelihood * posterior). Also
%    returns useful limits for integration. This is a private function
% for lik_qgp. This subfunction is needed by subfunctions
% tiltedMoments, siteDeriv and predy.
%
% See also
% LIK_QGP_TILTEDMOMENTS, LIK_QGP_SITEDERIV,
% LIK_QGP_PREDY
sigma=sqrt(sigma2);
% avoid repetitive evaluation of constant part
ldconst = log(tau*(1-tau)/sigma) ...
- log(sigm2_i)/2 - log(2*pi)/2;
% Create function handle for the function to be integrated
df = @qgp_norm;
% use log to avoid underflow, and derivatives for faster search
ld = @log_qgp_norm;
ldg = @log_qgp_norm_g;
% ldg2 = @log_qgp_norm_g2;
% Set the limits for integration
% Quantile-GP likelihood is log-concave so the qgp_norm
% function is unimodal, which makes things easier
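% The mode is bracketed by expanding [minf,maxf] until the derivative
% ldg changes sign, then located by bisection (niter halvings); the
% final integration limits are pushed out until the log-density has
% dropped lddiff below its value at the mode.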
if yy==0
% with yy==0, the mode of the likelihood is not defined
% use the mode of the Gaussian (cavity or posterior) as a first guess
modef = myy_i;
else
% use precision weighted mean of the Gaussian approximation
% of the Quantile-GP likelihood and Gaussian
modef = (myy_i/sigm2_i + yy/sigma2)/(1/sigm2_i + 1/sigma2);
end
% find the mode of the integrand by bisection on the derivative;
% a few iterations are enough, since the first guess is in the right direction
niter=8; % number of bisection iterations
minf=modef-6*sigm2_i;
while ldg(minf) < 0
minf=minf-2*sigm2_i;
end
maxf=modef+6*sigm2_i;
while ldg(maxf) > 0
maxf=maxf+2*sigm2_i;
end
for ni=1:niter
% h=ldg2(modef);
modef=0.5*(minf+maxf);
if ldg(modef) < 0
maxf=modef;
else
minf=modef;
end
end
% integrand limits based on Gaussian approximation at mode
minf=modef-6*sqrt(sigm2_i);
maxf=modef+6*sqrt(sigm2_i);
modeld=ld(modef);
iter=0;
% check that density at end points is low enough
lddiff=20; % min difference in log-density between mode and end-points
minld=ld(minf);
step=1;
while minld>(modeld-lddiff)
minf=minf-step*sqrt(sigm2_i);
minld=ld(minf);
iter=iter+1;
step=step*2;
if iter>100
error(['lik_qgp -> init_qgp_norm: ' ...
'integration interval minimum not found ' ...
'even after looking hard!'])
end
end
maxld=ld(maxf);
step=1;
while maxld>(modeld-lddiff)
maxf=maxf+step*sqrt(sigm2_i);
maxld=ld(maxf);
iter=iter+1;
step=step*2;
if iter>100
error(['lik_qgp -> init_qgp_norm: ' ...
'integration interval maximum not found ' ...
'even after looking hard!'])
end
end
function integrand = qgp_norm(f)
% Quantile-GP * Gaussian
integrand = exp(ldconst ...
-(yy-f)./sqrt(sigma2).*(tau-(yy<=f)) ...
-0.5*(f-myy_i).^2./sigm2_i);
end
function log_int = log_qgp_norm(f)
% log(Quantile-GP * Gaussian)
% log_qgp_norm is used to avoid underflow when searching
% integration interval
log_int = ldconst...
-(yy-f)./sqrt(sigma2).*(tau-(yy<=f)) ...
-0.5*(f-myy_i).^2./sigm2_i;
end
function g = log_qgp_norm_g(f)
% d/df log(Quantile-GP * Gaussian)
% derivative of log_qgp_norm
g = (tau-(yy<=f))/sqrt(sigma2) ...
+ (myy_i - f)./sigm2_i;
end
end
function mu = lik_qgp_invlink(lik, f, z)
%LIK_QGP_INVLINK Returns values of inverse link function
%
% Description
% MU = LIK_QGP_INVLINK(LIK, F) takes a likelihood structure LIK and
% latent values F and returns the values MU of inverse link function.
% This subfunction is needed when using function gp_predprctmu.
%
% See also
% LIK_QGP_LL, LIK_QGP_PREDY
mu = f;
end
function reclik = lik_qgp_recappend(reclik, ri, lik)
%RECAPPEND Append the parameters to the record
%
% Description
% RECLIK = LIK_QGP_RECAPPEND(RECLIK, RI, LIK) takes a
% likelihood record structure RECLIK, record index RI and
% likelihood structure LIK with the current MCMC samples of
% the parameters. Returns RECLIK which contains all the old
% samples and the current samples from LIK. This subfunction
% is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC
if nargin == 2
% Initialize the record
reclik.type = 'Quantile-GP';
% Initialize parameter
reclik.sigma2 = [];
% Set the function handles
reclik.fh.pak = @lik_qgp_pak;
reclik.fh.unpak = @lik_qgp_unpak;
reclik.fh.lp = @lik_qgp_lp;
reclik.fh.lpg = @lik_qgp_lpg;
reclik.fh.ll = @lik_qgp_ll;
reclik.fh.llg = @lik_qgp_llg;
reclik.fh.llg2 = @lik_qgp_llg2;
reclik.fh.llg3 = @lik_qgp_llg3;
reclik.fh.tiltedMoments = @lik_qgp_tiltedMoments;
reclik.fh.siteDeriv = @lik_qgp_siteDeriv;
reclik.fh.predy = @lik_qgp_predy;
reclik.fh.invlink = @lik_qgp_invlink;
reclik.fh.recappend = @lik_qgp_recappend;
reclik.p=[];
reclik.p.sigma2=[];
if ~isempty(ri.p.sigma2)
reclik.p.sigma2 = ri.p.sigma2;
end
else
% Append to the record
reclik.sigma2(ri,:)=lik.sigma2;
if ~isempty(lik.p)
reclik.p.sigma2 = lik.p.sigma2.fh.recappend(reclik.p.sigma2, ri, lik.p.sigma2);
end
end
end
| github | lcnhappe/happe-master | gpcf_periodic.m | .m | happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpcf_periodic.m | 34,635 | utf_8 | a5da1d3e5513f698a47e34234bbd27e4 |
function gpcf = gpcf_periodic(varargin)
%GPCF_PERIODIC Create a periodic covariance function for Gaussian Process
%
% Description
%   GPCF = GPCF_PERIODIC('PARAM1',VALUE1,'PARAM2',VALUE2,...)
% creates periodic covariance function structure in which the
% named parameters have the specified values. Any unspecified
% parameters are set to default values.
%
%   GPCF = GPCF_PERIODIC(GPCF,'PARAM1',VALUE1,'PARAM2',VALUE2,...)
%   modifies a covariance function structure with the named
% parameters altered with the specified values.
%
% Periodic covariance function with squared exponential decay
% part as in Rasmussen & Williams (2006) Gaussian processes for
% Machine Learning.
%
% Parameters for periodic covariance function [default]
% magnSigma2 - magnitude (squared) [0.1]
% lengthScale - length scale for each input [10]
%                        This can be either a scalar
%                        (isotropic) or a vector (ARD)
% period - length of the periodic component(s) [1]
% lengthScale_sexp - length scale for the squared exponential
%                        component [10]. This can be either a
%                        scalar (isotropic) or a vector (ARD).
% decay - determines whether the squared exponential
% decay term is used (1) or not (0).
% Not a hyperparameter for the function.
% magnSigma2_prior - prior structure for magnSigma2 [prior_logunif]
% lengthScale_prior - prior structure for lengthScale [prior_t]
% lengthScale_sexp_prior - prior structure for lengthScale_sexp
% [prior_fixed]
% period_prior - prior structure for period [prior_fixed]
%
% Note! If the prior is 'prior_fixed' then the parameter in
% question is considered fixed and it is not handled in
% optimization, grid integration, MCMC etc.
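%
%  A minimal construction sketch (hyperparameter values illustrative only):
%    gpcf = gpcf_periodic('period', 365, 'decay', 1, ...
%                         'lengthScale', 2, 'lengthScale_sexp', 100);
%    gp   = gp_set('cf', gpcf);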
%
% See also
% GP_SET, GPCF_*, PRIOR_*
% Copyright (c) 2009-2010 Heikki Peura
% Copyright (c) 2010 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'GPCF_PERIODIC';
ip.addOptional('gpcf', [], @isstruct);
ip.addParamValue('magnSigma2',0.1, @(x) isscalar(x) && x>0);
ip.addParamValue('lengthScale',10, @(x) isvector(x) && all(x>0));
ip.addParamValue('period',1, @(x) isscalar(x) && x>0);
ip.addParamValue('lengthScale_sexp',10, @(x) isvector(x) && all(x>0));
ip.addParamValue('decay',0, @(x) isscalar(x) && (x==0||x==1));
ip.addParamValue('magnSigma2_prior',prior_logunif, @(x) isstruct(x) || isempty(x));
ip.addParamValue('lengthScale_prior',prior_t, @(x) isstruct(x) || isempty(x));
ip.addParamValue('lengthScale_sexp_prior',[], @(x) isstruct(x) || isempty(x));
ip.addParamValue('period_prior',[], @(x) isstruct(x) || isempty(x));
ip.addParamValue('selectedVariables',[], @(x) isempty(x) || ...
(isvector(x) && all(x>0)));
ip.parse(varargin{:});
gpcf=ip.Results.gpcf;
if isempty(gpcf)
init=true;
gpcf.type = 'gpcf_periodic';
else
if ~isfield(gpcf,'type') || ~isequal(gpcf.type,'gpcf_periodic')
error('First argument does not seem to be a valid covariance function structure')
end
init=false;
end
if init || ~ismember('magnSigma2',ip.UsingDefaults)
gpcf.magnSigma2 = ip.Results.magnSigma2;
end
if init || ~ismember('lengthScale',ip.UsingDefaults)
gpcf.lengthScale = ip.Results.lengthScale;
end
if init || ~ismember('period',ip.UsingDefaults)
gpcf.period = ip.Results.period;
end
if init || ~ismember('lengthScale_sexp',ip.UsingDefaults)
gpcf.lengthScale_sexp = ip.Results.lengthScale_sexp;
end
if init || ~ismember('decay',ip.UsingDefaults)
gpcf.decay = ip.Results.decay;
end
if init || ~ismember('magnSigma2_prior',ip.UsingDefaults)
gpcf.p.magnSigma2 = ip.Results.magnSigma2_prior;
end
if init || ~ismember('lengthScale_prior',ip.UsingDefaults)
gpcf.p.lengthScale = ip.Results.lengthScale_prior;
end
if init || ~ismember('lengthScale_sexp_prior',ip.UsingDefaults)
gpcf.p.lengthScale_sexp = ip.Results.lengthScale_sexp_prior;
end
if init || ~ismember('period_prior',ip.UsingDefaults)
gpcf.p.period = ip.Results.period_prior;
end
if ~ismember('selectedVariables',ip.UsingDefaults)
gpcf.selectedVariables = ip.Results.selectedVariables;
end
if init
% Set the function handles to the subfunctions
gpcf.fh.pak = @gpcf_periodic_pak;
gpcf.fh.unpak = @gpcf_periodic_unpak;
gpcf.fh.lp = @gpcf_periodic_lp;
gpcf.fh.lpg = @gpcf_periodic_lpg;
gpcf.fh.cfg = @gpcf_periodic_cfg;
gpcf.fh.ginput = @gpcf_periodic_ginput;
gpcf.fh.cov = @gpcf_periodic_cov;
gpcf.fh.covvec = @gpcf_periodic_covvec;
gpcf.fh.trcov = @gpcf_periodic_trcov;
gpcf.fh.trvar = @gpcf_periodic_trvar;
gpcf.fh.recappend = @gpcf_periodic_recappend;
end
end
function [w, s] = gpcf_periodic_pak(gpcf)
%GPCF_PERIODIC_PAK Combine GP covariance function parameters into
% one vector
%
% Description
% W = GPCF_PERIODIC_PAK(GPCF) takes a covariance function
% structure GPCF and combines the covariance function
% parameters and their hyperparameters into a single row
% vector W. This is a mandatory subfunction used for example
% in energy and gradient computations.
%
% w = [ log(gpcf.magnSigma2)
% (hyperparameters of gpcf.magnSigma2)
% log(gpcf.lengthScale(:))
% (hyperparameters of gpcf.lengthScale)
% log(gpcf.lengthScale_sexp)
% (hyperparameters of gpcf.lengthScale_sexp)
% log(gpcf.period)
% (hyperparameters of gpcf.period)]'
%
% See also
% GPCF_PERIODIC_UNPAK
if isfield(gpcf,'metric')
error('Periodic covariance function not compatible with metrics.');
else
i1=0;i2=1;
w = []; s = {};
if ~isempty(gpcf.p.magnSigma2)
w = [w log(gpcf.magnSigma2)];
s = [s; 'log(periodic.magnSigma2)'];
% Hyperparameters of magnSigma2
[wh sh] = gpcf.p.magnSigma2.fh.pak(gpcf.p.magnSigma2);
w = [w wh];
s = [s; sh];
end
if ~isempty(gpcf.p.lengthScale)
w = [w log(gpcf.lengthScale)];
s = [s; 'log(periodic.lengthScale)'];
% Hyperparameters of lengthScale
[wh sh] = gpcf.p.lengthScale.fh.pak(gpcf.p.lengthScale);
w = [w wh];
s = [s; sh];
end
if ~isempty(gpcf.p.lengthScale_sexp) && gpcf.decay == 1
w = [w log(gpcf.lengthScale_sexp)];
s = [s; 'log(periodic.lengthScale_sexp)'];
% Hyperparameters of lengthScale_sexp
[wh sh] = gpcf.p.lengthScale_sexp.fh.pak(gpcf.p.lengthScale_sexp);
w = [w wh];
s = [s; sh];
end
if ~isempty(gpcf.p.period)
w = [w log(gpcf.period)];
s = [s; 'log(periodic.period)'];
% Hyperparameters of period
[wh sh] = gpcf.p.period.fh.pak(gpcf.p.period);
w = [w wh];
s = [s; sh];
end
end
end
function [gpcf, w] = gpcf_periodic_unpak(gpcf, w)
%GPCF_PERIODIC_UNPAK Sets the covariance function parameters into
% the structure
%
% Description
% [GPCF, W] = GPCF_PERIODIC_UNPAK(GPCF, W) takes a covariance
% function structure GPCF and a hyper-parameter vector W, and
% returns a covariance function structure identical to the
% input, except that the covariance hyper-parameters have been
% set to the values in W. Deletes the values set to GPCF from
% W and returns the modified W. This is a mandatory subfunction
% used for example in energy and gradient computations.
%
% Assignment is inverse of
% w = [ log(gpcf.magnSigma2)
% (hyperparameters of gpcf.magnSigma2)
% log(gpcf.lengthScale(:))
% (hyperparameters of gpcf.lengthScale)
% log(gpcf.lengthScale_sexp)
% (hyperparameters of gpcf.lengthScale_sexp)
% log(gpcf.period)
% (hyperparameters of gpcf.period)]'
%
% See also
% GPCF_PERIODIC_PAK
if isfield(gpcf,'metric')
error('Covariance function not compatible with metrics');
else
gpp=gpcf.p;
if ~isempty(gpp.magnSigma2)
i1=1;
gpcf.magnSigma2 = exp(w(i1));
w = w(i1+1:end);
end
if ~isempty(gpp.lengthScale)
i2=length(gpcf.lengthScale);
i1=1;
gpcf.lengthScale = exp(w(i1:i2));
w = w(i2+1:end);
end
if ~isempty(gpp.lengthScale_sexp) && gpcf.decay == 1
i2=length(gpcf.lengthScale_sexp);
i1=1;
gpcf.lengthScale_sexp = exp(w(i1:i2));
w = w(i2+1:end);
end
if ~isempty(gpp.period)
i2=length(gpcf.period);
i1=1;
gpcf.period = exp(w(i1:i2));
w = w(i2+1:end);
end
% hyperparameters
if ~isempty(gpp.magnSigma2)
[p, w] = gpcf.p.magnSigma2.fh.unpak(gpcf.p.magnSigma2, w);
gpcf.p.magnSigma2 = p;
end
if ~isempty(gpp.lengthScale)
[p, w] = gpcf.p.lengthScale.fh.unpak(gpcf.p.lengthScale, w);
gpcf.p.lengthScale = p;
end
if ~isempty(gpp.lengthScale_sexp)
[p, w] = gpcf.p.lengthScale_sexp.fh.unpak(gpcf.p.lengthScale_sexp, w);
gpcf.p.lengthScale_sexp = p;
end
if ~isempty(gpp.period)
[p, w] = gpcf.p.period.fh.unpak(gpcf.p.period, w);
gpcf.p.period = p;
end
end
end
function lp = gpcf_periodic_lp(gpcf)
%GPCF_PERIODIC_LP Evaluate the log prior of covariance function parameters
%
% Description
% LP = GPCF_PERIODIC_LP(GPCF) takes a covariance function
% structure GPCF and returns log(p(th)), where th collects the
% parameters. This is a mandatory subfunction used for example
% in energy computations.
%
% Also the log prior of the hyperparameters of the covariance
% function parameters is added to E if hyperprior is
% defined.
%
% See also
% GPCF_PERIODIC_PAK, GPCF_PERIODIC_UNPAK, GPCF_PERIODIC_LPG, GP_E
lp = 0;
gpp=gpcf.p;
if isfield(gpcf,'metric')
error('Covariance function not compatible with metrics');
else
% Evaluate the prior contribution to the error. The parameters that
% are sampled are from space W = log(w) where w is all the "real" samples.
% On the other hand errors are evaluated in the W-space so we need take
% into account also the Jacobian of transformation W -> w = exp(W).
% See Gelman et.al., 2004, Bayesian data Analysis, second edition, p24.
if ~isempty(gpcf.p.magnSigma2)
lp = gpp.magnSigma2.fh.lp(gpcf.magnSigma2, gpp.magnSigma2) +log(gpcf.magnSigma2);
end
if ~isempty(gpp.lengthScale)
lp = lp +gpp.lengthScale.fh.lp(gpcf.lengthScale, gpp.lengthScale) +sum(log(gpcf.lengthScale));
end
if ~isempty(gpp.lengthScale_sexp) && gpcf.decay == 1
lp = lp +gpp.lengthScale_sexp.fh.lp(gpcf.lengthScale_sexp, gpp.lengthScale_sexp) +sum(log(gpcf.lengthScale_sexp));
end
if ~isempty(gpcf.p.period)
lp = gpp.period.fh.lp(gpcf.period, gpp.period) +sum(log(gpcf.period));
end
end
end
function lpg = gpcf_periodic_lpg(gpcf)
%GPCF_PERIODIC_LPG Evaluate gradient of the log prior with respect
% to the parameters.
%
% Description
% LPG = GPCF_PERIODIC_LPG(GPCF) takes a covariance function
% structure GPCF and returns LPG = d log (p(th))/dth, where th
% is the vector of parameters. This is a mandatory subfunction
% used for example in gradient computations.
%
% See also
% GPCF_PERIODIC_PAK, GPCF_PERIODIC_UNPAK, GPCF_PERIODIC_LP, GP_G
lpg = [];
gpp=gpcf.p;
if isfield(gpcf,'metric')
error('Covariance function not compatible with metrics');
end
if ~isempty(gpcf.p.magnSigma2)
lpgs = gpp.magnSigma2.fh.lpg(gpcf.magnSigma2, gpp.magnSigma2);
lpg = [lpg lpgs(1).*gpcf.magnSigma2+1 lpgs(2:end)];
end
if ~isempty(gpcf.p.lengthScale)
lll = length(gpcf.lengthScale);
lpgs = gpp.lengthScale.fh.lpg(gpcf.lengthScale, gpp.lengthScale);
lpg = [lpg lpgs(1:lll).*gpcf.lengthScale+1 lpgs(lll+1:end)];
end
if gpcf.decay == 1 && ~isempty(gpcf.p.lengthScale_sexp)
lll = length(gpcf.lengthScale_sexp);
lpgs = gpp.lengthScale_sexp.fh.lpg(gpcf.lengthScale_sexp, gpp.lengthScale_sexp);
lpg = [lpg lpgs(1:lll).*gpcf.lengthScale_sexp+1 lpgs(lll+1:end)];
end
if ~isempty(gpcf.p.period)
lpgs = gpp.period.fh.lpg(gpcf.period, gpp.period);
lpg = [lpg lpgs(1).*gpcf.period+1 lpgs(2:end)];
end
end
function DKff = gpcf_periodic_cfg(gpcf, x, x2, mask, i1)
%GPCF_PERIODIC_CFG Evaluate gradient of covariance function
% with respect to the parameters
%
% Description
% DKff = GPCF_PERIODIC_CFG(GPCF, X) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X) with respect to th (cell array with matrix elements).
% This is a mandatory subfunction used in gradient computations.
%
% DKff = GPCF_PERIODIC_CFG(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_PERIODIC_CFG(GPCF, X, [], MASK) takes a
% covariance function structure GPCF, a matrix X of input
% vectors and returns DKff, the diagonal of gradients of
% covariance matrix Kff = k(X,X2) with respect to th (cell
% array with matrix elements). This subfunction is needed
% when using sparse approximations (e.g. FIC).
%
% DKff = GPCF_PERIODIC_CFG(GPCF, X, X2, [], i) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2), or k(X,X) if X2 is empty, with respect to ith
% hyperparameter. This subfunction is needed when using memory
% save option in gp_set.
%
% See also
% GPCF_PERIODIC_PAK, GPCF_PERIODIC_UNPAK, GPCF_PERIODIC_LP, GP_G
gpp=gpcf.p;
i2=1;
gp_period=gpcf.period;
DKff={};
gprior=[];
if nargin==5
% Use memory save option
savememory=1;
if i1==0
% Return number of hyperparameters
i=0;
if ~isempty(gpcf.p.magnSigma2)
i=1;
end
if ~isempty(gpcf.p.lengthScale)
i=i+length(gpcf.lengthScale);
end
if gpcf.decay==1 && ~isempty(gpcf.p.lengthScale_sexp)
i=i+length(gpcf.lengthScale_sexp);
end
if ~isempty(gpcf.p.period)
i=i+length(gpcf.period);
end
DKff=i;
return
end
else
savememory=0;
end
% Evaluate: DKff{1} = d Kff / d magnSigma2
% DKff{2} = d Kff / d lengthScale
% NOTE! Here we have already taken into account that the parameters are transformed
% through log() and thus dK/dlog(p) = p * dK/dp
% evaluate the gradient for training covariance
if nargin == 2 || (isempty(x2) && isempty(mask))
Cdm = gpcf_periodic_trcov(gpcf, x);
ii1=0;
if ~isempty(gpcf.p.magnSigma2) && (~savememory || all(i1==1))
ii1=1;
DKff{ii1} = Cdm;
end
if isfield(gpcf,'metric')
error('Covariance function not compatible with metrics');
else
if isfield(gpcf,'selectedVariables')
x = x(:,gpcf.selectedVariables);
end
[n, m] =size(x);
if ~savememory
i1=1:m;
else
if i1==1
DKff=DKff{1};
return
else
i1=i1-1;
end
end
if ~isempty(gpcf.p.lengthScale) && (~savememory || all(i1 <= length(gpcf.lengthScale)))
% loop over all the lengthScales
if length(gpcf.lengthScale) == 1
% In the case of isotropic PERIODIC
s = 2./gpcf.lengthScale.^2;
dist = 0;
for i=1:m
D = sin(pi.*bsxfun(@minus,x(:,i),x(:,i)')./gp_period);
dist = dist + 2.*D.^2;
end
D = Cdm.*s.*dist;
ii1 = ii1+1;
DKff{ii1} = D;
else
% In the case ARD is used
for i=i1
s = 2./gpcf.lengthScale(i).^2;
dist = sin(pi.*bsxfun(@minus,x(:,i),x(:,i)')./gp_period);
D = Cdm.*s.*2.*dist.^2;
ii1 = ii1+1;
DKff{ii1} = D;
end
end
end
if savememory
if length(DKff) == 1
DKff=DKff{1};
return
end
i1=i1-length(gpcf.lengthScale);
end
if gpcf.decay == 1
if ~isempty(gpcf.p.lengthScale_sexp) && (~savememory || all(i1 <= length(gpcf.lengthScale_sexp)))
if length(gpcf.lengthScale_sexp) == 1
% In the case of isotropic PERIODIC
s = 1./gpcf.lengthScale_sexp.^2;
dist = 0;
for i=1:m
D = bsxfun(@minus,x(:,i),x(:,i)');
dist = dist + D.^2;
end
D = Cdm.*s.*dist;
ii1 = ii1+1;
DKff{ii1} = D;
else
% In the case ARD is used
for i=i1
s = 1./gpcf.lengthScale_sexp(i).^2;
dist = bsxfun(@minus,x(:,i),x(:,i)');
D = Cdm.*s.*dist.^2;
ii1 = ii1+1;
DKff{ii1} = D;
end
end
end
end
if savememory
if length(DKff) == 1
DKff=DKff{1};
return
end
i1=i1-length(gpcf.lengthScale_sexp);
end
if ~isempty(gpcf.p.period)
% Evaluate help matrix for calculations of derivatives
% with respect to the period
if length(gpcf.lengthScale) == 1
% In the case of an isotropic PERIODIC
s = repmat(1./gpcf.lengthScale.^2, 1, m);
dist = 0;
for i=1:m
dist = dist + 2.*pi./gp_period.*sin(2.*pi.*bsxfun(@minus,x(:,i),x(:,i)')./gp_period).*bsxfun(@minus,x(:,i),x(:,i)').*s(i);
end
D = Cdm.*dist;
ii1=ii1+1;
DKff{ii1} = D;
else
% In the case ARD is used
for i=i1
s = 1./gpcf.lengthScale(i).^2; % set the length
dist = 2.*pi./gp_period.*sin(2.*pi.*bsxfun(@minus,x(:,i),x(:,i)')./gp_period).*bsxfun(@minus,x(:,i),x(:,i)');
D = Cdm.*s.*dist;
ii1=ii1+1;
DKff{ii1} = D;
end
end
end
end
% Evaluate the gradient of non-symmetric covariance (e.g. K_fu)
elseif nargin == 3 || isempty(mask)
if size(x,2) ~= size(x2,2)
error('gpcf_periodic -> _ghyper: The number of columns in x and x2 has to be the same. ')
end
K = gpcf.fh.cov(gpcf, x, x2);
ii1=0;
if ~isempty(gpcf.p.magnSigma2) && (~savememory || all(i1==1))
ii1=1;
DKff{ii1} = K;
end
if isfield(gpcf,'metric')
error('Covariance function not compatible with metrics');
else
if isfield(gpcf,'selectedVariables')
x = x(:,gpcf.selectedVariables);
x2 = x2(:,gpcf.selectedVariables);
end
[n, m] =size(x);
if ~savememory
i1=1:m;
else
if i1==1
DKff=DKff{1};
return
end
i1=i1-1;
end
% Evaluate help matrix for calculations of derivatives with respect to the lengthScale
if ~isempty(gpcf.p.lengthScale) && (~savememory || all(i1 <= length(gpcf.lengthScale)))
if length(gpcf.lengthScale) == 1
% In the case of an isotropic PERIODIC
s = 1./gpcf.lengthScale.^2;
dist = 0; dist2 = 0;
for i=1:m
dist = dist + 2.*sin(pi.*bsxfun(@minus,x(:,i),x2(:,i)')./gp_period).^2;
end
DK_l = 2.*s.*K.*dist;
ii1=ii1+1;
DKff{ii1} = DK_l;
else
% In the case ARD is used
for i=i1
s = 1./gpcf.lengthScale(i).^2; % set the length
dist = 2.*sin(pi.*bsxfun(@minus,x(:,i),x2(:,i)')./gp_period);
DK_l = 2.*s.*K.*dist.^2;
ii1=ii1+1;
DKff{ii1} = DK_l;
end
end
end
if savememory
if length(DKff) == 1
DKff=DKff{1};
return
end
i1=i1-length(gpcf.lengthScale);
end
if gpcf.decay == 1 && (~savememory || all(i1 <= length(gpcf.lengthScale_sexp)))
% Evaluate help matrix for calculations of derivatives with
% respect to the lengthScale_sexp
if length(gpcf.lengthScale_sexp) == 1
% In the case of an isotropic PERIODIC
s = 1./gpcf.lengthScale_sexp.^2;
dist = 0; dist2 = 0;
for i=1:m
dist = dist + bsxfun(@minus,x(:,i),x2(:,i)').^2;
end
DK_l = s.*K.*dist;
ii1=ii1+1;
DKff{ii1} = DK_l;
else
% In the case ARD is used
for i=i1
s = 1./gpcf.lengthScale_sexp(i).^2; % set the length
dist = bsxfun(@minus,x(:,i),x2(:,i)');
DK_l = s.*K.*dist.^2;
ii1=ii1+1;
DKff{ii1} = DK_l;
end
end
end
if savememory
if length(DKff) == 1
DKff=DKff{1};
return
end
i1=i1-length(gpcf.lengthScale_sexp);
end
if ~isempty(gpcf.p.period)
% Evaluate help matrix for calculations of derivatives
% with respect to the period
if length(gpcf.lengthScale) == 1
% In the case of an isotropic PERIODIC
s = repmat(1./gpcf.lengthScale.^2, 1, m);
dist = 0; dist2 = 0;
for i=1:m
dist = dist + 2.*pi./gp_period.*sin(2.*pi.*bsxfun(@minus,x(:,i),x2(:,i)')./gp_period).*bsxfun(@minus,x(:,i),x2(:,i)').*s(i);
end
DK_l = K.*dist;
ii1=ii1+1;
DKff{ii1} = DK_l;
else
% In the case ARD is used
for i=i1
s = 1./gpcf.lengthScale(i).^2; % set the length
dist = 2.*pi./gp_period.*sin(2.*pi.*bsxfun(@minus,x(:,i),x2(:,i)')./gp_period).*bsxfun(@minus,x(:,i),x2(:,i)');
DK_l = s.*K.*dist;
ii1=ii1+1;
DKff{ii1} = DK_l;
end
end
end
end
% Evaluate: DKff{1} = d mask(Kff,I) / d magnSigma2
% DKff{2...} = d mask(Kff,I) / d lengthScale etc.
elseif nargin == 4 || nargin == 5
[n, m] =size(x);
if isfield(gpcf,'metric')
error('Covariance function not compatible with metrics');
else
ii1=0;
if ~isempty(gpcf.p.magnSigma2) && (~savememory || all(i1==1))
ii1=1;
DKff{ii1} = gpcf.fh.trvar(gpcf, x); % d mask(Kff,I) / d magnSigma2
end
for i2=1:length(gpcf.lengthScale)
ii1 = ii1+1;
DKff{ii1} = 0; % d mask(Kff,I) / d lengthScale
end
if gpcf.decay == 1
for i2=1:length(gpcf.lengthScale_sexp)
ii1 = ii1+1;
DKff{ii1} = 0; % d mask(Kff,I) / d lengthScale_sexp
end
end
if ~isempty(gpcf.p.period)
ii1 = ii1+1; % d mask(Kff,I) / d period
DKff{ii1} = 0;
end
end
end
if savememory
DKff=DKff{1};
end
end
function DKff = gpcf_periodic_ginput(gpcf, x, x2, i1)
%GPCF_PERIODIC_GINPUT Evaluate gradient of covariance function with
% respect to x
%
% Description
% DKff = GPCF_PERIODIC_GINPUT(GPCF, X) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_PERIODIC_GINPUT(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_PERIODIC_GINPUT(GPCF, X, X2, i) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2), or k(X,X) if X2 is empty, with respect to ith
% covariate in X. This subfunction is needed when using memory
% save option in gp_set.
%
% See also
% GPCF_PERIODIC_PAK, GPCF_PERIODIC_UNPAK, GPCF_PERIODIC_LP, GP_G
[n, m] =size(x);
gp_period=gpcf.period;
ii1 = 0;
if length(gpcf.lengthScale) == 1
% In the case of an isotropic PERIODIC
s = repmat(1./gpcf.lengthScale.^2, 1, m);
%gp_period = repmat(1./gp_period, 1, m);
else
s = 1./gpcf.lengthScale.^2;
end
if gpcf.decay == 1
if length(gpcf.lengthScale_sexp) == 1
% In the case of an isotropic PERIODIC
s_sexp = repmat(1./gpcf.lengthScale_sexp.^2, 1, m);
else
s_sexp = 1./gpcf.lengthScale_sexp.^2;
end
end
if nargin<4
i1=1:m;
else
% Use memory save option
if i1==0
% Return number of covariates
if isfield(gpcf,'selectedVariables')
DKff=length(gpcf.selectedVariables);
else
DKff=m;
end
return
end
end
if nargin == 2 || isempty(x2)
K = gpcf.fh.trcov(gpcf, x);
if isfield(gpcf,'metric')
error('Covariance function not compatible with metrics');
else
for i=i1
for j = 1:n
DK = zeros(size(K));
DK(j,:) = -s(i).*2.*pi./gp_period.*sin(2.*pi.*bsxfun(@minus,x(j,i),x(:,i)')./gp_period);
if gpcf.decay == 1
DK(j,:) = DK(j,:)-s_sexp(i).*bsxfun(@minus,x(j,i),x(:,i)');
end
DK = DK + DK';
DK = DK.*K; % dist2 = dist2 + dist2' - diag(diag(dist2));
ii1 = ii1 + 1;
DKff{ii1} = DK;
end
end
end
elseif nargin == 3
K = gpcf.fh.cov(gpcf, x, x2);
if isfield(gpcf,'metric')
error('Covariance function not compatible with metrics');
else
ii1 = 0;
for i=i1
for j = 1:n
DK= zeros(size(K));
if gpcf.decay == 1
DK(j,:) = -s(i).*2.*pi./gp_period.*sin(2.*pi.*bsxfun(@minus,x(j,i),x2(:,i)')./gp_period)-s_sexp(i).*bsxfun(@minus,x(j,i),x2(:,i)');
else
DK(j,:) = -s(i).*2.*pi./gp_period.*sin(2.*pi.*bsxfun(@minus,x(j,i),x2(:,i)')./gp_period);
end
DK = DK.*K;
ii1 = ii1 + 1;
DKff{ii1} = DK;
end
end
end
end
end
function C = gpcf_periodic_cov(gpcf, x1, x2)
%GP_PERIODIC_COV Evaluate covariance matrix between two input vectors
%
% Description
% C = GP_PERIODIC_COV(GP, TX, X) takes in covariance function
% of a Gaussian process GP and two matrixes TX and X that
% contain input vectors to GP. Returns covariance matrix C.
% Every element ij of C contains covariance between inputs i
% in TX and j in X. This is a mandatory subfunction used for
% example in prediction and energy computations.
%
% See also
% GPCF_PERIODIC_TRCOV, GPCF_PERIODIC_TRVAR, GP_COV, GP_TRCOV
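%
% A minimal usage sketch (illustrative only; assumes x1 and x2 are
% n1-by-m and n2-by-m input matrices and that the structure was
% created with gpcf_periodic):
%   gpcf = gpcf_periodic('period', 1, 'lengthScale', 0.5);
%   C = gpcf.fh.cov(gpcf, x1, x2);   % n1-by-n2 covariance matrix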
if isempty(x2)
x2=x1;
end
% [n1,m1]=size(x1);
% [n2,m2]=size(x2);
gp_period=gpcf.period;
if size(x1,2)~=size(x2,2)
error('the number of columns of X1 and X2 have to be the same')
end
if isfield(gpcf,'metric')
error('Covariance function not compatible with metrics');
else
if isfield(gpcf,'selectedVariables')
x1 = x1(:,gpcf.selectedVariables);
x2 = x2(:,gpcf.selectedVariables);
end
[n1,m1]=size(x1);
[n2,m2]=size(x2);
C=zeros(n1,n2);
ma2 = gpcf.magnSigma2;
% Evaluate the covariance
if ~isempty(gpcf.lengthScale)
s = 1./gpcf.lengthScale.^2;
if gpcf.decay == 1
s_sexp = 1./gpcf.lengthScale_sexp.^2;
end
if m1==1 && m2==1
dd = bsxfun(@minus,x1,x2');
dist=2.*sin(pi.*dd./gp_period).^2.*s;
if gpcf.decay == 1
dist = dist + dd.^2.*s_sexp./2;
end
else
% If ARD is not used make s a vector of
% equal elements
if isscalar(s)
s = repmat(s,1,m1);
end
if gpcf.decay == 1
if isscalar(s_sexp)
s_sexp = repmat(s_sexp,1,m1);
end
end
dist=zeros(n1,n2);
for j=1:m1
dd = bsxfun(@minus,x1(:,j),x2(:,j)');
dist = dist + 2.*sin(pi.*dd./gp_period).^2.*s(:,j);
if gpcf.decay == 1
dist = dist +dd.^2.*s_sexp(:,j)./2;
end
end
end
dist(dist<eps) = 0;
C = ma2.*exp(-dist);
end
end
end
function C = gpcf_periodic_trcov(gpcf, x)
%GP_PERIODIC_TRCOV Evaluate training covariance matrix of inputs
%
% Description
% C = GP_PERIODIC_TRCOV(GP, TX) takes in covariance function
% of a Gaussian process GP and matrix TX that contains
% training input vectors. Returns covariance matrix C. Every
% element ij of C contains covariance between inputs i and j
% in TX. This is a mandatory subfunction used for example in
% prediction and energy computations.
%
% See also
% GPCF_PERIODIC_COV, GPCF_PERIODIC_TRVAR, GP_COV, GP_TRCOV
if isfield(gpcf,'metric')
error('Covariance function not compatible with metrics');
else
% Try to use the C-implementation
C=trcov(gpcf, x);
% C = NaN;
if isnan(C)
% If there wasn't C-implementation do here
if isfield(gpcf,'selectedVariables')
x = x(:,gpcf.selectedVariables);
end
[n, m] =size(x);
gp_period=gpcf.period;
s = 1./(gpcf.lengthScale);
s2 = s.^2;
if isscalar(s)
s2 = repmat(s2,1,m);
gp_period = repmat(gp_period,1,m);
end
if gpcf.decay == 1
s_sexp = 1./(gpcf.lengthScale_sexp);
s_sexp2 = s_sexp.^2;
if isscalar(s_sexp)
s_sexp2 = repmat(s_sexp2,1,m);
end
end
ma = gpcf.magnSigma2;
C = zeros(n,n);
for ii1=1:n-1
d = zeros(n-ii1,1);
col_ind = ii1+1:n;
for ii2=1:m
d = d+2.*s2(ii2).*sin(pi.*(x(col_ind,ii2)-x(ii1,ii2))./gp_period(ii2)).^2;
if gpcf.decay == 1
d=d+s_sexp2(ii2)./2.*(x(col_ind,ii2)-x(ii1,ii2)).^2;
end
end
C(col_ind,ii1) = d;
end
C(C<eps) = 0;
C = C+C';
C = ma.*exp(-C);
end
end
end
function C = gpcf_periodic_trvar(gpcf, x)
%GP_PERIODIC_TRVAR Evaluate training variance vector
%
% Description
% C = GP_PERIODIC_TRVAR(GPCF, TX) takes in covariance function
% of a Gaussian process GPCF and matrix TX that contains
% training inputs. Returns variance vector C. Every
% element i of C contains variance of input i in TX. This is a
% mandatory subfunction used for example in prediction and
% energy computations.
%
% See also
% GPCF_PERIODIC_COV, GP_COV, GP_TRCOV
[n, m] =size(x);
C = ones(n,1)*gpcf.magnSigma2;
C(C<eps)=0;
end
function reccf = gpcf_periodic_recappend(reccf, ri, gpcf)
%RECAPPEND Record append
%
% Description
% RECCF = GPCF_PERIODIC_RECAPPEND(RECCF, RI, GPCF) takes a
% covariance function record structure RECCF, record index RI
% and covariance function structure GPCF with the current MCMC
% samples of the parameters. Returns RECCF which contains all
% the old samples and the current samples from GPCF. This
% subfunction is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC and GP_MC -> RECAPPEND
if nargin == 2
% Initialize the record
reccf.type = 'gpcf_periodic';
% Initialize parameters
reccf.lengthScale= [];
reccf.magnSigma2 = [];
reccf.lengthScale_sexp = [];
reccf.period = [];
% Set the function handles
reccf.fh.pak = @gpcf_periodic_pak;
reccf.fh.unpak = @gpcf_periodic_unpak;
reccf.fh.e = @gpcf_periodic_lp;
reccf.fh.lpg = @gpcf_periodic_lpg;
reccf.fh.cfg = @gpcf_periodic_cfg;
reccf.fh.cov = @gpcf_periodic_cov;
reccf.fh.trcov = @gpcf_periodic_trcov;
reccf.fh.trvar = @gpcf_periodic_trvar;
reccf.fh.recappend = @gpcf_periodic_recappend;
reccf.p=[];
reccf.p.lengthScale=[];
reccf.p.magnSigma2=[];
if ri.decay == 1
reccf.p.lengthScale_sexp=[];
if ~isempty(ri.p.lengthScale_sexp)
reccf.p.lengthScale_sexp = ri.p.lengthScale_sexp;
end
end
reccf.p.period=[];
if ~isempty(ri.p.period)
reccf.p.period= ri.p.period;
end
if isfield(ri.p,'lengthScale') && ~isempty(ri.p.lengthScale)
reccf.p.lengthScale = ri.p.lengthScale;
end
if ~isempty(ri.p.magnSigma2)
reccf.p.magnSigma2 = ri.p.magnSigma2;
end
else
% Append to the record
gpp = gpcf.p;
% record lengthScale
reccf.lengthScale(ri,:)=gpcf.lengthScale;
if isfield(gpp,'lengthScale') && ~isempty(gpp.lengthScale)
reccf.p.lengthScale = gpp.lengthScale.fh.recappend(reccf.p.lengthScale, ri, gpcf.p.lengthScale);
end
% record magnSigma2
reccf.magnSigma2(ri,:)=gpcf.magnSigma2;
if isfield(gpp,'magnSigma2') && ~isempty(gpp.magnSigma2)
reccf.p.magnSigma2 = gpp.magnSigma2.fh.recappend(reccf.p.magnSigma2, ri, gpcf.p.magnSigma2);
end
% record lengthScale_sexp
if ~isempty(gpcf.lengthScale_sexp) && gpcf.decay == 1
reccf.lengthScale_sexp(ri,:)=gpcf.lengthScale_sexp;
if isfield(gpp,'lengthScale_sexp') && ~isempty(gpp.lengthScale_sexp)
reccf.p.lengthScale_sexp = gpp.lengthScale_sexp.fh.recappend(reccf.p.lengthScale_sexp, ri, gpcf.p.lengthScale_sexp);
end
end
% record period
reccf.period(ri,:)=gpcf.period;
if isfield(gpp,'period') && ~isempty(gpp.period)
reccf.p.period = gpp.period.fh.recappend(reccf.p.period, ri, gpcf.p.period);
end
% record decay
if ~isempty(gpcf.decay)
reccf.decay(ri,:)=gpcf.decay;
end
end
end
|
github
|
lcnhappe/happe-master
|
metric_euclidean.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/metric_euclidean.m
| 15,487 |
utf_8
|
77201668d68ae283b913304f9a6bc846
|
function metric = metric_euclidean(varargin)
%METRIC_EUCLIDEAN A Euclidean metric function
%
% Description
% METRIC = METRIC_EUCLIDEAN('PARAM1',VALUE1,'PARAM2',VALUE2,...)
% creates a Euclidean metric function structure in which the
% named parameters have the specified values. Either
% 'components' or 'deltadist' has to be specified. Any
% unspecified parameters are set to default values.
%
% METRIC = METRIC_EUCLIDEAN(METRIC,'PARAM1',VALUE1,'PARAM2',VALUE2,...)
% modifies a metric function structure with the named parameters
% altered with the specified values.
%
% Parameters for Euclidean metric function [default]
% components - cell array of vectors specifying which
% inputs are grouped together with a same
% scaling parameter. For example, the
% component specification {[1 2] [3]}
% means that the distance between 3
% dimensional vectors is computed as
% r = sqrt((r_1^2 + r_2^2)/l_1^2 + r_3^2/l_2^2),
% where r_i is the distance along input
% dimension i, and l_1 and l_2 are the length scales of the
% corresponding component sets. If
% 'components' is not specified, but
% 'deltadist' is specified, then default
% is {1 ... length(deltadist)}
% deltadist - indicator vector telling which component sets
% are handled using the delta distance
% (0 if x=x', and 1 otherwise). Default is
% false for all component sets.
% lengthScale - lengthscales for each input component set
% Default is 1 for each set
% lengthScale_prior - prior for lengthScales [prior_unif]
%
% See also
% GP_SET, GPCF_SEXP
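%
% A minimal construction sketch (illustrative values; assumes
% 3-dimensional inputs where the first two dimensions share a
% length scale):
%   metric = metric_euclidean('components', {[1 2] [3]}, ...
%                             'lengthScale', [1.2 0.9]);
%   % the metric can then be plugged into a covariance function, e.g.
%   % gpcf = gpcf_sexp('metric', metric);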
% Copyright (c) 2008 Jouni Hartikainen
% Copyright (c) 2008 Jarno Vanhatalo
% Copyright (c) 2010 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'METRIC_EUCLIDEAN';
ip.addOptional('metric', [], @isstruct);
ip.addParamValue('components',[], @(x) isempty(x) || iscell(x));
ip.addParamValue('deltadist',[], @(x) isvector(x));
ip.addParamValue('lengthScale',[], @(x) isvector(x) && all(x>0));
ip.addParamValue('lengthScale_prior',prior_unif, ...
@(x) isstruct(x) || isempty(x));
ip.parse(varargin{:});
metric=ip.Results.metric;
if isempty(metric)
% Initialize a Gaussian process
init=true;
else
% Modify a Gaussian process
if ~isfield(metric,'type') || ~isequal(metric.type,'metric_euclidean')
error('First argument does not seem to be a metric structure')
end
init=false;
end
if init
% Type
metric.type = 'metric_euclidean';
end
% Components
if init || ~ismember('components',ip.UsingDefaults)
metric.components = ip.Results.components;
end
% Deltadist
if init || ~ismember('deltadist',ip.UsingDefaults)
metric.deltadist = ip.Results.deltadist;
end
% Components+Deltadist check and defaults
if isempty(metric.components) && isempty(metric.deltadist)
error('Either ''components'' or ''deltadist'' has to be specified')
elseif isempty(metric.components)
metric.components=num2cell(1:length(metric.deltadist));
elseif isempty(metric.deltadist)
metric.deltadist = false(1,length(metric.components));
end
% Lengthscale
if init || ~ismember('lengthScale',ip.UsingDefaults)
metric.lengthScale = ip.Results.lengthScale;
if isempty(metric.lengthScale)
metric.lengthScale = ones(1,length(metric.components));
end
end
% Prior for lengthscale
if init || ~ismember('lengthScale_prior',ip.UsingDefaults)
metric.p=[];
metric.p.lengthScale = ip.Results.lengthScale_prior;
end
if init
% Set the function handles to the subfunctions
metric.fh.pak = @metric_euclidean_pak;
metric.fh.unpak = @metric_euclidean_unpak;
metric.fh.lp = @metric_euclidean_lp;
metric.fh.lpg = @metric_euclidean_lpg;
metric.fh.dist = @metric_euclidean_dist;
metric.fh.distg = @metric_euclidean_distg;
metric.fh.ginput = @metric_euclidean_ginput;
metric.fh.recappend = @metric_euclidean_recappend;
end
end
function [w s] = metric_euclidean_pak(metric)
%METRIC_EUCLIDEAN_PAK Combine GP covariance function
% parameters into one vector.
%
% Description
% W = METRIC_EUCLIDEAN_PAK(GPCF) takes a covariance function
% structure GPCF and combines the covariance function
% parameters and their hyperparameters into a single row
% vector W and takes a logarithm of the covariance function
% parameters.
%
% w = [ log(metric.lengthScale(:))
% (hyperparameters of metric.lengthScale)]'
%
% See also
% METRIC_EUCLIDEAN_UNPAK
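%
% Illustrative round trip (assumes a metric created with
% metric_euclidean):
%   [w, s] = metric.fh.pak(metric);            % w starts with log(lengthScale)
%   [metric, w] = metric.fh.unpak(metric, w);  % restores the parameters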
w = []; s = {};
if ~isempty(metric.p.lengthScale)
w = log(metric.lengthScale);
if numel(metric.lengthScale)>1
s = [s; sprintf('log(metric.lengthScale x %d)',numel(metric.lengthScale))];
else
s = [s; 'log(metric.lengthScale)'];
end
% Hyperparameters of lengthScale
[wh sh] = metric.p.lengthScale.fh.pak(metric.p.lengthScale);
w = [w wh];
s = [s; sh];
end
end
function [metric, w] = metric_euclidean_unpak(metric, w)
%METRIC_EUCLIDEAN_UNPAK Separate metric parameter vector into components
%
% Description
% [METRIC, W] = METRIC_EUCLIDEAN_UNPAK(METRIC, W) takes a
% metric structure METRIC and a parameter vector W, and returns
% a metric structure identical to the input, except that the
% metric parameters have been set to the values in W. Deletes
% the values set to METRIC from W and returns the modified W.
%
% The metric parameters are transformed via exp
% before setting them into the structure.
%
% See also
% METRIC_EUCLIDEAN_PAK
%
if ~isempty(metric.p.lengthScale)
i2=length(metric.lengthScale);
i1=1;
metric.lengthScale = exp(w(i1:i2));
w = w(i2+1:end);
% Hyperparameters of lengthScale
[p, w] = metric.p.lengthScale.fh.unpak(metric.p.lengthScale, w);
metric.p.lengthScale = p;
end
end
function lp = metric_euclidean_lp(metric)
%METRIC_EUCLIDEAN_LP Evaluate the log prior of metric parameters
%
% Description
% LP = METRIC_EUCLIDEAN_LP(METRIC) takes a metric structure
% METRIC and returns log(p(th)), where th collects the
% parameters.
%
% See also
% METRIC_EUCLIDEAN_PAK, METRIC_EUCLIDEAN_UNPAK, METRIC_EUCLIDEAN_G, GP_E
%
% Evaluate the prior contribution to the error. The sampled parameters
% live in the space W = log(w), where w are the actual parameters.
% Since the errors are evaluated in the W-space, we also need to take
% into account the Jacobian of the transformation W -> w = exp(W).
% See Gelman et al. (2004), Bayesian Data Analysis, second edition, p. 24.
if ~isempty(metric.p.lengthScale)
lp = metric.p.lengthScale.fh.lp(metric.lengthScale, metric.p.lengthScale) + sum(log(metric.lengthScale));
else
lp=0;
end
end
function lpg = metric_euclidean_lpg(metric)
%METRIC_EUCLIDEAN_LPG d log(prior)/dth of the metric parameters th
%
% Description
% LPG = METRIC_EUCLIDEAN_LPG(METRIC) takes a likelihood
% structure METRIC and returns d log(p(th))/dth, where th
% collects the parameters.
%
% See also
% METRIC_EUCLIDEAN_PAK, METRIC_EUCLIDEAN_UNPAK, METRIC_EUCLIDEAN, GP_E
%
% Evaluate the prior contribution of gradient with respect to lengthScale
if ~isempty(metric.p.lengthScale)
i1=1;
lll = length(metric.lengthScale);
lpgs = metric.p.lengthScale.fh.lpg(metric.lengthScale, metric.p.lengthScale);
lpg(i1:i1-1+lll) = lpgs(1:lll).*metric.lengthScale + 1;
lpg = [lpg lpgs(lll+1:end)];
end
end
function gdist = metric_euclidean_distg(metric, x, x2, mask)
%METRIC_EUCLIDEAN_DISTG Evaluate the gradient of the metric function
%
% Description
% DISTG = METRIC_EUCLIDEAN_DISTG(METRIC, X) takes a metric
% structure METRIC together with a matrix X of input
% vectors and return the gradient matrices GDIST and
% GPRIOR_DIST for each parameter.
%
% DISTG = METRIC_EUCLIDEAN_DISTG(METRIC, X, X2) forms the
% gradient matrices between two input vectors X and X2.
%
% DISTG = METRIC_EUCLIDEAN_DISTG(METRIC, X, X2, MASK) forms
% the gradients for masked covariances matrices used in sparse
% approximations.
%
% See also
% METRIC_EUCLIDEAN_PAK, METRIC_EUCLIDEAN_UNPAK, METRIC_EUCLIDEAN, GP_E
%
gdist=[];
components = metric.components;
n = size(x,1);
m = length(components);
% NOTE! Here we have already taken into account that the parameters
% are transformed through log() and thus dK/dlog(p) = p * dK/dp
if ~isempty(metric.p.lengthScale)
if nargin <= 3
if nargin == 2
x2 = x;
end
ii1=0;
dist = 0;
distc = cell(1,m);
% Compute the distances for each component set
for i=1:m
if length(metric.lengthScale)==1
s=1./metric.lengthScale.^2;
else
s=1./metric.lengthScale(i).^2;
end
distc{i} = 0;
for j = 1:length(components{i})
if metric.deltadist(i)
distc{i} = distc{i} + double(bsxfun(@ne,x(:,components{i}(j)),x2(:,components{i}(j))'));
else
distc{i} = distc{i} + bsxfun(@minus,x(:,components{i}(j)),x2(:,components{i}(j))').^2;
end
end
distc{i} = distc{i}.*s;
% Accumulate to the total distance
dist = dist + distc{i};
end
dist = sqrt(dist);
% Loop through component sets
if length(metric.lengthScale)==1
D = -distc{1};
D(dist~=0) = D(dist~=0)./dist(dist~=0);
ii1 = ii1+1;
gdist{ii1} = D;
else
for i=1:m
D = -distc{i};
ind = dist~=0;
D(ind) = D(ind)./dist(ind);
ii1 = ii1+1;
gdist{ii1} = D;
end
end
% $$$ elseif nargin == 3
% $$$ if size(x,2) ~= size(x2,2)
% $$$ error('metric_euclidean -> _ghyper: The number of columns in x and x2 has to be the same. ')
% $$$ end
elseif nargin == 4
gdist = cell(1,length(metric.lengthScale));
end
% Evaluate the prior contribution of gradient with respect to lengthScale
if ~isempty(metric.p.lengthScale)
i1=1;
lll = length(metric.lengthScale);
gg = -metric.p.lengthScale.fh.lpg(metric.lengthScale, metric.p.lengthScale);
gprior(i1:i1-1+lll) = gg(1:lll).*metric.lengthScale - 1;
gprior = [gprior gg(lll+1:end)];
end
end
end
function dist = metric_euclidean_dist(metric, x1, x2)
%METRIC_EUCLIDEAN_DIST Compute the Euclidean distance between
% one or two matrices.
%
% Description
% DIST = METRIC_EUCLIDEAN_DIST(METRIC, X) takes a metric
% structure METRIC together with a matrix X of input
% vectors and calculates the Euclidean distance matrix DIST.
%
% DIST = METRIC_EUCLIDEAN_DIST(METRIC, X1, X2) takes a
% metric structure METRIC together with matrices X1 and
% X2 of input vectors and calculates the Euclidean distance
% matrix DIST.
%
% See also
% METRIC_EUCLIDEAN_PAK, METRIC_EUCLIDEAN_UNPAK, METRIC_EUCLIDEAN, GP_E
%
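% Illustrative example (assumes a metric with components {[1 2] [3]}
% and length scales [l_1 l_2]):
%   D = metric.fh.dist(metric, x1, x2);  % n1-by-n2 distance matrix
%   % where D_ij = sqrt((r_1^2 + r_2^2)/l_1^2 + r_3^2/l_2^2)
%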
if (nargin == 2 || isempty(x2))
% use fast c-code for self-distance
x2=x1;
% force deltadist to be logical for simplified c-code
metric.deltadist=logical(metric.deltadist);
dist = dist_euclidean(metric,x1);
if ~any(isnan(dist))
% if c-code was available, result is not NaN
return
end
end
[n1,m1]=size(x1);
[n2,m2]=size(x2);
if m1~=m2
error('the number of columns of X1 and X2 have to be the same')
end
components = metric.components;
m = length(components);
dist = 0;
s=1./metric.lengthScale.^2;
if m>numel(s)
s=repmat(s,1,m);
end
for i=1:m
for j = 1:length(components{i})
if metric.deltadist(i)
dist = dist + s(i).*double(bsxfun(@ne,x1(:,components{i}(j)),x2(:,components{i}(j))'));
else
dist = dist + s(i).*bsxfun(@minus,x1(:,components{i}(j)),x2(:,components{i}(j))').^2;
end
end
end
dist=sqrt(dist); % euclidean distance
end
function [ginput, gprior_input] = metric_euclidean_ginput(metric, x1, x2)
%METRIC_EUCLIDEAN_GINPUT Compute the gradient of the
% Euclidean distance function with
% respect to input.
ii1 = 0;
components = metric.components;
if nargin == 2 || isempty(x2)
x2=x1;
end
[n1,m1]=size(x1);
[n2,m2]=size(x2);
if m1~=m2
error('the number of columns of X1 and X2 have to be the same')
end
s = 1./metric.lengthScale.^2;
dist = 0;
for i=1:length(components)
for j = 1:length(components{i})
if metric.deltadist(i)
dist = dist + s(i).*double(bsxfun(@ne,x1(:,components{i}(j)),x2(:,components{i}(j))'));
else
dist = dist + s(i).*bsxfun(@minus,x1(:,components{i}(j)),x2(:,components{i}(j))').^2;
end
end
end
dist = sqrt(dist);
for i=1:m1
for j = 1:n1
DK = zeros(n1,n2);
for k = 1:length(components)
if ismember(i,components{k})
if metric.deltadist(k)
DK(j,:) = DK(j,:)+s(k).*double(bsxfun(@ne,x1(j,i),x2(:,i)'));
else
DK(j,:) = DK(j,:)+s(k).*bsxfun(@minus,x1(j,i),x2(:,i)');
end
end
end
if nargin == 2
DK = DK + DK';
end
DK(dist~=0) = DK(dist~=0)./dist(dist~=0);
ii1 = ii1 + 1;
ginput{ii1} = DK;
gprior_input(ii1) = 0;
end
end
end
function recmetric = metric_euclidean_recappend(recmetric, ri, metric)
%RECAPPEND Record append
%
% Description
% RECMETRIC = METRIC_EUCLIDEAN_RECAPPEND(RECMETRIC, RI,
% METRIC) takes old metric function record RECMETRIC, record
% index RI and metric function structure. Appends the
% parameters of METRIC to the RECMETRIC in the ri'th place.
%
% See also
% GP_MC and GP_MC -> RECAPPEND
% Initialize record
if nargin == 2
recmetric.type = 'metric_euclidean';
metric.components = recmetric.components;
% Initialize parameters
recmetric.lengthScale = [];
% Set the function handles
recmetric.fh.pak = @metric_euclidean_pak;
recmetric.fh.unpak = @metric_euclidean_unpak;
recmetric.fh.lp = @metric_euclidean_lp;
recmetric.fh.lpg = @metric_euclidean_lpg;
recmetric.fh.dist = @metric_euclidean_dist;
recmetric.fh.distg = @metric_euclidean_distg;
recmetric.fh.ginput = @metric_euclidean_ginput;
recmetric.fh.recappend = @metric_euclidean_recappend;
return
end
mp = metric.p;
% record parameters
if ~isempty(metric.lengthScale)
recmetric.lengthScale(ri,:)=metric.lengthScale;
recmetric.p.lengthScale = metric.p.lengthScale.fh.recappend(recmetric.p.lengthScale, ri, metric.p.lengthScale);
elseif ri==1
recmetric.lengthScale=[];
end
end
|
github
|
lcnhappe/happe-master
|
lik_negbinztr.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/lik_negbinztr.m
| 30,673 |
utf_8
|
5795ceaf5a6a09a0df89fbf0b48d3023
|
function lik = lik_negbinztr(varargin)
%LIK_NEGBINZTR Create a zero-truncated Negative-binomial likelihood structure
%
% Description
% LIK = LIK_NEGBINZTR('PARAM1',VALUE1,'PARAM2',VALUE2,...)
% creates a zero-truncated Negative-binomial likelihood structure
% in which the named parameters have the specified values. Any
% unspecified parameters are set to default values.
%
% Zero-truncated Negative-binomial can be used as a part of a hurdle model.
%
% LIK = LIK_NEGBINZTR(LIK,'PARAM1',VALUE1,'PARAM2',VALUE2,...)
% modifies a likelihood structure with the named parameters
% altered with the specified values.
%
% Parameters for zero-truncated Negative-binomial likelihood [default]
% disper - dispersion parameter r [10]
% disper_prior - prior for disper [prior_logunif]
%
% Note! If the prior is 'prior_fixed' then the parameter in
% question is considered fixed and it is not handled in
% optimization, grid integration, MCMC etc.
%
% The likelihood is defined as follows:
% __ n
% p(y|f, z) = || i=1 [ (r/(r+mu_i))^r * gamma(r+y_i)
% / ( gamma(r)*gamma(y_i+1) )
% * (mu_i/(r+mu_i))^y_i / (1-(r/(r+mu_i))^r)]
%
% where mu_i = z_i*exp(f_i) and r is the dispersion parameter. z
% is a vector of expected counts and f the latent value vector
% whose components are transformed to relative risk exp(f_i).
% The last term (1-(r/(r+mu_i))^r) normalizes the truncated
% distribution.
%
% When using the zero-truncated Negbin likelihood you need to
% give the vector z as an extra parameter to each function that
% requires also y. For example, you should call gpla_e as
% follows: gpla_e(w, gp, x, y, 'z', z)
%
% See also
% GP_SET, LIK_*, PRIOR_*
%
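% A minimal usage sketch (illustrative only; y are the observed counts
% and z the expected counts, both n-by-1 vectors):
%   lik = lik_negbinztr('disper', 5);
%   gp  = gp_set('lik', lik, 'cf', gpcf_sexp, 'latent_method', 'Laplace');
%   e   = gpla_e(gp_pak(gp), gp, x, y, 'z', z);
%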
% Copyright (c) 2007-2010 Jarno Vanhatalo & Jouni Hartikainen
% Copyright (c) 2010-2011 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'LIK_NEGBINZTR';
ip.addOptional('lik', [], @isstruct);
ip.addParamValue('disper',10, @(x) isscalar(x) && x>0);
ip.addParamValue('disper_prior',prior_logunif(), @(x) isstruct(x) || isempty(x));
ip.parse(varargin{:});
lik=ip.Results.lik;
if isempty(lik)
init=true;
lik.type = 'Negbinztr';
else
if ~isfield(lik,'type') || ~isequal(lik.type,'Negbinztr')
error('First argument does not seem to be a valid likelihood function structure')
end
init=false;
end
% Initialize parameters
if init || ~ismember('disper',ip.UsingDefaults)
lik.disper = ip.Results.disper;
end
% Initialize prior structure
if init
lik.p=[];
end
if init || ~ismember('disper_prior',ip.UsingDefaults)
lik.p.disper=ip.Results.disper_prior;
end
if init
% Set the function handles to the subfunctions
lik.fh.pak = @lik_negbinztr_pak;
lik.fh.unpak = @lik_negbinztr_unpak;
lik.fh.lp = @lik_negbinztr_lp;
lik.fh.lpg = @lik_negbinztr_lpg;
lik.fh.ll = @lik_negbinztr_ll;
lik.fh.llg = @lik_negbinztr_llg;
lik.fh.llg2 = @lik_negbinztr_llg2;
lik.fh.llg3 = @lik_negbinztr_llg3;
lik.fh.tiltedMoments = @lik_negbinztr_tiltedMoments;
lik.fh.siteDeriv = @lik_negbinztr_siteDeriv;
lik.fh.upfact = @lik_negbinztr_upfact;
lik.fh.predy = @lik_negbinztr_predy;
lik.fh.predprcty = @lik_negbinztr_predprcty;
lik.fh.invlink = @lik_negbinztr_invlink;
lik.fh.recappend = @lik_negbinztr_recappend;
end
end
function [w,s] = lik_negbinztr_pak(lik)
%LIK_NEGBINZTR_PAK Combine likelihood parameters into one vector.
%
% Description
% W = LIK_NEGBINZTR_PAK(LIK) takes a likelihood structure LIK and
% combines the parameters into a single row vector W. This is a
% mandatory subfunction used for example in energy and gradient
% computations.
%
% w = log(lik.disper)
%
% See also
% LIK_NEGBINZTR_UNPAK, GP_PAK
w=[];s={};
if ~isempty(lik.p.disper)
w = log(lik.disper);
s = [s; 'log(negbinztr.disper)'];
[wh sh] = lik.p.disper.fh.pak(lik.p.disper);
w = [w wh];
s = [s; sh];
end
end
function [lik, w] = lik_negbinztr_unpak(lik, w)
%LIK_NEGBINZTR_UNPAK Extract likelihood parameters from the vector.
%
% Description
% [LIK, W] = LIK_NEGBINZTR_UNPAK(W, LIK) takes a likelihood
% structure LIK and extracts the parameters from the vector W
% to the LIK structure. This is a mandatory subfunction used
% for example in energy and gradient computations.
%
% Assignment is inverse of
% w = log(lik.disper)
%
% See also
% LIK_NEGBINZTR_PAK, GP_UNPAK
if ~isempty(lik.p.disper)
lik.disper = exp(w(1));
w = w(2:end);
[p, w] = lik.p.disper.fh.unpak(lik.p.disper, w);
lik.p.disper = p;
end
end
function lp = lik_negbinztr_lp(lik, varargin)
%LIK_NEGBINZTR_LP log(prior) of the likelihood parameters
%
% Description
% LP = LIK_NEGBINZTR_LP(LIK) takes a likelihood structure LIK and
% returns log(p(th)), where th collects the parameters. This subfunction
% is needed when there are likelihood parameters.
%
% See also
% LIK_NEGBINZTR_LLG, LIK_NEGBINZTR_LLG3, LIK_NEGBINZTR_LLG2, GPLA_E
% If prior for dispersion parameter, add its contribution
lp=0;
if ~isempty(lik.p.disper)
lp = lik.p.disper.fh.lp(lik.disper, lik.p.disper) +log(lik.disper);
end
end
function lpg = lik_negbinztr_lpg(lik)
%LIK_NEGBINZTR_LPG d log(prior)/dth of the likelihood
% parameters th
%
% Description
% E = LIK_NEGBINZTR_LPG(LIK) takes a likelihood structure LIK and
% returns d log(p(th))/dth, where th collects the parameters. This
% subfunction is needed when there are likelihood parameters.
%
% See also
% LIK_NEGBINZTR_LLG, LIK_NEGBINZTR_LLG3, LIK_NEGBINZTR_LLG2, GPLA_G
lpg=[];
if ~isempty(lik.p.disper)
% Evaluate the gprior with respect to disper
ggs = lik.p.disper.fh.lpg(lik.disper, lik.p.disper);
lpg = ggs(1).*lik.disper + 1;
if length(ggs) > 1
lpg = [lpg ggs(2:end)];
end
end
end
function ll = lik_negbinztr_ll(lik, y, f, z)
%LIK_NEGBINZTR_LL Log likelihood
%
% Description
% LL = LIK_NEGBINZTR_LL(LIK, Y, F, Z) takes a likelihood
% structure LIK, incidence counts Y, expected counts Z, and
% latent values F. Returns the log likelihood, log p(y|f,z).
% This subfunction is needed when using Laplace approximation
% or MCMC for inference with non-Gaussian likelihoods. This
% subfunction is also used in information criteria (DIC, WAIC)
% computations.
%
% See also
% LIK_NEGBINZTR_LLG, LIK_NEGBINZTR_LLG3, LIK_NEGBINZTR_LLG2, GPLA_E
if isempty(z)
error(['lik_negbinztr -> lik_negbinztr_ll: missing z! '...
'Negbinztr likelihood needs the expected number of '...
'occurrences as an extra input z. See, for '...
'example, lik_negbinztr and gpla_e. ']);
end
r = lik.disper;
mu = exp(f).*z;
lp0=r.*(log(r) - log(r+mu));
ll = sum(r.*(log(r) - log(r+mu)) + gammaln(r+y) - gammaln(r) - gammaln(y+1) + y.*(log(mu) - log(r+mu)) -log(1-exp(lp0)));
end
function llg = lik_negbinztr_llg(lik, y, f, param, z)
%LIK_NEGBINZTR_LLG Gradient of the log likelihood
%
% Description
% LLG = LIK_NEGBINZTR_LLG(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, incidence counts Y, expected counts Z and
% latent values F. Returns the gradient of the log likelihood
% with respect to PARAM. At the moment PARAM can be 'param' or
% 'latent'. This subfunction is needed when using Laplace
% approximation or MCMC for inference with non-Gaussian likelihoods.
%
% See also
% LIK_NEGBINZTR_LL, LIK_NEGBINZTR_LLG2, LIK_NEGBINZTR_LLG3, GPLA_E
if isempty(z)
error(['lik_negbinztr -> lik_negbinztr_llg: missing z! '...
'Negbinztr likelihood needs the expected number of '...
'occurrences as an extra input z. See, for '...
'example, lik_negbinztr and gpla_e. ']);
end
mu = exp(f).*z;
r = lik.disper;
switch param
case 'param'
% Derivative using the psi function
llg = sum(1 + log(r./(r+mu)) - (r+y)./(r+mu) + psi(r + y) - psi(r));
% add gradient of the normalization due to the truncation
lp0=r.*(log(r) - log(r+mu));
llg=llg-sum(1./(1 - exp(-lp0)).*(log(r./(mu + r)) - r./(mu + r) + 1));
% correction for the log transformation
llg = llg.*lik.disper;
% $$$ % Derivative using sum formulation
% $$$ llg = 0;
% $$$ for i1 = 1:length(y)
% $$$ llg = llg + log(r/(r+mu(i1))) + 1 - (r+y(i1))/(r+mu(i1));
% $$$ for i2 = 0:y(i1)-1
% $$$ llg = llg + 1 / (i2 + r);
% $$$ end
% $$$ end
% $$$ % correction for the log transformation
% $$$ llg = llg.*lik.disper;
case 'latent'
llg = y - (r+y).*mu./(r+mu);
% add gradient of the normalization due to the truncation
lp0=r.*(log(r) - log(r+mu));
llg = llg -(1./(1-exp(-lp0)).*-r./(mu + r).*mu);
end
end
function llg2 = lik_negbinztr_llg2(lik, y, f, param, z)
%LIK_NEGBINZTR_LLG2 Second gradients of the log likelihood
%
% Description
% LLG2 = LIK_NEGBINZTR_LLG2(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, incidence counts Y, expected counts Z, and
% latent values F. Returns the Hessian of the log likelihood
% with respect to PARAM. At the moment PARAM can be only
% 'latent'. LLG2 is a vector with diagonal elements of the
% Hessian matrix (off diagonals are zero). This subfunction
% is needed when using Laplace approximation or EP for
% inference with non-Gaussian likelihoods.
%
% See also
% LIK_NEGBINZTR_LL, LIK_NEGBINZTR_LLG, LIK_NEGBINZTR_LLG3, GPLA_E
if isempty(z)
error(['lik_negbinztr -> lik_negbinztr_llg2: missing z! '...
'Negbinztr likelihood needs the expected number of '...
'occurrences as an extra input z. See, for '...
'example, lik_negbinztr and gpla_e. ']);
end
mu = exp(f).*z;
r = lik.disper;
switch param
case 'param'
case 'latent'
llg2 = - mu.*(r.^2 + y.*r)./(r+mu).^2;
% add gradient of the normalization due to the truncation
lp0=r.*(log(r) - log(r+mu));
llg2=llg2+...
(r.^2 + r.^2.*exp(-lp0).*(mu-1))./((mu + r).^2.*(exp(-lp0)-1).^2).*mu;
case 'latent+param'
llg2 = (y.*mu - mu.^2)./(r+mu).^2;
% add gradient of the normalization due to the truncation
lp0=r.*(log(r) - log(r+mu));
llg2=llg2+(exp(lp0)./(exp(lp0) - 1).^2 .* (log(r) - log(mu + r) - r.*(1./(mu + r) - 1./r)) .* (-r./(mu + r)) -1./(1 - exp(-lp0)).*-mu./(mu + r).^2).*mu;
% correction due to the log transformation
llg2 = llg2.*lik.disper;
end
end
function llg3 = lik_negbinztr_llg3(lik, y, f, param, z)
%LIK_NEGBINZTR_LLG3 Third gradients of the log likelihood
%
% Description
% LLG3 = LIK_NEGBINZTR_LLG3(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, incidence counts Y, expected counts Z and
% latent values F and returns the third gradients of the log
% likelihood with respect to PARAM. At the moment PARAM can be
% only 'latent'. LLG3 is a vector with third gradients. This
% subfunction is needed when using Laplace approximation for
% inference with non-Gaussian likelihoods.
%
% See also
% LIK_NEGBINZTR_LL, LIK_NEGBINZTR_LLG, LIK_NEGBINZTR_LLG2, GPLA_E, GPLA_G
if isempty(z)
error(['lik_negbinztr -> lik_negbinztr_llg3: missing z! '...
'Negbinztr likelihood needs the expected number of '...
'occurrences as an extra input z. See, for '...
'example, lik_negbinztr and gpla_e. ']);
end
mu = exp(f).*z;
r = lik.disper;
switch param
case 'param'
case 'latent'
llg3 = - mu.*(r.^2 + y.*r)./(r + mu).^2 + 2.*mu.^2.*(r.^2 + y.*r)./(r + mu).^3;
% add gradient of the normalization due to the truncation
lp0=r.*(log(r) - log(r+mu));
llg3=llg3+ ...
(exp(lp0).*(r.^2.*(r + r.*exp(2.*lp0)) + mu.^2.*r.^3 - mu.*r.^2.*(3.*r + exp(2.*lp0) + 1)) + exp(2.*lp0).*(mu.^2.*r.^3 - 2.*r.^3 + mu.*r.^2.*(3.*r + 2)))./((exp(lp0) - 1).^3.*(mu + r).^3).*mu;
case 'latent2+param'
llg3 = mu.*(y.*r - 2.*r.*mu - mu.*y)./(r+mu).^3;
% add gradient of the normalization due to the truncation
lp0=r.*(log(r) - log(r+mu));
ip0=exp(-lp0);
llg3=llg3+ ...
(mu.*(2.*r + 2.*r.*ip0.*(mu - 1) + r.^2.*ip0.*(mu - 1).*(log(mu + r) - log(r) + r./(mu + r) - 1)))./((mu + r).^2.*(ip0 - 1).^2) - (2.*mu.*(r.^2 + r.^2.*ip0.*(mu - 1)))./((mu + r).^3.*(ip0 - 1).^2) - (2.*mu.*ip0.*(r.^2 + r.^2.*ip0.*(mu - 1)).*(log(mu + r) - log(r) + r./(mu + r) - 1))./((mu + r).^2.*(ip0 - 1).^3);
% correction due to the log transformation
llg3 = llg3.*lik.disper;
end
end
function [logM_0, m_1, sigm2hati1] = lik_negbinztr_tiltedMoments(lik, y, i1, sigm2_i, myy_i, z)
%LIK_NEGBINZTR_TILTEDMOMENTS Returns the marginal moments for EP algorithm
%
% Description
% [M_0, M_1, M2] = LIK_NEGBINZTR_TILTEDMOMENTS(LIK, Y, I, S2,
% MYY, Z) takes a likelihood structure LIK, incidence counts
% Y, expected counts Z, index I and cavity variance S2 and
% mean MYY. Returns the zeroth moment M_0, mean M_1 and
% variance M_2 of the posterior marginal (see Rasmussen and
% Williams (2006): Gaussian processes for Machine Learning,
% page 55). This subfunction is needed when using EP for
% inference with non-Gaussian likelihoods.
%
% See also
% GPEP_E
% if isempty(z)
% error(['lik_negbinztr -> lik_negbinztr_tiltedMoments: missing z!'...
% 'Negbinztr likelihood needs the expected number of '...
% 'occurrences as an extra input z. See, for '...
% 'example, lik_negbinztr and gpep_e. ']);
% end
yy = y(i1);
avgE = z(i1);
r = lik.disper;
logM_0=zeros(size(yy));
m_1=zeros(size(yy));
sigm2hati1=zeros(size(yy));
for i=1:length(i1)
% get a function handle of an unnormalized tilted distribution
% (likelihood * cavity = Negative-binomial * Gaussian)
% and useful integration limits
[tf,minf,maxf]=init_negbinztr_norm(yy(i),myy_i(i),sigm2_i(i),avgE(i),r);
% Integrate with quadrature
RTOL = 1.e-6;
ATOL = 1.e-10;
[m_0, m_1(i), m_2] = quad_moments(tf, minf, maxf, RTOL, ATOL);
sigm2hati1(i) = m_2 - m_1(i).^2;
% If the second central moment is less than cavity variance
% integrate more precisely. Theoretically for log-concave
% likelihood should be sigm2hati1 < sigm2_i.
if sigm2hati1(i) >= sigm2_i(i)
ATOL = ATOL.^2;
RTOL = RTOL.^2;
[m_0, m_1(i), m_2] = quad_moments(tf, minf, maxf, RTOL, ATOL);
sigm2hati1(i) = m_2 - m_1(i).^2;
if sigm2hati1(i) >= sigm2_i(i)
warning('lik_negbinztr_tilted_moments: sigm2hati1 >= sigm2_i');
%sigm2hati1=sigm2_i-1e-9;
end
end
logM_0(i) = log(m_0);
end
end
function [g_i] = lik_negbinztr_siteDeriv(lik, y, i1, sigm2_i, myy_i, z)
%LIK_NEGBINZTR_SITEDERIV Evaluate the expectation of the gradient
% of the log likelihood term with respect
% to the likelihood parameters for EP
%
% Description
% G_I = LIK_NEGBINZTR_SITEDERIV(LIK, Y, I, S2, MYY, Z) takes a
% likelihood structure LIK, incidence counts Y, expected
% counts Z, index I and cavity variance S2 and mean MYY.
% Returns E_f [d log p(y_i|f_i) /d a], where a is the
% likelihood parameter and the expectation is over the
% marginal posterior. This term is needed when evaluating the
% gradients of the marginal likelihood estimate Z_EP with
% respect to the likelihood parameters (see Seeger (2008):
% Expectation propagation for exponential families). This
% subfunction is needed when using EP for inference with
% non-Gaussian likelihoods and there are likelihood parameters.
%
% See also
% GPEP_G
if isempty(z)
error(['lik_negbinztr -> lik_negbinztr_siteDeriv: missing z!'...
'Negbinztr likelihood needs the expected number of '...
'occurrences as an extra input z. See, for '...
'example, lik_negbinztr and gpla_e. ']);
end
yy = y(i1);
avgE = z(i1);
r = lik.disper;
% get a function handle of an unnormalized tilted distribution
% (likelihood * cavity = Negative-binomial * Gaussian)
% and useful integration limits
[tf,minf,maxf]=init_negbinztr_norm(yy,myy_i,sigm2_i,avgE,r);
% additionally get function handle for the derivative
td = @deriv;
% Integrate with quadgk
[m_0, fhncnt] = quadgk(tf, minf, maxf);
[g_i, fhncnt] = quadgk(@(f) td(f).*tf(f)./m_0, minf, maxf);
g_i = g_i.*r;
function g = deriv(f)
mu = avgE.*exp(f);
% Derivative using the psi function
g = 1 + log(r./(r+mu)) - (r+yy)./(r+mu) + psi(r + yy) - psi(r);
lp0=r.*(log(r) - log(r+mu));
g = g -(1./(1 - exp(-lp0)).*(log(r./(mu + r)) - r./(mu + r) + 1));
end
end
function upfact = lik_negbinztr_upfact(gp, y, mu, ll, z)
r = gp.lik.disper;
sll = sqrt(ll);
fh_e = @(f) negbinztr_pdf(y, exp(f).*z', r).*norm_pdf(f, mu, sll);
EE = quadgk(fh_e, max(mu-6*sll,-30), min(mu+6*sll,30));
fm = @(f) f.*negbinztr_pdf(y, exp(f).*z', r).*norm_pdf(f, mu, sll)./EE;
mm = quadgk(fm, max(mu-6*sll,-30), min(mu+6*sll,30));
fV = @(f) (f - mm).^2.*negbinztr_pdf(y, exp(f).*z', r).*norm_pdf(f, mu, sll)./EE;
Varp = quadgk(fV, max(mu-6*sll,-30), min(mu+6*sll,30));
upfact = -(Varp - ll)./ll^2;
end
function [lpy, Ey, Vary] = lik_negbinztr_predy(lik, Ef, Varf, yt, zt)
%LIK_NEGBINZTR_PREDY Returns the predictive mean, variance and density of y
%
% Description
% LPY = LIK_NEGBINZTR_PREDY(LIK, EF, VARF YT, ZT)
% Returns logarithm of the predictive density PY of YT, that is
% p(yt | zt) = \int p(yt | f, zt) p(f|y) df.
% This requires also the incidence counts YT and expected counts ZT.
% This subfunction is needed when computing posterior predictive
% distributions for future observations.
%
% [LPY, EY, VARY] = LIK_NEGBINZTR_PREDY(LIK, EF, VARF) takes a
% likelihood structure LIK, posterior mean EF and posterior
% Variance VARF of the latent variable and returns the
% posterior predictive mean EY and variance VARY of the
% observations related to the latent variables. This subfunction
% is needed when computing posterior predictive distributions for
% future observations.
%
%
% See also
% GPLA_PRED, GPEP_PRED, GPMC_PRED
if isempty(zt)
error(['lik_negbinztr -> lik_negbinztr_predy: missing zt!'...
'Negbinztr likelihood needs the expected number of '...
'occurrences as an extra input zt. See, for '...
'example, lik_negbinztr and gpla_e. ']);
end
avgE = zt;
r = lik.disper;
lpy = zeros(size(Ef));
Ey = zeros(size(Ef));
EVary = zeros(size(Ef));
VarEy = zeros(size(Ef));
if nargout > 1
% Evaluate Ey and Vary
for i1=1:length(Ef)
%%% With quadrature
myy_i = Ef(i1);
sigm_i = sqrt(Varf(i1));
minf=myy_i-6*sigm_i;
maxf=myy_i+6*sigm_i;
F = @(f) exp(log(avgE(i1))+f+norm_lpdf(f,myy_i,sigm_i));
Ey(i1) = quadgk(F,minf,maxf);
F2 = @(f) exp(log(avgE(i1).*exp(f)+((avgE(i1).*exp(f)).^2/r))+norm_lpdf(f,myy_i,sigm_i));
EVary(i1) = quadgk(F2,minf,maxf);
F3 = @(f) exp(2*log(avgE(i1))+2*f+norm_lpdf(f,myy_i,sigm_i));
VarEy(i1) = quadgk(F3,minf,maxf) - Ey(i1).^2;
end
Vary = EVary + VarEy;
end
% Evaluate the posterior predictive densities of the given observations
lpy = zeros(length(yt),1);
for i1=1:length(yt)
% get a function handle of the likelihood times posterior
% (likelihood * posterior = Negative-binomial * Gaussian)
% and useful integration limits
[pdf,minf,maxf]=init_negbinztr_norm(...
yt(i1),Ef(i1),Varf(i1),avgE(i1),r);
% integrate over the f to get posterior predictive distribution
lpy(i1) = log(quadgk(pdf, minf, maxf));
end
end
function [df,minf,maxf] = init_negbinztr_norm(yy,myy_i,sigm2_i,avgE,r)
%INIT_NEGBINZTR_NORM
%
% Description
% Return function handle to a function evaluating
% Negative-Binomial * Gaussian which is used for evaluating
% (likelihood * cavity) or (likelihood * posterior) Return
% also useful limits for integration. This is private function
% for lik_negbinztr. This subfunction is needed by subfunctions
% tiltedMoments, siteDeriv and predy.
%
% See also
% LIK_NEGBINZTR_TILTEDMOMENTS, LIK_NEGBINZTR_SITEDERIV,
% LIK_NEGBINZTR_PREDY
% avoid repetitive evaluation of constant part
ldconst = -gammaln(r)-gammaln(yy+1)+gammaln(r+yy)...
- log(sigm2_i)/2 - log(2*pi)/2;
% Create function handle for the function to be integrated
df = @negbinztr_norm;
% use log to avoid underflow, and derivatives for faster search
ld = @log_negbinztr_norm;
ldg = @log_negbinztr_norm_g;
ldg2 = @log_negbinztr_norm_g2;
% Set the limits for integration
% Negative-binomial likelihood is log-concave so the negbinztr_norm
% function is unimodal, which makes things easier
if yy==0
% with yy==0, the mode of the likelihood is not defined
% use the mode of the Gaussian (cavity or posterior) as a first guess
modef = myy_i;
else
% use precision weighted mean of the Gaussian approximation
% of the Negative-Binomial likelihood and Gaussian
mu=log(yy/avgE);
s2=(yy+r)./(yy.*r);
modef = (myy_i/sigm2_i + mu/s2)/(1/sigm2_i + 1/s2);
end
% find the mode of the integrand using Newton iterations
% a few iterations are enough, since the first guess is in the right direction
niter=4; % number of Newton iterations
mindelta=1e-6; % tolerance in stopping Newton iterations
for ni=1:niter
g=ldg(modef);
h=ldg2(modef);
delta=-g/h;
modef=modef+delta;
if abs(delta)<mindelta
break
end
end
% integrand limits based on Gaussian approximation at mode
modes=sqrt(-1/h);
minf=modef-8*modes;
maxf=modef+8*modes;
modeld=ld(modef);
iter=0;
% check that density at end points is low enough
lddiff=20; % min difference in log-density between mode and end-points
minld=ld(minf);
step=1;
while minld<(modeld-lddiff) && minf<modef
% sometimes minf is too small
minf=minf+step*modes;
minld=ld(minf);
end
while minld>(modeld-lddiff)
minf=minf-step*modes;
minld=ld(minf);
iter=iter+1;
step=step*2;
if iter>100
error(['lik_negbinztr -> init_negbinztr_norm: ' ...
'integration interval minimun not found ' ...
'even after looking hard!'])
end
end
maxld=ld(maxf);
step=1;
while maxld>(modeld-lddiff)
maxf=maxf+step*modes;
maxld=ld(maxf);
iter=iter+1;
step=step*2;
if iter>100
error(['lik_negbinztr -> init_negbinztr_norm: ' ...
'integration interval maximum not found ' ...
'even after looking hard!'])
end
end
function integrand = negbinztr_norm(f)
% Negative-binomial * Gaussian
mu = avgE.*exp(f);
lp0=r.*(log(r) - log(r+mu));
if lp0==0
% exp(lp0)->1, that is, almost all the mass is in the zero part
% approximate if yy=1, and give up if yy>1
if yy==1
integrand = exp(log(avgE)+f...
-0.5*(f-myy_i).^2./sigm2_i -log(sigm2_i)/2 -log(2*pi)/2);
else
integrand = 0;
end
else
integrand = exp(ldconst ...
+yy.*(log(mu)-log(r+mu))+r.*(log(r)-log(r+mu)) ...
-0.5*(f-myy_i).^2./sigm2_i ...
-log(1-exp(lp0)));
end
end
function log_int = log_negbinztr_norm(f)
% log(Negative-binomial * Gaussian)
% log_negbinztr_norm is used to avoid underflow when searching
% integration interval
mu = avgE.*exp(f);
lp0=r.*(log(r) - log(r+mu));
if lp0==0
% exp(lp0)->1, that is, almost all the mass is in the zero part
% approximate if yy=1, and give up if yy>1
if yy==1
log_int = log(avgE)+f ...
-0.5*(f-myy_i).^2./sigm2_i - log(sigm2_i)/2 - log(2*pi)/2;
else
log_int=-Inf;
end
else
log_int = ldconst...
+yy.*(log(mu)-log(r+mu))+r.*(log(r)-log(r+mu)) ...
-0.5*(f-myy_i).^2./sigm2_i ...
-log(1-exp(lp0));
end
end
function g = log_negbinztr_norm_g(f)
% d/df log(Negative-binomial * Gaussian)
% derivative of log_negbinztr_norm
mu = avgE.*exp(f);
lp0=r.*(log(r) - log(r+mu));
if lp0==0
% exp(lp0)->1, that is, almost all the mass is in the zero part
% approximate if yy=1, and give up if yy>1
g = 1+(myy_i - f)./sigm2_i;
else
g = -(r.*(mu - yy))./(mu.*(mu + r)).*mu ...
+ (myy_i - f)./sigm2_i ...
-1/(1 - exp(-lp0))*-r/(mu + r)*mu;
end
end
function g2 = log_negbinztr_norm_g2(f)
% d^2/df^2 log(Negative-binomial * Gaussian)
% second derivative of log_negbinztr_norm
mu = avgE.*exp(f);
lp0=r.*(log(r) - log(r+mu));
if lp0==0
% exp(lp0)->1, that is, almost all the mass is in the zero part
% approximate if yy=1, and give up if yy>1
g2 = -1/sigm2_i;
else
g2 = -(r*(r + yy))/(mu + r)^2.*mu ...
-1/sigm2_i ...
+ (r^2 + r^2*exp(-lp0)*(mu - 1))/((mu + r)^2*(exp(-lp0) - 1)^2)*mu;
end
end
end
function prctys = lik_negbinztr_predprcty(lik, Ef, Varf, zt, prcty)
%LIK_NEGBINZTR_PREDPRCTY Returns the percentiles of predictive density of y
%
% Description
% PRCTY = LIK_NEGBINZTR_PREDPRCTY(LIK, EF, VARF, ZT, PRCTY)
% Returns percentiles of the predictive density PY of YT.
% This requires also the expected counts ZT.
% This subfunction is needed when using function gp_predprcty.
%
% See also
% GP_PREDPRCTY
if isempty(zt)
error(['lik_negbinztr -> lik_negbinztr_predprcty: missing zt!'...
'Negbinztr likelihood needs the expected number of '...
'occurrences as an extra input zt. See, for '...
'example, lik_negbinztr and gpla_e. ']);
end
opt=optimset('TolX',1e-7,'Display','off');
nt=size(Ef,1);
prctys = zeros(nt,numel(prcty));
prcty=prcty/100;
r = lik.disper;
mu = zt.*exp(Ef);
for i1=1:nt
ci = sqrt(Varf(i1));
for i2=1:numel(prcty)
minf = floor(fminbnd(@(b) (quadgk(@(y) llvec(lik,y,Ef(i1)-1.96*ci,zt(i1)), 0, b)-prcty(i2)).^2,nbininv(prcty(i2), r, r./(r+zt(i1).*exp(Ef(i1))))-5,nbininv(prcty(i2), r, r./(r+zt(i1).*exp(Ef(i1))))+5,opt));
if minf<0
minf = 0;
end
maxf = floor(fminbnd(@(b) (quadgk(@(y) llvec(lik,y,Ef(i1)+1.96*ci,zt(i1)), 0, b)-prcty(i2)).^2,nbininv(prcty(i2), r, r./(r+zt(i1).*exp(Ef(i1))))-5,nbininv(prcty(i2), r, r./(r+zt(i1).*exp(Ef(i1))))+5,opt));
if maxf<0
maxf = 0;
end
a=floor(fminbnd(@(a) (quadgk(@(f) sum(llvec(lik,0:1:a,Ef(i1),zt(i1))) ...
.*norm_pdf(f,Ef(i1),ci),Ef(i1)-6*ci,Ef(i1)+6*ci,'AbsTol',1e-4)-prcty(i2)).^2, minf, maxf,opt));
if quadgk(@(f) sum(llvec(lik,0:1:a,Ef(i1),zt(i1))).*norm_pdf(f,Ef(i1),ci),Ef(i1)-6*ci,Ef(i1)+6*ci,'AbsTol',1e-4) < prcty(i2)
a=a+1;
end
prctys(i1,i2)=a;
end
end
function expll = llvec(lik,yt,f,z)
% Compute vector of likelihoods of single predictions
n = length(yt);
if n>0
for i=1:n
expll(i) = exp(lik.fh.ll(lik, yt(i), f, z));
end
else
expll = 0;
end
end
end
function mu = lik_negbinztr_invlink(lik, f, z)
%LIK_NEGBINZTR_INVLINK Returns values of inverse link function
%
% Description
% MU = LIK_NEGBINZTR_INVLINK(LIK, F) takes a likelihood structure LIK and
% latent values F and returns the values MU of inverse link function.
% This subfunction is needed when using function gp_predprctmu.
%
% See also
% LIK_NEGBINZTR_LL, LIK_NEGBINZTR_PREDY
mu = z.*exp(f);
end
function reclik = lik_negbinztr_recappend(reclik, ri, lik)
%RECAPPEND Append the parameters to the record
%
% Description
% RECLIK = GPCF_NEGBINZTR_RECAPPEND(RECLIK, RI, LIK) takes a
% likelihood record structure RECLIK, record index RI and
% likelihood structure LIK with the current MCMC samples of
% the parameters. Returns RECLIK which contains all the old
% samples and the current samples from LIK. This subfunction
% is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC
if nargin == 2
% Initialize the record
reclik.type = 'Negbinztr';
% Initialize parameter
reclik.disper = [];
% Set the function handles
reclik.fh.pak = @lik_negbinztr_pak;
reclik.fh.unpak = @lik_negbinztr_unpak;
reclik.fh.lp = @lik_negbinztr_lp;
reclik.fh.lpg = @lik_negbinztr_lpg;
reclik.fh.ll = @lik_negbinztr_ll;
reclik.fh.llg = @lik_negbinztr_llg;
reclik.fh.llg2 = @lik_negbinztr_llg2;
reclik.fh.llg3 = @lik_negbinztr_llg3;
reclik.fh.tiltedMoments = @lik_negbinztr_tiltedMoments;
reclik.fh.predy = @lik_negbinztr_predy;
reclik.fh.predprcty = @lik_negbinztr_predprcty;
reclik.fh.invlink = @lik_negbinztr_invlink;
reclik.fh.recappend = @lik_negbinztr_recappend;
reclik.p=[];
reclik.p.disper=[];
if ~isempty(ri.p.disper)
reclik.p.disper = ri.p.disper;
end
else
% Append to the record
reclik.disper(ri,:)=lik.disper;
if ~isempty(lik.p)
reclik.p.disper = lik.p.disper.fh.recappend(reclik.p.disper, ri, lik.p.disper);
end
end
end
|
github
|
lcnhappe/happe-master
|
gpcf_neuralnetwork.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpcf_neuralnetwork.m
| 25,549 |
utf_8
|
33309be52b0b606ff6a3855b8ac7e8c9
|
function gpcf = gpcf_neuralnetwork(varargin)
%GPCF_NEURALNETWORK Create a neural network covariance function
%
% Description
% GPCF = GPCF_NEURALNETWORK('PARAM1',VALUE1,'PARAM2',VALUE2,...)
% creates a neural network covariance function structure in which
% the named parameters have the specified values. Any
% unspecified parameters are set to default values.
%
% GPCF = GPCF_NEURALNETWORK(GPCF,'PARAM1',VALUE1,'PARAM2',VALUE2,...)
% modifies a covariance function structure with the named
% parameters altered with the specified values.
%
% Parameters for neural network covariance function [default]
% biasSigma2 - prior variance for bias in neural network [0.1]
% weightSigma2 - prior variance for weights in neural network [10]
% This can be either scalar corresponding
% to a common prior variance or vector
% defining own prior variance for each
% input.
% biasSigma2_prior - prior structure for biasSigma2 [prior_logunif]
% weightSigma2_prior - prior structure for weightSigma2 [prior_logunif]
% selectedVariables - vector defining which inputs are used
%
% Note! If the prior is 'prior_fixed' then the parameter in
% question is considered fixed and it is not handled in
% optimization, grid integration, MCMC etc.
%
% See also
% GP_SET, GPCF_*, PRIOR_*
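%
% A minimal construction sketch (illustrative values; for m-dimensional
% inputs weightSigma2 can be a 1-by-m vector for ARD):
%   gpcf = gpcf_neuralnetwork('biasSigma2', 0.1, ...
%                             'weightSigma2', 10*ones(1,m));
%   gp   = gp_set('cf', gpcf);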
% Copyright (c) 2007-2009 Jarno Vanhatalo
% Copyright (c) 2009 Jaakko Riihimaki
% Copyright (c) 2010 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'GPCF_NEURALNETWORK';
ip.addOptional('gpcf', [], @isstruct);
ip.addParamValue('biasSigma2',0.1, @(x) isscalar(x) && x>0);
ip.addParamValue('weightSigma2',10, @(x) isvector(x) && all(x>0));
ip.addParamValue('biasSigma2_prior',prior_logunif, @(x) isstruct(x) || isempty(x));
ip.addParamValue('weightSigma2_prior',prior_logunif, @(x) isstruct(x) || isempty(x));
ip.addParamValue('selectedVariables',[], @(x) isvector(x) && all(x>0));
ip.parse(varargin{:});
gpcf=ip.Results.gpcf;
if isempty(gpcf)
init=true;
gpcf.type = 'gpcf_neuralnetwork';
else
if ~isfield(gpcf,'type') || ~isequal(gpcf.type,'gpcf_neuralnetwork')
error('First argument does not seem to be a valid covariance function structure')
end
init=false;
end
% Initialize parameters
if init || ~ismember('biasSigma2',ip.UsingDefaults)
gpcf.biasSigma2=ip.Results.biasSigma2;
end
if init || ~ismember('weightSigma2',ip.UsingDefaults)
gpcf.weightSigma2=ip.Results.weightSigma2;
end
% Initialize prior structure
if init
gpcf.p=[];
end
if init || ~ismember('biasSigma2_prior',ip.UsingDefaults)
gpcf.p.biasSigma2=ip.Results.biasSigma2_prior;
end
if init || ~ismember('weightSigma2_prior',ip.UsingDefaults)
gpcf.p.weightSigma2=ip.Results.weightSigma2_prior;
end
if ~ismember('selectedVariables',ip.UsingDefaults)
selectedVariables=ip.Results.selectedVariables;
if ~isempty(selectedVariables)
gpcf.selectedVariables = selectedVariables;
end
end
if init
% Set the function handles to the subfunctions
gpcf.fh.pak = @gpcf_neuralnetwork_pak;
gpcf.fh.unpak = @gpcf_neuralnetwork_unpak;
gpcf.fh.lp = @gpcf_neuralnetwork_lp;
gpcf.fh.lpg = @gpcf_neuralnetwork_lpg;
gpcf.fh.cfg = @gpcf_neuralnetwork_cfg;
gpcf.fh.ginput = @gpcf_neuralnetwork_ginput;
gpcf.fh.cov = @gpcf_neuralnetwork_cov;
gpcf.fh.trcov = @gpcf_neuralnetwork_trcov;
gpcf.fh.trvar = @gpcf_neuralnetwork_trvar;
gpcf.fh.recappend = @gpcf_neuralnetwork_recappend;
end
end
function [w, s] = gpcf_neuralnetwork_pak(gpcf, w)
%GPCF_NEURALNETWORK_PAK Combine GP covariance function parameters
% into one vector
%
% Description
% W = GPCF_NEURALNETWORK_PAK(GPCF) takes a covariance function
% structure GPCF and combines the covariance function parameters
% and their hyperparameters into a single row vector W. This is a
% mandatory subfunction used for example in energy and gradient
% computations.
%
% w = [ log(gpcf.biasSigma2)
% (hyperparameters of gpcf.biasSigma2)
% log(gpcf.weightSigma2(:))
% (hyperparameters of gpcf.weightSigma2)]'
%
%
% See also
% GPCF_NEURALNETWORK_UNPAK
w = []; s = {};
if ~isempty(gpcf.p.biasSigma2)
w = [w log(gpcf.biasSigma2)];
s = [s; 'log(neuralnetwork.biasSigma2)'];
% Hyperparameters of biasSigma2
[wh sh] = gpcf.p.biasSigma2.fh.pak(gpcf.p.biasSigma2);
w = [w wh];
s = [s; sh];
end
if ~isempty(gpcf.p.weightSigma2)
w = [w log(gpcf.weightSigma2)];
if numel(gpcf.weightSigma2)>1
s = [s; sprintf('log(neuralnetwork.weightSigma2 x %d)',numel(gpcf.weightSigma2))];
else
s = [s; 'log(neuralnetwork.weightSigma2)'];
end
% Hyperparameters of weightSigma2
[wh sh] = gpcf.p.weightSigma2.fh.pak(gpcf.p.weightSigma2);
w = [w wh];
s = [s; sh];
end
end
function [gpcf, w] = gpcf_neuralnetwork_unpak(gpcf, w)
%GPCF_NEURALNETWORK_UNPAK Sets the covariance function parameters
% into the structure
%
% Description
% [GPCF, W] = GPCF_NEURALNETWORK_UNPAK(GPCF, W) takes a
% covariance function structure GPCF and a hyper-parameter
% vector W, and returns a covariance function structure
% identical to the input, except that the covariance
% hyper-parameters have been set to the values in W. Deletes
% the values set to GPCF from W and returns the modified W.
% This is a mandatory subfunction used for example in energy
% and gradient computations.
%
% Assignment is inverse of
% w = [ log(gpcf.biasSigma2)
%       (hyperparameters of gpcf.biasSigma2)
%       log(gpcf.weightSigma2(:))
%       (hyperparameters of gpcf.weightSigma2)]'
%
% See also
% GPCF_NEURALNETWORK_PAK
gpp=gpcf.p;
if ~isempty(gpp.biasSigma2)
i1=1;
gpcf.biasSigma2 = exp(w(i1));
w = w(i1+1:end);
end
if ~isempty(gpp.weightSigma2)
i2=length(gpcf.weightSigma2);
i1=1;
gpcf.weightSigma2 = exp(w(i1:i2));
w = w(i2+1:end);
% Hyperparameters of weightSigma2
[p, w] = gpcf.p.weightSigma2.fh.unpak(gpcf.p.weightSigma2, w);
gpcf.p.weightSigma2 = p;
end
if ~isempty(gpp.biasSigma2)
% Hyperparameters of biasSigma2
[p, w] = gpcf.p.biasSigma2.fh.unpak(gpcf.p.biasSigma2, w);
gpcf.p.biasSigma2 = p;
end
end
function lp = gpcf_neuralnetwork_lp(gpcf)
%GPCF_NEURALNETWORK_LP Evaluate the log prior of covariance
% function parameters
%
% Description
% LP = GPCF_NEURALNETWORK_LP(GPCF) takes a covariance function
% structure GPCF and returns log(p(th)), where th collects the
% parameters. This is a mandatory subfunction used for example
% in energy computations.
%
% See also
% GPCF_NEURALNETWORK_PAK, GPCF_NEURALNETWORK_UNPAK,
% GPCF_NEURALNETWORK_LPG, GP_E
lp = 0;
gpp=gpcf.p;
if ~isempty(gpp.biasSigma2)
lp = gpp.biasSigma2.fh.lp(gpcf.biasSigma2, gpp.biasSigma2) +log(gpcf.biasSigma2);
end
if ~isempty(gpp.weightSigma2)
lp = lp +gpp.weightSigma2.fh.lp(gpcf.weightSigma2, gpp.weightSigma2) +sum(log(gpcf.weightSigma2));
end
end
function lpg = gpcf_neuralnetwork_lpg(gpcf)
%GPCF_NEURALNETWORK_LPG Evaluate gradient of the log prior with respect
% to the parameters.
%
% Description
% LPG = GPCF_NEURALNETWORK_LPG(GPCF) takes a covariance function
% structure GPCF and returns LPG = d log (p(th))/dth, where th
% is the vector of parameters. This is a mandatory subfunction
% used for example in gradient computations.
%
% See also
% GPCF_NEURALNETWORK_PAK, GPCF_NEURALNETWORK_UNPAK,
% GPCF_NEURALNETWORK_LP, GP_G
lpg = [];
gpp=gpcf.p;
if ~isempty(gpcf.p.biasSigma2)
lpgs = gpp.biasSigma2.fh.lpg(gpcf.biasSigma2, gpp.biasSigma2);
lpg = [lpg lpgs(1).*gpcf.biasSigma2+1 lpgs(2:end)];
end
if ~isempty(gpcf.p.weightSigma2)
lll = length(gpcf.weightSigma2);
lpgs = gpp.weightSigma2.fh.lpg(gpcf.weightSigma2, gpp.weightSigma2);
lpg = [lpg lpgs(1:lll).*gpcf.weightSigma2+1 lpgs(lll+1:end)];
end
end
function DKff = gpcf_neuralnetwork_cfg(gpcf, x, x2, mask, i1)
%GPCF_NEURALNETWORK_CFG Evaluate gradient of covariance function
% with respect to the parameters
%
% Description
% DKff = GPCF_NEURALNETWORK_CFG(GPCF, X) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X) with respect to th (cell array with matrix elements).
% This is a mandatory subfunction used in gradient computations.
%
% DKff = GPCF_NEURALNETWORK_CFG(GPCF, X, X2) takes a
% covariance function structure GPCF, a matrix X of input
% vectors and returns DKff, the gradients of covariance matrix
% Kff = k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_NEURALNETWORK_CFG(GPCF, X, [], MASK) takes a
% covariance function structure GPCF, a matrix X of input
% vectors and returns DKff, the diagonal of gradients of
% covariance matrix Kff = k(X,X2) with respect to th (cell
% array with matrix elements). This subfunction is needed
% when using sparse approximations (e.g. FIC).
%
% DKff = GPCF_NEURALNETWORK_CFG(GPCF,X,X2,[],i) takes a
% covariance function structure GPCF, a matrix X of input
% vectors and returns DKff, the gradients of covariance matrix
% Kff = k(X,X2) with respect to ith hyperparameter(matrix).
% 5th input parameter can also be used without X2. If i = 0,
% number of hyperparameters is returned. This subfunction is
% needed when using memory save option in gp_set.
%
%
% See also
% GPCF_NEURALNETWORK_PAK, GPCF_NEURALNETWORK_UNPAK,
% GPCF_NEURALNETWORK_LP, GP_G
gpp=gpcf.p;
if isfield(gpcf, 'selectedVariables') && ~isempty(x)
x=x(:,gpcf.selectedVariables);
if nargin > 2 && ~isempty(x2)
x2=x2(:,gpcf.selectedVariables);
end
end
[n, m] =size(x);
if nargin==5
% Use memory save option
if i1==0
% Return number of hyperparameters
i=0;
if ~isempty(gpcf.p.biasSigma2)
i=i+1;
end
if ~isempty(gpcf.p.weightSigma2)
i=i+length(gpcf.weightSigma2);
end
DKff=i;
return
end
savememory=1;
else
savememory=0;
i1=1:m;
end
DKff = {};
gprior = [];
% Evaluate: DKff{1} = d Kff / d biasSigma2
% DKff{2} = d Kff / d weightSigma2
% NOTE! Here we have already taken into account that the parameters
% are transformed through log() and thus dK/dlog(p) = p * dK/dp
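% For example, dK/dlog(weightSigma2_d) = weightSigma2_d * dK/dweightSigma2_d,
% which is why the factors gpcf.biasSigma2 and s(d1) multiply the raw
% gradients bg and wg below.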
% evaluate the gradient for training covariance
if nargin == 2 || (isempty(x2) && isempty(mask))
x_aug=[ones(size(x,1),1) x];
if length(gpcf.weightSigma2) == 1
% In the case of an isotropic NEURALNETWORK
s = gpcf.weightSigma2*ones(1,m);
else
s = gpcf.weightSigma2;
end
S_nom=2*x_aug*diag([gpcf.biasSigma2 s])*x_aug';
S_den_tmp=(2*sum(repmat([gpcf.biasSigma2 s], n, 1).*x_aug.^2,2)+1);
S_den2=S_den_tmp*S_den_tmp';
S_den=sqrt(S_den2);
C_tmp=2/pi./sqrt(1-(S_nom./S_den).^2);
% C(abs(C)<=eps) = 0;
C_tmp = (C_tmp+C_tmp')./2;
ii1 = 0;
if ~savememory || i1==1
bnom_g=2*ones(n);
bden_g=(0.5./S_den).*(bnom_g.*repmat(S_den_tmp',n,1)+repmat(S_den_tmp,1,n).*bnom_g);
bg=gpcf.biasSigma2*C_tmp.*(bnom_g.*S_den-bden_g.*S_nom)./S_den2;
if ~isempty(gpcf.p.biasSigma2)
ii1 = ii1+1;
DKff{ii1}=(bg+bg')/2;
end
if savememory
DKff=DKff{ii1};
return
end
elseif savememory
i1=i1-1;
end
if ~isempty(gpcf.p.weightSigma2)
if length(gpcf.weightSigma2) == 1
wnom_g=2*x*x';
tmp_g=sum(2*x.^2,2);
wden_g=0.5./S_den.*(tmp_g*S_den_tmp'+S_den_tmp*tmp_g');
wg=s(1)*C_tmp.*(wnom_g.*S_den-wden_g.*S_nom)./S_den2;
ii1 = ii1+1;
DKff{ii1}=(wg+wg')/2;
else
for d1=i1
wnom_g=2*x(:,d1)*x(:,d1)';
tmp_g=2*x(:,d1).^2;
tmp=tmp_g*S_den_tmp';
wden_g=0.5./S_den.*(tmp+tmp');
wg=s(d1)*C_tmp.*(wnom_g.*S_den-wden_g.*S_nom)./S_den2;
ii1 = ii1+1;
DKff{ii1}=(wg+wg')/2;
end
end
end
% Evaluate the gradient of non-symmetric covariance (e.g. K_fu)
elseif nargin == 3 || isempty(mask)
if size(x,2) ~= size(x2,2)
error('gpcf_neuralnetwork -> _ghyper: The number of columns in x and x2 has to be the same. ')
end
n2 =size(x2,1);
x_aug=[ones(size(x,1),1) x];
x_aug2=[ones(size(x2,1),1) x2];
if length(gpcf.weightSigma2) == 1
% In the case of an isotropic NEURALNETWORK
s = gpcf.weightSigma2*ones(1,m);
else
s = gpcf.weightSigma2;
end
S_nom=2*x_aug*diag([gpcf.biasSigma2 s])*x_aug2';
S_den_tmp1=(2*sum(repmat([gpcf.biasSigma2 s], n, 1).*x_aug.^2,2)+1);
S_den_tmp2=(2*sum(repmat([gpcf.biasSigma2 s], n2, 1).*x_aug2.^2,2)+1);
S_den2=S_den_tmp1*S_den_tmp2';
S_den=sqrt(S_den2);
C_tmp=2/pi./sqrt(1-(S_nom./S_den).^2);
%C(abs(C)<=eps) = 0;
ii1 = 0;
if ~savememory || i1==1
bnom_g=2*ones(n, n2);
bden_g=(0.5./S_den).*(bnom_g.*repmat(S_den_tmp2',n,1)+repmat(S_den_tmp1,1,n2).*bnom_g);
if ~isempty(gpcf.p.biasSigma2)
ii1 = ii1 + 1;
DKff{ii1}=gpcf.biasSigma2*C_tmp.*(bnom_g.*S_den-bden_g.*S_nom)./S_den2;
end
if savememory
DKff=DKff{ii1};
return
end
elseif savememory
i1=i1-1;
end
if ~isempty(gpcf.p.weightSigma2)
if length(gpcf.weightSigma2) == 1
wnom_g=2*x*x2';
tmp_g1=sum(2*x.^2,2);
tmp_g2=sum(2*x2.^2,2);
wden_g=0.5./S_den.*(tmp_g1*S_den_tmp2'+S_den_tmp1*tmp_g2');
ii1 = ii1 + 1;
DKff{ii1}=s(1)*C_tmp.*(wnom_g.*S_den-wden_g.*S_nom)./S_den2;
else
for d1=i1
wnom_g=2*x(:,d1)*x2(:,d1)';
tmp_g1=2*x(:,d1).^2;
tmp_g2=2*x2(:,d1).^2;
wden_g=0.5./S_den.*(tmp_g1*S_den_tmp2'+S_den_tmp1*tmp_g2');
ii1 = ii1 + 1;
DKff{ii1}=s(d1)*C_tmp.*(wnom_g.*S_den-wden_g.*S_nom)./S_den2;
end
end
end
% Evaluate: DKff{1} = d mask(Kff,I) / d biasSigma2
% DKff{2...} = d mask(Kff,I) / d weightSigma2
elseif nargin == 4 || nargin == 5
x_aug=[ones(size(x,1),1) x];
if length(gpcf.weightSigma2) == 1
% In the case of an isotropic NEURALNETWORK
s = gpcf.weightSigma2*ones(1,m);
else
s = gpcf.weightSigma2;
end
S_nom=2*sum(repmat([gpcf.biasSigma2 s],n,1).*x_aug.^2,2);
S_den=(S_nom+1);
S_den2=S_den.^2;
C_tmp=2/pi./sqrt(1-(S_nom./S_den).^2);
%C(abs(C)<=eps) = 0;
bnom_g=2*ones(n,1);
bden_g=(0.5./S_den).*(2*bnom_g.*S_den);
ii1 = 0;
if ~isempty(gpcf.p.biasSigma2) && (~savememory || all(i1==1))
ii1 = ii1 + 1;
DKff{ii1}=gpcf.biasSigma2*C_tmp.*(bnom_g.*S_den-bden_g.*S_nom)./S_den2;
end
if savememory
if i1==1
DKff=DKff{1};
return
end
i1=i1-1;
end
if ~isempty(gpcf.p.weightSigma2)
if length(gpcf.weightSigma2) == 1
wnom_g=sum(2*x.^2,2);
wden_g=0.5./S_den.*(2*wnom_g.*S_den);
ii1 = ii1+1;
DKff{ii1}=s(1)*C_tmp.*(wnom_g.*S_den-wden_g.*S_nom)./S_den2;
else
for d1=i1
wnom_g=2*x(:,d1).^2;
wden_g=0.5./S_den.*(2*wnom_g.*S_den);
ii1 = ii1+1;
DKff{ii1}=s(d1)*C_tmp.*(wnom_g.*S_den-wden_g.*S_nom)./S_den2;
end
end
end
end
if savememory
DKff=DKff{1};
end
end
function DKff = gpcf_neuralnetwork_ginput(gpcf, x, x2, i1)
%GPCF_NEURALNETWORK_GINPUT Evaluate gradient of covariance function with
% respect to x.
%
% Description
% DKff = GPCF_NEURALNETWORK_GINPUT(GPCF, X) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_NEURALNETWORK_GINPUT(GPCF, X, X2) takes a
% covariance function structure GPCF, a matrix X of input
% vectors and returns DKff, the gradients of covariance matrix
% Kff = k(X,X2) with respect to X (cell array with matrix
% elements). This subfunction is needed when computing gradients
% with respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_NEURALNETWORK_GINPUT(GPCF, X, X2, i) takes a
% covariance function structure GPCF, a matrix X of input
% vectors and returns DKff, the gradients of covariance matrix
% Kff = k(X,X2) with respect to ith covariate in X (matrix).
% This subfunction is needed when using memory save option in
% gp_set.
%
% See also
% GPCF_NEURALNETWORK_PAK, GPCF_NEURALNETWORK_UNPAK,
% GPCF_NEURALNETWORK_LP, GP_G
if isfield(gpcf, 'selectedVariables')
x=x(:,gpcf.selectedVariables);
if nargin > 2 && ~isempty(x2)
x2=x2(:,gpcf.selectedVariables);
end
end
[n, m] =size(x);
if nargin==4
% Use memory save option
if i1==0
% Return number of covariates
DKff=m;
return
end
else
i1=1:m;
end
if nargin == 2 || isempty(x2)
if length(gpcf.weightSigma2) == 1
% In the case of an isotropic NEURALNETWORK
s = gpcf.weightSigma2*ones(1,m);
else
s = gpcf.weightSigma2;
end
x_aug=[ones(size(x,1),1) x];
S_nom=2*x_aug*diag([gpcf.biasSigma2 s])*x_aug';
S_den_tmp=(2*sum(repmat([gpcf.biasSigma2 s], n, 1).*x_aug.^2,2)+1);
S_den2=S_den_tmp*S_den_tmp';
S_den=sqrt(S_den2);
C_tmp=2/pi./sqrt(1-(S_nom./S_den).^2);
%C(abs(C)<=eps) = 0;
C_tmp = (C_tmp+C_tmp')./2;
ii1=0;
for d1=i1
for j=1:n
DK = zeros(n);
DK(j,:)=s(d1)*x(:,d1)';
DK = DK + DK';
inom_g=2*DK;
tmp_g=zeros(n);
tmp_g(j,:)=2*s(d1)*2*x(j,d1)*S_den_tmp';
tmp_g=tmp_g+tmp_g';
iden_g=0.5./S_den.*(tmp_g);
ii1=ii1+1;
DKff{ii1}=C_tmp.*(inom_g.*S_den-iden_g.*S_nom)./S_den2;
end
end
elseif nargin == 3 || nargin == 4
if length(gpcf.weightSigma2) == 1
% In the case of an isotropic NEURALNETWORK
s = gpcf.weightSigma2*ones(1,m);
else
s = gpcf.weightSigma2;
end
n2 =size(x2,1);
x_aug=[ones(size(x,1),1) x];
x_aug2=[ones(size(x2,1),1) x2];
S_nom=2*x_aug*diag([gpcf.biasSigma2 s])*x_aug2';
S_den_tmp1=(2*sum(repmat([gpcf.biasSigma2 s], n, 1).*x_aug.^2,2)+1);
S_den_tmp2=(2*sum(repmat([gpcf.biasSigma2 s], n2, 1).*x_aug2.^2,2)+1);
S_den2=S_den_tmp1*S_den_tmp2';
S_den=sqrt(S_den2);
C_tmp=2/pi./sqrt(1-(S_nom./S_den).^2);
% C(abs(C)<=eps) = 0;
ii1 = 0;
for d1=i1
for j = 1:n
DK = zeros(n, n2);
DK(j,:)=s(d1)*x2(:,d1)';
inom_g=2*DK;
tmp_g=zeros(n, n2);
tmp_g(j,:)=2*s(d1)*2*x(j,d1)*S_den_tmp2';
iden_g=0.5./S_den.*(tmp_g);
ii1=ii1+1;
DKff{ii1}=C_tmp.*(inom_g.*S_den-iden_g.*S_nom)./S_den2;
end
end
end
end
function C = gpcf_neuralnetwork_cov(gpcf, x1, x2, varargin)
%GP_NEURALNETWORK_COV Evaluate covariance matrix between two input vectors
%
% Description
% C = GP_NEURALNETWORK_COV(GP, TX, X) takes in covariance
% function of a Gaussian process GP and two matrixes TX and X
% that contain input vectors to GP. Returns covariance matrix
% C. Every element ij of C contains covariance between inputs
% i in TX and j in X. This is a mandatory subfunction used for
% example in prediction and energy computations.
%
%
% See also
% GPCF_NEURALNETWORK_TRCOV, GPCF_NEURALNETWORK_TRVAR, GP_COV,
% GP_TRCOV
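% The covariance computed below is the arcsine neural network kernel
% (cf. Williams, 1998). With augmented inputs u = [1 x]', v = [1 x']'
% and S = diag([biasSigma2 weightSigma2]):
% k(x,x') = 2/pi * asin( 2*u'*S*v / sqrt((1 + 2*u'*S*u)*(1 + 2*v'*S*v)) )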
if isfield(gpcf, 'selectedVariables')
x1=x1(:,gpcf.selectedVariables);
if nargin > 2 && ~isempty(x2)
x2=x2(:,gpcf.selectedVariables);
end
end
if isempty(x2)
x2=x1;
end
[n1,m1]=size(x1);
[n2,m2]=size(x2);
if m1~=m2
error('the number of columns of X1 and X2 has to be the same')
end
x_aug1=[ones(n1,1) x1];
x_aug2=[ones(n2,1) x2];
if length(gpcf.weightSigma2) == 1
% In the case of an isotropic NEURALNETWORK
s = gpcf.weightSigma2*ones(1,m1);
else
s = gpcf.weightSigma2;
end
S_nom=2*x_aug1*diag([gpcf.biasSigma2 s])*x_aug2';
S_den_tmp1=(2*sum(repmat([gpcf.biasSigma2 s], n1, 1).*x_aug1.^2,2)+1);
S_den_tmp2=(2*sum(repmat([gpcf.biasSigma2 s], n2, 1).*x_aug2.^2,2)+1);
S_den2=S_den_tmp1*S_den_tmp2';
C=2/pi*asin(S_nom./sqrt(S_den2));
C(abs(C)<=eps) = 0;
end
function C = gpcf_neuralnetwork_trcov(gpcf, x)
%GP_NEURALNETWORK_TRCOV Evaluate training covariance matrix of inputs
%
% Description
% C = GP_NEURALNETWORK_TRCOV(GP, TX) takes in covariance
% function of a Gaussian process GP and matrix TX that
% contains training input vectors. Returns covariance matrix
% C. Every element ij of C contains covariance between inputs
% i and j in TX. This is a mandatory subfunction used for
% example in prediction and energy computations.
%
% See also
% GPCF_NEURALNETWORK_COV, GPCF_NEURALNETWORK_TRVAR, GP_COV,
% GP_TRCOV
if isfield(gpcf, 'selectedVariables')
x=x(:,gpcf.selectedVariables);
end
[n,m]=size(x);
x_aug=[ones(n,1) x];
if length(gpcf.weightSigma2) == 1
% In the case of an isotropic NEURALNETWORK
s = gpcf.weightSigma2*ones(1,m);
else
s = gpcf.weightSigma2;
end
S_nom=2*x_aug*diag([gpcf.biasSigma2 s])*x_aug';
S_den_tmp=(2*sum(repmat([gpcf.biasSigma2 s], n, 1).*x_aug.^2,2)+1);
S_den2=S_den_tmp*S_den_tmp';
C=2/pi*asin(S_nom./sqrt(S_den2));
C(abs(C)<=eps) = 0;
C = (C+C')./2;
end
function C = gpcf_neuralnetwork_trvar(gpcf, x)
%GP_NEURALNETWORK_TRVAR Evaluate training variance vector
%
% Description
% C = GP_NEURALNETWORK_TRVAR(GPCF, TX) takes in covariance
% function of a Gaussian process GPCF and matrix TX that
% contains training inputs. Returns variance vector C. Every
% element i of C contains variance of input i in TX. This is
% a mandatory subfunction used for example in prediction and
% energy computations.
%
%
% See also
% GPCF_NEURALNETWORK_COV, GP_COV, GP_TRCOV
if isfield(gpcf, 'selectedVariables')
x=x(:,gpcf.selectedVariables);
end
[n,m]=size(x);
x_aug=[ones(n,1) x];
if length(gpcf.weightSigma2) == 1
% In the case of an isotropic NEURALNETWORK
s = gpcf.weightSigma2*ones(1,m);
else
s = gpcf.weightSigma2;
end
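% On the diagonal the arcsine kernel's numerator reduces to 2*s_tmp and
% its denominator to 1 + 2*s_tmp, where s_tmp is the quadratic form of
% the augmented input, hence the simple ratio below.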
s_tmp=sum(repmat([gpcf.biasSigma2 s], n, 1).*x_aug.^2,2);
C=2/pi*asin(2*s_tmp./(1+2*s_tmp));
C(C<eps)=0;
end
function reccf = gpcf_neuralnetwork_recappend(reccf, ri, gpcf)
%RECAPPEND Record append
%
% Description
% RECCF = GPCF_NEURALNETWORK_RECAPPEND(RECCF, RI, GPCF) takes
% a covariance function record structure RECCF, record index
% RI and covariance function structure GPCF with the current
% MCMC samples of the parameters. Returns RECCF which contains
% all the old samples and the current samples from GPCF.
% This subfunction is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC and GP_MC -> RECAPPEND
if nargin == 2
% Initialize the record
reccf.type = 'gpcf_neuralnetwork';
reccf.nin = ri;
reccf.nout = 1;
% Initialize parameters
reccf.weightSigma2= [];
reccf.biasSigma2 = [];
% Set the function handles
reccf.fh.pak = @gpcf_neuralnetwork_pak;
reccf.fh.unpak = @gpcf_neuralnetwork_unpak;
reccf.fh.lp = @gpcf_neuralnetwork_lp;
reccf.fh.lpg = @gpcf_neuralnetwork_lpg;
reccf.fh.cfg = @gpcf_neuralnetwork_cfg;
reccf.fh.cov = @gpcf_neuralnetwork_cov;
reccf.fh.trcov = @gpcf_neuralnetwork_trcov;
reccf.fh.trvar = @gpcf_neuralnetwork_trvar;
reccf.fh.recappend = @gpcf_neuralnetwork_recappend;
reccf.p=[];
reccf.p.weightSigma2=[];
reccf.p.biasSigma2=[];
if ~isempty(ri.p.weightSigma2)
reccf.p.weightSigma2 = ri.p.weightSigma2;
end
if ~isempty(ri.p.biasSigma2)
reccf.p.biasSigma2 = ri.p.biasSigma2;
end
else
% Append to the record
gpp = gpcf.p;
% record weightSigma2
reccf.weightSigma2(ri,:)=gpcf.weightSigma2;
if isfield(gpp,'weightSigma2') && ~isempty(gpp.weightSigma2)
reccf.p.weightSigma2 = gpp.weightSigma2.fh.recappend(reccf.p.weightSigma2, ri, gpcf.p.weightSigma2);
end
% record biasSigma2
reccf.biasSigma2(ri,:)=gpcf.biasSigma2;
if isfield(gpp,'biasSigma2') && ~isempty(gpp.biasSigma2)
reccf.p.biasSigma2 = gpp.biasSigma2.fh.recappend(reccf.p.biasSigma2, ri, gpcf.p.biasSigma2);
end
if isfield(gpcf, 'selectedVariables')
reccf.selectedVariables = gpcf.selectedVariables;
end
end
end
| github | lcnhappe/happe-master | gpcf_ppcs1.m | .m | happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpcf_ppcs1.m | 38,503 | utf_8 | c39681a25f1ff058f0e2986776407dfe |
function gpcf = gpcf_ppcs1(varargin)
%GPCF_PPCS1 Create a piece wise polynomial (q=1) covariance function
%
% Description
% GPCF = GPCF_PPCS1('nin',nin,'PARAM1',VALUE1,'PARAM2',VALUE2,...)
% creates piece wise polynomial (q=1) covariance function
% structure in which the named parameters have the specified
% values. Any unspecified parameters are set to default values.
% Obligatory parameter is 'nin', which tells the dimension
% of input space.
%
% GPCF = GPCF_PPCS1(GPCF,'PARAM1',VALUE1,'PARAM2',VALUE2,...)
% modifies a covariance function structure with the named
% parameters altered with the specified values.
%
% Parameters for piece wise polynomial (q=1) covariance function [default]
% magnSigma2 - magnitude (squared) [0.1]
% lengthScale - length scale for each input. [1]
% This can be either a scalar, corresponding
% to an isotropic function, or a vector
% defining a separate length-scale for each
% input direction.
% l_nin - order of the polynomial [floor(nin/2) + 2]
% Has to be greater than or equal to default.
% magnSigma2_prior - prior for magnSigma2 [prior_logunif]
% lengthScale_prior - prior for lengthScale [prior_t]
% metric - metric structure used by the covariance function []
% selectedVariables - vector defining which inputs are used [all]
% selectedVariables is shorthand for using
% metric_euclidean with corresponding components
%
% Note! If the prior is 'prior_fixed' then the parameter in
% question is considered fixed and it is not handled in
% optimization, grid integration, MCMC etc.
%
% The piecewise polynomial function is the following:
%
% k_pp1(x_i, x_j) = ma2*cs^(l+1)*((l+1)*r + 1)
%
% where r = sqrt( sum_d (x_i,d - x_j,d)^2/l^2_d )
% l = l_nin (default: floor(nin/2) + 2)
% cs = max(0,1-r)
% and l_nin must be greater than or equal to gpcf.nin
%
% NOTE! Use of gpcf_ppcs1 requires that you have installed
% GPstuff with SuiteSparse.
%
% See also
% GP_SET, GPCF_*, PRIOR_*, METRIC_*
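%
% Example (an illustrative sketch; assumes GPstuff with SuiteSparse on
% the path, and x is a hypothetical n-by-2 matrix of training inputs):
% cf = gpcf_ppcs1('nin', 2, 'lengthScale', [1 0.5], 'magnSigma2', 0.2);
% K = cf.fh.trcov(cf, x); % sparse n-by-n training covariance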
% Copyright (c) 2009-2010 Jarno Vanhatalo
% Copyright (c) 2010 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
if nargin>0 && ischar(varargin{1}) && ismember(varargin{1},{'init' 'set'})
% remove init and set
varargin(1)=[];
end
ip=inputParser;
ip.FunctionName = 'GPCF_PPCS1';
ip.addOptional('gpcf', [], @isstruct);
ip.addParamValue('nin',[], @(x) isscalar(x) && x>0 && mod(x,1)==0);
ip.addParamValue('magnSigma2',0.1, @(x) isscalar(x) && x>0);
ip.addParamValue('lengthScale',1, @(x) isvector(x) && all(x>0));
ip.addParamValue('l_nin',[], @(x) isscalar(x) && x>0 && mod(x,1)==0);
ip.addParamValue('metric',[], @isstruct);
ip.addParamValue('magnSigma2_prior', prior_logunif(), ...
@(x) isstruct(x) || isempty(x));
ip.addParamValue('lengthScale_prior',prior_t(), ...
@(x) isstruct(x) || isempty(x));
ip.addParamValue('selectedVariables',[], @(x) isempty(x) || ...
(isvector(x) && all(x>0)));
ip.parse(varargin{:});
gpcf=ip.Results.gpcf;
if isempty(gpcf)
% Check that SuiteSparse is available
if ~exist('ldlchol','file')
error('SuiteSparse is not installed (or it is not in the path). gpcf_ppcs1 cannot be used!')
end
init=true;
gpcf.nin=ip.Results.nin;
if isempty(gpcf.nin)
error('nin has to be given for ppcs: gpcf_ppcs1(''nin'',NIN,...)')
end
gpcf.type = 'gpcf_ppcs1';
% cf is compactly supported
gpcf.cs = 1;
else
if ~isfield(gpcf,'type') || ~isequal(gpcf.type,'gpcf_ppcs1')
error('First argument does not seem to be a valid covariance function structure')
end
init=false;
end
if init
% Set the function handles to the subfunctions
gpcf.fh.pak = @gpcf_ppcs1_pak;
gpcf.fh.unpak = @gpcf_ppcs1_unpak;
gpcf.fh.lp = @gpcf_ppcs1_lp;
gpcf.fh.lpg = @gpcf_ppcs1_lpg;
gpcf.fh.cfg = @gpcf_ppcs1_cfg;
gpcf.fh.ginput = @gpcf_ppcs1_ginput;
gpcf.fh.cov = @gpcf_ppcs1_cov;
gpcf.fh.trcov = @gpcf_ppcs1_trcov;
gpcf.fh.trvar = @gpcf_ppcs1_trvar;
gpcf.fh.recappend = @gpcf_ppcs1_recappend;
end
% Initialize parameters
if init || ~ismember('l_nin',ip.UsingDefaults)
gpcf.l=ip.Results.l_nin;
if isempty(gpcf.l)
gpcf.l = floor(gpcf.nin/2) + 2;
end
if gpcf.l < gpcf.nin
error('The l_nin has to be greater than or equal to the number of inputs!')
end
end
if init || ~ismember('lengthScale',ip.UsingDefaults)
gpcf.lengthScale = ip.Results.lengthScale;
end
if init || ~ismember('magnSigma2',ip.UsingDefaults)
gpcf.magnSigma2 = ip.Results.magnSigma2;
end
% Initialize prior structure
if init
gpcf.p=[];
end
if init || ~ismember('lengthScale_prior',ip.UsingDefaults)
gpcf.p.lengthScale=ip.Results.lengthScale_prior;
end
if init || ~ismember('magnSigma2_prior',ip.UsingDefaults)
gpcf.p.magnSigma2=ip.Results.magnSigma2_prior;
end
%Initialize metric
if ~ismember('metric',ip.UsingDefaults)
if ~isempty(ip.Results.metric)
gpcf.metric = ip.Results.metric;
gpcf = rmfield(gpcf, 'lengthScale');
gpcf.p = rmfield(gpcf.p, 'lengthScale');
elseif isfield(gpcf,'metric')
if ~isfield(gpcf,'lengthScale')
gpcf.lengthScale = gpcf.metric.lengthScale;
end
if ~isfield(gpcf.p,'lengthScale')
gpcf.p.lengthScale = gpcf.metric.p.lengthScale;
end
gpcf = rmfield(gpcf, 'metric');
end
end
% selectedVariables options implemented using metric_euclidean
if ~ismember('selectedVariables',ip.UsingDefaults)
if ~isfield(gpcf,'metric')
if ~isempty(ip.Results.selectedVariables)
gpcf.selectedVariables = ip.Results.selectedVariables;
% gpcf.metric=metric_euclidean('components',...
% num2cell(ip.Results.selectedVariables),...
% 'lengthScale',gpcf.lengthScale,...
% 'lengthScale_prior',gpcf.p.lengthScale);
% gpcf = rmfield(gpcf, 'lengthScale');
% gpcf.p = rmfield(gpcf.p, 'lengthScale');
end
elseif isfield(gpcf,'metric')
if ~isempty(ip.Results.selectedVariables)
gpcf.metric=metric_euclidean(gpcf.metric,...
'components',...
num2cell(ip.Results.selectedVariables));
if ~ismember('lengthScale',ip.UsingDefaults)
gpcf.metric.lengthScale=ip.Results.lengthScale;
gpcf = rmfield(gpcf, 'lengthScale');
end
if ~ismember('lengthScale_prior',ip.UsingDefaults)
gpcf.metric.p.lengthScale=ip.Results.lengthScale_prior;
gpcf.p = rmfield(gpcf.p, 'lengthScale');
end
else
if ~isfield(gpcf,'lengthScale')
gpcf.lengthScale = gpcf.metric.lengthScale;
end
if ~isfield(gpcf.p,'lengthScale')
gpcf.p.lengthScale = gpcf.metric.p.lengthScale;
end
gpcf = rmfield(gpcf, 'metric');
end
end
end
end
function [w,s] = gpcf_ppcs1_pak(gpcf)
%GPCF_PPCS1_PAK Combine GP covariance function parameters into
% one vector
%
% Description
% W = GPCF_PPCS1_PAK(GPCF) takes a covariance function
% structure GPCF and combines the covariance function
% parameters and their hyperparameters into a single row
% vector W. This is a mandatory subfunction used for example
% in energy and gradient computations.
%
% w = [ log(gpcf.magnSigma2)
% (hyperparameters of gpcf.magnSigma2)
% log(gpcf.lengthScale(:))
% (hyperparameters of gpcf.lengthScale)]'
%
% See also
% GPCF_PPCS1_UNPAK
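%
% Example (sketch): pak/unpak form a round trip,
% [w, s] = gpcf.fh.pak(gpcf);
% [gpcf2, wrest] = gpcf.fh.unpak(gpcf, w); % gpcf2 equals gpcf, wrest is empty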
w = []; s = {};
if ~isempty(gpcf.p.magnSigma2)
w = [w log(gpcf.magnSigma2)];
s = [s; 'log(ppcs1.magnSigma2)'];
% Hyperparameters of magnSigma2
[wh sh] = gpcf.p.magnSigma2.fh.pak(gpcf.p.magnSigma2);
w = [w wh];
s = [s; sh];
end
if isfield(gpcf,'metric')
[wh sh]=gpcf.metric.fh.pak(gpcf.metric);
w = [w wh];
s = [s; sh];
else
if ~isempty(gpcf.p.lengthScale)
w = [w log(gpcf.lengthScale)];
if numel(gpcf.lengthScale)>1
s = [s; sprintf('log(ppcs1.lengthScale x %d)',numel(gpcf.lengthScale))];
else
s = [s; 'log(ppcs1.lengthScale)'];
end
% Hyperparameters of lengthScale
[wh sh] = gpcf.p.lengthScale.fh.pak(gpcf.p.lengthScale);
w = [w wh];
s = [s; sh];
end
end
end
function [gpcf, w] = gpcf_ppcs1_unpak(gpcf, w)
%GPCF_PPCS1_UNPAK Sets the covariance function parameters into
% the structure
%
% Description
% [GPCF, W] = GPCF_PPCS1_UNPAK(GPCF, W) takes a covariance
% function structure GPCF and a hyper-parameter vector W,
% and returns a covariance function structure identical
% to the input, except that the covariance hyper-parameters
% have been set to the values in W. Deletes the values set to
% GPCF from W and returns the modified W. This is a mandatory
% subfunction used for example in energy and gradient computations.
%
% Assignment is inverse of
% w = [ log(gpcf.magnSigma2)
% (hyperparameters of gpcf.magnSigma2)
% log(gpcf.lengthScale(:))
% (hyperparameters of gpcf.lengthScale)]'
%
% See also
% GPCF_PPCS1_PAK
gpp=gpcf.p;
if ~isempty(gpp.magnSigma2)
gpcf.magnSigma2 = exp(w(1));
w = w(2:end);
% Hyperparameters of magnSigma2
[p, w] = gpcf.p.magnSigma2.fh.unpak(gpcf.p.magnSigma2, w);
gpcf.p.magnSigma2 = p;
end
if isfield(gpcf,'metric')
[metric, w] = gpcf.metric.fh.unpak(gpcf.metric, w);
gpcf.metric = metric;
else
if ~isempty(gpp.lengthScale)
i1=1;
i2=length(gpcf.lengthScale);
gpcf.lengthScale = exp(w(i1:i2));
w = w(i2+1:end);
% Hyperparameters of lengthScale
[p, w] = gpcf.p.lengthScale.fh.unpak(gpcf.p.lengthScale, w);
gpcf.p.lengthScale = p;
end
end
end
function lp = gpcf_ppcs1_lp(gpcf)
%GPCF_PPCS1_LP Evaluate the log prior of covariance function parameters
%
% Description
% LP = GPCF_PPCS1_LP(GPCF) takes a covariance function
% structure GPCF and returns log(p(th)), where th collects the
% parameters. This is a mandatory subfunction used for example
% in energy computations.
%
% See also
% GPCF_PPCS1_PAK, GPCF_PPCS1_UNPAK, GPCF_PPCS1_LPG, GP_E
% Evaluate the prior contribution to the error. The parameters that
% are sampled are transformed, e.g., W = log(w) where w are all
% the "real" samples. On the other hand, errors are evaluated in
% the W-space, so we also need to take into account the Jacobian of
% the transformation, e.g., W -> w = exp(W). See Gelman et al., 2004,
% Bayesian Data Analysis, second edition, p. 24.
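% Concretely, if W = log(w) then p_W(W) = p_w(exp(W))*exp(W), so the log
% prior picks up an additive log(w) term; this is where the
% log(magnSigma2) and sum(log(lengthScale)) terms below come from.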
lp = 0;
gpp=gpcf.p;
if ~isempty(gpcf.p.magnSigma2)
lp = lp +gpp.magnSigma2.fh.lp(gpcf.magnSigma2, ...
gpp.magnSigma2) +log(gpcf.magnSigma2);
end
if isfield(gpcf,'metric')
lp = lp +gpcf.metric.fh.lp(gpcf.metric);
elseif ~isempty(gpp.lengthScale)
lp = lp +gpp.lengthScale.fh.lp(gpcf.lengthScale, ...
gpp.lengthScale) +sum(log(gpcf.lengthScale));
end
end
function lpg = gpcf_ppcs1_lpg(gpcf)
%GPCF_PPCS1_LPG Evaluate gradient of the log prior with respect
% to the parameters.
%
% Description
% LPG = GPCF_PPCS1_LPG(GPCF) takes a covariance function
% structure GPCF and returns LPG = d log (p(th))/dth, where th
% is the vector of parameters. This is a mandatory subfunction
% used for example in gradient computations.
%
% See also
% GPCF_PPCS1_PAK, GPCF_PPCS1_UNPAK, GPCF_PPCS1_LP, GP_G
lpg = [];
gpp=gpcf.p;
if ~isempty(gpcf.p.magnSigma2)
lpgs = gpp.magnSigma2.fh.lpg(gpcf.magnSigma2, gpp.magnSigma2);
lpg = [lpg lpgs(1).*gpcf.magnSigma2+1 lpgs(2:end)];
end
if isfield(gpcf,'metric')
lpg_dist = gpcf.metric.fh.lpg(gpcf.metric);
lpg=[lpg lpg_dist];
else
if ~isempty(gpcf.p.lengthScale)
lll = length(gpcf.lengthScale);
lpgs = gpp.lengthScale.fh.lpg(gpcf.lengthScale, gpp.lengthScale);
lpg = [lpg lpgs(1:lll).*gpcf.lengthScale+1 lpgs(lll+1:end)];
end
end
end
function DKff = gpcf_ppcs1_cfg(gpcf, x, x2, mask, i1)
%GPCF_PPCS1_CFG Evaluate gradient of covariance function
% with respect to the parameters
%
% Description
% DKff = GPCF_PPCS1_CFG(GPCF, X) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X) with
% respect to th (cell array with matrix elements). This is a
% mandatory subfunction used in gradient computations.
%
% DKff = GPCF_PPCS1_CFG(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_PPCS1_CFG(GPCF, X, [], MASK) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the diagonal of gradients of covariance matrix
% Kff = k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_PPCS1_CFG(GPCF, X, X2, [], i) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to the ith hyperparameter. This
% subfunction is needed when using the memory save option in gp_set.
%
% See also
% GPCF_PPCS1_PAK, GPCF_PPCS1_UNPAK, GPCF_PPCS1_LP, GP_G
gpp=gpcf.p;
i2=1;
DKff = {};
gprior = [];
if nargin==5
% Use memory save option
savememory=1;
if i1==0
% Return number of hyperparameters
i=0;
if ~isempty(gpcf.p.magnSigma2)
i=i+1;
end
if ~isempty(gpcf.p.lengthScale)
i=i+length(gpcf.lengthScale);
end
DKff=i;
return
end
else
savememory=0;
end
% Evaluate: DKff{1} = d Kff / d magnSigma2
% DKff{2} = d Kff / d lengthScale
% NOTE! Here we have already taken into account that the parameters
% are transformed through log() and thus dK/dlog(p) = p * dK/dp
% evaluate the gradient for training covariance
if nargin == 2 || (isempty(x2) && isempty(mask))
Cdm = gpcf_ppcs1_trcov(gpcf, x);
ii1=0;
if ~isempty(gpcf.p.magnSigma2)
ii1 = ii1 +1;
DKff{ii1} = Cdm;
end
l = gpcf.l;
[I,J] = find(Cdm);
if isfield(gpcf,'metric')
% Compute the sparse distance matrix and its gradient.
[n, m] =size(x);
ntriplets = (nnz(Cdm)-n)./2;
I = zeros(ntriplets,1);
J = zeros(ntriplets,1);
dist = zeros(ntriplets,1);
for jj = 1:length(gpcf.metric.components)
gdist{jj} = zeros(ntriplets,1);
end
ntriplets = 0;
for ii=1:n-1
col_ind = ii + find(Cdm(ii+1:n,ii));
d = gpcf.metric.fh.dist(gpcf.metric, x(col_ind,:), x(ii,:));
gd = gpcf.metric.fh.distg(gpcf.metric, x(col_ind,:), x(ii,:));
ntrip_prev = ntriplets;
ntriplets = ntriplets + length(d);
ind_tr = ntrip_prev+1:ntriplets;
I(ind_tr) = col_ind;
J(ind_tr) = ii;
dist(ind_tr) = d;
for jj = 1:length(gd)
gdist{jj}(ind_tr) = gd{jj};
end
end
ma2 = gpcf.magnSigma2;
cs = 1-dist;
const1 = l+1;
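% Dd below is ma2 times dk/dr for k(r) = ma2*cs^(l+1)*((l+1)*r + 1)
% with cs = 1-r (product rule over the two factors)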
Dd = -(l+1).*cs.^l.*(const1.*dist +1 );
Dd = Dd + cs.^(l+1).*const1;
Dd = ma2.*Dd;
for i=1:length(gdist)
ii1 = ii1+1;
D = Dd.*gdist{i};
D = sparse(I,J,D,n,n);
DKff{ii1} = D + D';
end
else
if isfield(gpcf, 'selectedVariables')
x = x(:,gpcf.selectedVariables);
end
[n, m] =size(x);
if ~savememory
i1=1:m;
else
if i1==1
DKff=DKff{1};
return
end
i1=i1-1;
ii1=ii1-1;
end
if ~isempty(gpcf.p.lengthScale)
% loop over all the lengthScales
if length(gpcf.lengthScale) == 1
% In the case of isotropic PPCS1
s2 = 1./gpcf.lengthScale.^2;
ma2 = gpcf.magnSigma2;
% Calculate the sparse distance (lower triangle) matrix
d2 = 0;
for i = 1:m
d2 = d2 + s2.*(x(I,i) - x(J,i)).^2;
end
d = sqrt(d2);
% Create the 'compact support' matrix, that is, (1-R)_+,
% where ()_+ truncates all non-positive inputs to zero.
cs = 1-d;
% Calculate the gradient matrix
const1 = l+1;
D = -(l+1).*cs.^l.*(const1.*d +1 );
D = D + cs.^(l+1).*const1;
D = -d.*ma2.*D;
D = sparse(I,J,D,n,n);
ii1 = ii1+1;
DKff{ii1} = D;
else
% In the case ARD is used
s2 = 1./gpcf.lengthScale.^2;
ma2 = gpcf.magnSigma2;
% Calculate the sparse distance (lower triangle) matrix
% and the distance matrix for each component
d2 = 0;
d_l2 = [];
for i = 1:m
d_l2(:,i) = s2(i).*(x(I,i) - x(J,i)).^2;
d2 = d2 + d_l2(:,i);
end
d = sqrt(d2);
d_l = d_l2;
% Create the 'compact support' matrix, that is, (1-R)_+,
% where ()_+ truncates all non-positive inputs to zero.
cs = 1-d;
const1 = l+1;
Dd = -(l+1).*cs.^l.*(const1.*d +1 );
Dd = Dd + cs.^(l+1).*const1;
Dd = -ma2.*Dd;
int = d ~= 0;
for i = i1
% Calculate the gradient matrix
D = d_l(:,i).*Dd;
% Divide by r in cases where r is non-zero
D(int) = D(int)./d(int);
D = sparse(I,J,D,n,n);
ii1 = ii1+1;
DKff{ii1} = D;
end
end
end
end
% Evaluate the gradient of non-symmetric covariance (e.g. K_fu)
elseif nargin == 3 || isempty(mask)
if size(x,2) ~= size(x2,2)
error('gpcf_ppcs -> _ghyper: The number of columns in x and x2 has to be the same. ')
end
ii1=0;
K = gpcf.fh.cov(gpcf, x, x2);
if ~isempty(gpcf.p.magnSigma2)
ii1 = ii1 +1;
DKff{ii1} = K;
end
l = gpcf.l;
if isfield(gpcf,'metric')
% If other than scaled euclidean metric
[n1,m1]=size(x);
[n2,m2]=size(x2);
ma = gpcf.magnSigma2;
% Compute the sparse distance matrix.
ntriplets = nnz(K);
I = zeros(ntriplets,1);
J = zeros(ntriplets,1);
R = zeros(ntriplets,1);
dist = zeros(ntriplets,1);
for jj = 1:length(gpcf.metric.components)
gdist{jj} = zeros(ntriplets,1);
end
ntriplets = 0;
for ii=1:n2
d = zeros(n1,1);
d = gpcf.metric.fh.dist(gpcf.metric, x, x2(ii,:));
gd = gpcf.metric.fh.distg(gpcf.metric, x, x2(ii,:));
gprior_dist = gpcf.metric.fh.lpg(gpcf.metric, x, x2(ii,:));
I0t = find(d==0);
d(d >= 1) = 0;
[I2,J2,R2] = find(d);
len = length(R);
ntrip_prev = ntriplets;
ntriplets = ntriplets + length(R2);
ind_tr = ntrip_prev+1:ntriplets;
I(ind_tr) = I2;
J(ind_tr) = ii;
dist(ind_tr) = R2;
for jj = 1:length(gd)
gdist{jj}(ind_tr) = gd{jj}(I2);
end
end
ma2 = gpcf.magnSigma2;
cs = 1-dist;
const1 = l+1;
Dd = -(l+1).*cs.^l.*(const1.*dist +1 );
Dd = Dd + cs.^(l+1).*const1;
Dd = ma2.*Dd;
for i=1:length(gdist)
ii1 = ii1+1;
D = Dd.*gdist{i};
D = sparse(I,J,D,n1,n2);
DKff{ii1} = D;
end
else
if isfield(gpcf, 'selectedVariables')
x = x(:,gpcf.selectedVariables);
x2 = x2(:,gpcf.selectedVariables);
end
[n, m] =size(x);
if ~savememory
i1=1:m;
else
if i1==1
DKff=DKff{1};
return
end
i1=i1-1;
ii1=ii1-1;
end
if ~isempty(gpcf.p.lengthScale)
% loop over all the lengthScales
if length(gpcf.lengthScale) == 1
% In the case of isotropic PPCS1
s2 = 1./gpcf.lengthScale.^2;
ma2 = gpcf.magnSigma2;
% Calculate the sparse distance (lower triangle) matrix
dist1 = 0;
for i=1:m
dist1 = dist1 + s2.*(bsxfun(@minus,x(:,i),x2(:,i)')).^2;
end
d1 = sqrt(dist1);
cs1 = max(1-d1,0);
const1 = l+1;
DK_l = -(l+1).*cs1.^l.*(const1.*d1 +1 );
DK_l = DK_l + cs1.^(l+1).*const1;
DK_l = -d1.*ma2.*DK_l;
ii1=ii1+1;
DKff{ii1} = DK_l;
else
% In the case ARD is used
s2 = 1./gpcf.lengthScale.^2;
ma2 = gpcf.magnSigma2;
% Calculate the sparse distance (lower triangle) matrix
% and the distance matrix for each component
dist1 = 0;
d_l1 = [];
for i = 1:m
dist1 = dist1 + s2(i).*bsxfun(@minus,x(:,i),x2(:,i)').^2;
d_l1{i} = s2(i).*(bsxfun(@minus,x(:,i),x2(:,i)')).^2;
end
d1 = sqrt(dist1);
cs1 = max(1-d1,0);
const1 = l+1;
D = -(l+1).*cs1.^l.*(const1.*d1 +1 );
D = D + cs1.^(l+1).*const1;
D = ma2.*D;
for i = i1
% Calculate the gradient matrix
DK_l = -D.*d_l1{i};
% Divide by r in cases where r is non-zero
DK_l(d1 ~= 0) = DK_l(d1 ~= 0)./d1(d1 ~= 0);
ii1=ii1+1;
DKff{ii1} = DK_l;
end
end
end
end
% Evaluate: DKff{1} = d mask(Kff,I) / d magnSigma2
% DKff{2...} = d mask(Kff,I) / d lengthScale
elseif nargin == 4 || nargin == 5
ii1=0;
[n, m] =size(x);
if ~isempty(gpcf.p.magnSigma2) && (~savememory || all(i1==1))
ii1 = ii1+1;
DKff{ii1} = gpcf.fh.trvar(gpcf, x); % d mask(Kff,I) / d magnSigma2
end
if isfield(gpcf,'metric')
dist = 0;
distg = gpcf.metric.fh.distg(gpcf.metric, x, [], 1);
gprior_dist = gpcf.metric.fh.lpg(gpcf.metric);
for i=1:length(distg)
ii1 = ii1+1;
DKff{ii1} = 0;
end
else
if ~isempty(gpcf.p.lengthScale)
for i2=1:length(gpcf.lengthScale)
ii1 = ii1+1;
DKff{ii1} = 0; % d mask(Kff,I) / d lengthScale
end
end
end
end
if savememory
DKff=DKff{1};
end
end
function DKff = gpcf_ppcs1_ginput(gpcf, x, x2, i1)
%GPCF_PPCS1_GINPUT Evaluate gradient of covariance function with
% respect to x
%
% Description
% DKff = GPCF_PPCS1_GINPUT(GPCF, X) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_PPCS1_GINPUT(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_PPCS1_GINPUT(GPCF, X, X2, i) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2), or k(X,X) if X2 is empty with respect to ith
% covariate in X. This subfunction is needed when using memory
% save option in gp_set.
%
% See also
% GPCF_PPCS1_PAK, GPCF_PPCS1_UNPAK, GPCF_PPCS1_LP, GP_G
i2=1;
DKff = {};
gprior = [];
[n,m]=size(x);
if nargin<4
i1=1:m;
else
% Use memory save option
if i1==0
% Return number of covariates
if isfield(gpcf,'selectedVariables')
DKff=length(gpcf.selectedVariables);
else
DKff=m;
end
return
end
end
% evaluate the gradient for training covariance
if nargin == 2 || isempty(x2)
K = gpcf_ppcs1_trcov(gpcf, x);
ii1=0;
l = gpcf.l;
[I,J] = find(K);
if isfield(gpcf,'metric')
% Compute the sparse distance matrix and its gradient.
ntriplets = (nnz(K)-n)./2;
I = zeros(ntriplets,1);
J = zeros(ntriplets,1);
dist = zeros(ntriplets,1);
for jj = 1:length(gpcf.metric.components)
gdist{jj} = zeros(ntriplets,1);
end
ntriplets = 0;
for ii=1:n-1
col_ind = ii + find(K(ii+1:n,ii));
d = zeros(length(col_ind),1);
d = gpcf.metric.fh.dist(gpcf.metric, x(col_ind,:), x(ii,:));
[gd, gprior_dist] = gpcf.metric.fh.ginput(gpcf.metric, x(col_ind,:), x(ii,:));
ntrip_prev = ntriplets;
ntriplets = ntriplets + length(d);
ind_tr = ntrip_prev+1:ntriplets;
I(ind_tr) = col_ind;
J(ind_tr) = ii;
dist(ind_tr) = d;
for jj = 1:length(gd)
gdist{jj}(ind_tr) = gd{jj};
end
end
ma2 = gpcf.magnSigma2;
cs = 1-dist;
const1 = l+1;
Dd = -(l+1).*cs.^l.*(const1.*dist +1 );
Dd = Dd + cs.^(l+1).*const1;
Dd = ma2.*Dd;
for i=1:length(gdist)
ii1 = ii1+1;
D = Dd.*gdist{i};
D = sparse(I,J,D,n,n);
DKff{ii1} = D + D';
end
else
if length(gpcf.lengthScale) == 1
% In the case of an isotropic PPCS1
s2 = repmat(1./gpcf.lengthScale.^2, 1, m);
else
s2 = 1./gpcf.lengthScale.^2;
end
ma2 = gpcf.magnSigma2;
% Calculate the sparse distance (lower triangle) matrix
% and the distance matrix for each component
d2 = 0;
for i = 1:m
d2 = d2 + s2(i).*(x(I,i) - x(J,i)).^2;
end
d = sqrt(d2);
% Create the 'compact support' matrix, that is, (1-R)_+,
% where ()_+ truncates all non-positive inputs to zero.
cs = 1-d;
Dd = -(l+1).*cs.^l.*( (l+1).*d +1 );
Dd = Dd + cs.^(l+1).*(l+1);
Dd = sparse(I,J,ma2.*Dd,n,n);
d = sparse(I,J,d,n,n);
row = ones(n,1);
cols = 1:n;
for i = i1
for j = 1:n
% Calculate the gradient matrix
ind = find(d(:,j));
apu = full(Dd(:,j)).*s2(i).*(x(j,i)-x(:,i));
apu(ind) = apu(ind)./d(ind,j);
D = sparse(row*j, cols, apu, n, n);
D = D+D';
ii1 = ii1+1;
DKff{ii1} = D;
end
end
end
% Evaluate the gradient of non-symmetric covariance (e.g. K_fu)
elseif nargin == 3 || nargin == 4
if size(x,2) ~= size(x2,2)
error('gpcf_ppcs -> _ghyper: The number of columns in x and x2 has to be the same. ')
end
ii1=0;
K = gpcf.fh.cov(gpcf, x, x2);
n2 = size(x2,1);
l = gpcf.l;
if isfield(gpcf,'metric')
% If other than scaled euclidean metric
[n1,m1]=size(x);
[n2,m2]=size(x2);
ma = gpcf.magnSigma2;
% Compute the sparse distance matrix.
ntriplets = nnz(K);
I = zeros(ntriplets,1);
J = zeros(ntriplets,1);
R = zeros(ntriplets,1);
dist = zeros(ntriplets,1);
for jj = 1:length(gpcf.metric.components)
gdist{jj} = zeros(ntriplets,1);
end
ntriplets = 0;
for ii=1:n2
d = zeros(n1,1);
d = gpcf.metric.fh.dist(gpcf.metric, x, x2(ii,:));
[gd, gprior_dist] = gpcf.metric.fh.ginput(gpcf.metric, x, x2(ii,:));
I0t = find(d==0);
d(d >= 1) = 0;
[I2,J2,R2] = find(d);
len = length(R);
ntrip_prev = ntriplets;
ntriplets = ntriplets + length(R2);
ind_tr = ntrip_prev+1:ntriplets;
I(ind_tr) = I2;
J(ind_tr) = ii;
dist(ind_tr) = R2;
for jj = 1:length(gd)
gdist{jj}(ind_tr) = gd{jj}(I2);
end
end
ma2 = gpcf.magnSigma2;
cs = 1-dist;
const1 = l+1;
Dd = -(l+1).*cs.^l.*(const1.*dist +1 );
Dd = Dd + cs.^(l+1).*const1;
Dd = ma2.*Dd;
for i=1:length(gdist)
ii1 = ii1+1;
D = Dd.*gdist{i};
D = sparse(I,J,D,n1,n2);
DKff{ii1} = D;
end
else
if length(gpcf.lengthScale) == 1
% In the case of an isotropic PPCS1
s2 = repmat(1./gpcf.lengthScale.^2, 1, m);
else
s2 = 1./gpcf.lengthScale.^2;
end
ma2 = gpcf.magnSigma2;
% Calculate the sparse distance (lower triangle) matrix
% and the distance matrix for each component
dist1 = 0;
for i = 1:m
dist1 = dist1 + s2(i).*bsxfun(@minus,x(:,i),x2(:,i)').^2;
end
d = sqrt(dist1);
cs1 = max(1-d,0);
const1 = l+1;
Dd = -(l+1).*cs1.^l.*(const1.*d +1 );
Dd = Dd + cs1.^(l+1).*const1;
Dd = ma2.*Dd;
row = ones(n2,1);
cols = 1:n2;
for i = i1
for j = 1:n
% Calculate the gradient matrix
ind = find(d(j,:));
apu = Dd(j,:).*s2(i).*(x(j,i)-x2(:,i))';
apu(ind) = apu(ind)./d(j,ind);
D = sparse(row*j, cols, apu, n, n2);
ii1 = ii1+1;
DKff{ii1} = D;
end
end
end
end
end
function C = gpcf_ppcs1_cov(gpcf, x1, x2, varargin)
%GP_PPCS1_COV Evaluate covariance matrix between two input vectors
%
% Description
% C = GP_PPCS1_COV(GP, TX, X) takes in covariance function of
% a Gaussian process GP and two matrixes TX and X that contain
% input vectors to GP. Returns covariance matrix C. Every
% element ij of C contains covariance between inputs i in TX
% and j in X. This is a mandatory subfunction used for example in
% prediction and energy computations.
%
% See also
% GPCF_PPCS1_TRCOV, GPCF_PPCS1_TRVAR, GP_COV, GP_TRCOV
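%
% A dense reference implementation of the same kernel (an illustrative
% sketch only; it assumes an ARD lengthScale with one entry per input
% column, whereas the code below builds C sparsely to exploit the
% compact support):
% r2 = 0;
% for j = 1:size(x1,2)
% r2 = r2 + bsxfun(@minus, x1(:,j), x2(:,j)').^2 ./ gpcf.lengthScale(j)^2;
% end
% r = sqrt(r2); cs = max(1 - r, 0);
% Cdense = gpcf.magnSigma2 .* cs.^(gpcf.l+1) .* ((gpcf.l+1).*r + 1);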
if isfield(gpcf,'metric')
% If other than scaled euclidean metric
[n1,m1]=size(x1);
[n2,m2]=size(x2);
else
% If scaled euclidean metric
if isfield(gpcf, 'selectedVariables')
x1 = x1(:,gpcf.selectedVariables);
x2 = x2(:,gpcf.selectedVariables);
end
[n1,m1]=size(x1);
[n2,m2]=size(x2);
s = 1./(gpcf.lengthScale);
s2 = s.^2;
if isscalar(s)
s2 = repmat(s2,1,m1);
end
end
ma2 = gpcf.magnSigma2;
l = gpcf.l;
% Compute the sparse distance matrix.
ntriplets = max(1,floor(0.03*n1*n2));
I = zeros(ntriplets,1);
J = zeros(ntriplets,1);
R = zeros(ntriplets,1);
ntriplets = 0;
I0=zeros(ntriplets,1);
J0=zeros(ntriplets,1);
nn0=0;
for ii1=1:n2
d = zeros(n1,1);
if isfield(gpcf, 'metric')
d = gpcf.metric.fh.dist(gpcf.metric, x1, x2(ii1,:));
else
for j=1:m1
d = d + s2(j).*(x1(:,j)-x2(ii1,j)).^2;
end
end
%d = sqrt(d);
I0t = find(d==0);
d(d >= 1) = 0;
[I2,J2,R2] = find(d);
R2=sqrt(R2);
%len = length(R);
ntrip_prev = ntriplets;
ntriplets = ntriplets + length(R2);
I(ntrip_prev+1:ntriplets) = I2;
J(ntrip_prev+1:ntriplets) = ii1;
R(ntrip_prev+1:ntriplets) = R2;
I0(nn0+1:nn0+length(I0t)) = I0t;
J0(nn0+1:nn0+length(I0t)) = ii1;
nn0 = nn0+length(I0t);
end
r = sparse(I(1:ntriplets),J(1:ntriplets),R(1:ntriplets));
[I,J,r] = find(r);
cs = max(0, 1-r);
const1 = l+1;
C = ma2.*cs.^(l+1).*(const1.*r + 1);
C = sparse(I,J,C,n1,n2) + sparse(I0,J0,ma2,n1,n2);
end
function C = gpcf_ppcs1_trcov(gpcf, x)
%GP_PPCS1_TRCOV Evaluate training covariance matrix of inputs
%
% Description
% C = GP_PPCS1_TRCOV(GP, TX) takes in covariance function of a
% Gaussian process GP and matrix TX that contains training
% input vectors. Returns covariance matrix C. Every element ij
% of C contains covariance between inputs i and j in TX. This
% is a mandatory subfunction used for example in prediction and
% energy computations.
%
% See also
% GPCF_PPCS1_COV, GPCF_PPCS1_TRVAR, GP_COV, GP_TRCOV
if isfield(gpcf,'metric')
% If other than scaled euclidean metric
[n, m] =size(x);
else
% If a scaled euclidean metric try first mex-implementation
% and if there is not such...
C = trcov(gpcf,x);
% ... evaluate the covariance here.
if isnan(C)
if isfield(gpcf,'selectedVariables')
x = x(:,gpcf.selectedVariables);
end
[n, m] =size(x);
s = 1./(gpcf.lengthScale);
s2 = s.^2;
if isscalar(s)
s2 = repmat(s2,1,m);
end
else
return
end
end
ma2 = gpcf.magnSigma2;
l = gpcf.l;
% Compute the sparse distance matrix.
ntriplets = max(1,floor(0.03*n*n));
I = zeros(ntriplets,1);
J = zeros(ntriplets,1);
R = zeros(ntriplets,1);
ntriplets = 0;
ntripletsz = max(1,floor(0.03.^2*n*n));
Iz = zeros(ntripletsz,1);
Jz = zeros(ntripletsz,1);
ntripletsz = 0;
for ii1=1:n-1
d = zeros(n-ii1,1);
col_ind = ii1+1:n;
if isfield(gpcf, 'metric')
d = gpcf.metric.fh.dist(gpcf.metric, x(col_ind,:), x(ii1,:));
else
for ii2=1:m
d = d+s2(ii2).*(x(col_ind,ii2)-x(ii1,ii2)).^2;
end
end
%d = sqrt(d);
% store zero distance index
[I2z,J2z] = find(d==0);
% create triplets for distances 0<d<1
d(d >= 1) = 0;
[I2,J2,R2] = find(d);
len = length(R);
ntrip_prev = ntriplets;
ntriplets = ntriplets + length(R2);
if (ntriplets > len)
I(2*len) = 0;
J(2*len) = 0;
R(2*len) = 0;
end
ind_tr = ntrip_prev+1:ntriplets;
I(ind_tr) = ii1+I2;
J(ind_tr) = ii1;
R(ind_tr) = sqrt(R2);
% create triplets for distances d==0 (i~=j)
lenz = length(Iz);
ntrip_prevz = ntripletsz;
ntripletsz = ntripletsz + length(I2z);
if (ntripletsz > lenz)
Iz(2*lenz) = 0;
Jz(2*lenz) = 0;
end
ind_trz = ntrip_prevz+1:ntripletsz;
Iz(ind_trz) = ii1+I2z;
Jz(ind_trz) = ii1;
end
% create a lower triangular sparse distance matrix from the triplets for distances 0<d<1
R = sparse(I(1:ntriplets),J(1:ntriplets),R(1:ntriplets),n,n);
% create a lower triangular sparse covariance matrix from the
% triplets for distances d==0 (i~=j)
Rz = sparse(Iz(1:ntripletsz),Jz(1:ntripletsz),repmat(ma2,1,ntripletsz),n,n);
% Find the non-zero elements of R.
[I,J,rn] = find(R);
% Compute covariances for distances 0<d<1
const1 = l+1;
cs = max(0,1-rn);
C = ma2.*cs.^(l+1).*(const1.*rn + 1);
% create a lower triangular sparse covariance matrix from the triplets for distances 0<d<1
C = sparse(I,J,C,n,n);
% add the lower triangular covariance matrix for distances d==0 (i~=j)
C = C + Rz;
% form a square covariance matrix and add the covariance matrix for i==j (d==0)
C = C + C' + sparse(1:n,1:n,ma2,n,n);
end
function C = gpcf_ppcs1_trvar(gpcf, x)
%GP_PPCS1_TRVAR Evaluate training variance vector
%
% Description
% C = GP_PPCS1_TRVAR(GPCF, TX) takes in covariance function of
% a Gaussian process GPCF and matrix TX that contains training
% inputs. Returns variance vector C. Every element i of C
% contains variance of input i in TX. This is a mandatory
% subfunction used for example in prediction and energy computations.
%
% See also
% GPCF_PPCS1_COV, GP_COV, GP_TRCOV
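% At zero distance r = 0, so cs = 1 and the polynomial factor equals 1;
% every training variance is therefore simply magnSigma2.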
[n, m] =size(x);
C = ones(n,1).*gpcf.magnSigma2;
C(C<eps)=0;
end
function reccf = gpcf_ppcs1_recappend(reccf, ri, gpcf)
%RECAPPEND Record append
%
% Description
% RECCF = GPCF_PPCS1_RECAPPEND(RECCF, RI, GPCF) takes a
% covariance function record structure RECCF, record index RI
% and covariance function structure GPCF with the current MCMC
% samples of the parameters. Returns RECCF which contains all
% the old samples and the current samples from GPCF. This
% subfunction is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC and GP_MC -> RECAPPEND
if nargin == 2
% Initialize the record
reccf.type = 'gpcf_ppcs1';
reccf.nin = ri.nin;
reccf.l = floor(reccf.nin/2)+2;
% cf is compactly supported
reccf.cs = 1;
% Initialize parameters
reccf.lengthScale= [];
reccf.magnSigma2 = [];
% Set the function handles
reccf.fh.pak = @gpcf_ppcs1_pak;
reccf.fh.unpak = @gpcf_ppcs1_unpak;
reccf.fh.lp = @gpcf_ppcs1_lp;
reccf.fh.lpg = @gpcf_ppcs1_lpg;
reccf.fh.cfg = @gpcf_ppcs1_cfg;
reccf.fh.cov = @gpcf_ppcs1_cov;
reccf.fh.trcov = @gpcf_ppcs1_trcov;
reccf.fh.trvar = @gpcf_ppcs1_trvar;
reccf.fh.recappend = @gpcf_ppcs1_recappend;
reccf.p=[];
reccf.p.lengthScale=[];
reccf.p.magnSigma2=[];
if isfield(ri.p,'lengthScale') && ~isempty(ri.p.lengthScale)
reccf.p.lengthScale = ri.p.lengthScale;
end
if ~isempty(ri.p.magnSigma2)
reccf.p.magnSigma2 = ri.p.magnSigma2;
end
if isfield(ri, 'selectedVariables')
reccf.selectedVariables = ri.selectedVariables;
end
else
% Append to the record
gpp = gpcf.p;
if ~isfield(gpcf,'metric')
% record lengthScale
reccf.lengthScale(ri,:)=gpcf.lengthScale;
if isfield(gpp,'lengthScale') && ~isempty(gpp.lengthScale)
reccf.p.lengthScale = gpp.lengthScale.fh.recappend(reccf.p.lengthScale, ri, gpcf.p.lengthScale);
end
end
% record magnSigma2
reccf.magnSigma2(ri,:)=gpcf.magnSigma2;
if isfield(gpp,'magnSigma2') && ~isempty(gpp.magnSigma2)
reccf.p.magnSigma2 = gpp.magnSigma2.fh.recappend(reccf.p.magnSigma2, ri, gpcf.p.magnSigma2);
end
end
end
| github | lcnhappe/happe-master | gpcf_ppcs3.m | .m | happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpcf_ppcs3.m | 40,551 | utf_8 | ec0d6b23865d0e1dc82e7a575159c18d |
function gpcf = gpcf_ppcs3(varargin)
%GPCF_PPCS3 Create a piece wise polynomial (q=3) covariance function
%
% Description
% GPCF = GPCF_PPCS3('nin',nin,'PARAM1',VALUE1,'PARAM2',VALUE2,...)
% creates piece wise polynomial (q=3) covariance function
% structure in which the named parameters have the specified
% values. Any unspecified parameters are set to default values.
% Obligatory parameter is 'nin', which tells the dimension
% of input space.
%
% GPCF = GPCF_PPCS3(GPCF,'PARAM1',VALUE1,'PARAM2',VALUE2,...)
% modifies a covariance function structure with the named
% parameters altered with the specified values.
%
% Parameters for piece wise polynomial (q=3) covariance function [default]
% magnSigma2 - magnitude (squared) [0.1]
% lengthScale - length scale for each input. [1]
% This can be either a scalar, corresponding
% to an isotropic function, or a vector
% defining a separate length-scale for each
% input direction.
% l_nin - order of the polynomial [floor(nin/2) + 4]
% Has to be greater than or equal to default.
% magnSigma2_prior - prior for magnSigma2 [prior_logunif]
% lengthScale_prior - prior for lengthScale [prior_t]
% metric - metric structure used by the covariance function []
% selectedVariables - vector defining which inputs are used [all]
% selectedVariables is shorthand for using
% metric_euclidean with corresponding components
%
% Note! If the prior is 'prior_fixed' then the parameter in
% question is considered fixed and it is not handled in
% optimization, grid integration, MCMC etc.
%
% The piecewise polynomial function is the following:
%
% k(x_i, x_j) = ma2*cs^(l+3)*((l^3 + 9*l^2 + 23*l + 15)*r^3 +
% (6*l^2 + 36*l + 45)*r^2 + (15*l + 45)*r + 15)/15
%
% where r = sqrt( sum_d (x_i,d - x_j,d)^2/l^2_d )
% l = l_nin (default: floor(nin/2) + 4)
% cs = max(0,1-r)
% and l_nin must be greater than or equal to gpcf.nin
%
% NOTE! Use of gpcf_ppcs3 requires that you have installed
% GPstuff with SuiteSparse.
%
% See also
% GP_SET, GPCF_*, PRIOR_*, METRIC_*
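%
% Example (an illustrative sketch; assumes GPstuff with SuiteSparse on
% the path):
% cf = gpcf_ppcs3('nin', 3, 'lengthScale', 1); % isotropic, l = floor(3/2)+4 = 5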
% Copyright (c) 2009-2010 Jarno Vanhatalo
% Copyright (c) 2010 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
if nargin>0 && ischar(varargin{1}) && ismember(varargin{1},{'init' 'set'})
% remove init and set
varargin(1)=[];
end
ip=inputParser;
ip.FunctionName = 'GPCF_PPCS3';
ip.addOptional('gpcf', [], @isstruct);
ip.addParamValue('nin',[], @(x) isscalar(x) && x>0 && mod(x,1)==0);
ip.addParamValue('magnSigma2',0.1, @(x) isscalar(x) && x>0);
ip.addParamValue('lengthScale',1, @(x) isvector(x) && all(x>0));
ip.addParamValue('l_nin',[], @(x) isscalar(x) && x>0 && mod(x,1)==0);
ip.addParamValue('metric',[], @isstruct);
ip.addParamValue('magnSigma2_prior', prior_logunif(), ...
@(x) isstruct(x) || isempty(x));
ip.addParamValue('lengthScale_prior',prior_t(), ...
@(x) isstruct(x) || isempty(x));
ip.addParamValue('selectedVariables',[], @(x) isempty(x) || ...
(isvector(x) && all(x>0)));
ip.parse(varargin{:});
gpcf=ip.Results.gpcf;
if isempty(gpcf)
% Check that SuiteSparse is available
if ~exist('ldlchol','file')
error('SuiteSparse is not installed (or it is not in the path). gpcf_ppcs3 cannot be used!')
end
init=true;
gpcf.nin=ip.Results.nin;
if isempty(gpcf.nin)
error('nin has to be given for ppcs: gpcf_ppcs3(''nin'',NIN,...)')
end
gpcf.type = 'gpcf_ppcs3';
% cf is compactly supported
gpcf.cs = 1;
else
if ~isfield(gpcf,'type') || ~isequal(gpcf.type,'gpcf_ppcs3')
error('First argument does not seem to be a valid covariance function structure')
end
init=false;
end
if init
% Set the function handles to the subfunctions
gpcf.fh.pak = @gpcf_ppcs3_pak;
gpcf.fh.unpak = @gpcf_ppcs3_unpak;
gpcf.fh.lp = @gpcf_ppcs3_lp;
gpcf.fh.lpg = @gpcf_ppcs3_lpg;
gpcf.fh.cfg = @gpcf_ppcs3_cfg;
gpcf.fh.ginput = @gpcf_ppcs3_ginput;
gpcf.fh.cov = @gpcf_ppcs3_cov;
gpcf.fh.trcov = @gpcf_ppcs3_trcov;
gpcf.fh.trvar = @gpcf_ppcs3_trvar;
gpcf.fh.recappend = @gpcf_ppcs3_recappend;
end
% Initialize parameters
if init || ~ismember('l_nin',ip.UsingDefaults)
gpcf.l=ip.Results.l_nin;
if isempty(gpcf.l)
gpcf.l = floor(gpcf.nin/2) + 4;
end
if gpcf.l < gpcf.nin
error('The l_nin has to be greater than or equal to the number of inputs!')
end
end
if init || ~ismember('lengthScale',ip.UsingDefaults)
gpcf.lengthScale = ip.Results.lengthScale;
end
if init || ~ismember('magnSigma2',ip.UsingDefaults)
gpcf.magnSigma2 = ip.Results.magnSigma2;
end
% Initialize prior structure
if init
gpcf.p=[];
end
if init || ~ismember('lengthScale_prior',ip.UsingDefaults)
gpcf.p.lengthScale=ip.Results.lengthScale_prior;
end
if init || ~ismember('magnSigma2_prior',ip.UsingDefaults)
gpcf.p.magnSigma2=ip.Results.magnSigma2_prior;
end
%Initialize metric
if ~ismember('metric',ip.UsingDefaults)
if ~isempty(ip.Results.metric)
gpcf.metric = ip.Results.metric;
gpcf = rmfield(gpcf, 'lengthScale');
gpcf.p = rmfield(gpcf.p, 'lengthScale');
elseif isfield(gpcf,'metric')
if ~isfield(gpcf,'lengthScale')
gpcf.lengthScale = gpcf.metric.lengthScale;
end
if ~isfield(gpcf.p,'lengthScale')
gpcf.p.lengthScale = gpcf.metric.p.lengthScale;
end
gpcf = rmfield(gpcf, 'metric');
end
end
% selectedVariables options implemented using metric_euclidean
if ~ismember('selectedVariables',ip.UsingDefaults)
if ~isfield(gpcf,'metric')
if ~isempty(ip.Results.selectedVariables)
gpcf.selectedVariables = ip.Results.selectedVariables;
% gpcf.metric=metric_euclidean('components',...
% num2cell(ip.Results.selectedVariables),...
% 'lengthScale',gpcf.lengthScale,...
% 'lengthScale_prior',gpcf.p.lengthScale);
% gpcf = rmfield(gpcf, 'lengthScale');
% gpcf.p = rmfield(gpcf.p, 'lengthScale');
end
elseif isfield(gpcf,'metric')
if ~isempty(ip.Results.selectedVariables)
gpcf.metric=metric_euclidean(gpcf.metric,...
'components',...
num2cell(ip.Results.selectedVariables));
if ~ismember('lengthScale',ip.UsingDefaults)
gpcf.metric.lengthScale=ip.Results.lengthScale;
gpcf = rmfield(gpcf, 'lengthScale');
end
if ~ismember('lengthScale_prior',ip.UsingDefaults)
gpcf.metric.p.lengthScale=ip.Results.lengthScale_prior;
gpcf.p = rmfield(gpcf.p, 'lengthScale');
end
else
if ~isfield(gpcf,'lengthScale')
gpcf.lengthScale = gpcf.metric.lengthScale;
end
if ~isfield(gpcf.p,'lengthScale')
gpcf.p.lengthScale = gpcf.metric.p.lengthScale;
end
gpcf = rmfield(gpcf, 'metric');
end
end
end
end
function [w,s] = gpcf_ppcs3_pak(gpcf)
%GPCF_PPCS3_PAK Combine GP covariance function parameters into
% one vector
%
% Description
% W = GPCF_PPCS3_PAK(GPCF) takes a covariance function
% structure GPCF and combines the covariance function
% parameters and their hyperparameters into a single row
% vector W. This is a mandatory subfunction used for
% example in energy and gradient computations.
%
% w = [ log(gpcf.magnSigma2)
% (hyperparameters of gpcf.magnSigma2)
% log(gpcf.lengthScale(:))
% (hyperparameters of gpcf.lengthScale)]'
%
% See also
% GPCF_PPCS3_UNPAK
w = []; s = {};
if ~isempty(gpcf.p.magnSigma2)
w = [w log(gpcf.magnSigma2)];
s = [s; 'log(ppcs3.magnSigma2)'];
% Hyperparameters of magnSigma2
[wh sh] = gpcf.p.magnSigma2.fh.pak(gpcf.p.magnSigma2);
w = [w wh];
s = [s; sh];
end
if isfield(gpcf,'metric')
[wh sh]=gpcf.metric.fh.pak(gpcf.metric);
w = [w wh];
s = [s; sh];
else
if ~isempty(gpcf.p.lengthScale)
w = [w log(gpcf.lengthScale)];
if numel(gpcf.lengthScale)>1
s = [s; sprintf('log(ppcs3.lengthScale x %d)',numel(gpcf.lengthScale))];
else
s = [s; 'log(ppcs3.lengthScale)'];
end
% Hyperparameters of lengthScale
[wh sh] = gpcf.p.lengthScale.fh.pak(gpcf.p.lengthScale);
w = [w wh];
s = [s; sh];
end
end
end
function [gpcf, w] = gpcf_ppcs3_unpak(gpcf, w)
%GPCF_PPCS3_UNPAK Sets the covariance function parameters into
% the structure
%
% Description
% [GPCF, W] = GPCF_PPCS3_UNPAK(GPCF, W) takes a covariance
% function structure GPCF and a hyper-parameter vector W,
% and returns a covariance function structure identical
% to the input, except that the covariance hyper-parameters
% have been set to the values in W. Deletes the values set to
% GPCF from W and returns the modified W. This is a mandatory
% subfunction used for example in energy and gradient computations.
%
% Assignment is inverse of
% w = [ log(gpcf.magnSigma2)
% (hyperparameters of gpcf.magnSigma2)
% log(gpcf.lengthScale(:))
% (hyperparameters of gpcf.lengthScale)]'
%
% See also
% GPCF_PPCS3_PAK
gpp=gpcf.p;
if ~isempty(gpp.magnSigma2)
gpcf.magnSigma2 = exp(w(1));
w = w(2:end);
% Hyperparameters of magnSigma2
[p, w] = gpcf.p.magnSigma2.fh.unpak(gpcf.p.magnSigma2, w);
gpcf.p.magnSigma2 = p;
end
if isfield(gpcf,'metric')
[metric, w] = gpcf.metric.fh.unpak(gpcf.metric, w);
gpcf.metric = metric;
else
if ~isempty(gpp.lengthScale)
i1=1;
i2=length(gpcf.lengthScale);
gpcf.lengthScale = exp(w(i1:i2));
w = w(i2+1:end);
% Hyperparameters of lengthScale
[p, w] = gpcf.p.lengthScale.fh.unpak(gpcf.p.lengthScale, w);
gpcf.p.lengthScale = p;
end
end
end
function lp = gpcf_ppcs3_lp(gpcf)
%GPCF_PPCS3_LP Evaluate the log prior of covariance function parameters
%
% Description
% LP = GPCF_PPCS3_LP(GPCF) takes a covariance function
% structure GPCF and returns log(p(th)), where th collects the
% parameters. This is a mandatory subfunction used for example
% in energy computations.
%
% See also
% GPCF_PPCS3_PAK, GPCF_PPCS3_UNPAK, GPCF_PPCS3_LPG, GP_E
% Evaluate the prior contribution to the error. The parameters that
% are sampled are transformed, e.g., W = log(w) where w are all
% the "real" samples. On the other hand, errors are evaluated in
% the W-space, so we also need to take into account the Jacobian of
% the transformation, e.g., W -> w = exp(W). See Gelman et al., 2004,
% Bayesian Data Analysis, second edition, p. 24.
lp = 0;
gpp=gpcf.p;
if ~isempty(gpcf.p.magnSigma2)
lp = lp +gpp.magnSigma2.fh.lp(gpcf.magnSigma2, ...
gpp.magnSigma2) +log(gpcf.magnSigma2);
end
if isfield(gpcf,'metric')
lp = lp +gpcf.metric.fh.lp(gpcf.metric);
elseif ~isempty(gpp.lengthScale)
lp = lp +gpp.lengthScale.fh.lp(gpcf.lengthScale, ...
gpp.lengthScale) +sum(log(gpcf.lengthScale));
end
end
function lpg = gpcf_ppcs3_lpg(gpcf)
%GPCF_PPCS3_LPG Evaluate gradient of the log prior with respect
% to the parameters.
%
% Description
% LPG = GPCF_PPCS3_LPG(GPCF) takes a covariance function
% structure GPCF and returns LPG = d log (p(th))/dth, where th
% is the vector of parameters. This is a mandatory subfunction
% used for example in gradient computations.
%
% See also
% GPCF_PPCS3_PAK, GPCF_PPCS3_UNPAK, GPCF_PPCS3_LP, GP_G
lpg = [];
gpp=gpcf.p;
if ~isempty(gpcf.p.magnSigma2)
lpgs = gpp.magnSigma2.fh.lpg(gpcf.magnSigma2, gpp.magnSigma2);
lpg = [lpg lpgs(1).*gpcf.magnSigma2+1 lpgs(2:end)];
end
if isfield(gpcf,'metric')
lpg_dist = gpcf.metric.fh.lpg(gpcf.metric);
lpg=[lpg lpg_dist];
else
if ~isempty(gpcf.p.lengthScale)
lll = length(gpcf.lengthScale);
lpgs = gpp.lengthScale.fh.lpg(gpcf.lengthScale, gpp.lengthScale);
lpg = [lpg lpgs(1:lll).*gpcf.lengthScale+1 lpgs(lll+1:end)];
end
end
end
function DKff = gpcf_ppcs3_cfg(gpcf, x, x2, mask, i1)
%GPCF_PPCS3_CFG Evaluate gradient of covariance function
% with respect to the parameters
%
% Description
% DKff = GPCF_PPCS3_CFG(GPCF, X) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X) with
% respect to th (cell array with matrix elements). This is a
% mandatory subfunction used in gradient computations.
%
% DKff = GPCF_PPCS3_CFG(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_PPCS3_CFG(GPCF, X, X2, [], i) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2), or k(X,X) if X2 is empty, with respect to the ith
% hyperparameter. This subfunction is needed when using the
% memory save option in gp_set.
%
% DKff = GPCF_PPCS3_CFG(GPCF, X, [], MASK) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the diagonal of gradients of covariance matrix
% Kff = k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using memory
% save option in gp_set.
%
% See also
% GPCF_PPCS3_PAK, GPCF_PPCS3_UNPAK, GPCF_PPCS3_LP, GP_G
gpp=gpcf.p;
i2=1;
DKff = {};
gprior = [];
if nargin==5
% Use memory save option
savememory=1;
if i1==0
% Return number of hyperparameters
i=0;
if ~isempty(gpcf.p.magnSigma2)
i=i+1;
end
if ~isempty(gpcf.p.lengthScale)
i=i+length(gpcf.lengthScale);
end
DKff=i;
return
end
else
savememory=0;
end
% Evaluate: DKff{1} = d Kff / d magnSigma2
% DKff{2} = d Kff / d lengthScale
% NOTE! Here we have already taken into account that the parameters
% are transformed through log() and thus dK/dlog(p) = p * dK/dp
% evaluate the gradient for training covariance
if nargin == 2 || (isempty(x2) && isempty(mask))
Cdm = gpcf_ppcs3_trcov(gpcf, x);
ii1=0;
if ~isempty(gpcf.p.magnSigma2)
ii1 = ii1 +1;
DKff{ii1} = Cdm;
end
l = gpcf.l;
[I,J] = find(Cdm);
if isfield(gpcf,'metric')
% Compute the sparse distance matrix and its gradient.
[n, m] =size(x);
ntriplets = (nnz(Cdm)-n)./2;
I = zeros(ntriplets,1);
J = zeros(ntriplets,1);
dist = zeros(ntriplets,1);
for jj = 1:length(gpcf.metric.components)
gdist{jj} = zeros(ntriplets,1);
end
ntriplets = 0;
for ii=1:n-1
col_ind = ii + find(Cdm(ii+1:n,ii));
d = gpcf.metric.fh.dist(gpcf.metric, x(col_ind,:), x(ii,:));
gd = gpcf.metric.fh.distg(gpcf.metric, x(col_ind,:), x(ii,:));
ntrip_prev = ntriplets;
ntriplets = ntriplets + length(d);
ind_tr = ntrip_prev+1:ntriplets;
I(ind_tr) = col_ind;
J(ind_tr) = ii;
dist(ind_tr) = d;
for jj = 1:length(gd)
gdist{jj}(ind_tr) = gd{jj};
end
end
ma2 = gpcf.magnSigma2;
cs = 1-dist;
const1 = l^3 + 9*l^2 + 23*l + 15;
const2 = 6*l^2 + 36*l + 45;
const3 = 15*l + 45;
Dd = -(l+3).*cs.^(l+2).*(const1.*dist.^3 + const2.*dist.^2 + const3.*dist + 15)/15;
Dd = Dd + cs.^(l+3).*(3.*const1.*dist.^2 + 2.*const2.*dist + const3)./15;
Dd = ma2.*Dd;
for i=1:length(gdist)
ii1 = ii1+1;
D = Dd.*gdist{i};
D = sparse(I,J,D,n,n);
DKff{ii1} = D + D';
end
else
if isfield(gpcf, 'selectedVariables')
x = x(:,gpcf.selectedVariables);
end
[n, m] =size(x);
if ~savememory
i1=1:m;
else
if i1==1
DKff=DKff{1};
return
end
i1=i1-1;
ii1=ii1-1;
end
if ~isempty(gpcf.p.lengthScale)
% loop over all the lengthScales
if length(gpcf.lengthScale) == 1
% In the case of isotropic PPCS3
s2 = 1./gpcf.lengthScale.^2;
ma2 = gpcf.magnSigma2;
% Calculate the sparse distance (lower triangle) matrix
d2 = 0;
for i = 1:m
d2 = d2 + s2.*(x(I,i) - x(J,i)).^2;
end
d = sqrt(d2);
% Create the 'compact support' matrix, that is, (1-R)_+,
% where ()_+ truncates all non-positive inputs to zero.
cs = 1-d;
% Calculate the gradient matrix
const1 = l^3 + 9*l^2 + 23*l + 15;
const2 = 6*l^2 + 36*l + 45;
const3 = 15*l + 45;
D = -(l+3).*cs.^(l+2).*(const1.*d.^3 + const2.*d.^2 + const3.*d + 15)/15;
D = D + cs.^(l+3).*(3.*const1.*d.^2 + 2.*const2.*d + const3)./15;
D = -d.*ma2.*D;
D = sparse(I,J,D,n,n);
ii1 = ii1+1;
DKff{ii1} = D;
else
% In the case ARD is used
s2 = 1./gpcf.lengthScale.^2;
ma2 = gpcf.magnSigma2;
% Calculate the sparse distance (lower triangle) matrix
% and the distance matrix for each component
d2 = 0;
d_l2 = [];
for i = 1:m
d_l2(:,i) = s2(i).*(x(I,i) - x(J,i)).^2;
d2 = d2 + d_l2(:,i);
end
d = sqrt(d2);
d_l = d_l2;
% Create the 'compact support' matrix, that is, (1-R)_+,
% where ()_+ truncates all non-positive inputs to zero.
cs = 1-d;
const1 = l^3 + 9*l^2 + 23*l + 15;
const2 = 6*l^2 + 36*l + 45;
const3 = 15*l + 45;
Dd = -(l+3).*cs.^(l+2).*(const1.*d.^3 + const2.*d.^2 + const3.*d + 15)/15;
Dd = Dd + cs.^(l+3).*(3.*const1.*d.^2 + 2.*const2.*d + const3)./15;
Dd = -ma2.*Dd;
int = d ~= 0;
for i = i1
% Calculate the gradient matrix
D = d_l(:,i).*Dd;
% Divide by r in cases where r is non-zero
D(int) = D(int)./d(int);
D = sparse(I,J,D,n,n);
ii1 = ii1+1;
DKff{ii1} = D;
end
end
end
end
% Evaluate the gradient of non-symmetric covariance (e.g. K_fu)
elseif nargin == 3 || isempty(mask)
if size(x,2) ~= size(x2,2)
error('gpcf_ppcs3 -> _cfg: The number of columns in x and x2 has to be the same.')
end
ii1=0;
K = gpcf.fh.cov(gpcf, x, x2);
if ~isempty(gpcf.p.magnSigma2)
ii1 = ii1 +1;
DKff{ii1} = K;
end
l = gpcf.l;
if isfield(gpcf,'metric')
% If other than scaled euclidean metric
[n1,m1]=size(x);
[n2,m2]=size(x2);
ma = gpcf.magnSigma2;
% Compute the sparse distance matrix.
ntriplets = nnz(K);
I = zeros(ntriplets,1);
J = zeros(ntriplets,1);
R = zeros(ntriplets,1);
dist = zeros(ntriplets,1);
for jj = 1:length(gpcf.metric.components)
gdist{jj} = zeros(ntriplets,1);
end
ntriplets = 0;
for ii=1:n2
d = zeros(n1,1);
d = gpcf.metric.fh.dist(gpcf.metric, x, x2(ii,:));
gd = gpcf.metric.fh.distg(gpcf.metric, x, x2(ii,:));
gprior_dist = gpcf.metric.fh.lpg(gpcf.metric, x, x2(ii,:));
I0t = find(d==0);
d(d >= 1) = 0;
[I2,J2,R2] = find(d);
len = length(R);
ntrip_prev = ntriplets;
ntriplets = ntriplets + length(R2);
ind_tr = ntrip_prev+1:ntriplets;
I(ind_tr) = I2;
J(ind_tr) = ii;
dist(ind_tr) = R2;
for jj = 1:length(gd)
gdist{jj}(ind_tr) = gd{jj}(I2);
end
end
ma2 = gpcf.magnSigma2;
cs = 1-dist;
const1 = l^3 + 9*l^2 + 23*l + 15;
const2 = 6*l^2 + 36*l + 45;
const3 = 15*l + 45;
Dd = -(l+3).*cs.^(l+2).*(const1.*dist.^3 + const2.*dist.^2 + const3.*dist + 15)/15;
Dd = Dd + cs.^(l+3).*(3.*const1.*dist.^2 + 2.*const2.*dist + const3)./15;
Dd = ma2.*Dd;
for i=1:length(gdist)
ii1 = ii1+1;
D = Dd.*gdist{i};
D = sparse(I,J,D,n1,n2);
DKff{ii1} = D;
end
else
if isfield(gpcf, 'selectedVariables')
x = x(:,gpcf.selectedVariables);
x2 = x2(:,gpcf.selectedVariables);
end
[n, m] =size(x);
if ~savememory
i1=1:m;
else
if i1==1
DKff=DKff{1};
return
end
i1=i1-1;
ii1=ii1-1;
end
if ~isempty(gpcf.p.lengthScale)
% loop over all the lengthScales
if length(gpcf.lengthScale) == 1
% In the case of isotropic PPCS3
s2 = 1./gpcf.lengthScale.^2;
ma2 = gpcf.magnSigma2;
% Calculate the sparse distance (lower triangle) matrix
dist1 = 0;
for i=1:m
dist1 = dist1 + s2.*(bsxfun(@minus,x(:,i),x2(:,i)')).^2;
end
d1 = sqrt(dist1);
cs1 = max(1-d1,0);
const1 = l^3 + 9*l^2 + 23*l + 15;
const2 = 6*l^2 + 36*l + 45;
const3 = 15*l + 45;
D = -(l+3).*cs1.^(l+2).*(const1.*d1.^3 + const2.*d1.^2 + const3.*d1 + 15)/15;
D = D + cs1.^(l+3).*(3.*const1.*d1.^2 + 2.*const2.*d1 + const3)./15;
DK_l = -d1.*ma2.*D;
ii1=ii1+1;
DKff{ii1} = DK_l;
else
% In the case ARD is used
s2 = 1./gpcf.lengthScale.^2;
ma2 = gpcf.magnSigma2;
% Calculate the sparse distance (lower triangle) matrix
% and the distance matrix for each component
dist1 = 0;
d_l1 = [];
for i = 1:m
dist1 = dist1 + s2(i).*bsxfun(@minus,x(:,i),x2(:,i)').^2;
d_l1{i} = s2(i).*(bsxfun(@minus,x(:,i),x2(:,i)')).^2;
end
d1 = sqrt(dist1);
cs1 = max(1-d1,0);
const1 = l^3 + 9*l^2 + 23*l + 15;
const2 = 6*l^2 + 36*l + 45;
const3 = 15*l + 45;
D = -(l+3).*cs1.^(l+2).*(const1.*d1.^3 + const2.*d1.^2 + const3.*d1 + 15)/15;
D = D + cs1.^(l+3).*(3.*const1.*d1.^2 + 2.*const2.*d1 + const3)./15;
for i = i1
% Calculate the gradient matrix
DK_l = -D.*ma2.*d_l1{i};
% Divide by r in cases where r is non-zero
DK_l(d1 ~= 0) = DK_l(d1 ~= 0)./d1(d1 ~= 0);
ii1=ii1+1;
DKff{ii1} = DK_l;
end
end
end
end
% Evaluate: DKff{1} = d mask(Kff,I) / d magnSigma2
% DKff{2...} = d mask(Kff,I) / d lengthScale
elseif nargin == 4 || nargin == 5
ii1=0;
[n, m] =size(x);
if ~isempty(gpcf.p.magnSigma2) && (~savememory || all(i1==1))
ii1 = ii1+1;
DKff{ii1} = gpcf.fh.trvar(gpcf, x); % d mask(Kff,I) / d magnSigma2
end
if isfield(gpcf,'metric')
dist = 0;
distg = gpcf.metric.fh.distg(gpcf.metric, x, [], 1);
gprior_dist = gpcf.metric.fh.lpg(gpcf.metric);
for i=1:length(distg)
ii1 = ii1+1;
DKff{ii1} = 0;
end
else
if ~isempty(gpcf.p.lengthScale)
for i2=1:length(gpcf.lengthScale)
ii1 = ii1+1;
DKff{ii1} = 0; % d mask(Kff,I) / d lengthScale
end
end
end
end
if savememory
DKff=DKff{1};
end
end
function DKff = gpcf_ppcs3_ginput(gpcf, x, x2, i1)
%GPCF_PPCS3_GINPUT Evaluate gradient of covariance function with
% respect to x
%
% Description
% DKff = GPCF_PPCS3_GINPUT(GPCF, X) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_PPCS3_GINPUT(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_PPCS3_GINPUT(GPCF, X, X2, i) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2), or k(X,X) if X2 is empty, with respect to ith
% covariate in X (cell array with matrix elements). This
% subfunction is needed when using memory save option in
% gp_set.
%
% See also
% GPCF_PPCS3_PAK, GPCF_PPCS3_UNPAK, GPCF_PPCS3_LP, GP_G
[n, m] =size(x);
ii1=0;
DKff = {};
if nargin<4
i1=1:m;
else
% Use memory save option
if i1==0
% Return number of covariates
if isfield(gpcf,'selectedVariables')
DKff=length(gpcf.selectedVariables);
else
DKff=m;
end
return
end
end
% evaluate the gradient for training covariance
if nargin == 2 || isempty(x2)
K = gpcf_ppcs3_trcov(gpcf, x);
l = gpcf.l;
[I,J] = find(K);
if isfield(gpcf,'metric')
% Compute the sparse distance matrix and its gradient.
ntriplets = (nnz(K)-n)./2;
I = zeros(ntriplets,1);
J = zeros(ntriplets,1);
dist = zeros(ntriplets,1);
for jj = 1:length(gpcf.metric.components)
gdist{jj} = zeros(ntriplets,1);
end
ntriplets = 0;
for ii=1:n-1
col_ind = ii + find(K(ii+1:n,ii));
d = zeros(length(col_ind),1);
d = gpcf.metric.fh.dist(gpcf.metric, x(col_ind,:), x(ii,:));
[gd, gprior_dist] = gpcf.metric.fh.ginput(gpcf.metric, x(col_ind,:), x(ii,:));
ntrip_prev = ntriplets;
ntriplets = ntriplets + length(d);
ind_tr = ntrip_prev+1:ntriplets;
I(ind_tr) = col_ind;
J(ind_tr) = ii;
dist(ind_tr) = d;
for jj = 1:length(gd)
gdist{jj}(ind_tr) = gd{jj};
end
end
ma2 = gpcf.magnSigma2;
cs = 1-dist;
const1 = l^3 + 9*l^2 + 23*l + 15;
const2 = 6*l^2 + 36*l + 45;
const3 = 15*l + 45;
Dd = -(l+3).*cs.^(l+2).*(const1.*dist.^3 + const2.*dist.^2 + const3.*dist + 15)/15;
Dd = Dd + cs.^(l+3).*(3.*const1.*dist.^2 + 2.*const2.*dist + const3)./15;
Dd = ma2.*Dd;
for i=1:length(gdist)
ii1 = ii1+1;
D = Dd.*gdist{i};
D = sparse(I,J,D,n,n);
DKff{ii1} = D + D';
end
else
if length(gpcf.lengthScale) == 1
% In the case of an isotropic
s2 = repmat(1./gpcf.lengthScale.^2, 1, m);
else
s2 = 1./gpcf.lengthScale.^2;
end
ma2 = gpcf.magnSigma2;
% Calculate the sparse distance (lower triangle) matrix
% and the distance matrix for each component
d2 = 0;
for i = 1:m
d2 = d2 + s2(i).*(x(I,i) - x(J,i)).^2;
end
d = sqrt(d2);
% Create the 'compact support' matrix, that is, (1-R)_+,
% where ()_+ truncates all non-positive inputs to zero.
cs = 1-d;
const1 = l^3 + 9*l^2 + 23*l + 15;
const2 = 6*l^2 + 36*l + 45;
const3 = 15*l + 45;
Dd = -(l+3).*cs.^(l+2).*(const1.*d.^3 + const2.*d.^2 + const3.*d + 15)/15;
Dd = Dd + cs.^(l+3).*(3.*const1.*d.^2 + 2.*const2.*d + const3)./15;
Dd = ma2.*Dd;
Dd = sparse(I,J,Dd,n,n);
d = sparse(I,J,d,n,n);
row = ones(n,1);
cols = 1:n;
for i = i1
for j = 1:n
% Calculate the gradient matrix
ind = find(d(:,j));
apu = full(Dd(:,j)).*s2(i).*(x(j,i)-x(:,i));
apu(ind) = apu(ind)./d(ind,j);
D = sparse(row*j, cols, apu, n, n);
D = D+D';
ii1 = ii1+1;
DKff{ii1} = D;
end
end
end
% Evaluate the gradient of non-symmetric covariance (e.g. K_fu)
elseif nargin == 3 || nargin == 4
if size(x,2) ~= size(x2,2)
error('gpcf_ppcs3 -> _ginput: The number of columns in x and x2 has to be the same.')
end
ii1=0;
K = gpcf.fh.cov(gpcf, x, x2);
n2 = size(x2,1);
l = gpcf.l;
if isfield(gpcf,'metric')
% If other than scaled euclidean metric
[n1,m1]=size(x);
[n2,m2]=size(x2);
ma = gpcf.magnSigma2;
% Compute the sparse distance matrix.
ntriplets = nnz(K);
I = zeros(ntriplets,1);
J = zeros(ntriplets,1);
R = zeros(ntriplets,1);
dist = zeros(ntriplets,1);
for jj = 1:length(gpcf.metric.components)
gdist{jj} = zeros(ntriplets,1);
end
ntriplets = 0;
for ii=1:n2
d = zeros(n1,1);
d = gpcf.metric.fh.dist(gpcf.metric, x, x2(ii,:));
[gd, gprior_dist] = gpcf.metric.fh.ginput(gpcf.metric, x, x2(ii,:));
I0t = find(d==0);
d(d >= 1) = 0;
[I2,J2,R2] = find(d);
len = length(R);
ntrip_prev = ntriplets;
ntriplets = ntriplets + length(R2);
ind_tr = ntrip_prev+1:ntriplets;
I(ind_tr) = I2;
J(ind_tr) = ii;
dist(ind_tr) = R2;
for jj = 1:length(gd)
gdist{jj}(ind_tr) = gd{jj}(I2);
end
end
ma2 = gpcf.magnSigma2;
cs = 1-dist;
const1 = l^3 + 9*l^2 + 23*l + 15;
const2 = 6*l^2 + 36*l + 45;
const3 = 15*l + 45;
Dd = -(l+3).*cs.^(l+2).*(const1.*dist.^3 + const2.*dist.^2 + const3.*dist + 15)/15;
Dd = Dd + cs.^(l+3).*(3.*const1.*dist.^2 + 2.*const2.*dist + const3)./15;
Dd = ma2.*Dd;
for i=1:length(gdist)
ii1 = ii1+1;
D = Dd.*gdist{i};
D = sparse(I,J,D,n1,n2);
DKff{ii1} = D;
end
else
if length(gpcf.lengthScale) == 1
% In the case of an isotropic PPCS3
s2 = repmat(1./gpcf.lengthScale.^2, 1, m);
else
s2 = 1./gpcf.lengthScale.^2;
end
ma2 = gpcf.magnSigma2;
% Calculate the sparse distance (lower triangle) matrix
% and the distance matrix for each component
d2 = 0;
for i = 1:m
d2 = d2 + s2(i).*bsxfun(@minus,x(:,i),x2(:,i)').^2;
end
d = sqrt(d2);
cs = max(1-d,0);
const1 = l^3 + 9*l^2 + 23*l + 15;
const2 = 6*l^2 + 36*l + 45;
const3 = 15*l + 45;
Dd = -(l+3).*cs.^(l+2).*(const1.*d.^3 + const2.*d.^2 + const3.*d + 15)/15;
Dd = Dd + cs.^(l+3).*(3.*const1.*d.^2 + 2.*const2.*d + const3)./15;
Dd = ma2.*Dd;
row = ones(n2,1);
cols = 1:n2;
for i = i1
for j = 1:n
% Calculate the gradient matrix
ind = find(d(j,:));
apu = Dd(j,:).*s2(i).*(x(j,i)-x2(:,i))';
apu(ind) = apu(ind)./d(j,ind);
D = sparse(row*j, cols, apu, n, n2);
ii1 = ii1+1;
DKff{ii1} = D;
end
end
end
end
end
function C = gpcf_ppcs3_cov(gpcf, x1, x2, varargin)
%GP_PPCS3_COV Evaluate covariance matrix between two input vectors
%
% Description
% C = GP_PPCS3_COV(GP, TX, X) takes in covariance function of
% a Gaussian process GP and two matrixes TX and X that contain
% input vectors to GP. Returns covariance matrix C. Every
% element ij of C contains covariance between inputs i in TX
% and j in X. This is a mandatory subfunction used for example in
% prediction and energy computations.
%
% See also
% GPCF_PPCS3_TRCOV, GPCF_PPCS3_TRVAR, GP_COV, GP_TRCOV
if isfield(gpcf,'metric')
% If other than scaled euclidean metric
[n1,m1]=size(x1);
[n2,m2]=size(x2);
else
% If scaled euclidean metric
if isfield(gpcf, 'selectedVariables')
x1 = x1(:,gpcf.selectedVariables);
x2 = x2(:,gpcf.selectedVariables);
end
[n1,m1]=size(x1);
[n2,m2]=size(x2);
s = 1./(gpcf.lengthScale);
s2 = s.^2;
if numel(s)==1
s2 = repmat(s2,1,m1);
end
end
ma2 = gpcf.magnSigma2;
l = gpcf.l;
% Compute the sparse distance matrix.
ntriplets = max(1,floor(0.03*n1*n2));
I = zeros(ntriplets,1);
J = zeros(ntriplets,1);
R = zeros(ntriplets,1);
ntriplets = 0;
I0=zeros(ntriplets,1);
J0=zeros(ntriplets,1);
nn0=0;
for ii1=1:n2
d = zeros(n1,1);
if isfield(gpcf, 'metric')
d = gpcf.metric.fh.dist(gpcf.metric, x1, x2(ii1,:));
else
for j=1:m1
d = d + s2(j).*(x1(:,j)-x2(ii1,j)).^2;
end
end
%d = sqrt(d);
I0t = find(d==0);
d(d >= 1) = 0;
[I2,J2,R2] = find(d);
R2=sqrt(R2);
%len = length(R);
ntrip_prev = ntriplets;
ntriplets = ntriplets + length(R2);
I(ntrip_prev+1:ntriplets) = I2;
J(ntrip_prev+1:ntriplets) = ii1;
R(ntrip_prev+1:ntriplets) = R2;
I0(nn0+1:nn0+length(I0t)) = I0t;
J0(nn0+1:nn0+length(I0t)) = ii1;
nn0 = nn0+length(I0t);
end
r = sparse(I(1:ntriplets),J(1:ntriplets),R(1:ntriplets));
[I,J,r] = find(r);
cs = full(sparse(max(0, 1-r)));
const1 = l^3 + 9*l^2 + 23*l + 15;
const2 = 6*l^2 + 36*l + 45;
const3 = 15*l + 45;
C = ma2.*cs.^(l+3).*(const1.*r.^3 + const2.*r.^2 + const3.*r + 15)/15;
C = sparse(I,J,C,n1,n2) + sparse(I0,J0,ma2,n1,n2);
end
function C = gpcf_ppcs3_trcov(gpcf, x)
%GP_PPCS3_TRCOV Evaluate training covariance matrix of inputs
%
% Description
% C = GP_PPCS3_TRCOV(GP, TX) takes in covariance function of a
% Gaussian process GP and matrix TX that contains training
% input vectors. Returns covariance matrix C. Every element ij
% of C contains covariance between inputs i and j in TX. This is
% a mandatory subfunction used for example in prediction and
% energy computations.
%
% See also
% GPCF_PPCS3_COV, GPCF_PPCS3_TRVAR, GP_COV, GP_TRCOV
if isfield(gpcf,'metric')
% If other than scaled euclidean metric
[n, m] =size(x);
else
% If a scaled euclidean metric, try the mex implementation first
% and if it is not available...
C = trcov(gpcf,x);
% ... evaluate the covariance here.
if isnan(C)
if isfield(gpcf, 'selectedVariables')
x = x(:,gpcf.selectedVariables);
end
[n, m] =size(x);
s = 1./(gpcf.lengthScale);
s2 = s.^2;
if numel(s)==1
s2 = repmat(s2,1,m);
end
else
return
end
end
ma2 = gpcf.magnSigma2;
l = gpcf.l;
% Compute the sparse distance matrix.
ntriplets = max(1,floor(0.03*n*n));
I = zeros(ntriplets,1);
J = zeros(ntriplets,1);
R = zeros(ntriplets,1);
ntriplets = 0;
ntripletsz = max(1,floor(0.03.^2*n*n));
Iz = zeros(ntripletsz,1);
Jz = zeros(ntripletsz,1);
ntripletsz = 0;
for ii1=1:n-1
d = zeros(n-ii1,1);
col_ind = ii1+1:n;
if isfield(gpcf,'metric')
d = gpcf.metric.fh.dist(gpcf.metric, x(col_ind,:), x(ii1,:));
else
for ii2=1:m
d = d+s2(ii2).*(x(col_ind,ii2)-x(ii1,ii2)).^2;
end
end
%d = sqrt(d);
% store zero distance index
[I2z,J2z] = find(d==0);
% create triplets for distances 0<d<1
d(d >= 1) = 0;
[I2,J2,R2] = find(d);
len = length(R);
ntrip_prev = ntriplets;
ntriplets = ntriplets + length(R2);
if (ntriplets > len)
I(2*len) = 0;
J(2*len) = 0;
R(2*len) = 0;
end
ind_tr = ntrip_prev+1:ntriplets;
I(ind_tr) = ii1+I2;
J(ind_tr) = ii1;
R(ind_tr) = sqrt(R2);
% create triplets for distances d==0 (i~=j)
lenz = length(Iz);
ntrip_prevz = ntripletsz;
ntripletsz = ntripletsz + length(I2z);
if (ntripletsz > lenz)
Iz(2*lenz) = 0;
Jz(2*lenz) = 0;
end
ind_trz = ntrip_prevz+1:ntripletsz;
Iz(ind_trz) = ii1+I2z;
Jz(ind_trz) = ii1;
end
% create a lower triangular sparse distance matrix from the triplets for distances 0<d<1
R = sparse(I(1:ntriplets),J(1:ntriplets),R(1:ntriplets),n,n);
% create a lower triangular sparse covariance matrix from the
% triplets for distances d==0 (i~=j)
Rz = sparse(Iz(1:ntripletsz),Jz(1:ntripletsz),repmat(ma2,1,ntripletsz),n,n);
% Find the non-zero elements of R.
[I,J,rn] = find(R);
% Compute covariances for distances 0<d<1
const1 = l^3 + 9*l^2 + 23*l + 15;
const2 = 6*l^2 + 36*l + 45;
const3 = 15*l + 45;
cs = max(0,1-rn);
C = ma2.*cs.^(l+3).*(const1.*rn.^3 + const2.*rn.^2 + const3.*rn + 15)/15;
% create a lower triangular sparse covariance matrix from the triplets for distances 0<d<1
C = sparse(I,J,C,n,n);
% add the lower triangular covariance matrix for distances d==0 (i~=j)
C = C + Rz;
% form a square covariance matrix and add the covariance matrix for i==j (d==0)
C = C + C' + sparse(1:n,1:n,ma2,n,n);
end
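% A minimal sanity-check sketch (hypothetical values; assumes the standard
% gpcf_ppcs3 constructor with its 'nin' argument): for two inputs at scaled
% distance r < 1 the off-diagonal entry of the training covariance should
% match the closed-form PPCS3 expression used above.
%
%   gpcf = gpcf_ppcs3('nin', 1, 'lengthScale', 2, 'magnSigma2', 1.5);
%   x = [0; 1];                          % scaled distance r = 0.5
%   C = gpcf.fh.trcov(gpcf, x);
%   l = gpcf.l;  r = 0.5;  cs = 1 - r;
%   k = 1.5*cs^(l+3)*((l^3+9*l^2+23*l+15)*r^3 + (6*l^2+36*l+45)*r^2 ...
%       + (15*l+45)*r + 15)/15;          % should equal full(C(2,1))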
function C = gpcf_ppcs3_trvar(gpcf, x)
%GP_PPCS3_TRVAR Evaluate training variance vector
%
% Description
% C = GP_PPCS3_TRVAR(GPCF, TX) takes in covariance function of
% a Gaussian process GPCF and matrix TX that contains training
% inputs. Returns variance vector C. Every element i of C
% contains variance of input i in TX. This is a mandatory
% subfunction used for example in prediction and energy computations.
%
% See also
% GPCF_PPCS3_COV, GP_COV, GP_TRCOV
[n, m] =size(x);
C = ones(n,1).*gpcf.magnSigma2;
C(C<eps)=0;
end
function reccf = gpcf_ppcs3_recappend(reccf, ri, gpcf)
%RECAPPEND Record append
%
% Description
% RECCF = GPCF_PPCS3_RECAPPEND(RECCF, RI, GPCF) takes a
% covariance function record structure RECCF, record index RI
% and covariance function structure GPCF with the current MCMC
% samples of the parameters. Returns RECCF which contains all
% the old samples and the current samples from GPCF. This
% subfunction is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC and GP_MC -> RECAPPEND
if nargin == 2
% Initialize the record
reccf.type = 'gpcf_ppcs3';
reccf.nin = ri.nin;
reccf.l = floor(reccf.nin/2)+4;
% cf is compactly supported
reccf.cs = 1;
% Initialize parameters
reccf.lengthScale= [];
reccf.magnSigma2 = [];
% Set the function handles
reccf.fh.pak = @gpcf_ppcs3_pak;
reccf.fh.unpak = @gpcf_ppcs3_unpak;
reccf.fh.e = @gpcf_ppcs3_lp;
reccf.fh.lpg = @gpcf_ppcs3_lpg;
reccf.fh.cfg = @gpcf_ppcs3_cfg;
reccf.fh.cov = @gpcf_ppcs3_cov;
reccf.fh.trcov = @gpcf_ppcs3_trcov;
reccf.fh.trvar = @gpcf_ppcs3_trvar;
reccf.fh.recappend = @gpcf_ppcs3_recappend;
reccf.p=[];
reccf.p.lengthScale=[];
reccf.p.magnSigma2=[];
if isfield(ri.p,'lengthScale') && ~isempty(ri.p.lengthScale)
reccf.p.lengthScale = ri.p.lengthScale;
end
if ~isempty(ri.p.magnSigma2)
reccf.p.magnSigma2 = ri.p.magnSigma2;
end
if isfield(ri, 'selectedVariables')
reccf.selectedVariables = ri.selectedVariables;
end
else
% Append to the record
gpp = gpcf.p;
if ~isfield(gpcf,'metric')
% record lengthScale
reccf.lengthScale(ri,:)=gpcf.lengthScale;
if isfield(gpp,'lengthScale') && ~isempty(gpp.lengthScale)
reccf.p.lengthScale = gpp.lengthScale.fh.recappend(reccf.p.lengthScale, ri, gpcf.p.lengthScale);
end
end
% record magnSigma2
reccf.magnSigma2(ri,:)=gpcf.magnSigma2;
if isfield(gpp,'magnSigma2') && ~isempty(gpp.magnSigma2)
reccf.p.magnSigma2 = gpp.magnSigma2.fh.recappend(reccf.p.magnSigma2, ri, gpcf.p.magnSigma2);
end
end
end
|
github
|
lcnhappe/happe-master
|
esls.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/esls.m
| 4,193 |
utf_8
|
ae75a9e42b7e6284ac4e8dd42766f9cf
|
function [f, energ, diagn] = esls(f, opt, gp, x, y, z, angle_range)
%ESLS Markov chain update for a distribution with a Gaussian "prior"
% factored out
%
% Description
% [F, ENERG, DIAG] = ESLS(F, OPT, GP, X, Y) takes the current
% latent values F, options structure OPT, Gaussian process
% structure GP, inputs X and outputs Y. Samples new latent
% values and returns also energies ENERG and diagnostics DIAG.
%
% A Markov chain update is applied to the D-element array f leaving a
% "posterior" distribution
% P(f) \propto N(f;0,Sigma) L(f)
% invariant. Where N(0,Sigma) is a zero-mean Gaussian
% distribution with covariance Sigma. Often L is a likelihood
% function in an inference problem.
%
% Reference:
% Elliptical slice sampling
% Iain Murray, Ryan Prescott Adams and David J.C. MacKay.
% The Proceedings of the 13th International Conference on Artificial
% Intelligence and Statistics (AISTATS), JMLR W&CP 9:541-548, 2010.
%
% See also
% GP_MC
% Copyright (c) Iain Murray, September 2009
% Tweak to interface and documentation, September 2010
% Ville Tolvanen, October 2011 - Changed inputs and outputs for the function to
% fit in with other GPstuf samplers. Documentation standardized with other
% GPstuff documentation and modified according to input/output changes.
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
if nargin<=1
% Return only default options
if nargin==0
f=elliptical_sls_opt();
else
f=elliptical_sls_opt(f);
end
return
end
D = numel(f);
if (nargin < 7) || isempty(angle_range)
angle_range = 0;
end
if ~isfield(gp.lik, 'nondiagW') || ismember(gp.lik.type, {'LGP' 'LGPC'})
[K, C] = gp_trcov(gp,x);
else
if ~isfield(gp.lik,'xtime')
nl=[0 repmat(size(y,1), 1, length(gp.comp_cf))];
else
xtime=gp.lik.xtime;
nl=[0 size(gp.lik.xtime,1) size(y,1)];
end
nl=cumsum(nl);
nlp=length(nl)-1;
C = zeros(nl(end));
for i1=1:nlp
if i1==1 && isfield(gp.lik, 'xtime')
C((1+nl(i1)):nl(i1+1),(1+nl(i1)):nl(i1+1)) = gp_trcov(gp, xtime, gp.comp_cf{i1});
else
C((1+nl(i1)):nl(i1+1),(1+nl(i1)):nl(i1+1)) = gp_trcov(gp, x, gp.comp_cf{i1});
end
end
end
if isfield(gp,'meanf')
[H_m,b_m,B_m]=mean_prep(gp,x,[]);
C = C + H_m'*B_m*H_m;
end
L=chol(C, 'lower');
cur_log_like = gp.lik.fh.ll(gp.lik, y, f, z);
for i1=1:opt.repeat
% Set up the ellipse and the slice threshold
nu = reshape(L*randn(D, 1), size(f));
hh = log(rand) + cur_log_like;
% Set up a bracket of angles and pick a first proposal.
% "phi = (theta'-theta)" is a change in angle.
if angle_range <= 0
% Bracket whole ellipse with both edges at first proposed point
phi = rand*2*pi;
phi_min = phi - 2*pi;
phi_max = phi;
else
% Randomly center bracket on current point
phi_min = -angle_range*rand;
phi_max = phi_min + angle_range;
phi = rand*(phi_max - phi_min) + phi_min;
end
% Slice sampling loop
slrej = 0;
while true
% Compute f for proposed angle difference and check if it's on the slice
f_prop = f*cos(phi) + nu*sin(phi);
cur_log_like = gp.lik.fh.ll(gp.lik, y, f_prop, z);
if (cur_log_like > hh)
% New point is on slice, ** EXIT LOOP **
break;
end
% Shrink slice to rejected point
if phi > 0
phi_max = phi;
elseif phi < 0
phi_min = phi;
else
error('BUG DETECTED: Shrunk to current position and still not acceptable.');
end
% Propose new angle difference
phi = rand*(phi_max - phi_min) + phi_min;
slrej = slrej + 1;
end
f = f_prop;
end
energ = [];
diagn.rej = slrej;
diagn.opt = opt;
end
function opt = elliptical_sls_opt(opt)
%ELLIPTICAL_SLS_OPT Default options for elliptical slice sampling
%
% Description
% OPT = ELLIPTICAL_SLS_OPT
% return default options
% OPT = ELLIPTICAL_SLS_OPT(OPT)
% fill empty options with default values
%
% The options and defaults are
% repeat - number of slice sampling updates per call, of which the last is returned [40]
if nargin < 1
opt=[];
end
if ~isfield(opt,'repeat')
opt.repeat=40;
end
end
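% A minimal usage sketch (hypothetical setup; gp, x and y are assumed to be
% a GPstuff model and data built elsewhere, e.g. with gp_set and lik_*):
%
%   opt = esls();                        % default options (repeat = 40)
%   f = zeros(size(y));                  % initial latent values
%   [f, ~, diagn] = esls(f, opt, gp, x, y, []);  % z = [] if the likelihood needs no censoring info
%   % diagn.rej counts the shrinkage (rejection) steps of the last repeat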
|
github
|
lcnhappe/happe-master
|
gpcf_linear.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpcf_linear.m
| 20,157 |
UNKNOWN
|
88ffa237f9edab312bb8598ea4db4415
|
function gpcf = gpcf_linear(varargin)
%GPCF_LINEAR Create a linear (dot product) covariance function
%
% Description
% GPCF = GPCF_LINEAR('PARAM1',VALUE1,'PARAM2',VALUE2,...) creates
% a linear (dot product) covariance function structure in which
% the named parameters have the specified values. Any
% unspecified parameters are set to default values.
%
% GPCF = GPCF_LINEAR(GPCF,'PARAM1',VALUE1,'PARAM2',VALUE2,...)
% modify a covariance function structure with the named
% parameters altered with the specified values.
%
% Parameters for linear (dot product) covariance function
% coeffSigma2 - prior variance for regressor coefficients [10]
% This can be either scalar corresponding
% to a common prior variance or vector
% defining own prior variance for each
% coefficient.
% coeffSigma2_prior - prior structure for coeffSigma2 [prior_logunif]
% selectedVariables - vector defining which inputs are used [all]
%
% Note! If the prior is 'prior_fixed' then the parameter in
% question is considered fixed and it is not handled in
% optimization, grid integration, MCMC etc.
%
% See also
% GP_SET, GPCF_*, PRIOR_*, MEAN_*
% Copyright (c) 2007-2010 Jarno Vanhatalo
% Copyright (c) 2008-2010 Jaakko Riihimäki
% Copyright (c) 2010 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'GPCF_LINEAR';
ip.addOptional('gpcf', [], @isstruct);
ip.addParamValue('coeffSigma2',10, @(x) isvector(x) && all(x>0));
ip.addParamValue('coeffSigma2_prior',prior_logunif, @(x) isstruct(x) || isempty(x));
ip.addParamValue('selectedVariables',[], @(x) isvector(x) && all(x>0));
ip.parse(varargin{:});
gpcf=ip.Results.gpcf;
if isempty(gpcf)
init=true;
gpcf.type = 'gpcf_linear';
else
if ~isfield(gpcf,'type') || ~isequal(gpcf.type,'gpcf_linear')
error('First argument does not seem to be a valid covariance function structure')
end
init=false;
end
% Initialize parameter
if init || ~ismember('coeffSigma2',ip.UsingDefaults)
gpcf.coeffSigma2=ip.Results.coeffSigma2;
end
% Initialize prior structure
if init
gpcf.p=[];
end
if init || ~ismember('coeffSigma2_prior',ip.UsingDefaults)
gpcf.p.coeffSigma2=ip.Results.coeffSigma2_prior;
end
if ~ismember('selectedVariables',ip.UsingDefaults)
selectedVariables=ip.Results.selectedVariables;
if ~isempty(selectedVariables)
gpcf.selectedVariables = selectedVariables;
end
end
if init
% Set the function handles to the subfunctions
gpcf.fh.pak = @gpcf_linear_pak;
gpcf.fh.unpak = @gpcf_linear_unpak;
gpcf.fh.lp = @gpcf_linear_lp;
gpcf.fh.lpg = @gpcf_linear_lpg;
gpcf.fh.cfg = @gpcf_linear_cfg;
gpcf.fh.ginput = @gpcf_linear_ginput;
gpcf.fh.cov = @gpcf_linear_cov;
gpcf.fh.trcov = @gpcf_linear_trcov;
gpcf.fh.trvar = @gpcf_linear_trvar;
gpcf.fh.recappend = @gpcf_linear_recappend;
end
end
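% A minimal usage sketch (hypothetical values): a linear covariance on
% inputs 1 and 3 only, with a separate prior variance per coefficient.
%
%   gpcf = gpcf_linear('coeffSigma2', [1 0.1], 'selectedVariables', [1 3]);
%   gp = gp_set('cf', {gpcf});           % plug into a GP structure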
function [w, s] = gpcf_linear_pak(gpcf, w)
%GPCF_LINEAR_PAK Combine GP covariance function parameters into one vector
%
% Description
% W = GPCF_LINEAR_PAK(GPCF) takes a covariance function
% structure GPCF and combines the covariance function
% parameters and their hyperparameters into a single row
% vector W. This is a mandatory subfunction used for
% example in energy and gradient computations.
%
% w = [ log(gpcf.coeffSigma2)
% (hyperparameters of gpcf.coeffSigma2)]'
%
% See also
% GPCF_LINEAR_UNPAK
w = []; s = {};
if ~isempty(gpcf.p.coeffSigma2)
w = log(gpcf.coeffSigma2);
if numel(gpcf.coeffSigma2)>1
s = [s; sprintf('log(linear.coeffSigma2 x %d)',numel(gpcf.coeffSigma2))];
else
s = [s; 'log(linear.coeffSigma2)'];
end
% Hyperparameters of coeffSigma2
[wh sh] = gpcf.p.coeffSigma2.fh.pak(gpcf.p.coeffSigma2);
w = [w wh];
s = [s; sh];
end
end
function [gpcf, w] = gpcf_linear_unpak(gpcf, w)
%GPCF_LINEAR_UNPAK Sets the covariance function parameters
% into the structure
%
% Description
% [GPCF, W] = GPCF_LINEAR_UNPAK(GPCF, W) takes a covariance
% function structure GPCF and a hyper-parameter vector W, and
% returns a covariance function structure identical to the
% input, except that the covariance hyper-parameters have been
% set to the values in W. Deletes the values set to GPCF from
% W and returns the modified W. This is a mandatory subfunction
% used for example in energy and gradient computations.
%
% Assignment is inverse of
% w = [ log(gpcf.coeffSigma2)
% (hyperparameters of gpcf.coeffSigma2)]'
%
% See also
% GPCF_LINEAR_PAK
gpp=gpcf.p;
if ~isempty(gpp.coeffSigma2)
i2=length(gpcf.coeffSigma2);
i1=1;
gpcf.coeffSigma2 = exp(w(i1:i2));
w = w(i2+1:end);
% Hyperparameters of coeffSigma2
[p, w] = gpcf.p.coeffSigma2.fh.unpak(gpcf.p.coeffSigma2, w);
gpcf.p.coeffSigma2 = p;
end
end
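% A minimal round-trip sketch (hypothetical values): unpak inverts pak, so
% packing followed by unpacking reproduces the same parameter values.
%
%   gpcf = gpcf_linear('coeffSigma2', 2.5);
%   w = gpcf.fh.pak(gpcf);               % w(1) = log(2.5)
%   gpcf2 = gpcf.fh.unpak(gpcf, w);      % gpcf2.coeffSigma2 == 2.5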
function lp = gpcf_linear_lp(gpcf)
%GPCF_LINEAR_LP Evaluate the log prior of covariance function parameters
%
% Description
% LP = GPCF_LINEAR_LP(GPCF) takes a covariance function
% structure GPCF and returns log(p(th)), where th collects the
% parameters. This is a mandatory subfunction used for example
% in energy computations.
%
% See also
% GPCF_LINEAR_PAK, GPCF_LINEAR_UNPAK, GPCF_LINEAR_LPG, GP_E
% Evaluate the prior contribution to the error. The parameters that
% are sampled are from the space W = log(w), where w are the "real"
% parameters. On the other hand, errors are evaluated in the W-space,
% so we also need to take into account the Jacobian of the
% transformation W -> w = exp(W). See Gelman et al., 2004,
% Bayesian Data Analysis, second edition, p. 24.
lp = 0;
gpp=gpcf.p;
if ~isempty(gpp.coeffSigma2)
lp = gpp.coeffSigma2.fh.lp(gpcf.coeffSigma2, gpp.coeffSigma2) + sum(log(gpcf.coeffSigma2));
end
end
function lpg = gpcf_linear_lpg(gpcf)
%GPCF_LINEAR_LPG Evaluate gradient of the log prior with respect
% to the parameters.
%
% Description
% LPG = GPCF_LINEAR_LPG(GPCF) takes a covariance function
% structure GPCF and returns LPG = d log (p(th))/dth, where th
% is the vector of parameters. This is a mandatory subfunction
% used for example in gradient computations.
%
% See also
% GPCF_LINEAR_PAK, GPCF_LINEAR_UNPAK, GPCF_LINEAR_LP, GP_G
lpg = [];
gpp=gpcf.p;
if ~isempty(gpcf.p.coeffSigma2)
lll=length(gpcf.coeffSigma2);
lpgs = gpp.coeffSigma2.fh.lpg(gpcf.coeffSigma2, gpp.coeffSigma2);
lpg = [lpg lpgs(1:lll).*gpcf.coeffSigma2+1 lpgs(lll+1:end)];
end
end
function DKff = gpcf_linear_cfg(gpcf, x, x2, mask, i1)
%GPCF_LINEAR_CFG Evaluate gradient of covariance function
% with respect to the parameters
%
% Description
% DKff = GPCF_LINEAR_CFG(GPCF, X) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X) with
% respect to th (cell array with matrix elements). This is a
% mandatory subfunction used in gradient computations.
%
% DKff = GPCF_LINEAR_CFG(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_LINEAR_CFG(GPCF, X, [], MASK) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the diagonal of gradients of covariance matrix
% Kff = k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_LINEAR_CFG(GPCF,X,X2,MASK,i) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradient of covariance matrix Kff =
% k(X,X2), or k(X,X) if X2 is empty, with respect to ith
% hyperparameter. This subfunction is needed when using
% memory save option in gp_set.
%
% See also
% GPCF_LINEAR_PAK, GPCF_LINEAR_UNPAK, GPCF_LINEAR_LP, GP_G
[n, m] =size(x);
DKff = {};
if nargin==5
% Use memory save option
savememory=1;
if i1==0
% Return number of hyperparameters
DKff=0;
if ~isempty(gpcf.p.coeffSigma2)
DKff=length(gpcf.coeffSigma2);
end
return
end
else
savememory=0;
end
% Evaluate: DKff{1} = d Kff / d coeffSigma2
% NOTE! Here we have already taken into account that the parameters are transformed
% through log() and thus dK/dlog(p) = p * dK/dp
% evaluate the gradient for training covariance
if nargin == 2 || (isempty(x2) && isempty(mask))
if isfield(gpcf, 'selectedVariables')
if ~isempty(gpcf.p.coeffSigma2)
if length(gpcf.coeffSigma2) == 1
DKff{1}=gpcf.coeffSigma2*x(:,gpcf.selectedVariables)*(x(:,gpcf.selectedVariables)');
else
if ~savememory
i1=1:length(gpcf.coeffSigma2);
end
for ii1=i1
DD = gpcf.coeffSigma2(ii1)*x(:,gpcf.selectedVariables(ii1))*(x(:,gpcf.selectedVariables(ii1))');
DD(abs(DD)<=eps) = 0;
DKff{ii1}= (DD+DD')./2;
end
end
end
else
if ~isempty(gpcf.p.coeffSigma2)
if length(gpcf.coeffSigma2) == 1
DKff{1}=gpcf.coeffSigma2*x*(x');
else
if isa(gpcf.coeffSigma2,'single')
epsi=eps('single');
else
epsi=eps;
end
if ~savememory
i1=1:length(gpcf.coeffSigma2);
end
DKff=cell(1,length(i1));
for ii1=i1
DD = gpcf.coeffSigma2(ii1)*x(:,ii1)*(x(:,ii1)');
DD(abs(DD)<=epsi) = 0;
DKff{ii1}= (DD+DD')./2;
end
end
end
end
% Evaluate the gradient of non-symmetric covariance (e.g. K_fu)
elseif nargin == 3 || isempty(mask)
if size(x,2) ~= size(x2,2)
error('gpcf_linear -> _ghyper: The number of columns in x and x2 has to be the same. ')
end
if isfield(gpcf, 'selectedVariables')
if ~isempty(gpcf.p.coeffSigma2)
if length(gpcf.coeffSigma2) == 1
DKff{1}=gpcf.coeffSigma2*x(:,gpcf.selectedVariables)*(x2(:,gpcf.selectedVariables)');
else
if ~savememory
i1=1:length(gpcf.coeffSigma2);
end
for ii1=i1
DKff{ii1}=gpcf.coeffSigma2(ii1)*x(:,gpcf.selectedVariables(ii1))*(x2(:,gpcf.selectedVariables(ii1))');
end
end
end
else
if ~isempty(gpcf.p.coeffSigma2)
if length(gpcf.coeffSigma2) == 1
DKff{1}=gpcf.coeffSigma2*x*(x2');
else
if ~savememory
i1=1:m;
end
for ii1=i1
DKff{ii1}=gpcf.coeffSigma2(ii1)*x(:,ii1)*(x2(:,ii1)');
end
end
end
end
% Evaluate: DKff{1} = d mask(Kff,I) / d coeffSigma2
% DKff{2...} = d mask(Kff,I) / d coeffSigma2
elseif nargin == 4 || nargin == 5
if isfield(gpcf, 'selectedVariables')
if ~isempty(gpcf.p.coeffSigma2)
if length(gpcf.coeffSigma2) == 1
DKff{1}=gpcf.coeffSigma2*sum(x(:,gpcf.selectedVariables).^2,2); % d mask(Kff,I) / d coeffSigma2
else
if ~savememory
i1=1:length(gpcf.coeffSigma2);
end
for ii1=i1
DKff{ii1}=gpcf.coeffSigma2(ii1)*(x(:,gpcf.selectedVariables(ii1)).^2); % d mask(Kff,I) / d coeffSigma2
end
end
end
else
if ~isempty(gpcf.p.coeffSigma2)
if length(gpcf.coeffSigma2) == 1
DKff{1}=gpcf.coeffSigma2*sum(x.^2,2); % d mask(Kff,I) / d coeffSigma2
else
if ~savememory
i1=1:m;
end
for ii1=i1
DKff{ii1}=gpcf.coeffSigma2(ii1)*(x(:,ii1).^2); % d mask(Kff,I) / d coeffSigma2
end
end
end
end
end
if savememory
DKff=DKff{i1};
end
end
function DKff = gpcf_linear_ginput(gpcf, x, x2, i1)
%GPCF_LINEAR_GINPUT Evaluate gradient of covariance function with
% respect to x.
%
% Description
% DKff = GPCF_LINEAR_GINPUT(GPCF, X) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_LINEAR_GINPUT(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_LINEAR_GINPUT(GPCF, X, X2, i) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to ith covariate in X (matrix).
% This subfunction is needed when using memory save option
% in gp_set.
%
% See also
% GPCF_LINEAR_PAK, GPCF_LINEAR_UNPAK, GPCF_LINEAR_LP, GP_G
[n, m] =size(x);
if nargin==4
% Use memory save option
savememory=1;
if i1==0
% Return number of covariates
if isfield(gpcf,'selectedVariables')
DKff=length(gpcf.selectedVariables);
else
DKff=m;
end
return
end
else
savememory=0;
end
if nargin == 2 || isempty(x2)
%K = feval(gpcf.fh.trcov, gpcf, x);
if length(gpcf.coeffSigma2) == 1
% In the case of an isotropic LINEAR
s = repmat(gpcf.coeffSigma2, 1, m);
else
s = gpcf.coeffSigma2;
end
ii1 = 0;
if isfield(gpcf, 'selectedVariables')
if ~savememory
i1=1:length(gpcf.selectedVariables);
end
for i=i1
for j = 1:n
DK = zeros(n);
DK(j,:)=s(i)*x(:,gpcf.selectedVariables(i))';
DK = DK + DK';
ii1 = ii1 + 1;
DKff{ii1} = DK;
end
end
else
if ~savememory
i1=1:m;
end
for i=i1
for j = 1:n
DK = zeros(n);
DK(j,:)=s(i)*x(:,i)';
DK = DK + DK';
ii1 = ii1 + 1;
DKff{ii1} = DK;
end
end
end
elseif nargin == 3 || nargin == 4
%K = feval(gpcf.fh.cov, gpcf, x, x2);
if length(gpcf.coeffSigma2) == 1
% In the case of an isotropic LINEAR
s = repmat(gpcf.coeffSigma2, 1, m);
else
s = gpcf.coeffSigma2;
end
ii1 = 0;
if isfield(gpcf, 'selectedVariables')
if ~savememory
i1=1:length(gpcf.selectedVariables);
end
for i=i1
for j = 1:n
DK = zeros(n, size(x2,1));
DK(j,:)=s(i)*x2(:,gpcf.selectedVariables(i))';
ii1 = ii1 + 1;
DKff{ii1} = DK;
end
end
else
if ~savememory
i1=1:m;
end
for i=i1
for j = 1:n
DK = zeros(n, size(x2,1));
DK(j,:)=s(i)*x2(:,i)';
ii1 = ii1 + 1;
DKff{ii1} = DK;
end
end
end
end
end
function C = gpcf_linear_cov(gpcf, x1, x2, varargin)
%GP_LINEAR_COV Evaluate covariance matrix between two input vectors
%
% Description
% C = GP_LINEAR_COV(GP, TX, X) takes in covariance function of
% a Gaussian process GP and two matrixes TX and X that contain
% input vectors to GP. Returns covariance matrix C. Every
% element ij of C contains covariance between inputs i in TX
% and j in X. This is a mandatory subfunction used for example in
% prediction and energy computations.
%
% See also
% GPCF_LINEAR_TRCOV, GPCF_LINEAR_TRVAR, GP_COV, GP_TRCOV
if isempty(x2)
x2=x1;
end
[n1,m1]=size(x1);
[n2,m2]=size(x2);
if m1~=m2
error('the number of columns of X1 and X2 has to be the same')
end
if isfield(gpcf, 'selectedVariables')
C = x1(:,gpcf.selectedVariables)*diag(gpcf.coeffSigma2)*(x2(:,gpcf.selectedVariables)');
else
C = x1*diag(gpcf.coeffSigma2)*(x2');
end
C(abs(C)<=eps) = 0;
end
function C = gpcf_linear_trcov(gpcf, x)
%GP_LINEAR_TRCOV Evaluate training covariance matrix of inputs
%
% Description
% C = GP_LINEAR_TRCOV(GP, TX) takes in covariance function of
% a Gaussian process GP and matrix TX that contains training
% input vectors. Returns covariance matrix C. Every element ij
% of C contains covariance between inputs i and j in TX. This
% is a mandatory subfunction used for example in prediction and
% energy computations.
%
% See also
% GPCF_LINEAR_COV, GPCF_LINEAR_TRVAR, GP_COV, GP_TRCOV
if isfield(gpcf, 'selectedVariables')
C = x(:,gpcf.selectedVariables)*diag(gpcf.coeffSigma2)*(x(:,gpcf.selectedVariables)');
else
C = x*diag(gpcf.coeffSigma2)*(x');
end
C(abs(C)<=eps) = 0;
C = (C+C')./2;
end
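% A minimal identity sketch (hypothetical values): the training covariance
% is the weighted Gram matrix x*diag(coeffSigma2)*x', symmetrized and with
% near-zero entries truncated.
%
%   x = [1 2; 3 4; 5 6];
%   gpcf = gpcf_linear('coeffSigma2', [0.5 2]);
%   C = gpcf.fh.trcov(gpcf, x);          % equals x*diag([0.5 2])*x'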
function C = gpcf_linear_trvar(gpcf, x)
%GP_LINEAR_TRVAR Evaluate training variance vector
%
% Description
% C = GP_LINEAR_TRVAR(GPCF, TX) takes in covariance function
% of a Gaussian process GPCF and matrix TX that contains
% training inputs. Returns variance vector C. Every element i
% of C contains variance of input i in TX. This is a mandatory
% subfunction used for example in prediction and energy computations.
%
%
% See also
% GPCF_LINEAR_COV, GP_COV, GP_TRCOV
if length(gpcf.coeffSigma2) == 1
if isfield(gpcf, 'selectedVariables')
C=gpcf.coeffSigma2.*sum(x(:,gpcf.selectedVariables).^2,2);
else
C=gpcf.coeffSigma2.*sum(x.^2,2);
end
else
if isfield(gpcf, 'selectedVariables')
C=sum(repmat(gpcf.coeffSigma2, size(x,1), 1).*x(:,gpcf.selectedVariables).^2,2);
else
C=sum(repmat(gpcf.coeffSigma2, size(x,1), 1).*x.^2,2);
end
end
C(abs(C)<eps)=0;
end
function reccf = gpcf_linear_recappend(reccf, ri, gpcf)
%RECAPPEND Record append
%
% Description
% RECCF = GPCF_LINEAR_RECAPPEND(RECCF, RI, GPCF) takes a
% covariance function record structure RECCF, record index RI
% and covariance function structure GPCF with the current MCMC
% samples of the parameters. Returns RECCF which contains all
% the old samples and the current samples from GPCF. This
% subfunction is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC and GP_MC -> RECAPPEND
if nargin == 2
% Initialize the record
reccf.type = 'gpcf_linear';
% Initialize parameters
reccf.coeffSigma2= [];
% Set the function handles
reccf.fh.pak = @gpcf_linear_pak;
reccf.fh.unpak = @gpcf_linear_unpak;
reccf.fh.lp = @gpcf_linear_lp;
reccf.fh.lpg = @gpcf_linear_lpg;
reccf.fh.cfg = @gpcf_linear_cfg;
reccf.fh.cov = @gpcf_linear_cov;
reccf.fh.trcov = @gpcf_linear_trcov;
reccf.fh.trvar = @gpcf_linear_trvar;
reccf.fh.recappend = @gpcf_linear_recappend;
reccf.p=[];
reccf.p.coeffSigma2=[];
if ~isempty(ri.p.coeffSigma2)
reccf.p.coeffSigma2 = ri.p.coeffSigma2;
end
else
% Append to the record
gpp = gpcf.p;
% record coeffSigma2
reccf.coeffSigma2(ri,:)=gpcf.coeffSigma2;
if isfield(gpp,'coeffSigma2') && ~isempty(gpp.coeffSigma2)
reccf.p.coeffSigma2 = gpp.coeffSigma2.fh.recappend(reccf.p.coeffSigma2, ri, gpcf.p.coeffSigma2);
end
if isfield(gpcf, 'selectedVariables')
reccf.selectedVariables = gpcf.selectedVariables;
end
end
end
|
github
|
lcnhappe/happe-master
|
lik_weibull.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/lik_weibull.m
| 23,168 |
windows_1250
|
75fa0c5df6db30dda34b9a8e3c552dcd
|
function lik = lik_weibull(varargin)
%LIK_WEIBULL Create a right censored Weibull likelihood structure
%
% Description
% LIK = LIK_WEIBULL('PARAM1',VALUE1,'PARAM2',VALUE2,...)
% creates a likelihood structure for right censored Weibull
% survival model in which the named parameters have the
% specified values. Any unspecified parameters are set to
% default values.
%
% LIK = LIK_WEIBULL(LIK,'PARAM1',VALUE1,'PARAM2',VALUE2,...)
% modify a likelihood structure with the named parameters
% altered with the specified values.
%
% Parameters for Weibull likelihood [default]
% shape - shape parameter r [1]
% shape_prior - prior for shape [prior_logunif]
%
% Note! If the prior is 'prior_fixed' then the parameter in
% question is considered fixed and it is not handled in
% optimization, grid integration, MCMC etc.
%
% The likelihood is defined as follows:
%
%                __ n
%   p(y|f, z) = ||_{i=1} [ r^(1-z_i) exp( (1-z_i)*(-f_i)
%                                         + (1-z_i)*(r-1)*log(y_i)
%                                         - exp(-f_i)*y_i^r ) ]
%
% where r is the shape parameter of Weibull distribution.
% z is a vector of censoring indicators with z = 0 for uncensored event
% and z = 1 for right censored event.
%
% When using the Weibull likelihood you need to give the vector z
% as an extra parameter to each function that requires also y.
% For example, you should call gpla_e as follows: gpla_e(w, gp,
% x, y, 'z', z)
%
% See also
% GP_SET, LIK_*, PRIOR_*
%
% Copyright (c) 2011 Jaakko Riihimäki
% Copyright (c) 2011 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'LIK_WEIBULL';
ip.addOptional('lik', [], @isstruct);
ip.addParamValue('shape',1, @(x) isscalar(x) && x>0);
ip.addParamValue('shape_prior',prior_logunif(), @(x) isstruct(x) || isempty(x));
ip.parse(varargin{:});
lik=ip.Results.lik;
if isempty(lik)
init=true;
lik.type = 'Weibull';
else
if ~isfield(lik,'type') || ~isequal(lik.type,'Weibull')
error('First argument does not seem to be a valid likelihood function structure')
end
init=false;
end
% Initialize parameters
if init || ~ismember('shape',ip.UsingDefaults)
lik.shape = ip.Results.shape;
end
% Initialize prior structure
if init
lik.p=[];
end
if init || ~ismember('shape_prior',ip.UsingDefaults)
lik.p.shape=ip.Results.shape_prior;
end
if init
% Set the function handles to the subfunctions
lik.fh.pak = @lik_weibull_pak;
lik.fh.unpak = @lik_weibull_unpak;
lik.fh.lp = @lik_weibull_lp;
lik.fh.lpg = @lik_weibull_lpg;
lik.fh.ll = @lik_weibull_ll;
lik.fh.llg = @lik_weibull_llg;
lik.fh.llg2 = @lik_weibull_llg2;
lik.fh.llg3 = @lik_weibull_llg3;
lik.fh.tiltedMoments = @lik_weibull_tiltedMoments;
lik.fh.siteDeriv = @lik_weibull_siteDeriv;
lik.fh.invlink = @lik_weibull_invlink;
lik.fh.predy = @lik_weibull_predy;
lik.fh.predcdf = @lik_weibull_predcdf;
lik.fh.recappend = @lik_weibull_recappend;
end
end
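% A minimal usage sketch (hypothetical data): the censoring vector z must
% accompany y in every likelihood call, with z(i) = 1 marking a right
% censored observation.
%
%   lik = lik_weibull('shape', 1.5);
%   y = [2.1; 0.7; 3.4];  z = [0; 0; 1]; % third survival time is censored
%   f = zeros(3,1);                      % latent values
%   ll = lik.fh.ll(lik, y, f, z);        % log p(y | f, z)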
function [w,s] = lik_weibull_pak(lik)
%LIK_WEIBULL_PAK Combine likelihood parameters into one vector.
%
% Description
% W = LIK_WEIBULL_PAK(LIK) takes a likelihood structure LIK and
% combines the parameters into a single row vector W. This is a
% mandatory subfunction used for example in energy and gradient
% computations.
%
% w = log(lik.shape)
%
% See also
% LIK_WEIBULL_UNPAK, GP_PAK
w=[];s={};
if ~isempty(lik.p.shape)
w = log(lik.shape);
s = [s; 'log(weibull.shape)'];
[wh sh] = lik.p.shape.fh.pak(lik.p.shape);
w = [w wh];
s = [s; sh];
end
end
function [lik, w] = lik_weibull_unpak(lik, w)
%LIK_WEIBULL_UNPAK Extract likelihood parameters from the vector.
%
% Description
% [LIK, W] = LIK_WEIBULL_UNPAK(W, LIK) takes a likelihood
% structure LIK and extracts the parameters from the vector W
% to the LIK structure. This is a mandatory subfunction used
% for example in energy and gradient computations.
%
% Assignment is inverse of
% w = log(lik.shape)
%
% See also
% LIK_WEIBULL_PAK, GP_UNPAK
if ~isempty(lik.p.shape)
lik.shape = exp(w(1));
w = w(2:end);
[p, w] = lik.p.shape.fh.unpak(lik.p.shape, w);
lik.p.shape = p;
end
end
function lp = lik_weibull_lp(lik, varargin)
%LIK_WEIBULL_LP log(prior) of the likelihood parameters
%
% Description
% LP = LIK_WEIBULL_LP(LIK) takes a likelihood structure LIK and
% returns log(p(th)), where th collects the parameters. This
% subfunction is needed when there are likelihood parameters.
%
% See also
% LIK_WEIBULL_LLG, LIK_WEIBULL_LLG3, LIK_WEIBULL_LLG2, GPLA_E
% If prior for shape parameter, add its contribution
lp=0;
if ~isempty(lik.p.shape)
lp = lik.p.shape.fh.lp(lik.shape, lik.p.shape) +log(lik.shape);
end
end
function lpg = lik_weibull_lpg(lik)
%LIK_WEIBULL_LPG d log(prior)/dth of the likelihood
% parameters th
%
% Description
% E = LIK_WEIBULL_LPG(LIK) takes a likelihood structure LIK and
% returns d log(p(th))/dth, where th collects the parameters.
% This subfunction is needed when there are likelihood parameters.
%
% See also
% LIK_WEIBULL_LLG, LIK_WEIBULL_LLG3, LIK_WEIBULL_LLG2, GPLA_G
lpg=[];
if ~isempty(lik.p.shape)
% Evaluate the gprior with respect to shape
ggs = lik.p.shape.fh.lpg(lik.shape, lik.p.shape);
lpg = ggs(1).*lik.shape + 1;
if length(ggs) > 1
lpg = [lpg ggs(2:end)];
end
end
end
function ll = lik_weibull_ll(lik, y, f, z)
%LIK_WEIBULL_LL Log likelihood
%
% Description
% LL = LIK_WEIBULL_LL(LIK, Y, F, Z) takes a likelihood
% structure LIK, survival times Y, censoring indicators Z, and
% latent values F. Returns the log likelihood, log p(y|f,z).
% This subfunction is needed when using Laplace approximation
% or MCMC for inference with non-Gaussian likelihoods. This
% subfunction is also used in information criteria (DIC, WAIC)
% computations.
%
% See also
% LIK_WEIBULL_LLG, LIK_WEIBULL_LLG3, LIK_WEIBULL_LLG2, GPLA_E
if isempty(z)
error(['lik_weibull -> lik_weibull_ll: missing z! '...
'Weibull likelihood needs the censoring '...
'indicators as an extra input z. See, for '...
'example, lik_weibull and gpla_e. ']);
end
a = lik.shape;
ll = sum((1-z).*(log(a) + (a-1).*log(y)-f) - exp(-f).*y.^a);
end
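% A consistency sketch (added note; wblpdf/wblcdf require the Statistics
% Toolbox): with exp(-f) = lambda^(-a), i.e. lambda = exp(f/a), the terms
% above match the standard Weibull parameterization.
%
%   lik = lik_weibull('shape', 2);  a = lik.shape;
%   y = 1.3;  f = 0.4;
%   lik.fh.ll(lik, y, f, 0)   % uncensored: log(wblpdf(y, exp(f/a), a))
%   lik.fh.ll(lik, y, f, 1)   % censored:   log(1 - wblcdf(y, exp(f/a), a))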
function llg = lik_weibull_llg(lik, y, f, param, z)
%LIK_WEIBULL_LLG Gradient of the log likelihood
%
% Description
% LLG = LIK_WEIBULL_LLG(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, survival times Y, censoring indicators Z and
% latent values F. Returns the gradient of the log likelihood
% with respect to PARAM. At the moment PARAM can be 'param' or
% 'latent'. This subfunction is needed when using Laplace
% approximation or MCMC for inference with non-Gaussian likelihoods.
%
% See also
% LIK_WEIBULL_LL, LIK_WEIBULL_LLG2, LIK_WEIBULL_LLG3, GPLA_E
if isempty(z)
error(['lik_weibull -> lik_weibull_llg: missing z! '...
'Weibull likelihood needs the censoring '...
'indicators as an extra input z. See, for '...
'example, lik_weibull and gpla_e. ']);
end
a = lik.shape;
switch param
case 'param'
llg = sum((1-z).*(1./a + log(y)) - exp(-f).*y.^a.*log(y));
% correction for the log transformation
llg = llg.*lik.shape;
case 'latent'
llg = -(1-z) + exp(-f).*y.^a;
end
end
function llg2 = lik_weibull_llg2(lik, y, f, param, z)
%LIK_WEIBULL_LLG2 Second gradients of the log likelihood
%
% Description
% LLG2 = LIK_WEIBULL_LLG2(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, survival times Y, censoring indicators Z, and
% latent values F. Returns the hessian of the log likelihood
% with respect to PARAM. At the moment PARAM can be only
% 'latent'. LLG2 is a vector with diagonal elements of the
% Hessian matrix (off diagonals are zero). This subfunction
% is needed when using Laplace approximation or EP for
% inference with non-Gaussian likelihoods.
%
% See also
% LIK_WEIBULL_LL, LIK_WEIBULL_LLG, LIK_WEIBULL_LLG3, GPLA_E
if isempty(z)
error(['lik_weibull -> lik_weibull_llg2: missing z! '...
'Weibull likelihood needs the censoring '...
'indicators as an extra input z. See, for '...
'example, lik_weibull and gpla_e. ']);
end
a = lik.shape;
switch param
case 'param'
case 'latent'
llg2 = -exp(-f).*y.^a;
case 'latent+param'
llg2 = exp(-f).*y.^a.*log(y);
% correction due to the log transformation
llg2 = llg2.*lik.shape;
end
end
function llg3 = lik_weibull_llg3(lik, y, f, param, z)
%LIK_WEIBULL_LLG3 Third gradients of the log likelihood
%
% Description
% LLG3 = LIK_WEIBULL_LLG3(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, survival times Y, censoring indicators Z and
% latent values F and returns the third gradients of the log
% likelihood with respect to PARAM. At the moment PARAM can be
% only 'latent'. LLG3 is a vector with third gradients. This
% subfunction is needed when using Laplace approximation for
% inference with non-Gaussian likelihoods.
%
% See also
% LIK_WEIBULL_LL, LIK_WEIBULL_LLG, LIK_WEIBULL_LLG2, GPLA_E, GPLA_G
if isempty(z)
error(['lik_weibull -> lik_weibull_llg3: missing z! '...
'Weibull likelihood needs the censoring '...
'indicators as an extra input z. See, for '...
'example, lik_weibull and gpla_e. ']);
end
a = lik.shape;
switch param
case 'param'
case 'latent'
llg3 = exp(-f).*y.^a;
case 'latent2+param'
llg3 = -exp(-f).*y.^a.*log(y);
% correction due to the log transformation
llg3 = llg3.*lik.shape;
end
end
function [logM_0, m_1, sigm2hati1] = lik_weibull_tiltedMoments(lik, y, i1, sigm2_i, myy_i, z)
%LIK_WEIBULL_TILTEDMOMENTS Returns the marginal moments for EP algorithm
%
% Description
% [M_0, M_1, M2] = LIK_WEIBULL_TILTEDMOMENTS(LIK, Y, I, S2,
% MYY, Z) takes a likelihood structure LIK, survival times
% Y, censoring indicators Z, index I and cavity variance S2 and
% mean MYY. Returns the zeroth moment M_0, mean M_1 and
% variance M_2 of the posterior marginal (see Rasmussen and
% Williams (2006): Gaussian processes for Machine Learning,
% page 55). This subfunction is needed when using EP for
% inference with non-Gaussian likelihoods.
%
% See also
% GPEP_E
% if isempty(z)
% error(['lik_weibull -> lik_weibull_tiltedMoments: missing z!'...
% 'Weibull likelihood needs the censoring '...
% 'indicators as an extra input z. See, for '...
% 'example, lik_weibull and gpep_e. ']);
% end
yy = y(i1);
yc = 1-z(i1);
r = lik.shape;
logM_0=zeros(size(yy));
m_1=zeros(size(yy));
sigm2hati1=zeros(size(yy));
for i=1:length(i1)
% get a function handle of an unnormalized tilted distribution
% (likelihood * cavity = Weibull * Gaussian)
% and useful integration limits
[tf,minf,maxf]=init_weibull_norm(yy(i),myy_i(i),sigm2_i(i),yc(i),r);
% Integrate with quadrature
RTOL = 1.e-6;
ATOL = 1.e-10;
[m_0, m_1(i), m_2] = quad_moments(tf, minf, maxf, RTOL, ATOL);
sigm2hati1(i) = m_2 - m_1(i).^2;
% If the second central moment is less than cavity variance
% integrate more precisely. Theoretically for log-concave
% likelihood should be sigm2hati1 < sigm2_i.
if sigm2hati1(i) >= sigm2_i(i)
ATOL = ATOL.^2;
RTOL = RTOL.^2;
[m_0, m_1(i), m_2] = quad_moments(tf, minf, maxf, RTOL, ATOL);
sigm2hati1(i) = m_2 - m_1(i).^2;
if sigm2hati1(i) >= sigm2_i(i)
error('lik_weibull_tilted_moments: sigm2hati1 >= sigm2_i');
end
end
logM_0(i) = log(m_0);
end
end
function [g_i] = lik_weibull_siteDeriv(lik, y, i1, sigm2_i, myy_i, z)
%LIK_WEIBULL_SITEDERIV Evaluate the expectation of the gradient
% of the log likelihood term with respect
% to the likelihood parameters for EP
%
% Description
% [G_I] = LIK_WEIBULL_SITEDERIV(LIK, Y, I, S2, MYY, Z) takes a
% likelihood structure LIK, survival times Y, censoring
% indicators Z, index I and cavity variance S2 and mean MYY.
% Returns E_f [d log p(y_i|f_i) /d a], where a is the
% likelihood parameter and the expectation is over the
% marginal posterior. This term is needed when evaluating the
% gradients of the marginal likelihood estimate Z_EP with
% respect to the likelihood parameters (see Seeger (2008):
% Expectation propagation for exponential families). This
% subfunction is needed when using EP for inference with
% non-Gaussian likelihoods and there are likelihood parameters.
%
% See also
% GPEP_G
if isempty(z)
error(['lik_weibull -> lik_weibull_siteDeriv: missing z!'...
'Weibull likelihood needs the censoring '...
'indicators as an extra input z. See, for '...
'example, lik_weibull and gpla_e. ']);
end
yy = y(i1);
yc = 1-z(i1);
r = lik.shape;
% get a function handle of an unnormalized tilted distribution
% (likelihood * cavity = Weibull * Gaussian)
% and useful integration limits
[tf,minf,maxf]=init_weibull_norm(yy,myy_i,sigm2_i,yc,r);
% additionally get function handle for the derivative
td = @deriv;
% Integrate with quadgk
[m_0, fhncnt] = quadgk(tf, minf, maxf);
[g_i, fhncnt] = quadgk(@(f) td(f).*tf(f)./m_0, minf, maxf);
g_i = g_i.*r;
function g = deriv(f)
g = yc.*(1./r + log(yy)) - exp(-f).*yy.^r.*log(yy);
end
end
function [lpy, Ey, Vary] = lik_weibull_predy(lik, Ef, Varf, yt, zt)
%LIK_WEIBULL_PREDY Returns the predictive mean, variance and density of y
%
% Description
% LPY = LIK_WEIBULL_PREDY(LIK, EF, VARF YT, ZT)
% Returns logarithm of the predictive density PY of YT, that is
% p(yt | zt) = \int p(yt | f, zt) p(f|y) df.
% This requires also the survival times YT, censoring indicators ZT.
% This subfunction is needed when computing posterior predictive
% distributions for future observations.
%
% [LPY, EY, VARY] = LIK_WEIBULL_PREDY(LIK, EF, VARF) takes a
% likelihood structure LIK, posterior mean EF and posterior
% Variance VARF of the latent variable and returns the
% posterior predictive mean EY and variance VARY of the
% observations related to the latent variables. This subfunction
% is needed when computing posterior predictive distributions for
% future observations.
%
%
% See also
% GPLA_PRED, GPEP_PRED, GPMC_PRED
if isempty(zt)
error(['lik_weibull -> lik_weibull_predy: missing zt!'...
'Weibull likelihood needs the censoring '...
'indicators as an extra input zt. See, for '...
'example, lik_weibull and gpla_e. ']);
end
yc = 1-zt;
r = lik.shape;
Ey=[];
Vary=[];
% lpy = zeros(size(Ef));
% Ey = zeros(size(Ef));
% EVary = zeros(size(Ef));
% VarEy = zeros(size(Ef));
%
% % Evaluate Ey and Vary
% for i1=1:length(Ef)
% %%% With quadrature
% myy_i = Ef(i1);
% sigm_i = sqrt(Varf(i1));
% minf=myy_i-6*sigm_i;
% maxf=myy_i+6*sigm_i;
%
% F = @(f) exp(log(yc(i1))+f+norm_lpdf(f,myy_i,sigm_i));
% Ey(i1) = quadgk(F,minf,maxf);
%
% F2 = @(f) exp(log(yc(i1).*exp(f)+((yc(i1).*exp(f)).^2/r))+norm_lpdf(f,myy_i,sigm_i));
% EVary(i1) = quadgk(F2,minf,maxf);
%
% F3 = @(f) exp(2*log(yc(i1))+2*f+norm_lpdf(f,myy_i,sigm_i));
% VarEy(i1) = quadgk(F3,minf,maxf) - Ey(i1).^2;
% end
% Vary = EVary + VarEy;
% Evaluate the posterior predictive densities of the given observations
lpy = zeros(length(yt),1);
for i1=1:length(yt)
% get a function handle of the likelihood times posterior
% (likelihood * posterior = Weibull * Gaussian)
% and useful integration limits
[pdf,minf,maxf]=init_weibull_norm(...
yt(i1),Ef(i1),Varf(i1),yc(i1),r);
% integrate over the f to get posterior predictive distribution
lpy(i1) = log(quadgk(pdf, minf, maxf));
end
end
function [df,minf,maxf] = init_weibull_norm(yy,myy_i,sigm2_i,yc,r)
%INIT_WEIBULL_NORM
%
% Description
% Return function handle to a function evaluating
% Weibull * Gaussian which is used for evaluating
% (likelihood * cavity) or (likelihood * posterior) Return
% also useful limits for integration. This is private function
% for lik_weibull. This subfunction is needed by subfunctions
% tiltedMoments, siteDeriv and predy.
%
% See also
% LIK_WEIBULL_TILTEDMOMENTS, LIK_WEIBULL_SITEDERIV,
% LIK_WEIBULL_PREDY
% avoid repetitive evaluation of constant part
ldconst = yc*log(r)+yc*(r-1)*log(yy)...
- log(sigm2_i)/2 - log(2*pi)/2;
% Create function handle for the function to be integrated
df = @weibull_norm;
% use log to avoid underflow, and derivates for faster search
ld = @log_weibull_norm;
ldg = @log_weibull_norm_g;
ldg2 = @log_weibull_norm_g2;
% Set the limits for integration
  if yc==0
    % when yc==0 (right censored event), the likelihood is monotone in f
    % and its mode is not defined; use the mode of the Gaussian (cavity
    % or posterior) as a first guess
modef = myy_i;
else
% use precision weighted mean of the Gaussian approximation
% of the Weibull likelihood and Gaussian
mu=-log(yc./(yy.^r));
%s2=1./(yc+1./sigm2_i);
s2=1./yc;
modef = (myy_i/sigm2_i + mu/s2)/(1/sigm2_i + 1/s2);
end
% find the mode of the integrand using Newton iterations
% few iterations is enough, since first guess is in the right direction
niter=4; % number of Newton iterations
mindelta=1e-6; % tolerance in stopping Newton iterations
for ni=1:niter
g=ldg(modef);
h=ldg2(modef);
delta=-g/h;
modef=modef+delta;
if abs(delta)<mindelta
break
end
end
% integrand limits based on Gaussian approximation at mode
modes=sqrt(-1/h);
minf=modef-8*modes;
maxf=modef+8*modes;
modeld=ld(modef);
iter=0;
% check that density at end points is low enough
lddiff=20; % min difference in log-density between mode and end-points
minld=ld(minf);
step=1;
while minld>(modeld-lddiff)
minf=minf-step*modes;
minld=ld(minf);
iter=iter+1;
step=step*2;
if iter>100
error(['lik_weibull -> init_weibull_norm: ' ...
             'integration interval minimum not found ' ...
'even after looking hard!'])
end
end
maxld=ld(maxf);
step=1;
while maxld>(modeld-lddiff)
maxf=maxf+step*modes;
maxld=ld(maxf);
iter=iter+1;
step=step*2;
if iter>100
error(['lik_weibull -> init_weibull_norm: ' ...
             'integration interval maximum not found ' ...
'even after looking hard!'])
end
end
function integrand = weibull_norm(f)
% Weibull * Gaussian
integrand = exp(ldconst ...
-yc.*f -exp(-f).*yy.^r ...
-0.5*(f-myy_i).^2./sigm2_i);
end
function log_int = log_weibull_norm(f)
% log(Weibull * Gaussian)
% log_weibull_norm is used to avoid underflow when searching
% integration interval
log_int = ldconst ...
-yc.*f -exp(-f).*yy.^r ...
-0.5*(f-myy_i).^2./sigm2_i;
end
function g = log_weibull_norm_g(f)
% d/df log(Weibull * Gaussian)
% derivative of log_weibull_norm
g = -yc + exp(-f).*yy.^r ...
+ (myy_i - f)./sigm2_i;
end
function g2 = log_weibull_norm_g2(f)
% d^2/df^2 log(Weibull * Gaussian)
    % second derivative of log_weibull_norm
g2 = - exp(-f).*yy.^r ...
-1/sigm2_i;
end
end
function cdf = lik_weibull_predcdf(lik, Ef, Varf, yt)
%LIK_WEIBULL_PREDCDF Returns the predictive cdf evaluated at yt
%
% Description
% CDF = LIK_WEIBULL_PREDCDF(LIK, EF, VARF, YT)
% Returns the predictive cdf evaluated at YT given likelihood
% structure LIK, posterior mean EF and posterior Variance VARF
% of the latent variable. This subfunction is needed when using
% functions gp_predcdf or gp_kfcv_cdf.
%
% See also
% GP_PREDCDF
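%
%  Usage sketch (illustrative; ts is a hypothetical vector of time points
%  at which the predictive cdf is wanted, and the predictive survival
%  function is its complement):
%    cdf = lik.fh.predcdf(lik, Eft, Varft, ts);
%    St  = 1 - cdf;   % predictive survival probabilities at ts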
r = lik.shape;
% Evaluate the posterior predictive cdf at given yt
cdf = zeros(length(yt),1);
for i1=1:length(yt)
% Get a function handle of the likelihood times posterior
% (likelihood * posterior = Weibull * Gaussian)
% and useful integration limits.
% yc=0 when evaluating predictive cdf
[sf,minf,maxf]=init_weibull_norm(...
yt(i1),Ef(i1),Varf(i1),0,r);
% integrate over the f to get posterior predictive distribution
cdf(i1) = 1-quadgk(sf, minf, maxf);
end
end
function p = lik_weibull_invlink(lik, f)
%LIK_WEIBULL Returns values of inverse link function
%
% Description
% P = LIK_WEIBULL_INVLINK(LIK, F) takes a likelihood structure LIK and
% latent values F and returns the values of inverse link function P.
% This subfunction is needed when using function gp_predprctmu.
%
% See also
% LIK_WEIBULL_LL, LIK_WEIBULL_PREDY
p = exp(f);
end
function reclik = lik_weibull_recappend(reclik, ri, lik)
%RECAPPEND Append the parameters to the record
%
% Description
% RECLIK = GPCF_WEIBULL_RECAPPEND(RECLIK, RI, LIK) takes a
% likelihood record structure RECLIK, record index RI and
% likelihood structure LIK with the current MCMC samples of
% the parameters. Returns RECLIK which contains all the old
% samples and the current samples from LIK. This subfunction
% is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC
if nargin == 2
% Initialize the record
reclik.type = 'Weibull';
% Initialize parameter
reclik.shape = [];
% Set the function handles
reclik.fh.pak = @lik_weibull_pak;
reclik.fh.unpak = @lik_weibull_unpak;
    reclik.fh.lp = @lik_weibull_lp;
    reclik.fh.lpg = @lik_weibull_lpg;
reclik.fh.ll = @lik_weibull_ll;
reclik.fh.llg = @lik_weibull_llg;
reclik.fh.llg2 = @lik_weibull_llg2;
reclik.fh.llg3 = @lik_weibull_llg3;
reclik.fh.tiltedMoments = @lik_weibull_tiltedMoments;
reclik.fh.invlink = @lik_weibull_invlink;
reclik.fh.predy = @lik_weibull_predy;
reclik.fh.predcdf = @lik_weibull_predcdf;
reclik.fh.recappend = @lik_weibull_recappend;
reclik.p=[];
reclik.p.shape=[];
if ~isempty(ri.p.shape)
reclik.p.shape = ri.p.shape;
end
else
% Append to the record
reclik.shape(ri,:)=lik.shape;
if ~isempty(lik.p)
reclik.p.shape = lik.p.shape.fh.recappend(reclik.p.shape, ri, lik.p.shape);
end
end
end
| github | lcnhappe/happe-master | lik_inputdependentweibull.m | .m | happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/lik_inputdependentweibull.m | 17,398 | windows_1250 | c288cecc00237a7f8b7ae149561644c4 |
function lik = lik_inputdependentweibull(varargin)
%LIK_INPUTDEPENDENTWEIBULL Create a right censored input dependent Weibull likelihood structure
%
% Description
%    LIK = LIK_INPUTDEPENDENTWEIBULL('PARAM1',VALUE1,'PARAM2',VALUE2,...)
% creates a likelihood structure for right censored input dependent
% Weibull survival model in which the named parameters have the
% specified values. Any unspecified parameters are set to default
% values.
%
%    LIK = LIK_INPUTDEPENDENTWEIBULL(LIK,'PARAM1',VALUE1,'PARAM2',VALUE2,...)
% modify a likelihood structure with the named parameters
% altered with the specified values.
%
% Parameters for Weibull likelihood [default]
% shape - shape parameter r [1]
% shape_prior - prior for shape [prior_logunif]
%
% Note! If the prior is 'prior_fixed' then the parameter in
% question is considered fixed and it is not handled in
% optimization, grid integration, MCMC etc.
%
% The likelihood is defined as follows:
% __ n
% p(y|f1,f2, z) = || i=1 [ (r*exp(f2_i))^(1-z_i) exp( (1-z_i)*(-f1_i)
% +(1-z_i)*((r*exp(f2_i))-1)*log(y_i)
% -exp(-f1_i)*y_i^(r*exp(f2_i))) ]
%
% where r is the shape parameter of Weibull distribution.
% z is a vector of censoring indicators with z = 0 for uncensored event
% and z = 1 for right censored event. Here the second latent variable f2
% implies the input dependance to the shape parameter in the original
% Weibull likelihood.
%
% When using the Weibull likelihood you need to give the vector z
% as an extra parameter to each function that requires also y.
% For example, you should call gpla_e as follows: gpla_e(w, gp,
% x, y, 'z', z)
%
% See also
% GP_SET, LIK_*, PRIOR_*
%
% Copyright (c) 2011 Jaakko Riihimäki
% Copyright (c) 2011 Aki Vehtari
% Copyright (c) 2012 Ville Tolvanen
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
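
%  Usage sketch (illustrative addition, not from the original file; data
%  x, survival times y and censoring indicators z are assumed to exist,
%  and the gp_set options follow the GPstuff multilatent convention of one
%  covariance function per latent process f1 and f2):
%    lik = lik_inputdependentweibull('shape', 1);
%    gp  = gp_set('lik', lik, 'cf', {gpcf_sexp() gpcf_sexp()}, ...
%                 'comp_cf', {1 2}, 'latent_method', 'Laplace');
%    gp  = gp_optim(gp, x, y, 'z', z);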
ip=inputParser;
ip.FunctionName = 'LIK_INPUTDEPENDENTWEIBULL';
ip.addOptional('lik', [], @isstruct);
ip.addParamValue('shape',1, @(x) isscalar(x) && x>0);
ip.addParamValue('shape_prior',prior_logunif(), @(x) isstruct(x) || isempty(x));
ip.parse(varargin{:});
lik=ip.Results.lik;
if isempty(lik)
init=true;
lik.nondiagW=true;
lik.type = 'Inputdependent-Weibull';
else
    if ~isfield(lik,'type') || ~isequal(lik.type,'Inputdependent-Weibull')
error('First argument does not seem to be a valid likelihood function structure')
end
init=false;
end
% Initialize parameters
if init || ~ismember('shape',ip.UsingDefaults)
lik.shape = ip.Results.shape;
end
% Initialize prior structure
if init
lik.p=[];
end
if init || ~ismember('shape_prior',ip.UsingDefaults)
lik.p.shape=ip.Results.shape_prior;
end
if init
% Set the function handles to the subfunctions
lik.fh.pak = @lik_inputdependentweibull_pak;
lik.fh.unpak = @lik_inputdependentweibull_unpak;
lik.fh.lp = @lik_inputdependentweibull_lp;
lik.fh.lpg = @lik_inputdependentweibull_lpg;
lik.fh.ll = @lik_inputdependentweibull_ll;
lik.fh.llg = @lik_inputdependentweibull_llg;
lik.fh.llg2 = @lik_inputdependentweibull_llg2;
lik.fh.llg3 = @lik_inputdependentweibull_llg3;
lik.fh.invlink = @lik_inputdependentweibull_invlink;
lik.fh.predy = @lik_inputdependentweibull_predy;
lik.fh.recappend = @lik_inputdependentweibull_recappend;
end
end
function [w,s] = lik_inputdependentweibull_pak(lik)
%LIK_INPUTDEPENDENTWEIBULL_PAK Combine likelihood parameters into one vector.
%
% Description
% W = LIK_INPUTDEPENDENTWEIBULL_PAK(LIK) takes a likelihood structure LIK and
% combines the parameters into a single row vector W. This is a
% mandatory subfunction used for example in energy and gradient
% computations.
%
% w = log(lik.shape)
%
% See also
% LIK_INPUTDEPENDENTWEIBULL_UNPAK, GP_PAK
w=[];s={};
if ~isempty(lik.p.shape)
w = log(lik.shape);
s = [s; 'log(weibull.shape)'];
[wh sh] = lik.p.shape.fh.pak(lik.p.shape);
w = [w wh];
s = [s; sh];
end
end
function [lik, w] = lik_inputdependentweibull_unpak(lik, w)
%LIK_INPUTDEPENDENTWEIBULL_UNPAK Extract likelihood parameters from the vector.
%
% Description
% [LIK, W] = LIK_INPUTDEPENDENTWEIBULL_UNPAK(W, LIK) takes a likelihood
% structure LIK and extracts the parameters from the vector W
% to the LIK structure. This is a mandatory subfunction used
% for example in energy and gradient computations.
%
% Assignment is inverse of
% w = log(lik.shape)
%
% See also
% LIK_INPUTDEPENDENTWEIBULL_PAK, GP_UNPAK
if ~isempty(lik.p.shape)
lik.shape = exp(w(1));
w = w(2:end);
[p, w] = lik.p.shape.fh.unpak(lik.p.shape, w);
lik.p.shape = p;
end
end
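% Consistency sketch for the pak/unpak pair (illustrative, not part of
% the original file):
%   w    = lik.fh.pak(lik);         % w(1) = log(lik.shape)
%   lik2 = lik.fh.unpak(lik, w);    % recovers lik2.shape == lik.shape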
function lp = lik_inputdependentweibull_lp(lik, varargin)
%LIK_INPUTDEPENDENTWEIBULL_LP log(prior) of the likelihood parameters
%
% Description
% LP = LIK_INPUTDEPENDENTWEIBULL_LP(LIK) takes a likelihood structure LIK and
% returns log(p(th)), where th collects the parameters. This
% subfunction is needed when there are likelihood parameters.
%
% See also
% LIK_INPUTDEPENDENTWEIBULL_LLG, LIK_INPUTDEPENDENTWEIBULL_LLG3, LIK_INPUTDEPENDENTWEIBULL_LLG2, GPLA_E
% If prior for shape parameter, add its contribution
lp=0;
if ~isempty(lik.p.shape)
lp = lik.p.shape.fh.lp(lik.shape, lik.p.shape) +log(lik.shape);
end
end
function lpg = lik_inputdependentweibull_lpg(lik)
%LIK_INPUTDEPENDENTWEIBULL_LPG d log(prior)/dth of the likelihood
% parameters th
%
% Description
% E = LIK_INPUTDEPENDENTWEIBULL_LPG(LIK) takes a likelihood structure LIK and
% returns d log(p(th))/dth, where th collects the parameters.
% This subfunction is needed when there are likelihood parameters.
%
% See also
% LIK_INPUTDEPENDENTWEIBULL_LLG, LIK_INPUTDEPENDENTWEIBULL_LLG3, LIK_INPUTDEPENDENTWEIBULL_LLG2, GPLA_G
lpg=[];
if ~isempty(lik.p.shape)
% Evaluate the gprior with respect to shape
ggs = lik.p.shape.fh.lpg(lik.shape, lik.p.shape);
lpg = ggs(1).*lik.shape + 1;
if length(ggs) > 1
lpg = [lpg ggs(2:end)];
end
end
end
function ll = lik_inputdependentweibull_ll(lik, y, ff, z)
%LIK_INPUTDEPENDENTWEIBULL_LL Log likelihood
%
% Description
% LL = LIK_INPUTDEPENDENTWEIBULL_LL(LIK, Y, F, Z) takes a likelihood
% structure LIK, survival times Y, censoring indicators Z, and
% latent values F. Returns the log likelihood, log p(y|f,z).
% This subfunction is needed when using Laplace approximation
% or MCMC for inference with non-Gaussian likelihoods. This
% subfunction is also used in information criteria (DIC, WAIC)
% computations.
%
% See also
% LIK_INPUTDEPENDENTWEIBULL_LLG, LIK_INPUTDEPENDENTWEIBULL_LLG3, LIK_INPUTDEPENDENTWEIBULL_LLG2, GPLA_E
if isempty(z)
error(['lik_inputdependentweibull -> lik_inputdependentweibull_ll: missing z! '...
'Weibull likelihood needs the censoring '...
'indicators as an extra input z. See, for '...
'example, lik_inputdependentweibull and gpla_e. ']);
end
f=ff(:);
n=size(y,1);
f1=f(1:n);
f2=f((n+1):2*n);
expf2=exp(f2);
expf2(isinf(expf2))=realmax;
a = lik.shape;
ll = sum((1-z).*(log(a*expf2) + (a*expf2-1).*log(y)-f1) - exp(-f1).*y.^(a*expf2));
end
function llg = lik_inputdependentweibull_llg(lik, y, ff, param, z)
%LIK_INPUTDEPENDENTWEIBULL_LLG Gradient of the log likelihood
%
% Description
% LLG = LIK_INPUTDEPENDENTWEIBULL_LLG(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, survival times Y, censoring indicators Z and
% latent values F. Returns the gradient of the log likelihood
% with respect to PARAM. At the moment PARAM can be 'param' or
% 'latent'. This subfunction is needed when using Laplace
% approximation or MCMC for inference with non-Gaussian likelihoods.
%
% See also
% LIK_INPUTDEPENDENTWEIBULL_LL, LIK_INPUTDEPENDENTWEIBULL_LLG2, LIK_INPUTDEPENDENTWEIBULL_LLG3, GPLA_E
if isempty(z)
error(['lik_inputdependentweibull -> lik_inputdependentweibull_llg: missing z! '...
'Weibull likelihood needs the censoring '...
'indicators as an extra input z. See, for '...
'example, lik_inputdependentweibull and gpla_e. ']);
end
f=ff(:);
n=size(y,1);
f1=f(1:n);
f2=f((n+1):2*n);
expf2=exp(f2);
expf2(isinf(expf2))=realmax;
a = lik.shape;
switch param
case 'param'
llg = sum((1-z).*(1./a + expf2.*log(y)) - exp(-f1).*y.^(a.*expf2).*log(y).*expf2);
% correction for the log transformation
llg = llg.*lik.shape;
case 'latent'
llg1 = -(1-z) + exp(-f1).*y.^(a.*expf2);
llg2 = (1-z).*(1 + a.*expf2.*log(y)) - exp(-f1).*y.^(a.*expf2).*log(y).*a.*expf2;
llg = [llg1; llg2];
end
end
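% Finite-difference check of the latent gradient (illustrative sketch;
% ff0 is a hypothetical latent vector of length 2*n with n = numel(y)):
%   e  = 1e-6;  d1 = zeros(2*numel(y),1);  d1(1) = 1;
%   g  = lik.fh.llg(lik, y, ff0, 'latent', z);
%   gfd = (lik.fh.ll(lik, y, ff0+e*d1, z) - lik.fh.ll(lik, y, ff0, z))/e;
%   % gfd should approximate g(1)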
function llg2 = lik_inputdependentweibull_llg2(lik, y, ff, param, z)
%LIK_INPUTDEPENDENTWEIBULL_LLG2 Second gradients of the log likelihood
%
% Description
% LLG2 = LIK_INPUTDEPENDENTWEIBULL_LLG2(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, survival times Y, censoring indicators Z, and
% latent values F. Returns the hessian of the log likelihood
% with respect to PARAM. At the moment PARAM can be only
% 'latent'. LLG2 is a vector with diagonal elements of the
% Hessian matrix (off diagonals are zero). This subfunction
% is needed when using Laplace approximation or EP for
% inference with non-Gaussian likelihoods.
%
% See also
% LIK_INPUTDEPENDENTWEIBULL_LL, LIK_INPUTDEPENDENTWEIBULL_LLG, LIK_INPUTDEPENDENTWEIBULL_LLG3, GPLA_E
if isempty(z)
error(['lik_inputdependentweibull -> lik_inputdependentweibull_llg2: missing z! '...
'Weibull likelihood needs the censoring '...
'indicators as an extra input z. See, for '...
'example, lik_inputdependentweibull and gpla_e. ']);
end
a = lik.shape;
f=ff(:);
n=size(y,1);
f1=f(1:n);
f2=f((n+1):2*n);
expf2=exp(f2);
expf2(isinf(expf2))=realmax;
switch param
case 'param'
case 'latent'
t1=exp(-f1).*y.^(a.*expf2);
t2=log(y).*a.*expf2;
t3=t1.*t2;
llg2_11 = -t1;
llg2_12 = t3;
llg2_22 = (1-z).*t2 - (t2 + 1).*t3;
llg2 = [llg2_11 llg2_12; llg2_12 llg2_22];
case 'latent+param'
t1=expf2.*log(y);
t2=exp(-f1).*y.^(a.*expf2);
t3=t1.*t2;
llg2_1 = t3;
llg2_2 = (1-z).*t1 - (t1.*a + 1).*t3;
llg2 = [llg2_1; llg2_2];
% correction due to the log transformation
llg2 = llg2.*lik.shape;
end
end
function llg3 = lik_inputdependentweibull_llg3(lik, y, ff, param, z)
%LIK_INPUTDEPENDENTWEIBULL_LLG3 Third gradients of the log likelihood
%
% Description
% LLG3 = LIK_INPUTDEPENDENTWEIBULL_LLG3(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, survival times Y, censoring indicators Z and
% latent values F and returns the third gradients of the log
% likelihood with respect to PARAM. At the moment PARAM can be
% only 'latent'. LLG3 is a vector with third gradients. This
% subfunction is needed when using Laplace approximation for
% inference with non-Gaussian likelihoods.
%
% See also
% LIK_INPUTDEPENDENTWEIBULL_LL, LIK_INPUTDEPENDENTWEIBULL_LLG, LIK_INPUTDEPENDENTWEIBULL_LLG2, GPLA_E, GPLA_G
if isempty(z)
error(['lik_inputdependentweibull -> lik_inputdependentweibull_llg3: missing z! '...
'Weibull likelihood needs the censoring '...
'indicators as an extra input z. See, for '...
'example, lik_inputdependentweibull and gpla_e. ']);
end
a = lik.shape;
f=ff(:);
n=size(y,1);
f1=f(1:n);
f2=f((n+1):2*n);
expf2=exp(f2);
expf2(isinf(expf2))=realmax;
switch param
case 'param'
case 'latent'
t1=a.*expf2.*log(y);
t2=exp(-f1).*y.^(a.*expf2);
t3=t2.*t1;
t4=t3.*t1;
nl=2;
llg3=zeros(nl,nl,nl,n);
llg3(1,1,1,:) = t2;
llg3(2,2,1,:) = t4 + t3;
llg3(2,1,2,:) = llg3(2,2,1,:);
llg3(1,2,2,:) = llg3(2,2,1,:);
llg3(2,1,1,:) = -t3;
llg3(1,2,1,:) = llg3(2,1,1,:);
llg3(1,1,2,:) = llg3(2,1,1,:);
llg3(2,2,2,:) = (1-z).*t1 - t4.*t1 - 3.*t4 - t3;
case 'latent2+param'
t1 = log(y).*expf2;
t2 = exp(-f1).*y.^(a*expf2);
t3 = t2.*t1;
t4 = t3.*t1;
llg3_11 = -t3;
llg3_12 = a.*t4 + t3;
llg3_22 = (1-z).*t1 - a.^2.*t4.*t1 - 3.*a.*t4 - t3;
llg3 = [diag(llg3_11) diag(llg3_12); diag(llg3_12) diag(llg3_22)];
% correction due to the log transformation
llg3 = llg3.*lik.shape;
end
end
function [lpy, Ey, Vary] = lik_inputdependentweibull_predy(lik, Ef, Varf, yt, zt)
%LIK_INPUTDEPENDENTWEIBULL_PREDY Returns the predictive mean, variance and density of y
%
% Description
% LPY = LIK_INPUTDEPENDENTWEIBULL_PREDY(LIK, EF, VARF YT, ZT)
% Returns logarithm of the predictive density PY of YT, that is
% p(yt | zt) = \int p(yt | f, zt) p(f|y) df.
% This requires also the survival times YT, censoring indicators ZT.
% This subfunction is needed when computing posterior predictive
% distributions for future observations.
%
% [LPY, EY, VARY] = LIK_INPUTDEPENDENTWEIBULL_PREDY(LIK, EF, VARF) takes a
% likelihood structure LIK, posterior mean EF and posterior
% Variance VARF of the latent variable and returns the
% posterior predictive mean EY and variance VARY of the
% observations related to the latent variables. This subfunction
% is needed when computing posterior predictive distributions for
% future observations.
%
%
% See also
% GPLA_PRED, GPEP_PRED, GPMC_PRED
if isempty(zt)
error(['lik_inputdependentweibull -> lik_inputdependentweibull_predy: missing zt!'...
'Weibull likelihood needs the censoring '...
'indicators as an extra input zt. See, for '...
'example, lik_inputdependentweibull and gpla_e. ']);
end
yc = 1-zt;
r = lik.shape;
Ef = Ef(:);
ntest = 0.5*size(Ef,1);
Ef1=Ef(1:ntest); Ef2=Ef(ntest+1:end);
% Varf1=squeeze(Varf(1,1,:)); Varf2=squeeze(Varf(2,2,:));
if size(Varf,2) == size(Varf,1)
Varf1=diag(Varf(1:ntest,1:ntest));Varf2=diag(Varf(ntest+1:end,ntest+1:end));
else
Varf1=Varf(:,1); Varf2=Varf(:,2);
end
Ey=[];
Vary=[];
% Evaluate the posterior predictive densities of the given observations
lpy = zeros(length(yt),1);
for i2=1:ntest
m1=Ef1(i2); m2=Ef2(i2);
s1=sqrt(Varf1(i2)); s2=sqrt(Varf2(i2));
% Function handle for Weibull * Gaussian_f1 * Gaussian_f2
pd=@(f1,f2) exp(yc(i2).*((log(r) + f2) + (r.*exp(f2)-1).*log(yt(i2))-f1) - exp(-f1).*yt(i2).^(r*exp(f2))) ...
.*norm_pdf(f1,Ef1(i2),sqrt(Varf1(i2))).*norm_pdf(f2,Ef2(i2),sqrt(Varf2(i2)));
% Integrate over latent variables
lpy(i2) = log(dblquad(pd, m1-6.*s1, m1+6.*s1, m2-6.*s2, m2+6.*s2));
end
end
function p = lik_inputdependentweibull_invlink(lik, f)
%LIK_INPUTDEPENDENTWEIBULL Returns values of inverse link function
%
% Description
% P = LIK_INPUTDEPENDENTWEIBULL_INVLINK(LIK, F) takes a likelihood structure LIK and
% latent values F and returns the values of inverse link function P.
% This subfunction is needed when using function gp_predprctmu.
%
% See also
% LIK_INPUTDEPENDENTWEIBULL_LL, LIK_INPUTDEPENDENTWEIBULL_PREDY
p = exp(f);
end
function reclik = lik_inputdependentweibull_recappend(reclik, ri, lik)
%RECAPPEND Append the parameters to the record
%
% Description
%    RECLIK = LIK_INPUTDEPENDENTWEIBULL_RECAPPEND(RECLIK, RI, LIK) takes a
% likelihood record structure RECLIK, record index RI and
% likelihood structure LIK with the current MCMC samples of
% the parameters. Returns RECLIK which contains all the old
% samples and the current samples from LIK. This subfunction
% is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC
if nargin == 2
% Initialize the record
reclik.type = 'Inputdependent-Weibull';
reclik.nondiagW=true;
% Initialize parameter
% reclik.shape = [];
% Set the function handles
reclik.fh.pak = @lik_inputdependentweibull_pak;
reclik.fh.unpak = @lik_inputdependentweibull_unpak;
    reclik.fh.lp = @lik_inputdependentweibull_lp;
    reclik.fh.lpg = @lik_inputdependentweibull_lpg;
reclik.fh.ll = @lik_inputdependentweibull_ll;
reclik.fh.llg = @lik_inputdependentweibull_llg;
reclik.fh.llg2 = @lik_inputdependentweibull_llg2;
reclik.fh.llg3 = @lik_inputdependentweibull_llg3;
reclik.fh.invlink = @lik_inputdependentweibull_invlink;
reclik.fh.predy = @lik_inputdependentweibull_predy;
reclik.fh.recappend = @lik_inputdependentweibull_recappend;
reclik.p=[];
reclik.p.shape=[];
if ~isempty(ri.p.shape)
reclik.p.shape = ri.p.shape;
end
else
% Append to the record
reclik.shape(ri,:)=lik.shape;
if ~isempty(lik.p.shape)
reclik.p.shape = lik.p.shape.fh.recappend(reclik.p.shape, ri, lik.p.shape);
end
end
end
| github | lcnhappe/happe-master | gpep_pred.m | .m | happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpep_pred.m | 22,353 | UNKNOWN | 9fdd2594b62565a6cf43df3eb051355a |
function [Eft, Varft, lpyt, Eyt, Varyt] = gpep_pred(gp, x, y, varargin)
%GPEP_PRED Predictions with Gaussian Process EP approximation
%
% Description
% [EFT, VARFT] = GPEP_PRED(GP, X, Y, XT, OPTIONS)
% takes a GP structure together with matrix X of training
% inputs and vector Y of training targets, and evaluates the
% predictive distribution at test inputs XT. Returns a posterior
% mean EFT and variance VARFT of latent variables.
%
% [EFT, VARFT, LPYT] = GPEP_PRED(GP, X, Y, XT, 'yt', YT, OPTIONS)
% returns also logarithm of the predictive density LPYT of the
% observations YT at test input locations XT. This can be used
% for example in the cross-validation. Here Y has to be a vector.
%
% [EFT, VARFT, LPYT, EYT, VARYT] = GPEP_PRED(GP, X, Y, XT, OPTIONS)
% returns also the posterior predictive mean EYT and variance VARYT.
%
% [EF, VARF, LPY, EY, VARY] = GPEP_PRED(GP, X, Y, OPTIONS)
% evaluates the predictive distribution at training inputs X
% and logarithm of the predictive density LPY of the training
% observations Y.
%
% OPTIONS is optional parameter-value pair
% predcf - an index vector telling which covariance functions are
% used for prediction. Default is all (1:gpcfn).
% See additional information below.
% tstind - a vector/cell array defining, which rows of X belong
% to which training block in *IC type sparse models.
% Default is []. In case of PIC, a cell array
% containing index vectors specifying the blocking
%               structure for test data. In FIC and CS+FIC a
% vector of length n that points out the test inputs
% that are also in the training set (if none, set
% TSTIND = [])
% yt - optional observed yt in test points (see below)
% z - optional observed quantity in triplet (x_i,y_i,z_i)
% Some likelihoods may use this. For example, in case of
% Poisson likelihood we have z_i=E_i, that is, expected value
% for ith case.
% zt - optional observed quantity in triplet (xt_i,yt_i,zt_i)
% Some likelihoods may use this. For example, in case of
% Poisson likelihood we have z_i=E_i, that is, the expected
% value for the ith case.
%
% NOTE! In case of FIC and PIC sparse approximation the
% prediction for only some PREDCF covariance functions is just
% an approximation since the covariance functions are coupled in
% the approximation and are not strictly speaking additive
% anymore.
%
% For example, if you use covariance such as K = K1 + K2 your
% predictions Eft1 = gpep_pred(GP, X, Y, X, 'predcf', 1) and
% Eft2 = gpep_pred(gp, x, y, x, 'predcf', 2) should sum up to
% Eft = gpep_pred(gp, x, y, x). That is Eft = Eft1 + Eft2. With
% FULL model this is true but with FIC and PIC this is true only
% approximately. That is Eft \approx Eft1 + Eft2.
%
% With CS+FIC the predictions are exact if the PREDCF covariance
% functions are all in the FIC part or if they are CS
% covariances.
%
% NOTE! When making predictions with a subset of covariance
% functions with FIC approximation the predictive variance can
% in some cases be ill-behaved i.e. negative or unrealistically
% small. This may happen because of the approximative nature of
% the prediction.
%
% See also
% GPEP_E, GPEP_G, GP_PRED, DEMO_SPATIAL, DEMO_CLASSIFIC
% Copyright (c) 2007-2010 Jarno Vanhatalo
% Copyright (c) 2010 Heikki Peura
% Copyright (c) 2011 Pasi Jylänki
% Copyright (c) 2012 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
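
% Usage sketch (illustrative addition, not from the original file;
% assumes a GP structure gp created with gp_set(..., 'latent_method',
% 'EP') and data x, y, xt, yt already in the workspace):
%   [Eft, Varft] = gpep_pred(gp, x, y, xt);
%   [Eft, Varft, lpyt] = gpep_pred(gp, x, y, xt, 'yt', yt);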
ip=inputParser;
ip.FunctionName = 'GPEP_PRED';
ip.addRequired('gp', @isstruct);
ip.addRequired('x', @(x) ~isempty(x) && isreal(x) && all(isfinite(x(:))))
ip.addRequired('y', @(x) ~isempty(x) && isreal(x) && all(isfinite(x(:))))
ip.addOptional('xt', [], @(x) isempty(x) || (isreal(x) && all(isfinite(x(:)))))
ip.addParamValue('yt', [], @(x) isreal(x) && all(isfinite(x(:))))
ip.addParamValue('z', [], @(x) isreal(x) && all(isfinite(x(:))))
ip.addParamValue('zt', [], @(x) isreal(x) && all(isfinite(x(:))))
ip.addParamValue('predcf', [], @(x) isempty(x) || ...
isvector(x) && isreal(x) && all(isfinite(x)&x>0))
ip.addParamValue('tstind', [], @(x) isempty(x) || iscell(x) ||...
(isvector(x) && isreal(x) && all(isfinite(x)&x>0)))
if numel(varargin)==0 || isnumeric(varargin{1})
% inputParser should handle this, but it doesn't
ip.parse(gp, x, y, varargin{:});
else
ip.parse(gp, x, y, [], varargin{:});
end
xt=ip.Results.xt;
yt=ip.Results.yt;
z=ip.Results.z;
zt=ip.Results.zt;
predcf=ip.Results.predcf;
tstind=ip.Results.tstind;
if isempty(xt)
xt=x;
if isempty(tstind)
if iscell(gp)
gptype=gp{1}.type;
else
gptype=gp.type;
end
switch gptype
case {'FULL' 'VAR' 'DTC' 'SOR'}
tstind = [];
case {'FIC' 'CS+FIC'}
tstind = 1:size(x,1);
case 'PIC'
if iscell(gp)
tstind = gp{1}.tr_index;
else
tstind = gp.tr_index;
end
end
end
if isempty(yt)
yt=y;
end
if isempty(zt)
zt=z;
end
end
[tn, tnin] = size(x);
switch gp.type
% ============================================================
% FULL
% ============================================================
case 'FULL' % Predictions with FULL GP model
[e, edata, eprior, tautilde, nutilde, L] = gpep_e(gp_pak(gp), gp, x, y, 'z', z);
[K, C]=gp_trcov(gp,x);
kstarstar = gp_trvar(gp, xt, predcf);
ntest=size(xt,1);
K_nf=gp_cov(gp,xt,x,predcf);
[n,nin] = size(x);
if all(tautilde > 0) && ~isequal(gp.latent_opt.optim_method, 'robust-EP')
% This is the usual case where likelihood is log concave
% for example, Poisson and probit
sqrttautilde = sqrt(tautilde);
Stildesqroot = sparse(1:n, 1:n, sqrttautilde, n, n);
if ~isfield(gp,'meanf')
if issparse(L) % If compact support covariance functions are used
% the covariance matrix will be sparse
z=Stildesqroot*ldlsolve(L,Stildesqroot*(C*nutilde));
else
z=Stildesqroot*(L'\(L\(Stildesqroot*(C*nutilde))));
end
Eft=K_nf*(nutilde-z); % The mean, zero mean GP
else
z = Stildesqroot*(L'\(L\(Stildesqroot*(C))));
Eft_zm=K_nf*(nutilde-z*nutilde); % The mean, zero mean GP
Ks = eye(size(z)) - z; % inv(K + S^-1)*S^-1
Ksy = Ks*nutilde;
[RB RAR] = mean_predf(gp,x,xt,K_nf',Ks,Ksy,'EP',Stildesqroot.^2);
Eft = Eft_zm + RB; % The mean
end
% Compute variance
if nargout > 1
if issparse(L)
V = ldlsolve(L, Stildesqroot*K_nf');
Varft = kstarstar - sum(K_nf.*(Stildesqroot*V)',2);
else
V = (L\Stildesqroot)*K_nf';
Varft = kstarstar - sum(V.^2)';
end
if isfield(gp,'meanf')
Varft = Varft + RAR;
end
end
else
% We might end up here if the likelihood is not log concave
% For example Student-t likelihood.
%{
z=tautilde.*(L'*(L*nutilde));
Eft=K_nf*(nutilde-z);
if nargout > 1
S = diag(tautilde);
V = K_nf*S*L';
Varft = kstarstar - sum((K_nf*S).*K_nf,2) + sum(V.^2,2);
end
%}
% An alternative implementation for avoiding negative variances
[Eft,V]=pred_var(tautilde,K,K_nf,nutilde);
Varft=kstarstar-V;
end
% ============================================================
% FIC
% ============================================================
case 'FIC' % Predictions with FIC sparse approximation for GP
[e, edata, eprior, tautilde, nutilde, L, La, b] = gpep_e(gp_pak(gp), gp, x, y, 'z', z);
% Here tstind = 1 if the prediction is made for the training set
if nargin > 6
if ~isempty(tstind) && length(tstind) ~= size(x,1)
        error('tstind (if provided) has to be of the same length as x.')
end
else
tstind = [];
end
u = gp.X_u;
m = size(u,1);
K_fu = gp_cov(gp,x,u,predcf); % f x u
K_nu=gp_cov(gp,xt,u,predcf);
K_uu = gp_trcov(gp,u,predcf); % u x u, noiseless covariance K_uu
K_uu = (K_uu+K_uu')./2; % ensure the symmetry of K_uu
kstarstar=gp_trvar(gp,xt,predcf);
if all(tautilde > 0) && ~isequal(gp.latent_opt.optim_method, 'robust-EP')
% From this on evaluate the prediction
% See Snelson and Ghahramani (2007) for details
% p=iLaKfu*(A\(iLaKfu'*mutilde));
p = b';
ntest=size(xt,1);
Eft = K_nu*(K_uu\(K_fu'*p));
% if the prediction is made for training set, evaluate Lav also for prediction points
if ~isempty(tstind)
[Kv_ff, Cv_ff] = gp_trvar(gp, xt(tstind,:), predcf);
Luu = chol(K_uu)';
B=Luu\(K_fu');
Qv_ff=sum(B.^2)';
Lav = Kv_ff-Qv_ff;
Eft(tstind) = Eft(tstind) + Lav.*p;
end
% Compute variance
if nargout > 1
%Varft(i1,1)=kstarstar(i1) - (sum(Knf(i1,:).^2./La') - sum((Knf(i1,:)*L).^2));
Luu = chol(K_uu)';
B=Luu\(K_fu');
B2=Luu\(K_nu');
Varft = kstarstar - sum(B2'.*(B*(repmat(La,1,m).\B')*B2)',2) + sum((K_nu*(K_uu\(K_fu'*L))).^2, 2);
% if the prediction is made for training set, evaluate Lav also for prediction points
if ~isempty(tstind)
Varft(tstind) = Varft(tstind) - 2.*sum( B2(:,tstind)'.*(repmat((La.\Lav),1,m).*B'),2) ...
+ 2.*sum( B2(:,tstind)'*(B*L).*(repmat(Lav,1,m).*L), 2) ...
- Lav./La.*Lav + sum((repmat(Lav,1,m).*L).^2,2);
end
end
else
% Robust-EP
[Eft,V]=pred_var2(tautilde,nutilde,L,K_uu,K_fu,b,K_nu);
Varft=kstarstar-V;
end
% ============================================================
% PIC
% ============================================================
case {'PIC' 'PIC_BLOCK'} % Predictions with PIC sparse approximation for GP
% Calculate some help matrices
u = gp.X_u;
ind = gp.tr_index;
[e, edata, eprior, tautilde, nutilde, L, La, b] = gpep_e(gp_pak(gp), gp, x, y, 'z', z);
K_fu = gp_cov(gp, x, u, predcf); % f x u
K_nu = gp_cov(gp, xt, u, predcf); % n x u
    K_uu = gp_trcov(gp, u, predcf);    % u x u, noiseless covariance K_uu
% From this on evaluate the prediction
% See Snelson and Ghahramani (2007) for details
% p=iLaKfu*(A\(iLaKfu'*mutilde));
p = b';
iKuuKuf = K_uu\K_fu';
w_bu=zeros(length(xt),length(u));
w_n=zeros(length(xt),1);
for i=1:length(ind)
w_bu(tstind{i},:) = repmat((iKuuKuf(:,ind{i})*p(ind{i},:))', length(tstind{i}),1);
K_nf = gp_cov(gp, xt(tstind{i},:), x(ind{i},:), predcf); % n x u
w_n(tstind{i},:) = K_nf*p(ind{i},:);
end
Eft = K_nu*(iKuuKuf*p) - sum(K_nu.*w_bu,2) + w_n;
% Compute variance
if nargout > 1
kstarstar = gp_trvar(gp, xt, predcf);
KnfL = K_nu*(iKuuKuf*L);
Varft = zeros(length(xt),1);
for i=1:length(ind)
v_n = gp_cov(gp, xt(tstind{i},:), x(ind{i},:), predcf); % n x u
v_bu = K_nu(tstind{i},:)*iKuuKuf(:,ind{i});
KnfLa = K_nu*(iKuuKuf(:,ind{i})/chol(La{i}));
KnfLa(tstind{i},:) = KnfLa(tstind{i},:) - (v_bu + v_n)/chol(La{i});
Varft = Varft + sum((KnfLa).^2,2);
KnfL(tstind{i},:) = KnfL(tstind{i},:) - v_bu*L(ind{i},:) + v_n*L(ind{i},:);
end
Varft = kstarstar - (Varft - sum((KnfL).^2,2));
end
% ============================================================
% CS+FIC
% ============================================================
case 'CS+FIC' % Predictions with CS+FIC sparse approximation for GP
% Here tstind = 1 if the prediction is made for the training set
if nargin > 6
if ~isempty(tstind) && length(tstind) ~= size(x,1)
        error('tstind (if provided) has to be of the same length as x.')
end
else
tstind = [];
end
u = gp.X_u;
m = length(u);
n = size(x,1);
n2 = size(xt,1);
[e, edata, eprior, tautilde, nutilde, L, La, b] = gpep_e(gp_pak(gp), gp, x, y, 'z', z);
% Indexes to all non-compact support and compact support covariances.
cf1 = [];
cf2 = [];
% Indexes to non-CS and CS covariances, which are used for predictions
predcf1 = [];
predcf2 = [];
ncf = length(gp.cf);
% Loop through all covariance functions
for i = 1:ncf
% Non-CS covariances
if ~isfield(gp.cf{i},'cs')
cf1 = [cf1 i];
% If used for prediction
if ~isempty(find(predcf==i))
predcf1 = [predcf1 i];
end
% CS-covariances
else
cf2 = [cf2 i];
% If used for prediction
if ~isempty(find(predcf==i))
predcf2 = [predcf2 i];
end
end
end
if isempty(predcf1) && isempty(predcf2)
predcf1 = cf1;
predcf2 = cf2;
end
% Determine the types of the covariance functions used
% in making the prediction.
if ~isempty(predcf1) && isempty(predcf2) % Only non-CS covariances
ptype = 1;
predcf2 = cf2;
elseif isempty(predcf1) && ~isempty(predcf2) % Only CS covariances
ptype = 2;
predcf1 = cf1;
else % Both non-CS and CS covariances
ptype = 3;
end
K_fu = gp_cov(gp,x,u,predcf1); % f x u
K_uu = gp_trcov(gp,u,predcf1); % u x u, noiseles covariance K_uu
K_uu = (K_uu+K_uu')./2; % ensure the symmetry of K_uu
K_nu=gp_cov(gp,xt,u,predcf1);
Kcs_nf = gp_cov(gp, xt, x, predcf2);
p = b';
ntest=size(xt,1);
% Calculate the predictive mean according to the type of
% covariance functions used for making the prediction
if ptype == 1
Eft = K_nu*(K_uu\(K_fu'*p));
elseif ptype == 2
Eft = Kcs_nf*p;
else
Eft = K_nu*(K_uu\(K_fu'*p)) + Kcs_nf*p;
end
% evaluate also Lav if the prediction is made for training set
if ~isempty(tstind)
[Kv_ff, Cv_ff] = gp_trvar(gp, xt(tstind,:), predcf1);
Luu = chol(K_uu)';
B=Luu\(K_fu');
Qv_ff=sum(B.^2)';
Lav = Kv_ff-Qv_ff;
end
% Add also Lav if the prediction is made for training set
% and non-CS covariance function is used for prediction
if ~isempty(tstind) && (ptype == 1 || ptype == 3)
Eft(tstind) = Eft(tstind) + Lav.*p;
end
% Evaluate the variance
if nargout > 1
Knn_v = gp_trvar(gp,xt,predcf);
Luu = chol(K_uu)';
B=Luu\(K_fu');
B2=Luu\(K_nu');
p = amd(La);
iLaKfu = La\K_fu;
% Calculate the predictive variance according to the type
% covariance functions used for making the prediction
if ptype == 1 || ptype == 3
% FIC part of the covariance
Varft = Knn_v - sum(B2'.*(B*(La\B')*B2)',2) + sum((K_nu*(K_uu\(K_fu'*L))).^2, 2);
% Add Lav2 if the prediction is made for the training set
if ~isempty(tstind)
% Non-CS covariance
if ptype == 1
Kcs_nf = sparse(tstind,1:n,Lav,n2,n);
% Non-CS and CS covariances
else
Kcs_nf = Kcs_nf + sparse(tstind,1:n,Lav,n2,n);
end
% Add Lav2 inside Kcs_nf
Varft = Varft - sum((Kcs_nf(:,p)/chol(La(p,p))).^2,2) + sum((Kcs_nf*L).^2, 2) ...
- 2.*sum((Kcs_nf*iLaKfu).*(K_uu\K_nu')',2) + 2.*sum((Kcs_nf*L).*(L'*K_fu*(K_uu\K_nu'))' ,2);
% In case of both non-CS and CS prediction covariances add
% only Kcs_nf if the prediction is not done for the training set
elseif ptype == 3
Varft = Varft - sum((Kcs_nf(:,p)/chol(La(p,p))).^2,2) + sum((Kcs_nf*L).^2, 2) ...
- 2.*sum((Kcs_nf*iLaKfu).*(K_uu\K_nu')',2) + 2.*sum((Kcs_nf*L).*(L'*K_fu*(K_uu\K_nu'))' ,2);
end
% Prediction with only CS covariance
elseif ptype == 2
Varft = Knn_v - sum((Kcs_nf(:,p)/chol(La(p,p))).^2,2) + sum((Kcs_nf*L).^2, 2) ;
end
end
% ============================================================
    % DTC/VAR/SOR
% ============================================================
case {'DTC' 'VAR' 'SOR'} % Predictions with DTC or variational sparse approximation for GP
[e, edata, eprior, tautilde, nutilde, L, La, b] = gpep_e(gp_pak(gp), gp, x, y, 'z', z);
% Here tstind = 1 if the prediction is made for the training set
if nargin > 6
if ~isempty(tstind) && length(tstind) ~= size(x,1)
        error('tstind (if provided) has to be of the same length as x.')
end
else
tstind = [];
end
u = gp.X_u;
m = size(u,1);
K_fu = gp_cov(gp,x,u,predcf); % f x u
K_nu=gp_cov(gp,xt,u,predcf);
    K_uu = gp_trcov(gp,u,predcf);    % u x u, noiseless covariance K_uu
K_uu = (K_uu+K_uu')./2; % ensure the symmetry of K_uu
kstarstar=gp_trvar(gp,xt,predcf);
% From this on evaluate the prediction
p = b';
ntest=size(xt,1);
Eft = K_nu*(K_uu\(K_fu'*p));
% if the prediction is made for training set, evaluate Lav also for prediction points
if ~isempty(tstind)
[Kv_ff, Cv_ff] = gp_trvar(gp, xt(tstind,:), predcf);
Luu = chol(K_uu)';
B=Luu\(K_fu');
Qv_ff=sum(B.^2)';
      Lav = Kv_ff-Qv_ff;
Eft(tstind) = Eft(tstind);% + Lav.*p;
end
if nargout > 1
% Compute variances of predictions
%Varft(i1,1)=kstarstar(i1) - (sum(Knf(i1,:).^2./La') - sum((Knf(i1,:)*L).^2));
Luu = chol(K_uu)';
B=Luu\(K_fu');
B2=Luu\(K_nu');
Varft = sum(B2'.*(B*(repmat(La,1,m).\B')*B2)',2) + sum((K_nu*(K_uu\(K_fu'*L))).^2, 2);
switch gp.type
case {'VAR' 'DTC'}
Varft = kstarstar - Varft;
case 'SOR'
Varft = sum(B2.^2,1)' - Varft;
end
end
% ============================================================
% SSGP
% ============================================================
case 'SSGP' % Predictions with sparse spectral sampling approximation for GP
% The approximation is proposed by M. Lazaro-Gredilla, J. Quinonero-Candela and A. Figueiras-Vidal
% in Microsoft Research technical report MSR-TR-2007-152 (November 2007)
% NOTE! This does not work at the moment.
[e, edata, eprior, tautilde, nutilde, L, S, b] = gpep_e(gp_pak(gp), gp, x, y, 'z', z);
%param = varargin{1};
Phi_f = gp_trcov(gp, x);
Phi_a = gp_trcov(gp, xt);
m = size(Phi_f,2);
ntest=size(xt,1);
Eft = Phi_a*(Phi_f'*b');
if nargout > 1
% Compute variances of predictions
%Varft(i1,1)=kstarstar(i1) - (sum(Knf(i1,:).^2./La') - sum((Knf(i1,:)*L).^2));
Varft = sum(Phi_a.^2,2) - sum(Phi_a.*((Phi_f'*(repmat(S,1,m).*Phi_f))*Phi_a')',2) + sum((Phi_a*(Phi_f'*L)).^2,2);
for i1=1:ntest
switch gp.lik.type
case 'Probit'
p1(i1,1)=norm_cdf(Eft(i1,1)/sqrt(1+Varft(i1))); % Probability p(y_new=1)
case 'Poisson'
p1 = NaN;
end
end
end
end
% ============================================================
% Evaluate also the predictive mean and variance of new observation(s)
% ============================================================
if nargout == 3
if isempty(yt)
lpyt=[];
else
lpyt = gp.lik.fh.predy(gp.lik, Eft, Varft, yt, zt);
end
elseif nargout > 3
[lpyt, Eyt, Varyt] = gp.lik.fh.predy(gp.lik, Eft, Varft, yt, zt);
end
end
function [m,S]=pred_var(tau_q,K,A,b)
% helper function for determining
%
% m = A * inv( K+ inv(diag(tau_q)) ) * inv(diag(tau_q)) *b
% S = diag( A * inv( K+ inv(diag(tau_q)) ) * A)
%
% when the site variances tau_q may be negative
%
ii1=find(tau_q>0); n1=length(ii1); W1=sqrt(tau_q(ii1));
ii2=find(tau_q<0); n2=length(ii2); W2=sqrt(abs(tau_q(ii2)));
m=A*b;
b=K*b;
S=zeros(size(A,1),1);
u=0;
U=0;
if ~isempty(ii1)
% Cholesky decomposition for the positive sites
L1=(W1*W1').*K(ii1,ii1);
L1(1:n1+1:end)=L1(1:n1+1:end)+1;
L1=chol(L1);
U = bsxfun(@times,A(:,ii1),W1')/L1;
u = L1'\(W1.*b(ii1));
m = m-U*u;
S = S+sum(U.^2,2);
end
if ~isempty(ii2)
% Cholesky decomposition for the negative sites
V=bsxfun(@times,K(ii2,ii1),W1')/L1;
L2=(W2*W2').*(V*V'-K(ii2,ii2));
L2(1:n2+1:end)=L2(1:n2+1:end)+1;
[L2,pd]=chol(L2);
if pd==0
U = bsxfun(@times,A(:,ii2),W2')/L2 -U*(bsxfun(@times,V,W2)'/L2);
u = L2'\(W2.*b(ii2)) -L2'\(bsxfun(@times,V,W2)*u);
m = m+U*u;
S = S-sum(U.^2,2);
else
fprintf('Posterior covariance is negative definite.\n')
end
end
end
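% Sketch of the intended use (illustrative): for a full GP the predictive
% mean and the diagonal term V are obtained as
%   [Eft, V] = pred_var(tautilde, K, K_nf, nutilde);  Varft = kstarstar - V;
% which mirrors the call made in the FULL branch above.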
function [m_q,S_q]=pred_var2(tautilde,nutilde,L,K_uu,K_fu,D,K_nu)
% function for determining the parameters of the q-distribution
% when site variances tau_q may be negative
%
% q(f) = N(f|0,K)*exp( -0.5*f'*diag(tau_q)*f + nu_q'*f )/Z_q = N(f|m_q,S_q)
%
% S_q = inv(inv(K)+diag(tau_q)) where K is sparse approximation for prior
% covariance
% m_q = S_q*nu_q;
%
% det(eye(n)+K*diag(tau_q))) = det(L1)^2 * det(L2)^2
% where L1 and L2 are upper triangular
%
% see Expectation consistent approximate inference (Opper & Winther, 2005)
n=length(nutilde);
U = K_fu;
S = 1+tautilde.*D;
B = tautilde./S;
BUiL = bsxfun(@times, B, U)/L';
% iKS = diag(B) - BUiL*BUiL';
Ktnu = D.*nutilde + U*(K_uu\(U'*nutilde));
m_q = nutilde - B.*Ktnu + BUiL*(BUiL'*Ktnu);
kstar = K_nu*(K_uu\K_fu');
m_q = kstar*m_q;
S_q = sum(bsxfun(@times,B',kstar.^2),2) - sum((kstar*BUiL).^2,2);
% S_q = kstar*iKS*kstar';
end
| github | lcnhappe/happe-master | gpcf_constant.m | .m | happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpcf_constant.m | 14,290 | utf_8 | a923f90b02c067d618848207c02b8980 |
function gpcf = gpcf_constant(varargin)
%GPCF_CONSTANT Create a constant covariance function
%
% Description
%    GPCF = GPCF_CONSTANT('PARAM1',VALUE1,'PARAM2',VALUE2,...)
%    creates a constant covariance function structure in
% which the named parameters have the specified values. Any
% unspecified parameters are set to default values.
%
%    GPCF = GPCF_CONSTANT(GPCF,'PARAM1',VALUE1,'PARAM2',VALUE2,...)
% modify a covariance function structure with the named
% parameters altered with the specified values.
%
% Parameters for constant covariance function [default]
% constSigma2 - magnitude (squared) [0.1]
% constSigma2_prior - prior for constSigma2 [prior_logunif]
%
% Note! If the prior is 'prior_fixed' then the parameter in
% question is considered fixed and it is not handled in
% optimization, grid integration, MCMC etc.
%
% See also
% GP_SET, GPCF_*, PRIOR_*, MEAN_*
% Copyright (c) 2007-2010 Jarno Vanhatalo
% Copyright (c) 2010 Jaakko Riihimaki, Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
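
% Usage sketch (illustrative addition, not from the original file):
%   gpcf = gpcf_constant('constSigma2', 0.5);
%   C = gpcf.fh.trcov(gpcf, x);   % n-by-n matrix with every entry 0.5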
ip=inputParser;
ip.FunctionName = 'GPCF_CONSTANT';
ip.addOptional('gpcf', [], @isstruct);
ip.addParamValue('constSigma2',0.1, @(x) isscalar(x) && x>0);
ip.addParamValue('constSigma2_prior',prior_logunif, @(x) isstruct(x) || isempty(x));
ip.parse(varargin{:});
gpcf=ip.Results.gpcf;
if isempty(gpcf)
init=true;
gpcf.type = 'gpcf_constant';
else
    if ~isfield(gpcf,'type') || ~isequal(gpcf.type,'gpcf_constant')
error('First argument does not seem to be a valid covariance function structure')
end
init=false;
end
% Initialize parameter
if init || ~ismember('constSigma2',ip.UsingDefaults)
gpcf.constSigma2=ip.Results.constSigma2;
end
% Initialize prior structure
if init
gpcf.p=[];
end
if init || ~ismember('constSigma2_prior',ip.UsingDefaults)
gpcf.p.constSigma2=ip.Results.constSigma2_prior;
end
if init
% Set the function handles to the subfunctions
gpcf.fh.pak = @gpcf_constant_pak;
gpcf.fh.unpak = @gpcf_constant_unpak;
gpcf.fh.lp = @gpcf_constant_lp;
gpcf.fh.lpg = @gpcf_constant_lpg;
gpcf.fh.cfg = @gpcf_constant_cfg;
gpcf.fh.ginput = @gpcf_constant_ginput;
gpcf.fh.cov = @gpcf_constant_cov;
gpcf.fh.trcov = @gpcf_constant_trcov;
gpcf.fh.trvar = @gpcf_constant_trvar;
gpcf.fh.recappend = @gpcf_constant_recappend;
end
end
function [w, s] = gpcf_constant_pak(gpcf, w)
%GPCF_CONSTANT_PAK Combine GP covariance function parameters into
% one vector.
%
% Description
% W = GPCF_CONSTANT_PAK(GPCF) takes a covariance function
% structure GPCF and combines the covariance function
% parameters and their hyperparameters into a single row
% vector W. This is a mandatory subfunction used for example
% in energy and gradient computations.
%
% w = [ log(gpcf.constSigma2)
% (hyperparameters of gpcf.constSigma2)]'
%
% See also
% GPCF_CONSTANT_UNPAK
w = []; s = {};
if ~isempty(gpcf.p.constSigma2)
w = log(gpcf.constSigma2);
s = [s 'log(constant.constSigma2)'];
% Hyperparameters of constSigma2
[wh sh] = gpcf.p.constSigma2.fh.pak(gpcf.p.constSigma2);
w = [w wh];
s = [s sh];
end
end
function [gpcf, w] = gpcf_constant_unpak(gpcf, w)
%GPCF_CONSTANT_UNPAK Sets the covariance function parameters
% into the structure
%
% Description
% [GPCF, W] = GPCF_CONSTANT_UNPAK(GPCF, W) takes a covariance
% function structure GPCF and a parameter vector W, and
% returns a covariance function structure identical to the
% input, except that the covariance parameters have been set
% to the values in W. Deletes the values set to GPCF from W
% and returns the modified W. This is a mandatory subfunction
% used for example in energy and gradient computations.
%
% Assignment is inverse of
% w = [ log(gpcf.constSigma2)
% (hyperparameters of gpcf.constSigma2)]'
%
% See also
% GPCF_CONSTANT_PAK
gpp=gpcf.p;
if ~isempty(gpp.constSigma2)
gpcf.constSigma2 = exp(w(1));
w = w(2:end);
    % Hyperparameters of constSigma2
[p, w] = gpcf.p.constSigma2.fh.unpak(gpcf.p.constSigma2, w);
gpcf.p.constSigma2 = p;
end
end
function lp = gpcf_constant_lp(gpcf)
%GPCF_CONSTANT_LP Evaluate the log prior of covariance function parameters
%
% Description
% LP = GPCF_CONSTANT_LP(GPCF) takes a covariance function
% structure GPCF and returns log(p(th)), where th collects the
% parameters. This is a mandatory subfunction used for example
% in energy computations.
%
% See also
% GPCF_CONSTANT_PAK, GPCF_CONSTANT_UNPAK, GPCF_CONSTANT_LPG, GP_E
% Evaluate the prior contribution to the error. The parameters that
% are sampled are from space W = log(w) where w is all the
% "real" samples. On the other hand errors are evaluated in the
% W-space so we need take into account also the Jacobian of
% transformation W -> w = exp(W). See Gelman et.al., 2004,
% Bayesian data Analysis, second edition, p24.
lp = 0;
gpp=gpcf.p;
if ~isempty(gpp.constSigma2)
lp = gpp.constSigma2.fh.lp(gpcf.constSigma2, gpp.constSigma2) +log(gpcf.constSigma2);
end
end
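% For example, assuming prior_logunif (flat in log(constSigma2)), its lp
% term -log(constSigma2) cancels the Jacobian term +log(constSigma2), so
% the total lp is 0; an informative prior leaves a nonzero contribution.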
function lpg = gpcf_constant_lpg(gpcf)
%GPCF_CONSTANT_LPG Evaluate gradient of the log prior with respect
% to the parameters.
%
% Description
% LPG = GPCF_CONSTANT_LPG(GPCF) takes a covariance function
% structure GPCF and returns LPG = d log (p(th))/dth, where th
% is the vector of parameters. This is a mandatory subfunction
% used for example in gradient computations.
%
% See also
% GPCF_CONSTANT_PAK, GPCF_CONSTANT_UNPAK, GPCF_CONSTANT_LP, GP_G
lpg = [];
gpp=gpcf.p;
if ~isempty(gpcf.p.constSigma2)
lpgs = gpp.constSigma2.fh.lpg(gpcf.constSigma2, gpp.constSigma2);
lpg = [lpg lpgs(1).*gpcf.constSigma2+1 lpgs(2:end)];
end
end
function DKff = gpcf_constant_cfg(gpcf, x, x2, mask, i1)
%GPCF_CONSTANT_CFG Evaluate gradient of covariance function
% with respect to the parameters
%
% Description
% DKff = GPCF_CONSTANT_CFG(GPCF, X) takes a
% covariance function structure GPCF, a matrix X of input
% vectors and returns DKff, the gradients of covariance matrix
% Kff = k(X,X) with respect to th (cell array with matrix
% elements). This is a mandatory subfunction used in gradient
% computations.
%
% DKff = GPCF_CONSTANT_CFG(GPCF, X, X2) takes a
% covariance function structure GPCF, a matrix X of input
% vectors and returns DKff, the gradients of covariance matrix
% Kff = k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_CONSTANT_CFG(GPCF, X, [], MASK)
% takes a covariance function structure GPCF, a matrix X of
% input vectors and returns DKff, the diagonal of gradients of
% covariance matrix Kff = k(X,X2) with respect to th (cell
% array with matrix elements). This subfunction is needed when
% using sparse approximations (e.g. FIC).
%
% See also
% GPCF_CONSTANT_PAK, GPCF_CONSTANT_UNPAK, GPCF_CONSTANT_LP, GP_G
[n, m] =size(x);
DKff = {};
if nargin==5
% Use memory save option
if i1==0
% Return number of hyperparameters
if ~isempty(gpcf.p.constSigma2)
DKff=1;
else
DKff=0;
end
return
end
end
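  % Memory-save usage sketch (illustrative): calling
  %   gpcf.fh.cfg(gpcf, x, [], [], 0)
  % returns just the number of hyperparameters (0 or 1 here) instead of
  % the gradient matrices.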
  % Evaluate: DKff{1} = d Kff / d constSigma2
  % NOTE! Here we have already taken into account that the parameters
  % are transformed through log() and thus dK/dlog(p) = p * dK/dp
% evaluate the gradient for training covariance
if nargin == 2 || (isempty(x2) && isempty(mask))
if ~isempty(gpcf.p.constSigma2)
DKff{1}=ones(n)*gpcf.constSigma2;
end
% Evaluate the gradient of non-symmetric covariance (e.g. K_fu)
elseif nargin == 3 || isempty(mask)
if size(x,2) ~= size(x2,2)
      error('gpcf_constant -> _cfg: The number of columns in x and x2 has to be the same. ')
end
if ~isempty(gpcf.p.constSigma2)
DKff{1}=ones([n size(x2,1)])*gpcf.constSigma2;
end
  % Evaluate: DKff{1} = d mask(Kff,I) / d constSigma2
elseif nargin == 4 || nargin == 5
if ~isempty(gpcf.p.constSigma2)
DKff{1}=ones(n,1)*gpcf.constSigma2; % d mask(Kff,I) / d constSigma2
end
end
if nargin==5
DKff=DKff{1};
end
end
function DKff = gpcf_constant_ginput(gpcf, x, x2, i1)
%GPCF_CONSTANT_GINPUT Evaluate gradient of covariance function with
% respect to x.
%
% Description
% DKff = GPCF_CONSTANT_GINPUT(GPCF, X) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_CONSTANT_GINPUT(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_CONSTANT_GINPUT(GPCF, X, X2, i) takes a covariance
% function structure GPCF, a matrix X of input vectors
% and returns DKff, the gradients of covariance matrix Kff =
% k(X,X2), or k(X,X) if X2 is empty, with respect to ith
% covariate in X. This subfunction is needed when using
% memory save option in gp_set.
%
% See also
% GPCF_CONSTANT_PAK, GPCF_CONSTANT_UNPAK, GPCF_CONSTANT_LP, GP_G
[n, m] =size(x);
if nargin==4
% Use memory save option
if i1==0
% Return number of covariates
if isfield(gpcf,'selectedVariables')
DKff=length(gpcf.selectedVariables);
else
DKff=m;
end
return
end
end
if nargin == 2 || isempty(x2)
ii1 = 0;
for i=1:m
for j = 1:n
ii1 = ii1 + 1;
DKff{ii1} = zeros(n);
end
end
elseif nargin == 3 || nargin == 4
%K = feval(gpcf.fh.cov, gpcf, x, x2);
ii1 = 0;
for i=1:m
for j = 1:n
ii1 = ii1 + 1;
DKff{ii1} = zeros(n, size(x2,1));
end
end
end
  if nargin==4
DKff=DKff{1};
end
end
function C = gpcf_constant_cov(gpcf, x1, x2, varargin)
%GP_CONSTANT_COV Evaluate covariance matrix between two input vectors
%
% Description
% C = GP_CONSTANT_COV(GP, TX, X) takes in covariance function
% of a Gaussian process GP and two matrixes TX and X that
% contain input vectors to GP. Returns covariance matrix C.
% Every element ij of C contains covariance between inputs i
% in TX and j in X. This is a mandatory subfunction used for
% example in prediction and energy computations.
%
% See also
% GPCF_CONSTANT_TRCOV, GPCF_CONSTANT_TRVAR, GP_COV, GP_TRCOV
if isempty(x2)
x2=x1;
end
[n1,m1]=size(x1);
[n2,m2]=size(x2);
if m1~=m2
error('the number of columns of X1 and X2 has to be same')
end
C = ones(n1,n2)*gpcf.constSigma2;
end
function C = gpcf_constant_trcov(gpcf, x)
%GP_CONSTANT_TRCOV Evaluate training covariance matrix of inputs
%
% Description
% C = GP_CONSTANT_TRCOV(GP, TX) takes in covariance function
% of a Gaussian process GP and matrix TX that contains
% training input vectors. Returns covariance matrix C. Every
% element ij of C contains covariance between inputs i and j
% in TX. This is a mandatory subfunction used for example in
% prediction and energy computations.
%
% See also
% GPCF_CONSTANT_COV, GPCF_CONSTANT_TRVAR, GP_COV, GP_TRCOV
n =size(x,1);
C = ones(n,n)*gpcf.constSigma2;
end
function C = gpcf_constant_trvar(gpcf, x)
%GP_CONSTANT_TRVAR Evaluate training variance vector
%
% Description
% C = GP_CONSTANT_TRVAR(GPCF, TX) takes in covariance function
% of a Gaussian process GPCF and matrix TX that contains
% training inputs. Returns variance vector C. Every
% element i of C contains variance of input i in TX. This is
% a mandatory subfunction used for example in prediction and
% energy computations.
%
% See also
% GPCF_CONSTANT_COV, GP_COV, GP_TRCOV
n =size(x,1);
C = ones(n,1)*gpcf.constSigma2;
end
function reccf = gpcf_constant_recappend(reccf, ri, gpcf)
%RECAPPEND Record append
%
% Description
% RECCF = GPCF_CONSTANT_RECAPPEND(RECCF, RI, GPCF) takes a
% covariance function record structure RECCF, record index RI
% and covariance function structure GPCF with the current MCMC
% samples of the parameters. Returns RECCF which contains all
% the old samples and the current samples from GPCF. This
% subfunction is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC and GP_MC -> RECAPPEND
if nargin == 2
% Initialize the record
reccf.type = 'gpcf_constant';
% Initialize parameters
reccf.constSigma2 = [];
% Set the function handles
reccf.fh.pak = @gpcf_constant_pak;
reccf.fh.unpak = @gpcf_constant_unpak;
reccf.fh.lp = @gpcf_constant_lp;
reccf.fh.lpg = @gpcf_constant_lpg;
reccf.fh.cfg = @gpcf_constant_cfg;
reccf.fh.cov = @gpcf_constant_cov;
reccf.fh.trcov = @gpcf_constant_trcov;
reccf.fh.trvar = @gpcf_constant_trvar;
reccf.fh.recappend = @gpcf_constant_recappend;
reccf.p=[];
reccf.p.constSigma2=[];
if ~isempty(ri.p.constSigma2)
reccf.p.constSigma2 = ri.p.constSigma2;
end
else
% Append to the record
gpp = gpcf.p;
% record constSigma2
reccf.constSigma2(ri,:)=gpcf.constSigma2;
if isfield(gpp,'constSigma2') && ~isempty(gpp.constSigma2)
reccf.p.constSigma2 = gpp.constSigma2.fh.recappend(reccf.p.constSigma2, ri, gpcf.p.constSigma2);
end
end
end
| github | lcnhappe/happe-master | lik_gaussiansmt.m | .m | happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/lik_gaussiansmt.m | 11,228 | utf_8 | e1313f56ba16306bf8fd48d805556b9d |
function lik = lik_gaussiansmt(varargin)
%LIK_GAUSSIANSMT Create a Gaussian scale mixture likelihood structure
% with priors producing approximation of the Student's t
%
% Description
%    LIK = LIK_GAUSSIANSMT('ndata',N,'PARAM1',VALUE1,'PARAM2',VALUE2,...)
% creates a scale mixture noise covariance function structure
% (with priors producing approximation of the Student's t) in
% which the named parameters have the specified values. Any
% unspecified parameters are set to default values. Obligatory
% parameter is 'ndata', which tells the number of data points,
% that is, number of mixture components.
%
%    LIK = LIK_GAUSSIANSMT(LIK,'PARAM1',VALUE1,'PARAM2',VALUE2,...)
% modify a covariance function structure with the named
% parameters altered with the specified values.
%
% Parameters for the Gaussian scale mixture approximation of the
% Student's t
%      sigma2     - Variances of the mixture components.
%                   The default is ndata x 1 vector of 0.1s.
%      U          - Part of the parameter expansion, see below.
%                   The default is ndata x 1 vector of 1s.
% tau2 - Part of the parameter expansion, see below.
% The default is 0.1.
% alpha - Part of the parameter expansion, see below.
% The default is 0.5.
% nu - Degrees of freedom. The default is 4.
% nu_prior - Prior for nu. The default is prior_fixed().
% gibbs - Whether Gibbs sampling is 'on' (default) or 'off'.
%
% Parametrisation and non-informative priors for alpha and tau
% are same as in Gelman et. al. (2004) page 304-305:
% y-E[y] ~ N(0, alpha^2 * U),
% where U = diag(u_1, u_2, ..., u_n)
% u_i ~ Inv-Chi^2(nu, tau^2)
%
% The parameters of this likelihood can be inferred only by
% Gibbs sampling by calling GP_MC.
%
%    If the degrees of freedom nu is given a prior (other than
%    prior_fixed), it is sampled using slice sampling within Gibbs
%    sampling with limits [0,128].
%
% See also
% GP_SET, PRIOR_*, LIK_*
% Copyright (c) 1998,1999,2010 Aki Vehtari
% Copyright (c) 2007-2010 Jarno Vanhatalo
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'LIK_GAUSSIANSMT';
ip.addOptional('lik', [], @isstruct);
ip.addParamValue('ndata',[], @(x) isscalar(x) && x>0 && mod(x,1)==0);
ip.addParamValue('sigma2',[], @(x) isvector(x) && all(x>0));
ip.addParamValue('U',[], @isvector);
ip.addParamValue('tau2',0.1, @isscalar);
ip.addParamValue('alpha',0.5, @isscalar);
ip.addParamValue('nu',4, @isscalar);
ip.addParamValue('nu_prior',[], @(x) isstruct(x) || isempty(x));
ip.addParamValue('censored',[], @(x) iscell(x));
ip.addParamValue('gibbs','on', @(x) ismember(x,{'on' 'off'}));
ip.parse(varargin{:});
lik=ip.Results.lik;
if isempty(lik)
init=true;
lik.type = 'Gaussian-smt';
else
if ~isfield(lik,'type') || ~isequal(lik.type,'Gaussian-smt')
error('First argument does not seem to be a valid likelihood function structure')
end
init=false;
end
% Initialize parameters
if init || ~ismember('ndata',ip.UsingDefaults)
ndata = ip.Results.ndata;
if isempty(ndata)
error('NDATA has to be defined')
end
lik.ndata=ndata;
lik.r = zeros(ndata,1);
else
ndata = lik.ndata;
end
if init || ~ismember('sigma2',ip.UsingDefaults)
sigma2=ip.Results.sigma2;
if isempty(sigma2)
lik.sigma2 = repmat(0.1,ndata,1);
else
if (size(sigma2,1) == lik.ndata && size(sigma2,2) == 1)
lik.sigma2 = sigma2;
else
error('The size of sigma2 has to be NDATAx1')
end
end
end
if init || ~ismember('U',ip.UsingDefaults)
U=ip.Results.U;
if isempty(U)
lik.U = ones(ndata,1);
else
if size(U,1) == lik.ndata
lik.U = U;
else
error('the size of U has to be NDATAx1')
end
end
end
if init || ~ismember('tau2',ip.UsingDefaults)
lik.tau2=ip.Results.tau2;
end
if init || ~ismember('alpha',ip.UsingDefaults)
lik.alpha=ip.Results.alpha;
end
if init || ~ismember('nu',ip.UsingDefaults)
lik.nu=ip.Results.nu;
end
if init || ~ismember('censored',ip.UsingDefaults)
censored=ip.Results.censored;
if ~isempty(censored)
lik.censored = censored{1};
yy = censored{2};
if lik.censored(1) >= lik.censored(2)
error('lik_gaussiansmt -> if censored model is used, the limits must be given in increasing order.')
end
imis1 = [];
imis2 = [];
if lik.censored(1) > -inf
imis1 = find(yy<=lik.censored(1));
end
if lik.censored(2) < inf
imis2 = find(yy>=lik.censored(2));
end
lik.cy = yy([imis1 ; imis2])';
lik.imis = [imis1 ; imis2];
end
end
% Initialize prior structure
lik.p=[];
lik.p.sigma=[];
if init || ~ismember('nu_prior',ip.UsingDefaults)
lik.p.nu=ip.Results.nu_prior;
end
% using Gibbs or not
if init || ~ismember('gibbs',ip.UsingDefaults)
lik.gibbs = ip.Results.gibbs;
end
if init
% Set the function handles to the subfunctions
lik.fh.pak = @lik_gaussiansmt_pak;
lik.fh.unpak = @lik_gaussiansmt_unpak;
lik.fh.lp = @lik_gaussiansmt_lp;
lik.fh.lpg = @lik_gaussiansmt_lpg;
lik.fh.cfg = @lik_gaussiansmt_cfg;
lik.fh.trcov = @lik_gaussiansmt_trcov;
lik.fh.trvar = @lik_gaussiansmt_trvar;
lik.fh.gibbs = @lik_gaussiansmt_gibbs;
lik.fh.recappend = @lik_gaussiansmt_recappend;
end
end
function [w,s] = lik_gaussiansmt_pak(lik)
w = []; s = {};
end
function [lik, w] = lik_gaussiansmt_unpak(lik, w)
end
function lp =lik_gaussiansmt_lp(lik)
lp = 0;
end
function lpg = lik_gaussiansmt_lpg(lik)
lpg = [];
end
function DKff = lik_gaussiansmt_cfg(lik, x, x2)
DKff = [];
end
function C = lik_gaussiansmt_trcov(lik, x)
%LIK_GAUSSIANSMT_TRCOV Evaluate training covariance matrix
% corresponding to Gaussian noise
% Description
% C = LIK_GAUSSIANSMT_TRCOV(GP, TX) takes in covariance function
% of a Gaussian process GP and matrix TX that contains
% training input vectors. Returns covariance matrix C. Every
% element ij of C contains covariance between inputs i and j
% in TX. This subfunction is needed only in Gaussian likelihoods.
%
% See also
% LIK_GAUSSIANSMT_COV, LIK_GAUSSIANSMT_TRVAR, GP_COV, GP_TRCOV
[n, m] = size(x);
if n ~= lik.ndata
error(['lik_gaussiansmt -> _trcov: The training covariance can be evaluated'...
' only for training data. '])
end
C = sparse(1:n, 1:n, lik.sigma2, n, n);
end
function C = lik_gaussiansmt_trvar(lik, x)
%LIK_GAUSSIANSMT_TRVAR Evaluate training variance vector
% corresponding to Gaussian noise
%
% Description
% C = LIK_GAUSSIANSMT_TRVAR(LIK, TX) takes in covariance function
% of a Gaussian process LIK and matrix TX that contains
% training inputs. Returns variance vector C. Every
% element i of C contains variance of input i in TX. This
% subfunction is needed only in Gaussian likelihoods.
%
%
% See also
% LIK_GAUSSIANSMT_COV, GP_COV, GP_TRCOV
[n, m] = size(x);
if n ~= lik.ndata
error(['lik_gaussiansmt -> _trvar: The training variance can be evaluated'...
' only for training data. '])
end
C = lik.sigma2;
end
function [lik, y] = lik_gaussiansmt_gibbs(gp, lik, x, y)
%LIK_GAUSSIANSMT_GIBBS Function for sampling the sigma2's
%
% Description
% Perform Gibbs sampling for the scale mixture variances. This
% function is likelihood specific.
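%
%    The full conditionals sampled below follow the parameter
%    expansion of Gelman et al. (2004): with the residual r = y - f,
%      u_i     | . ~ Inv-chi^2(nu+1, (nu*tau^2 + (r_i/alpha)^2)/(nu+1))
%      tau^2   | . ~ Gamma(n*nu/2, rate = nu*sum(1./u)/2)
%      alpha^2 | . ~ Inv-chi^2(n, mean(r.^2./u))
%    and the effective noise variances are sigma2_i = alpha^2 * u_i.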
[n,m] = size(x);
% Draw a sample of the mean of y. Its distribution is
% f ~ N(K*inv(C)*y, K - K*inv(C)*K')
switch gp.type
case 'FULL'
sampy = gp_rnd(gp, x, y, x);
case 'FIC'
sampy = gp_rnd(gp, x, y, x, 'tstind', 1:n);
case {'PIC' 'PIC_BLOCK'}
sampy = gp_rnd(gp, x, y, x, 'tstind', gp.tr_index);
end
% Calculate the residual
r = y-sampy;
U = lik.U;
t2 = lik.tau2;
alpha = lik.alpha;
nu = lik.nu;
rss2=alpha.^2.*U;
% Perform the Gibbs sampling (Gelman et al. (2004), pages 304-305)
% Notice that 'sinvchi2rand' is parameterized as in Gelman et al.
U=sinvchi2rand(nu+1, (nu.*t2+(r./alpha).^2)./(nu+1));
shape = n*nu./2; % These are parameters...
invscale = nu.*sum(1./U)./2; % used in Gelman et al
t2=gamrnd(shape, 1./invscale); % Notice! The matlab parameterization is different
alpha2=sinvchi2rand(n,mean(r.^2./U));
rss2=alpha2.*U;
if ~isempty(lik.p.nu)
% Sample nu using slice sampling within Gibbs
pp = lik.p.nu;
opt=struct('nomit',4,'display',0,'method','doubling', ...
'wsize',4,'plimit',5,'unimodal',1,'mmlimits',[0; 128]);
nu=sls(@(nu) (-sum(sinvchi2_lpdf(U,nu,t2))-pp.fh.lp(nu, pp)),nu,opt);
end
lik.sigma2 = rss2;
lik.U = U;
lik.tau2 = t2;
lik.alpha = sqrt(alpha2);
lik.nu = nu;
lik.r = r;
if isfield(lik, 'censored')
imis1 = [];
imis2 = [];
if lik.censored(1) > -inf
imis1 = find(y<=lik.censored(1));
y(imis1)=normrtrand(sampy(imis1),alpha2*U(imis1),lik.censored(1));
end
if lik.censored(2) < inf
imis2 = find(y>=lik.censored(2));
y(imis2)=normltrand(sampy(imis2),alpha2*U(imis2),lik.censored(2));
end
lik.cy = y([imis1 ; imis2]);
end
end
function reccf = lik_gaussiansmt_recappend(reccf, ri, lik)
%RECAPPEND Record append
%
% Description
% RECCF = LIK_GAUSSIANSMT_RECAPPEND(RECCF, RI, LIK)
% takes a likelihood record structure RECCF, record
% index RI and likelihood structure LIK with the
% current MCMC samples of the parameters. Returns
% RECCF which contains all the old samples and the
% current samples from LIK . This subfunction is
% needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC and GP_MC -> RECAPPEND
if nargin == 2
% Initialize the record
reccf.type = 'Gaussian-smt';
reccf.ndata = [];
% Initialize parameters
reccf.sigma2 = [];
% Set the function handles
reccf.fh.pak = @lik_gaussiansmt_pak;
reccf.fh.unpak = @lik_gaussiansmt_unpak;
reccf.fh.lp = @lik_gaussiansmt_lp;
reccf.fh.lpg = @lik_gaussiansmt_lpg;
reccf.fh.cfg = @lik_gaussiansmt_cfg;
reccf.fh.trcov = @lik_gaussiansmt_trcov;
reccf.fh.trvar = @lik_gaussiansmt_trvar;
reccf.fh.gibbs = @lik_gaussiansmt_gibbs;
reccf.fh.recappend = @lik_gaussiansmt_recappend;
else
% Append to the record
reccf.ndata = lik.ndata;
gpp = lik.p;
% record noiseSigma
reccf.sigma2(ri,:)=lik.sigma2;
if ~isempty(lik.nu)
reccf.nu(ri,:)=lik.nu;
reccf.U(ri,:) = lik.U;
reccf.tau2(ri,:) = lik.tau2;
reccf.alpha(ri,:) = lik.alpha;
reccf.r(ri,:) = lik.r;
end
if isfield(lik, 'censored')
reccf.cy(ri,:) = lik.cy';
end
end
end
|
github
|
lcnhappe/happe-master
|
gp_avpredcomp.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gp_avpredcomp.m
| 9,333 |
windows_1250
|
048f8facc21b0b145997ed98f7e991c9
|
function [apcs,apcss]=gp_avpredcomp(gp, x, y, varargin)
%GP_AVPREDCOMP Average predictive comparison for Gaussian process model
%
% Description
% APCS=GP_AVPREDCOMP(GP, X, Y, OPTIONS) Takes a Gaussian process
% structure GP together with a matrix X of training inputs and
% vector Y of training targets, and returns average predictive
% comparison (APC) estimates for each input in a structure APCS.
%    APCS contains the following fields
% ps - the probability of knowing the sign of the APC
% in the latent outcome for each input variable.
% fs - the samples from the APC in the latent outcome for each
% input variable
% fsa - the samples from the absolute APC in the latent outcome
% for each input variable
% fsrms - the samples from the root mean squared APC in the latent
% outcome for each input variable
% ys - the samples from the APC in the target outcome for each
% input variable
% ysa - the samples from the absolute APC in the target outcome
% for each input variable
% ysrms - the samples from the root mean squared APC in the target
% outcome for each input variable
%
% [APCS,APCSS]=GP_AVPREDCOMP(GP, X, Y, OPTIONS) returns also APCSS
% which contains APCS components for each data point. These can
% be used to form conditional average predictive comparisons (CAPC).
%    APCSS contains the following fields
% numfs - the samples from the numerator of APC in the latent
% outcome for each input variable
% numfsa - the samples from the numerator of absolute APC in
% the latent outcome for each input variable
% numfsrms - the samples from the numerator of RMS APC in
% the latent outcome for each input variable
%      numys    - the samples from the numerator of APC in the target
%                 outcome for each input variable
%      numysa   - the samples from the numerator of absolute APC in
%                 the target outcome for each input variable
%      numysrms - the samples from the numerator of RMS APC in
%                 the target outcome for each input variable
% dens - the samples from the denominator of APC in the latent
% outcome for each input variable
% densa - the samples from the denominator of absolute APC in
% the latent outcome for each input variable
% densrms - the samples from the denominator of RMS APC in
% the latent outcome for each input variable
%
% OPTIONS is optional parameter-value pair
% z - optional observed quantity in triplet (x_i,y_i,z_i)
% Some likelihoods may use this. For example, in
% case of Poisson likelihood we have z_i=E_i, that
% is, expected value for ith case.
% nsamp - determines the number of samples used (default=500).
% deltadist - indicator vector telling which component sets
% are handled using the delta distance (0 if x=x',
% and 1 otherwise). Default is found by examining
% the covariance and metric functions used.
%
% See also
% GP_PRED
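%
%  A minimal usage sketch (hypothetical, not part of this file; it
%  assumes training data x, y and a model fitted with the GPstuff
%  functions gp_set and gp_optim):
%
%      gp   = gp_set('cf', {gpcf_sexp});
%      gp   = gp_optim(gp, x, y);                   % MAP fit of the hyperparameters
%      apcs = gp_avpredcomp(gp, x, y, 'nsamp', 200);
%      bar(mean(apcs.fs))                           % mean APC per input, latent scale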
% Copyright (c) 2011 Jaakko Riihimäki
% Copyright (c) 2011 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'GP_AVPREDCOMP';
ip.addRequired('gp',@isstruct);
ip.addRequired('x', @(x) ~isempty(x) && isreal(x) && all(isfinite(x(:))))
ip.addRequired('y', @(x) ~isempty(x) && isreal(x) && all(isfinite(x(:))))
ip.addParamValue('z', [], @(x) isreal(x) && all(isfinite(x(:))))
ip.addParamValue('predcf', [], @(x) isempty(x) || ...
isvector(x) && isreal(x) && all(isfinite(x)&x>0))
ip.addParamValue('tstind', [], @(x) isempty(x) || iscell(x) ||...
(isvector(x) && isreal(x) && all(isfinite(x)&x>0)))
ip.addParamValue('nsamp', 500, @(x) isreal(x) && isscalar(x))
ip.addParamValue('deltadist',[], @(x) isvector(x));
ip.parse(gp, x, y, varargin{:});
options=struct();
options.predcf=ip.Results.predcf;
options.tstind=ip.Results.tstind;
z=ip.Results.z;
if ~isempty(z)
options.z=ip.Results.z;
end
nsamp=ip.Results.nsamp;
deltadist = logical(ip.Results.deltadist);
[n, nin]=size(x);
if isempty(deltadist)
deltadist=false(1,nin);
deltadist(gp_finddeltadist(gp))=true;
end
ps=zeros(1,nin);
fs=zeros(nsamp,nin);
fsa=zeros(nsamp,nin);
fsrms=zeros(nsamp,nin);
if nargout>1
numfs=zeros(n,nsamp,nin);
numfsa=zeros(n,nsamp,nin);
numfsrms=zeros(n,nsamp,nin);
dens=zeros(n,nsamp,nin);
densa=zeros(n,nsamp,nin);
densrms=zeros(n,nsamp,nin);
end
ys=zeros(nsamp,nin);
ysa=zeros(nsamp,nin);
ysrms=zeros(nsamp,nin);
if nargout>1
numys=zeros(n,nsamp,nin);
numysa=zeros(n,nsamp,nin);
numysrms=zeros(n,nsamp,nin);
end
% covariance is used for Mahalanobis weighted distance
covx=cov(x);
% handle categorical variables
covx(deltadist,:)=0;
covx(:,deltadist)=0;
for i1=find(deltadist)
covx(i1,i1)=1;
end
prevstream=setrandstream();
% loop through the input variables
for k1=1:nin
fprintf('k1=%d\n',k1)
%- Compute the weight matrix based on Mahalanobis distances:
x_=x; x_(:,k1)=[];
covx_=covx; covx_(:,k1)=[]; covx_(k1,:)=[];
deltadist_=deltadist; deltadist_(k1)=[];
% weight matrix:
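% W(i,j) = 1/(1 + d_M(i,j)^2), where d_M is the Mahalanobis distance
% between rows i and j of x_ (delta-distance components contribute a
% 0/1 mismatch instead); points that are similar in all the other
% inputs get weight near 1, dissimilar points are downweighted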
W=zeros(n);
for i1=1:n
x_diff=zeros(nin-1,n-i1);
x_diff(~deltadist_,:)=bsxfun(@minus,x_(i1,~deltadist_),x_((i1+1):n,~deltadist_))';
x_diff(deltadist_,:)=double(bsxfun(@ne,x_(i1,deltadist_),x_((i1+1):n,deltadist_))');
W(i1,(i1+1):n)=1./(1+sum(x_diff.*(covx_\x_diff)));
end
W=W+W'+eye(n);
seed=round(rand*10e8);
numf=zeros(1,nsamp);
numfa=zeros(1,nsamp);
numfrms=zeros(1,nsamp);
numy=zeros(1,nsamp);
numya=zeros(1,nsamp);
numyrms=zeros(1,nsamp);
den=0;
dena=0;
for i1=1:n
% inputs of interest
ui=x(i1, k1);
ujs=x(:, k1);
% replicate same values for other inputs
xrep=repmat(x(i1,:),n,1); xrep(:,k1)=ujs;
if deltadist(k1)
Udiff=double(ujs~=ui);
else
Udiff=ujs-ui;
end
Udiffa=abs(Udiff);
Usign=sign(Udiff);
% draw random samples from the posterior
setrandstream(seed);
fr = gp_rnd(gp, x, y, xrep, 'nsamp', nsamp, options);
% average change in input
deni=sum(W(:,i1).*Udiff.*Usign);
denai=sum(W(:,i1).*Udiffa);
den=den+deni;
dena=dena+denai;
% average change in latent outcome
b=bsxfun(@minus,fr,fr(i1,:));
numfi=sum(bsxfun(@times,W(:,i1).*Usign,b));
numfai=sum(bsxfun(@times,W(:,i1),abs(b)));
numfrmsi=sum(bsxfun(@times,W(:,i1),b.^2));
numf=numf+numfi;
numfa=numfa+numfai;
numfrms=numfrms+numfrmsi;
if nargout>1
numfs(i1,:,k1)=numfi;
numfsa(i1,:,k1)=numfai;
numfsrms(i1,:,k1)=numfrmsi;
dens(i1,:,k1)=deni;
densa(i1,:,k1)=denai;
densrms(i1,:,k1)=denai;
end
% compute latent values through the inverse link function
if isfield(gp.lik.fh, 'invlink')
ilfr = gp.lik.fh.invlink(gp.lik, fr, repmat(z,1,nsamp));
% average change in outcome
b=bsxfun(@minus,ilfr,ilfr(i1,:));
numyi=sum(bsxfun(@times,W(:,i1).*Usign,b));
numyai=sum(bsxfun(@times,W(:,i1),abs(b)));
numyrmsi=sum(bsxfun(@times,W(:,i1),b.^2));
numy=numy+numyi;
numya=numya+numyai;
numyrms=numyrms+numyrmsi;
if nargout>1
numys(i1,:,k1)=numyi;
numysa(i1,:,k1)=numyai;
numysrms(i1,:,k1)=numyrmsi;
end
end
end
% outcome is the latent function
fs(:,k1)=numf./den;
fsa(:,k1)=numfa./dena;
fsrms(:,k1)=sqrt(numfrms./dena);
if isfield(gp.lik.fh, 'invlink')
% outcome is computed through the inverse link function
ys(:,k1)=numy./den;
ysa(:,k1)=numya./dena;
ysrms(:,k1)=sqrt(numyrms./dena);
end
% probability of knowing the sign of the change in
% latent function
ps(1,k1)=mean(numf./den>0);
if ps(1,k1)<0.5
ps(1,k1)=1-ps(1,k1);
end
end
apcs.ps=ps;
apcs.fs=fs;
apcs.fsa=fsa;
apcs.fsrms=fsrms;
if isfield(gp.lik.fh, 'invlink')
apcs.ys=ys;
apcs.ysa=ysa;
apcs.ysrms=ysrms;
end
if nargout>1
apcss.numfs=numfs;
apcss.numfsa=numfsa;
apcss.numfsrms=numfsrms;
apcss.dens=dens;
apcss.densa=densa;
apcss.densrms=densrms;
if isfield(gp.lik.fh, 'invlink')
apcss.numys=numys;
apcss.numysa=numysa;
apcss.numysrms=numysrms;
end
end
setrandstream(prevstream);
end
function deltadist = gp_finddeltadist(cf)
% FINDDELTADIST - Find which covariates are using delta distance
%
deltadist=[];
if ~iscell(cf) && isfield(cf,'cf')
deltadist=union(deltadist,gp_finddeltadist(cf.cf));
else
for cfi=1:numel(cf)
if isfield(cf{cfi},'cf')
deltadist=union(deltadist,gp_finddeltadist(cf{cfi}.cf));
else
if isfield(cf{cfi},'metric')
if isfield(cf{cfi}.metric,'deltadist')
deltadist=union(deltadist,cf{cfi}.metric.deltadist);
end
elseif ismember(cf{cfi}.type,{'gpcf_cat' 'gpcf_mask'}) && ...
isfield(cf{cfi},'selectedVariables')
deltadist=union(deltadist,cf{cfi}.selectedVariables);
end
end
end
end
end
|
github
|
lcnhappe/happe-master
|
lik_t.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/lik_t.m
| 36,265 |
UNKNOWN
|
7c042ee6cb876d45190e93aa46e3e774
|
function lik = lik_t(varargin)
%LIK_T Create a Student-t likelihood structure
%
% Description
%    LIK = LIK_T('PARAM1',VALUE1,'PARAM2',VALUE2,...)
%    creates a Student-t likelihood structure in which the named
%    parameters have the specified values. Any unspecified
%    parameters are set to default values.
%
%    LIK = LIK_T(LIK,'PARAM1',VALUE1,'PARAM2',VALUE2,...)
%    modifies a likelihood structure with the named parameters
%    altered to the specified values.
%
% Parameters for Student-t likelihood [default]
%      sigma2       - scale squared [0.1]
% nu - degrees of freedom [4]
% sigma2_prior - prior for sigma2 [prior_logunif]
% nu_prior - prior for nu [prior_fixed]
%
% Note! If the prior is 'prior_fixed' then the parameter in
% question is considered fixed and it is not handled in
% optimization, grid integration, MCMC etc.
%
% The likelihood is defined as follows:
% __ n
% p(y|f, z) = || i=1 C(nu,s2) * (1 + 1/nu * (y_i - f_i)^2/s2 )^(-(nu+1)/2)
%
% where nu is the degrees of freedom, s2 the scale and f_i the
%    latent variable defining the mean. C(nu,s2) is a normalizing
%    constant depending on nu and s2.
%
% See also
% GP_SET, LIK_*, PRIOR_*
%
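%  A minimal usage sketch (hypothetical, not part of this file; it
%  assumes training data x, y):
%
%      lik = lik_t('nu', 4, 'sigma2', 0.1);         % nu fixed, sigma2 inferred
%      gp  = gp_set('lik', lik, 'cf', {gpcf_sexp}, ...
%                   'latent_method', 'EP');         % or 'Laplace' / 'MCMC'
%      gp  = gp_optim(gp, x, y);
%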
% Copyright (c) 2009-2010 Jarno Vanhatalo
% Copyright (c) 2010 Aki Vehtari
% Copyright (c) 2011 Pasi Jylänki
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'LIK_T';
ip.addOptional('lik', [], @isstruct);
ip.addParamValue('sigma2',0.1, @(x) isscalar(x) && x>0);
ip.addParamValue('sigma2_prior',prior_logunif(), @(x) isstruct(x) || isempty(x));
ip.addParamValue('nu',4, @(x) isscalar(x) && x>0);
ip.addParamValue('nu_prior',prior_fixed, @(x) isstruct(x) || isempty(x));
ip.parse(varargin{:});
lik=ip.Results.lik;
if isempty(lik)
init=true;
lik.type = 'Student-t';
else
if ~isfield(lik,'type') || ~isequal(lik.type,'Student-t')
error('First argument does not seem to be a valid likelihood function structure')
end
init=false;
end
% Initialize parameters
if init || ~ismember('sigma2',ip.UsingDefaults)
lik.sigma2 = ip.Results.sigma2;
end
if init || ~ismember('nu',ip.UsingDefaults)
lik.nu = ip.Results.nu;
end
% Initialize prior structure
if init
lik.p=[];
end
if init || ~ismember('sigma2_prior',ip.UsingDefaults)
lik.p.sigma2=ip.Results.sigma2_prior;
end
if init || ~ismember('nu_prior',ip.UsingDefaults)
lik.p.nu=ip.Results.nu_prior;
end
if init
% Set the function handles to the subfunctions
lik.fh.pak = @lik_t_pak;
lik.fh.unpak = @lik_t_unpak;
lik.fh.lp = @lik_t_lp;
lik.fh.lpg = @lik_t_lpg;
lik.fh.ll = @lik_t_ll;
lik.fh.llg = @lik_t_llg;
lik.fh.llg2 = @lik_t_llg2;
lik.fh.llg3 = @lik_t_llg3;
lik.fh.tiltedMoments = @lik_t_tiltedMoments;
lik.fh.tiltedMoments2 = @lik_t_tiltedMoments2;
lik.fh.siteDeriv = @lik_t_siteDeriv;
lik.fh.siteDeriv2 = @lik_t_siteDeriv2;
lik.fh.optimizef = @lik_t_optimizef;
lik.fh.upfact = @lik_t_upfact;
lik.fh.invlink = @lik_t_invlink;
lik.fh.predy = @lik_t_predy;
lik.fh.predprcty = @lik_t_predprcty;
lik.fh.recappend = @lik_t_recappend;
end
end
function [w, s] = lik_t_pak(lik)
%LIK_T_PAK Combine likelihood parameters into one vector.
%
% Description
% W = LIK_T_PAK(LIK) takes a likelihood structure LIK and
% combines the parameters into a single row vector W. This
% is a mandatory subfunction used for example in energy and
% gradient computations.
%
% w = [ log(lik.sigma2)
% (hyperparameters of lik.sigma2)
% log(log(lik.nu))
% (hyperparameters of lik.nu)]'
%
% See also
% LIK_T_UNPAK, GP_PAK
w = []; s = {};
if ~isempty(lik.p.sigma2)
w = [w log(lik.sigma2)];
s = [s; 'log(lik.sigma2)'];
[wh sh] = lik.p.sigma2.fh.pak(lik.p.sigma2);
w = [w wh];
s = [s; sh];
end
if ~isempty(lik.p.nu)
w = [w log(log(lik.nu))];
s = [s; 'loglog(lik.nu)'];
[wh sh] = lik.p.nu.fh.pak(lik.p.nu);
w = [w wh];
s = [s; sh];
end
end
function [lik, w] = lik_t_unpak(lik, w)
%LIK_T_UNPAK Extract likelihood parameters from the vector.
%
% Description
% W = LIK_T_UNPAK(W, LIK) takes a likelihood structure LIK and
% extracts the parameters from the vector W to the LIK
% structure. This is a mandatory subfunction used for example
% in energy and gradient computations.
%
% Assignment is inverse of
% w = [ log(lik.sigma2)
% (hyperparameters of lik.sigma2)
% log(log(lik.nu))
% (hyperparameters of lik.nu)]'
%
% See also
% LIK_T_PAK, GP_UNPAK
if ~isempty(lik.p.sigma2)
lik.sigma2 = exp(w(1));
w = w(2:end);
[p, w] = lik.p.sigma2.fh.unpak(lik.p.sigma2, w);
lik.p.sigma2 = p;
end
if ~isempty(lik.p.nu)
lik.nu = exp(exp(w(1)));
w = w(2:end);
[p, w] = lik.p.nu.fh.unpak(lik.p.nu, w);
lik.p.nu = p;
end
end
function lp = lik_t_lp(lik)
%LIK_T_LP log(prior) of the likelihood parameters
%
% Description
% LP = LIK_T_LP(LIK) takes a likelihood structure LIK and
% returns log(p(th)), where th collects the parameters.
% This subfunction is needed when there are likelihood parameters.
%
% See also
% LIK_T_LLG, LIK_T_LLG3, LIK_T_LLG2, GPLA_E
v = lik.nu;
sigma2 = lik.sigma2;
lp = 0;
if ~isempty(lik.p.sigma2)
lp = lp + lik.p.sigma2.fh.lp(sigma2, lik.p.sigma2) +log(sigma2);
end
if ~isempty(lik.p.nu)
lp = lp + lik.p.nu.fh.lp(lik.nu, lik.p.nu) +log(v) +log(log(v));
end
end
function lpg = lik_t_lpg(lik)
%LIK_T_LPG d log(prior)/dth of the likelihood parameters th
%
% Description
% LPG = LIK_T_LPG(LIK) takes a likelihood structure LIK
% and returns d log(p(th))/dth, where th collects the
% parameters. This subfunction is needed when there are
% likelihood parameters.
%
% See also
% LIK_T_LLG, LIK_T_LLG3, LIK_T_LLG2, GPLA_G
% Evaluate the gradients of log(prior)
v = lik.nu;
sigma2 = lik.sigma2;
lpg = [];
i1 = 0;
if ~isempty(lik.p.sigma2)
i1 = i1+1;
lpg(i1) = lik.p.sigma2.fh.lpg(lik.sigma2, lik.p.sigma2).*sigma2 + 1;
end
if ~isempty(lik.p.nu)
i1 = i1+1;
lpg(i1) = lik.p.nu.fh.lpg(lik.nu, lik.p.nu).*v.*log(v) +log(v) + 1;
end
end
function ll = lik_t_ll(lik, y, f, z)
%LIK_T_LL Log likelihood
%
% Description
% LL = LIK_T_LL(LIK, Y, F) takes a likelihood structure LIK,
% observations Y, and latent values F. Returns the log
% likelihood, log p(y|f,z). This subfunction is needed when
% using Laplace approximation or MCMC for inference with
% non-Gaussian likelihoods. This subfunction is also used in
% information criteria (DIC, WAIC) computations.
%
% See also
% LIK_T_LLG, LIK_T_LLG3, LIK_T_LLG2, GPLA_E
r = y-f;
v = lik.nu;
sigma2 = lik.sigma2;
term = gammaln((v + 1) / 2) - gammaln(v/2) -log(v.*pi.*sigma2)/2;
ll = term + log(1 + (r.^2)./v./sigma2) .* (-(v+1)/2);
ll = sum(ll);
end
function llg = lik_t_llg(lik, y, f, param, z)
%LIK_T_LLG Gradient of the log likelihood
%
% Description
% LOKLIKG = LIK_T_LLG(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, observations Y, and latent values F. Returns
% the gradient of log likelihood with respect to PARAM. At the
% moment PARAM can be 'param' or 'latent'. This subfunction is
% needed when using Laplace approximation or MCMC for inference
% with non-Gaussian likelihoods.
%
% See also
% LIK_T_LL, LIK_T_LLG2, LIK_T_LLG3, GPLA_E
r = y-f;
v = lik.nu;
sigma2 = lik.sigma2;
switch param
case 'param'
n = length(y);
i1=0;
if ~isempty(lik.p.sigma2)
i1=i1+1;
% Derivative with respect to sigma2
llg(i1) = -n./sigma2/2 + (v+1)./2.*sum(r.^2./(v.*sigma2.^2+r.^2*sigma2));
% correction for the log transformation
llg(i1) = llg(i1).*sigma2;
end
if ~isempty(lik.p.nu)
i1=i1+1;
% Derivative with respect to nu
llg(i1) = 0.5.* sum(psi((v+1)./2) - psi(v./2) - 1./v - log(1+r.^2./(v.*sigma2)) + (v+1).*r.^2./(v.^2.*sigma2 + v.*r.^2));
% correction for the log transformation
llg(i1) = llg(i1).*v.*log(v);
end
case 'latent'
llg = (v+1).*r ./ (v.*sigma2 + r.^2);
end
end
function llg2 = lik_t_llg2(lik, y, f, param, z)
%LIK_T_LLG2 Second gradients of log likelihood
%
% Description
% LLG2 = LIK_T_LLG2(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, observations Y, and latent values F. Returns
% the Hessian of log likelihood with respect to PARAM. At the
% moment PARAM can be only 'latent'. LLG2 is a vector with
% diagonal elements of the Hessian matrix (off diagonals are
% zero). This subfunction is needed when using Laplace
% approximation or EP for inference with non-Gaussian likelihoods.
%
% See also
% LIK_T_LL, LIK_T_LLG, LIK_T_LLG3, GPLA_E
r = y-f;
v = lik.nu;
sigma2 = lik.sigma2;
switch param
case 'param'
case 'latent'
% The Hessian d^2 /(dfdf)
llg2 = (v+1).*(r.^2 - v.*sigma2) ./ (v.*sigma2 + r.^2).^2;
case 'latent+param'
% gradient d^2 / (dfds2)
llg2 = -v.*(v+1).*r ./ (v.*sigma2 + r.^2).^2;
% Correction for the log transformation
llg2 = llg2.*sigma2;
if ~isempty(lik.p.nu)
% gradient d^2 / (dfdnu)
llg2(:,2) = r./(v.*sigma2 + r.^2) - sigma2.*(v+1).*r./(v.*sigma2 + r.^2).^2;
% Correction for the log transformation
llg2(:,2) = llg2(:,2).*v.*log(v);
end
end
end
function llg3 = lik_t_llg3(lik, y, f, param, z)
%LIK_T_LLG3 Third gradients of log likelihood (energy)
%
% Description
% LLG3 = LIK_T_LLG3(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, observations Y and latent values F and
% returns the third gradients of log likelihood with respect
% to PARAM. At the moment PARAM can be only 'latent'. G3 is a
% vector with third gradients. This subfunction is needed when
% using Laplace approximation for inference with non-Gaussian
% likelihoods.
%
% See also
% LIK_T_LL, LIK_T_LLG, LIK_T_LLG2, GPLA_E, GPLA_G
r = y-f;
v = lik.nu;
sigma2 = lik.sigma2;
switch param
case 'param'
case 'latent'
% Return the diagonal of W differentiated with respect to latent values / dfdfdf
llg3 = (v+1).*(2.*r.^3 - 6.*v.*sigma2.*r) ./ (v.*sigma2 + r.^2).^3;
case 'latent2+param'
% Return the diagonal of W differentiated with respect to
% likelihood parameters / dfdfds2
llg3 = (v+1).*v.*( v.*sigma2 - 3.*r.^2) ./ (v.*sigma2 + r.^2).^3;
llg3 = llg3.*sigma2;
if ~isempty(lik.p.nu)
% dfdfdnu
llg3(:,2) = (r.^2-2.*v.*sigma2-sigma2)./(v.*sigma2 + r.^2).^2 - 2.*sigma2.*(r.^2-v.*sigma2).*(v+1)./(v.*sigma2 + r.^2).^3;
llg3(:,2) = llg3(:,2).*v.*log(v);
end
end
end
function [logM_0, m_1, sigm2hati1] = lik_t_tiltedMoments(lik, y, i1, sigm2_i, myy_i, z)
%LIK_T_TILTEDMOMENTS Returns the marginal moments for EP algorithm
%
% Description
%    [M_0, M_1, M2] = LIK_T_TILTEDMOMENTS(LIK, Y, I, S2, MYY, Z)
%    takes a likelihood structure LIK, observations Y, optional
%    observed quantities Z, index I, and cavity variance S2 and mean
%    MYY. Returns the zeroth moment M_0, mean M_1 and variance
%    M_2 of the posterior marginal (see Rasmussen and Williams
%    (2006): Gaussian Processes for Machine Learning, page 55).
% This subfunction is needed when using EP for inference with
% non-Gaussian likelihoods.
%
% See also
% GPEP_E
zm = @zeroth_moment;
tol = 1e-8;
yy = y(i1);
nu = lik.nu;
sigma2 = lik.sigma2;
% Set the limits for integration and integrate with quad
% -----------------------------------------------------
mean_app = myy_i;
sigm_app = sqrt(sigm2_i);
lambdaconf(1) = mean_app - 8.*sigm_app; lambdaconf(2) = mean_app + 8.*sigm_app;
test1 = zm((lambdaconf(2)+lambdaconf(1))/2) > zm(lambdaconf(1));
test2 = zm((lambdaconf(2)+lambdaconf(1))/2) > zm(lambdaconf(2));
testiter = 1;
if test1 == 0
lambdaconf(1) = lambdaconf(1) - 3*sigm_app;
test1 = zm((lambdaconf(2)+lambdaconf(1))/2)>zm(lambdaconf(1));
if test1 == 0
go=true;
while testiter<10 && go
lambdaconf(1) = lambdaconf(1) - 2*sigm_app;
lambdaconf(2) = lambdaconf(2) - 2*sigm_app;
test1 = zm((lambdaconf(2)+lambdaconf(1))/2)>zm(lambdaconf(1));
test2 = zm((lambdaconf(2)+lambdaconf(1))/2)>zm(lambdaconf(2));
if test1==1 && test2==1
go=false;
end
testiter=testiter+1;
end
end
mean_app = (lambdaconf(2)+lambdaconf(1))/2;
elseif test2 == 0
lambdaconf(2) = lambdaconf(2) + 3*sigm_app;
test2 = zm((lambdaconf(2)+lambdaconf(1))/2)>zm(lambdaconf(2));
if test2 == 0
go=true;
while testiter<10 && go
lambdaconf(1) = lambdaconf(1) + 2*sigm_app;
lambdaconf(2) = lambdaconf(2) + 2*sigm_app;
test1 = zm((lambdaconf(2)+lambdaconf(1))/2)>zm(lambdaconf(1));
test2 = zm((lambdaconf(2)+lambdaconf(1))/2)>zm(lambdaconf(2));
if test1==1 && test2==1
go=false;
end
testiter=testiter+1;
end
end
mean_app = (lambdaconf(2)+lambdaconf(1))/2;
end
RTOL = 1.e-6;
ATOL = 1.e-10;
% Integrate with quadrature
[m_0, m_1, m_2] = quad_moments(zm,lambdaconf(1), lambdaconf(2), RTOL, ATOL);
sigm2hati1 = m_2 - m_1.^2;
logM_0 = log(m_0);
function integrand = zeroth_moment(f)
r = yy-f;
term = gammaln((nu + 1) / 2) - gammaln(nu/2) -log(nu.*pi.*sigma2)/2;
integrand = exp(term + log(1 + r.^2./nu./sigma2) .* (-(nu+1)/2));
integrand = integrand.*exp(- 0.5 * (f-myy_i).^2./sigm2_i - log(sigm2_i)/2 - log(2*pi)/2); %
end
end
function [g_i] = lik_t_siteDeriv(lik, y, i1, sigm2_i, myy_i, z)
%LIK_T_SITEDERIV Evaluate the expectation of the gradient
% of the log likelihood term with respect
% to the likelihood parameters for EP
%
% Description
%    G_I = LIK_T_SITEDERIV(LIK, Y, I, S2, MYY, Z)
% takes a likelihood structure LIK, observations Y, index I
% and cavity variance S2 and mean MYY. Returns E_f [d log
% p(y_i|f_i) /d a], where a is the likelihood parameter and
% the expectation is over the marginal posterior. This term is
% needed when evaluating the gradients of the marginal
% likelihood estimate Z_EP with respect to the likelihood
% parameters (see Seeger (2008): Expectation propagation for
% exponential families). This subfunction is needed when using
% EP for inference with non-Gaussian likelihoods and there are
% likelihood parameters.
%
% See also
% GPEP_G
zm = @zeroth_moment;
znu = @deriv_nu;
zsigma2 = @deriv_sigma2;
tol = 1e-8;
yy = y(i1);
nu = lik.nu;
sigma2 = lik.sigma2;
% Set the limits for integration and integrate with quad
mean_app = myy_i;
sigm_app = sqrt(sigm2_i);
lambdaconf(1) = mean_app - 6.*sigm_app; lambdaconf(2) = mean_app + 6.*sigm_app;
test1 = zm((lambdaconf(2)+lambdaconf(1))/2)>zm(lambdaconf(1));
test2 = zm((lambdaconf(2)+lambdaconf(1))/2)>zm(lambdaconf(2));
testiter = 1;
if test1 == 0
lambdaconf(1) = lambdaconf(1) - 3*sigm_app;
test1 = zm((lambdaconf(2)+lambdaconf(1))/2)>zm(lambdaconf(1));
if test1 == 0
go=true;
while testiter<10 && go
lambdaconf(1) = lambdaconf(1) - 2*sigm_app;
lambdaconf(2) = lambdaconf(2) - 2*sigm_app;
test1 = zm((lambdaconf(2)+lambdaconf(1))/2)>zm(lambdaconf(1));
test2 = zm((lambdaconf(2)+lambdaconf(1))/2)>zm(lambdaconf(2));
if test1==1 && test2==1
go=false;
end
testiter=testiter+1;
end
end
mean_app = (lambdaconf(2)+lambdaconf(1))/2;
elseif test2 == 0
lambdaconf(2) = lambdaconf(2) + 3*sigm_app;
test2 = zm((lambdaconf(2)+lambdaconf(1))/2)>zm(lambdaconf(2));
if test2 == 0
go=true;
while testiter<10 && go
lambdaconf(1) = lambdaconf(1) + 2*sigm_app;
lambdaconf(2) = lambdaconf(2) + 2*sigm_app;
test1 = zm((lambdaconf(2)+lambdaconf(1))/2)>zm(lambdaconf(1));
test2 = zm((lambdaconf(2)+lambdaconf(1))/2)>zm(lambdaconf(2));
if test1==1 && test2==1
go=false;
end
testiter=testiter+1;
end
end
mean_app = (lambdaconf(2)+lambdaconf(1))/2;
end
% Integrate with quad
[m_0, fhncnt] = quadgk(zm, lambdaconf(1), lambdaconf(2));
% t=linspace(lambdaconf(1),lambdaconf(2),100);
% plot(t,zm(t))
% keyboard
[g_i(1), fhncnt] = quadgk( @(f) zsigma2(f).*zm(f) , lambdaconf(1), lambdaconf(2));
g_i(1) = g_i(1)/m_0*sigma2;
if ~isempty(lik.p.nu)
[g_i(2), fhncnt] = quadgk(@(f) znu(f).*zm(f) , lambdaconf(1), lambdaconf(2));
g_i(2) = g_i(2)/m_0.*nu.*log(nu);
end
function integrand = zeroth_moment(f)
r = yy-f;
term = gammaln((nu + 1) / 2) - gammaln(nu/2) -log(nu.*pi.*sigma2)/2;
integrand = exp(term + log(1 + r.^2./nu./sigma2) .* (-(nu+1)/2));
integrand = integrand.*exp(- 0.5 * (f-myy_i).^2./sigm2_i - log(sigm2_i)/2 - log(2*pi)/2);
end
function g = deriv_nu(f)
r = yy-f;
temp = 1 + r.^2./nu./sigma2;
g = psi((nu+1)/2)./2 - psi(nu/2)./2 - 1./(2.*nu) - log(temp)./2 + (nu+1)./(2.*temp).*(r./nu).^2./sigma2;
end
function g = deriv_sigma2(f)
r = yy-f;
g = -1/sigma2/2 + (nu+1)./2.*r.^2./(nu.*sigma2.^2 + r.^2.*sigma2);
end
end
function [lnZhat, muhat, sigm2hat] = lik_t_tiltedMoments2(likelih, y, yi, sigm2_i, myy_i, z, eta)
%LIK_T_TILTEDMOMENTS2  Returns the marginal moments for the robust-EP algorithm
%
%  Description
%    [LNZHAT, MUHAT, SIGM2HAT] = LIK_T_TILTEDMOMENTS2(LIK, Y, I, S2, MYY, Z, ETA)
%    takes a likelihood structure LIK, observations Y, index I,
%    cavity variance S2 and mean MYY, and the fraction parameter ETA.
%    Returns the log of the zeroth moment LNZHAT, and the mean MUHAT
%    and variance SIGM2HAT of the tilted distribution (see Rasmussen
%    and Williams (2006): Gaussian Processes for Machine Learning,
%    page 55). This subfunction is needed when using robust-EP for
%    inference with non-Gaussian likelihoods.
%
% See also
% GPEP_E
if nargin<7
eta=1;
end
yy = y(yi);
nu = likelih.nu;
sigma2 = likelih.sigma2;
sigma = sqrt(sigma2);
nuprime = eta*nu+eta-1;
a=nuprime/2; %a=nu/2;
u=linspace(log(1e-8),5,200);
du=u(2)-u(1);
lnpu=(a-1)*u -a*exp(u)+u;
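% The fractional Student-t site is a Gaussian scale mixture,
% t(y|f,s2') = int N(y|f, s2'/tau) Gamma(tau | a, rate=a) dtau, and the
% mixing integral is evaluated on a fixed grid in u = log(tau): lnpu is
% the log density of u (the Gamma(a,a) density with the +u Jacobian
% term), up to the constant a*log(a)-gammaln(a) added to lnZhat below.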
% sigma2 t-likelihood parameter, scale squared
% sigm2_i cavity variance
% myy_i cavity mean
sigma2prime = sigma2*nu/nuprime;
Vu = sigm2_i + (sigma2prime)./exp(u);
lnZu = 0.5*(-log(2*pi*Vu)) -0.5 * (yy-myy_i)^2 ./Vu;
lnZt = eta*gammaln((nu+1)/2) - eta/2*log(nu*pi*sigma2) - eta*gammaln(nu/2) - gammaln((nuprime+1)/2) + 0.5*log(nuprime*pi*sigma2prime) + gammaln(nuprime/2);
ptu=exp(lnpu+lnZu+lnZt);
Z_0=sum(ptu)*du;
lnZhat=log(Z_0) + a*log(a)-gammaln(a);
Vtu=1./(1/sigm2_i +(1/sigma2prime)*exp(u));
mtu=Vtu.*(myy_i/sigm2_i + (yy/sigma2prime)*exp(u));
muhat=sum(mtu.*ptu)*du/Z_0;
sigm2hat=sum((Vtu+mtu.^2).*ptu)*du/Z_0-muhat^2;
% limiting distribution (nu -> infinity)
% Vg=1/(1/sigm2_i +eta/sigma2);
% mg=Vg*(myy_i/sigm2_i +yy*eta/sigma2);
% sigm_i=sqrt(sigm2_i);
% sg=sqrt(Vg);
%
% % set integration limits and scaling
% nu_lim=1e10;
% if nu<nu_lim
%
% if sqrt(sigma2/sigm2_i)<0.05
% % set the integration limits when the likelihood is very narrow
%
% % grid resolution
% dd=10;
% df = [12*sigm_i/100 2*dd*sigma/100];
%
% if yy>=myy_i
% % grid break points
% bp=[min(myy_i-6*sigm_i,yy-dd*sigma) myy_i-6*sigm_i, ...
% min(myy_i+6*sigm_i,yy-dd*sigma), yy-dd*sigma, yy+dd*sigma,...
% max(myy_i+6*sigm_i,yy+dd*sigma)];
%
% % grid values
% a=1e-6;
% fvec =[ bp(1):df(2):bp(2)-a, bp(2):df(1):bp(3)-a, bp(3):max(df):bp(4)-a, ...
% bp(4):df(2):bp(5)-a, bp(5):df(1):bp(6)];
% else
% % grid break points
% bp=[min(myy_i-6*sigm_i,yy-dd*sigma), yy-dd*sigma, yy+dd*sigma,...
% max(myy_i-6*sigm_i,yy+dd*sigma), myy_i+6*sigm_i, ...
% max(myy_i+6*sigm_i,yy+dd*sigma)];
%
% % grid values
% a=1e-6;
% fvec =[ bp(1):df(1):bp(2)-a, bp(2):df(2):bp(3)-a, bp(3):max(df):bp(4)-a, ...
% bp(4):df(1):bp(5)-a, bp(5):df(2):bp(6)];
% end
%
% np=numel(fvec);
% logpt = lpt(fvec,0);
% lpt_max = max([logpt lpt([myy_i mg],0)]);
% lambdaconf=[fvec(1), fvec(end)];
% for i1=2:np-1
% if logpt(i1) < lpt_max+log(1e-7) %(exp(logpt(i1))/exp(lpt_max) < 1e-7)
% lambdaconf(1) = fvec(i1);
% else
% break;
% end
% end
% for i1=1:np-2
% if logpt(end-i1) < lpt_max+log(1e-7) %(exp(logpt(end-i1))/exp(lpt_max) < 1e-7)
% lambdaconf(2) = fvec(end-i1);
% else
% break;
% end
% end
% else
% % set the integration limits in easier cases
% np=20;
% if mg>myy_i
% lambdaconf=[myy_i-6*sigm_i,max(mg+6*sg,myy_i+6*sigm_i)];
% fvec=linspace(myy_i,mg,np);
% else
% lambdaconf=[min(mg-6*sg,myy_i-6*sigm_i),myy_i+6*sigm_i];
% fvec=linspace(mg,myy_i,np);
% end
% lpt_max=max(lpt(fvec,0));
% end
% C=log(1)-lpt_max; % scale the log-density for the quadrature tolerance
% else
% lambdaconf=[mg-6*sg,mg+6*sg];
% C=log(1)-lpt(mg,0);
% end
%
% if nu>nu_lim
% % the limiting Gaussian case
% Vz=sigm2_i+sigma2/eta;
% lnZhat = 0.5*(-log(eta) +(1-eta)*log(2*pi*sigma2) -log(2*pi*Vz)) -(0.5/Vz)*(yy-myy_i)^2;
% muhat = mg;
% sigm2hat = Vg;
% else
% % Integrate with quadrature
% RTOL = 1.e-6;
% ATOL = 1.e-7;
% tic
% [m_0, m_1, m_2] = quad_moments(@(f) exp(lpt(f,C)),lambdaconf(1), lambdaconf(2), RTOL, ATOL);toc
% muhat = m_1;
% sigm2hat = m_2 - m_1.^2;
% lnZhat = log(m_0) -C;
% end
function lpdf = lpt(f,C)
% logarithm of the tilted distribution
r = yy-f;
lpdf = gammaln((nu + 1) / 2) - gammaln(nu/2) -log(nu.*pi.*sigma2)/2;
lpdf = lpdf + log(1 + r.^2./nu./sigma2) .* (-(nu+1)/2);
lpdf = lpdf*eta - (0.5/sigm2_i) * (f-myy_i).^2 + (C-log(2*pi*sigm2_i)/2);
end
end
function [g_i] = lik_t_siteDeriv2(likelih, y, yi, sigm2_i, myy_i, z, eta, lnZhat)
%LIK_T_SITEDERIV2  Evaluate the expectation of the gradient
% of the log likelihood term with respect
% to the likelihood parameters for EP
%
% Description
%    G_I = LIK_T_SITEDERIV2(LIK, Y, I, S2, MYY, Z, ETA, LNZHAT)
%    takes a likelihood structure LIK, observations Y, index I
% and cavity variance S2 and mean MYY. Returns E_f [d log
% p(y_i|f_i) /d a], where a is the likelihood parameter and the
% expectation is over the marginal posterior. This term is
% needed when evaluating the gradients of the marginal
% likelihood estimate Z_EP with respect to the likelihood
% parameters (see Seeger (2008): Expectation propagation for
% exponential families). This subfunction is needed when using
% robust-EP for inference with non-Gaussian likelihoods and there
% are likelihood parameters.
%
% See also
% GPEP_G
if nargin<7
eta=1;
end
yy = y(yi);
nu = likelih.nu;
sigma2 = likelih.sigma2;
sigma = sqrt(sigma2);
% limiting distribution (nu -> infinity)
Vg=1/(1/sigm2_i +eta/sigma2);
mg=Vg*(myy_i/sigm2_i +yy*eta/sigma2);
sigm_i=sqrt(sigm2_i);
sg=sqrt(Vg);
% set integration limits and scaling
nu_lim=1e10;
if nu<nu_lim
if sqrt(sigma2/sigm2_i)<0.05
% set the integration limits when the likelihood is very narrow
% grid resolution
dd=10;
df = [12*sigm_i/100 2*dd*sigma/100];
if yy>=myy_i
% grid break points
bp=[min(myy_i-6*sigm_i,yy-dd*sigma) myy_i-6*sigm_i, ...
min(myy_i+6*sigm_i,yy-dd*sigma), yy-dd*sigma, yy+dd*sigma,...
max(myy_i+6*sigm_i,yy+dd*sigma)];
% grid values
a=1e-6;
fvec =[ bp(1):df(2):bp(2)-a, bp(2):df(1):bp(3)-a, bp(3):max(df):bp(4)-a, ...
bp(4):df(2):bp(5)-a, bp(5):df(1):bp(6)];
else
% grid break points
bp=[min(myy_i-6*sigm_i,yy-dd*sigma), yy-dd*sigma, yy+dd*sigma,...
max(myy_i-6*sigm_i,yy+dd*sigma), myy_i+6*sigm_i, ...
max(myy_i+6*sigm_i,yy+dd*sigma)];
% grid values
a=1e-6;
fvec =[ bp(1):df(1):bp(2)-a, bp(2):df(2):bp(3)-a, bp(3):max(df):bp(4)-a, ...
bp(4):df(1):bp(5)-a, bp(5):df(2):bp(6)];
end
np=numel(fvec);
logpt = lpt(fvec,0);
lpt_max = max([logpt lpt([myy_i mg],0)]);
lambdaconf=[fvec(1), fvec(end)];
for i1=2:np-1
if logpt(i1) < lpt_max+log(1e-7) %(exp(logpt(i1))/exp(lpt_max) < 1e-7)
lambdaconf(1) = fvec(i1);
else
break;
end
end
for i1=1:np-2
if logpt(end-i1) < lpt_max+log(1e-7) %(exp(logpt(end-i1))/exp(lpt_max) < 1e-7)
lambdaconf(2) = fvec(end-i1);
else
break;
end
end
else
% set the integration limits in easier cases
np=20;
if mg>myy_i
lambdaconf=[myy_i-6*sigm_i,max(mg+6*sg,myy_i+6*sigm_i)];
fvec=linspace(myy_i,mg,np);
else
lambdaconf=[min(mg-6*sg,myy_i-6*sigm_i),myy_i+6*sigm_i];
fvec=linspace(mg,myy_i,np);
end
lpt_max=max(lpt(fvec,0));
end
C=log(1)-lpt_max; % scale the log-density for the quadrature tolerance
else
lambdaconf=[mg-6*sg,mg+6*sg];
C=log(1)-lpt(mg,0);
end
if nu>nu_lim
% the limiting normal observation model
Vz=sigm2_i+sigma2/eta;
g_i(1) = 0.5*( (1-eta)/sigma2 -1/Vz/eta + (yy-myy_i)^2 /Vz^2 /eta ) *sigma2/eta;
if (isfield(likelih,'p') && ~isempty(likelih.p.nu))
g_i(2) = 0;
end
else
% Integrate with quadrature
RTOL = 1.e-6;
ATOL = 1e-7;
% Integrate with quad
%zm=@(f) exp(lpt(f,C));
%[m_0, fhncnt] = quadgk(zm, lambdaconf(1), lambdaconf(2),'AbsTol',ATOL,'RelTol',RTOL)
% Use the normalization determined in the lik_t_tiltedMoments2
m_0=exp(lnZhat+C);
zm=@(f) deriv_sigma2(f).*exp(lpt(f,C))*sigma2;
[g_i(1), fhncnt] = quadgk( zm, lambdaconf(1), lambdaconf(2),'AbsTol',ATOL,'RelTol',RTOL);
g_i(1) = g_i(1)/m_0;
if (isfield(likelih,'p') && ~isempty(likelih.p.nu))
zm=@(f) deriv_nu(f).*exp(lpt(f,C));
[g_i(2), fhncnt] = quadgk( zm, lambdaconf(1), lambdaconf(2),'AbsTol',ATOL,'RelTol',RTOL);
g_i(2) = g_i(2)/m_0.*nu.*log(nu);
end
end
function lpdf = lpt(f,C)
% logarithm of the tilted distribution
r = yy-f;
lpdf = gammaln((nu + 1) / 2) - gammaln(nu/2) -log(nu.*pi.*sigma2)/2;
lpdf = lpdf + log(1 + r.^2./nu./sigma2) .* (-(nu+1)/2);
lpdf = lpdf*eta - (0.5/sigm2_i) * (f-myy_i).^2 + (C-log(2*pi*sigm2_i)/2);
end
function g = deriv_nu(f)
% derivative of the log-likelihood wrt nu
r = yy-f;
temp = r.^2 ./(nu*sigma2);
g = psi((nu+1)/2) - psi(nu/2) - 1/nu;
g = g + (1+1/nu).*temp./(1+temp);
% for small values use a more accurate method for log(1+x)
ii = temp<1e3;
g(ii) = g(ii) - log1p(temp(ii));
g(~ii) = g(~ii) - log(1+temp(~ii));
g = g*0.5;
end
function g = deriv_sigma2(f)
% derivative of the log-likelihood wrt sigma2
r = yy-f;
temp = r.^2 /sigma2;
g = -1/sigma2/2 + ((1+1/nu)/2) * temp ./ (1 + temp/nu) /sigma2;
end
end
function [f, a] = lik_t_optimizef(gp, y, K, Lav, K_fu)
%LIK_T_OPTIMIZEF function to optimize the latent variables
% with EM algorithm
%
% Description:
% [F, A] = LIK_T_OPTIMIZEF(GP, Y, K, Lav, K_fu) Takes Gaussian
% process structure GP, observations Y and the covariance
% matrix K. Solves the posterior mode of F using EM algorithm
% and evaluates A = (K + W)\Y as a sideproduct. Lav and K_fu
% are needed for sparse approximations. For details, see
%    Vanhatalo, Jylänki and Vehtari (2009): Gaussian process
% regression with Student-t likelihood. This subfunction is
% needed when using lik_specific optimization method for mode
% finding in Laplace algorithm.
%
iter = 1;
sigma2 = gp.lik.sigma2;
% if sigma2==0
% f=NaN;a=NaN;
% return
% end
nu = gp.lik.nu;
n = length(y);
switch gp.type
case 'FULL'
iV = ones(n,1)./sigma2;
siV = sqrt(iV);
B = eye(n) + siV*siV'.*K;
[L,notpositivedefinite] = chol(B);
if notpositivedefinite
f=NaN;a=NaN;
return
end
B=B';
b = iV.*y;
a = b - siV.*(L'\(L\(siV.*(K*b))));
f = K*a;
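% EM iteration: the E-step updates the per-observation noise
% precisions iV = (nu+1)./(nu*sigma2 + (y-f).^2), the expected inverse
% scales of the t model; the M-step solves the resulting
% heteroscedastic Gaussian GP regression for the new posterior mode f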
while iter < 200
fold = f;
iV = (nu+1) ./ (nu.*sigma2 + (y-f).^2);
siV = sqrt(iV);
B = eye(n) + siV*siV'.*K;
L = chol(B)';
b = iV.*y;
ws=warning('off','MATLAB:nearlySingularMatrix');
a = b - siV.*(L'\(L\(siV.*(K*b))));
warning(ws);
f = K*a;
if max(abs(f-fold)) < 1e-8
break
end
iter = iter + 1;
end
case 'FIC'
K_uu = K;
Luu = chol(K_uu)';
B=Luu\(K_fu'); % u x f
K = diag(Lav) + B'*B;
iV = ones(n,1)./sigma2;
siV = sqrt(iV);
B = eye(n) + siV*siV'.*K;
L = chol(B)';
b = iV.*y;
a = b - siV.*(L'\(L\(siV.*(K*b))));
f = K*a;
while iter < 200
fold = f;
iV = (nu+1) ./ (nu.*sigma2 + (y-f).^2);
siV = sqrt(iV);
B = eye(n) + siV*siV'.*K;
L = chol(B)';
b = iV.*y;
a = b - siV.*(L'\(L\(siV.*(K*b))));
f = K*a;
if max(abs(f-fold)) < 1e-8
break
end
iter = iter + 1;
end
end
end
function upfact = lik_t_upfact(gp, y, mu, ll, z)
nu = gp.lik.nu;
sigma = sqrt(gp.lik.sigma2);
sll = sqrt(ll);
fh_e = @(f) t_pdf(f, nu, y, sigma).*norm_pdf(f, mu, sll);
EE = quadgk(fh_e, -40, 40);
fm = @(f) f.*t_pdf(f, nu, y, sigma).*norm_pdf(f, mu, sll)./EE;
mm = quadgk(fm, -40, 40);
fV = @(f) (f - mm).^2.*t_pdf(f, nu, y, sigma).*norm_pdf(f, mu, sll)./EE;
Varp = quadgk(fV, -40, 40);
upfact = -(Varp - ll)./ll^2;
end
function [lpy, Ey, Vary] = lik_t_predy(lik, Ef, Varf, y, z)
%LIK_T_PREDY Returns the predictive mean, variance and density of y
%
% Description
%    LPY = LIK_T_PREDY(LIK, EF, VARF, YT)
%    Returns the logarithm of the predictive density PY of YT, that is
%        p(yt | zt) = \int p(yt | f, zt) p(f|y) df.
%    This requires also the observations YT. This subfunction is
%    needed when computing posterior predictive distributions for
%    future observations.
%
% [LPY, EY, VARY] = LIK_T_PREDY(LIK, EF, VARF) takes a likelihood
% structure LIK, posterior mean EF and posterior Variance
% VARF of the latent variable and returns the posterior
% predictive mean EY and variance VARY of the observations
%    related to the latent variables. This subfunction is needed when
%    computing posterior predictive distributions for future observations.
%
%
% See also
% GPLA_PRED, GPEP_PRED, GPMC_PRED
nu = lik.nu;
sigma2 = lik.sigma2;
sigma = sqrt(sigma2);
Ey = zeros(size(Ef));
EVary = zeros(size(Ef));
VarEy = zeros(size(Ef));
lpy = zeros(size(Ef));
if nargout > 1
% for i1=1:length(Ef)
% %%% With quadrature
% ci = sqrt(Varf(i1));
%
% F = @(x) x.*norm_pdf(x,Ef(i1),sqrt(Varf(i1)));
% Ey(i1) = quadgk(F,Ef(i1)-6*ci,Ef(i1)+6*ci);
%
% F2 = @(x) (nu./(nu-2).*sigma2).*norm_pdf(x,Ef(i1),sqrt(Varf(i1)));
% EVary(i1) = quadgk(F2,Ef(i1)-6*ci,Ef(i1)+6*ci);
%
% F3 = @(x) x.^2.*norm_pdf(x,Ef(i1),sqrt(Varf(i1)));
% VarEy(i1) = quadgk(F3,Ef(i1)-6*ci,Ef(i1)+6*ci) - Ey(i1).^2;
% end
% Vary = EVary + VarEy;
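%     Closed form by the law of total variance: E[y] = E[f] = Ef and
%     Var[y] = E[Var(y|f)] + Var(E[y|f]) = nu/(nu-2)*sigma2 + Varf,
%     which is finite only for nu > 2 (hence the warning below).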
Ey = Ef;
if nu>2
Vary=nu./(nu-2).*sigma2 +Varf;
else
warning('Variance of Student''s t-distribution is not defined for nu<=2')
Vary=NaN+Varf;
end
end
lpy = zeros(length(y),1);
for i2 = 1:length(y)
mean_app = Ef(i2);
sigm_app = sqrt(Varf(i2));
pd = @(f) t_pdf(y(i2), nu, f, sigma).*norm_pdf(f,Ef(i2),sqrt(Varf(i2)));
lpy(i2) = log(quadgk(pd, mean_app - 12*sigm_app, mean_app + 12*sigm_app));
end
end
function prctys = lik_t_predprcty(lik, Ef, Varf, zt, prcty)
%LIK_T_PREDPRCTY Returns the percentiles of predictive density of y
%
% Description
%    PRCTY = LIK_T_PREDPRCTY(LIK, EF, VARF, ZT, PRCTY)
%    Returns percentiles PRCTY of the predictive density of y. This
%    subfunction is needed when using function gp_predprcty.
%
% See also
% GP_PREDPCTY
opt=optimset('TolX',1e-5,'Display','off');
nt=size(Ef,1);
prctys = zeros(nt,numel(prcty));
prcty=prcty/100;
nu = lik.nu;
nu_p=max(2.5,nu);
sigma2 = lik.sigma2;
Vary=nu_p./(nu_p-2).*sigma2 +Varf;
for i1=1:nt
ci = sqrt(Varf(i1));
for i2=1:numel(prcty)
minf=sqrt(Vary(i1))*tinv(prcty(i2),nu)+(Ef(i1)-2.5*sqrt(Vary(i1)));
maxf=sqrt(Vary(i1))*tinv(prcty(i2),nu)+(Ef(i1)+2.5*sqrt(Vary(i1)));
a=(fminbnd(@(a) (quadgk(@(f) tcdf((a-f)/sqrt(Vary(i1)),nu).*norm_pdf(f,Ef(i1),ci),Ef(i1)-6*ci,Ef(i1)+6*ci,'AbsTol',1e-4)-prcty(i2)).^2,minf,maxf,opt));
% a=(fminbnd(@(a) (quadgk(@(f) quadgk(@(y) t_pdf(y,nu,Ef(i1),sqrt(Vary(i1))),Ef(i1)-12*sqrt(Vary(i1)),a).*norm_pdf(f,Ef(i1),ci),Ef(i1)-6*ci,Ef(i1)+6*ci,'AbsTol',1e-4)-prcty(i2)).^2,minf,maxf,opt));
prctys(i1,i2)=a;
end
end
end
function mu = lik_t_invlink(lik, f, z)
%LIK_T_INVLINK Returns values of inverse link function
%
% Description
% P = LIK_T_INVLINK(LIK, F) takes a likelihood structure LIK and
% latent values F and returns the values MU of inverse link function.
% This subfunction is needed when using gp_predprctmu.
%
% See also
% LIK_T_LL, LIK_T_PREDY
mu = f;
end
function reclik = lik_t_recappend(reclik, ri, lik)
%RECAPPEND Record append
% Description
%    RECLIK = LIK_T_RECAPPEND(RECLIK, RI, LIK) takes a likelihood
%    record structure RECLIK, record index RI and likelihood
%    structure LIK with the current MCMC samples of the parameters.
%    Returns RECLIK which contains all the old samples and the
%    current samples from LIK. This subfunction is needed when
%    using MCMC sampling (gp_mc).
if nargin == 2
% Initialize the record
reclik.type = 'Student-t';
% Initialize parameters
reclik.nu = [];
reclik.sigma2 = [];
% Set the function handles
reclik.fh.pak = @lik_t_pak;
reclik.fh.unpak = @lik_t_unpak;
reclik.fh.lp = @lik_t_lp;
reclik.fh.lpg = @lik_t_lpg;
reclik.fh.ll = @lik_t_ll;
reclik.fh.llg = @lik_t_llg;
reclik.fh.llg2 = @lik_t_llg2;
reclik.fh.llg3 = @lik_t_llg3;
reclik.fh.tiltedMoments = @lik_t_tiltedMoments;
reclik.fh.tiltedMoments2 = @lik_t_tiltedMoments2;
reclik.fh.siteDeriv = @lik_t_siteDeriv;
reclik.fh.siteDeriv2 = @lik_t_siteDeriv2;
reclik.fh.optimizef = @lik_t_optimizef;
reclik.fh.upfact = @lik_t_upfact;
reclik.fh.invlink = @lik_t_invlink;
reclik.fh.predy = @lik_t_predy;
reclik.fh.predprcty = @lik_t_predprcty;
reclik.fh.recappend = @lik_t_recappend;
reclik.p.nu=[];
if ~isempty(ri.p.nu)
reclik.p.nu = ri.p.nu;
end
reclik.p.sigma2=[];
if ~isempty(ri.p.sigma2)
reclik.p.sigma2 = ri.p.sigma2;
end
else
% Append to the record
likp = lik.p;
% record sigma2
reclik.sigma2(ri,:) = lik.sigma2;
if isfield(likp,'sigma2') && ~isempty(likp.sigma2)
reclik.p.sigma2 = likp.sigma2.fh.recappend(reclik.p.sigma2, ri, likp.sigma2);
end
% record nu
reclik.nu(ri,:) = lik.nu;
if isfield(likp,'nu') && ~isempty(likp.nu)
reclik.p.nu = likp.nu.fh.recappend(reclik.p.nu, ri, likp.nu);
end
end
end
|
github
|
lcnhappe/happe-master
|
gpcf_rq.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpcf_rq.m
| 30,341 |
utf_8
|
c746eccbc618b9d29506ad32406416e9
|
function gpcf = gpcf_rq(varargin)
%GPCF_RQ Create a rational quadratic covariance function
%
% Description
%    GPCF = GPCF_RQ('PARAM1',VALUE1,'PARAM2',VALUE2,...) creates
%    a rational quadratic covariance function structure in which the
%    named parameters have the specified values. Any unspecified
%    parameters are set to default values.
%    GPCF = GPCF_RQ(GPCF,'PARAM1',VALUE1,'PARAM2',VALUE2,...)
%    modifies a covariance function structure with the named
%    parameters altered to the specified values.
%
% Parameters for rational quadratic covariance function [default]
% magnSigma2 - magnitude (squared) [0.1]
% lengthScale - length scale for each input. [1]
% This can be either scalar corresponding
% to an isotropic function or vector
% defining own length-scale for each input
% direction.
% alpha - shape parameter [20]
% magnSigma2_prior - prior for magnSigma2 [prior_logunif]
% lengthScale_prior - prior for lengthScale [prior_t]
% alpha_prior - prior for alpha [prior_unif]
% metric - metric structure used by the covariance function []
% selectedVariables - vector defining which inputs are used [all]
% selectedVariables is shorthand for using
% metric_euclidean with corresponding components
%
% Note! If the prior is 'prior_fixed' then the parameter in
% question is considered fixed and it is not handled in
% optimization, grid integration, MCMC etc.
%
% See also
% GP_SET, GPCF_*, PRIOR_*, METRIC_*
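%
%  For reference, the standard rational quadratic form (which this
%  file is understood to implement) is, for an isotropic length
%  scale l,
%
%      k(x,x') = magnSigma2 * (1 + r^2/(2*alpha*l^2))^(-alpha),
%
%  with r = ||x - x'||; it approaches the squared exponential
%  covariance as alpha -> infinity. A minimal usage sketch
%  (hypothetical):
%
%      gpcf = gpcf_rq('lengthScale', 1.2, 'magnSigma2', 0.5, 'alpha', 2);
%      gp   = gp_set('cf', {gpcf});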
% Copyright (c) 2007-2010 Jarno Vanhatalo
% Copyright (c) 2010 Tuomas Nikoskinen, Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
if nargin>0 && ischar(varargin{1}) && ismember(varargin{1},{'init' 'set'})
% remove init and set
varargin(1)=[];
end
ip=inputParser;
ip.FunctionName = 'GPCF_RQ';
ip.addOptional('gpcf', [], @isstruct);
ip.addParamValue('magnSigma2',0.1, @(x) isscalar(x) && x>0);
ip.addParamValue('lengthScale',1, @(x) isvector(x) && all(x>0));
ip.addParamValue('alpha',20, @(x) isscalar(x) && x>0);
ip.addParamValue('metric',[], @isstruct);
ip.addParamValue('magnSigma2_prior', prior_logunif(), ...
@(x) isstruct(x) || isempty(x));
ip.addParamValue('lengthScale_prior',prior_t(), ...
@(x) isstruct(x) || isempty(x));
ip.addParamValue('alpha_prior', prior_unif(), ...
@(x) isstruct(x) || isempty(x));
ip.addParamValue('selectedVariables',[], @(x) isempty(x) || ...
(isvector(x) && all(x>0)));
ip.parse(varargin{:});
gpcf=ip.Results.gpcf;
if isempty(gpcf)
init=true;
gpcf.type = 'gpcf_rq';
else
if ~isfield(gpcf,'type') || ~isequal(gpcf.type,'gpcf_rq')
error('First argument does not seem to be a valid covariance function structure')
end
init=false;
end
% Initialize parameters
if init || ~ismember('lengthScale',ip.UsingDefaults)
gpcf.lengthScale = ip.Results.lengthScale;
end
if init || ~ismember('magnSigma2',ip.UsingDefaults)
gpcf.magnSigma2 = ip.Results.magnSigma2;
end
if init || ~ismember('alpha',ip.UsingDefaults)
gpcf.alpha = ip.Results.alpha;
end
% Initialize prior structure
if init
gpcf.p=[];
end
if init || ~ismember('lengthScale_prior',ip.UsingDefaults)
gpcf.p.lengthScale=ip.Results.lengthScale_prior;
end
if init || ~ismember('magnSigma2_prior',ip.UsingDefaults)
gpcf.p.magnSigma2=ip.Results.magnSigma2_prior;
end
if init || ~ismember('alpha_prior',ip.UsingDefaults)
gpcf.p.alpha=ip.Results.alpha_prior;
end
%Initialize metric
if ~ismember('metric',ip.UsingDefaults)
if ~isempty(ip.Results.metric)
gpcf.metric = ip.Results.metric;
gpcf = rmfield(gpcf, 'lengthScale');
gpcf.p = rmfield(gpcf.p, 'lengthScale');
elseif isfield(gpcf,'metric')
if ~isfield(gpcf,'lengthScale')
gpcf.lengthScale = gpcf.metric.lengthScale;
end
if ~isfield(gpcf.p,'lengthScale')
gpcf.p.lengthScale = gpcf.metric.p.lengthScale;
end
gpcf = rmfield(gpcf, 'metric');
end
end
% selectedVariables options implemented using metric_euclidean
if ~ismember('selectedVariables',ip.UsingDefaults)
if ~isfield(gpcf,'metric')
if ~isempty(ip.Results.selectedVariables)
gpcf.selectedVariables = ip.Results.selectedVariables;
% gpcf.metric=metric_euclidean('components',...
% num2cell(ip.Results.selectedVariables),...
% 'lengthScale',gpcf.lengthScale,...
% 'lengthScale_prior',gpcf.p.lengthScale);
% gpcf = rmfield(gpcf, 'lengthScale');
% gpcf.p = rmfield(gpcf.p, 'lengthScale');
end
elseif isfield(gpcf,'metric')
if ~isempty(ip.Results.selectedVariables)
gpcf.metric=metric_euclidean(gpcf.metric,...
'components',...
num2cell(ip.Results.selectedVariables));
if ~ismember('lengthScale',ip.UsingDefaults)
gpcf.metric.lengthScale=ip.Results.lengthScale;
gpcf = rmfield(gpcf, 'lengthScale');
end
if ~ismember('lengthScale_prior',ip.UsingDefaults)
gpcf.metric.p.lengthScale=ip.Results.lengthScale_prior;
gpcf.p = rmfield(gpcf.p, 'lengthScale');
end
else
if ~isfield(gpcf,'lengthScale')
gpcf.lengthScale = gpcf.metric.lengthScale;
end
if ~isfield(gpcf.p,'lengthScale')
gpcf.p.lengthScale = gpcf.metric.p.lengthScale;
end
gpcf = rmfield(gpcf, 'metric');
end
end
end
if init
% Set the function handles to the subfunctions
gpcf.fh.pak = @gpcf_rq_pak;
gpcf.fh.unpak = @gpcf_rq_unpak;
gpcf.fh.lp = @gpcf_rq_lp;
gpcf.fh.lpg = @gpcf_rq_lpg;
gpcf.fh.cfg = @gpcf_rq_cfg;
gpcf.fh.ginput = @gpcf_rq_ginput;
gpcf.fh.cov = @gpcf_rq_cov;
gpcf.fh.trcov = @gpcf_rq_trcov;
gpcf.fh.trvar = @gpcf_rq_trvar;
gpcf.fh.recappend = @gpcf_rq_recappend;
end
end
function [w, s] = gpcf_rq_pak(gpcf)
%GPCF_RQ_PAK Combine GP covariance function parameters into
% one vector
%
% Description
% W = GPCF_RQ_PAK(GPCF) takes a covariance function structure
% GPCF and combines the covariance function parameters and
% their hyperparameters into a single row vector W. This is a
% mandatory subfunction used for example in energy and gradient
% computations.
%
% w = [ log(gpcf.magnSigma2)
% (hyperparameters of gpcf.magnSigma2)
% log(gpcf.lengthScale(:))
% (hyperparameters of gpcf.lengthScale)
% log(log(gpcf.alpha))
% (hyperparameters of gpcf.alpha)]'
%
% See also
% GPCF_RQ_UNPAK
w = []; s = {};
if ~isempty(gpcf.p.magnSigma2)
w = [w log(gpcf.magnSigma2)];
s = [s; 'log(rq.magnSigma2)'];
% Hyperparameters of magnSigma2
[wh sh] = gpcf.p.magnSigma2.fh.pak(gpcf.p.magnSigma2);
w = [w wh];
s = [s; sh];
end
if isfield(gpcf,'metric')
[wm sm] = gpcf.metric.fh.pak(gpcf.metric);
w = [w wm];
s = [s; sm];
else
if ~isempty(gpcf.p.lengthScale)
w = [w log(gpcf.lengthScale)];
if numel(gpcf.lengthScale)>1
s = [s; sprintf('log(rq.lengthScale x %d)',numel(gpcf.lengthScale))];
else
s = [s; 'log(rq.lengthScale)'];
end
% Hyperparameters of lengthScale
[wh sh] = gpcf.p.lengthScale.fh.pak(gpcf.p.lengthScale);
w = [w wh];
s = [s; sh];
end
end
if ~isempty(gpcf.p.alpha)
w= [w log(log(gpcf.alpha))];
s = [s; 'log(log(rq.alpha))'];
% Hyperparameters of alpha
[wh sh] = gpcf.p.alpha.fh.pak(gpcf.p.alpha);
w = [w wh];
s = [s; sh];
end
end
function [gpcf, w] = gpcf_rq_unpak(gpcf, w)
%GPCF_RQ_UNPAK Sets the covariance function parameters into
% the structure
%
% Description
% [GPCF, W] = GPCF_RQ_UNPAK(GPCF, W) takes a covariance
% function structure GPCF and a hyper-parameter vector W, and
% returns a covariance function structure identical to the
% input, except that the covariance hyper-parameters have been
% set to the values in W. Deletes the values set to GPCF from
% W and returns the modified W. This is a mandatory subfunction
% used for example in energy and gradient computations.
%
% Assignment is inverse of
% w = [ log(gpcf.magnSigma2)
% (hyperparameters of gpcf.magnSigma2)
% log(gpcf.lengthScale(:))
% (hyperparameters of gpcf.lengthScale)
% log(log(gpcf.alpha))
% (hyperparameters of gpcf.alpha)]'
%
% See also
% GPCF_RQ_PAK
gpp=gpcf.p;
if ~isempty(gpp.magnSigma2)
gpcf.magnSigma2 = exp(w(1));
w = w(2:end);
% Hyperparameters of magnSigma2
[p, w] = gpcf.p.magnSigma2.fh.unpak(gpcf.p.magnSigma2, w);
gpcf.p.magnSigma2 = p;
end
if isfield(gpcf,'metric')
[metric, w] = gpcf.metric.fh.unpak(gpcf.metric, w);
gpcf.metric = metric;
else
if ~isempty(gpp.lengthScale)
i1=1;
i2=length(gpcf.lengthScale);
gpcf.lengthScale = exp(w(i1:i2));
w = w(i2+1:end);
% Hyperparameters of lengthScale
[p, w] = gpcf.p.lengthScale.fh.unpak(gpcf.p.lengthScale, w);
gpcf.p.lengthScale = p;
end
end
if ~isempty(gpp.alpha)
gpcf.alpha = exp(exp(w(1)));
w = w(2:end);
% Hyperparameters of alpha
[p, w] = gpcf.p.alpha.fh.unpak(gpcf.p.alpha, w);
gpcf.p.alpha = p;
end
end
function lp =gpcf_rq_lp(gpcf, x, t)
%GPCF_RQ_LP Evaluate the log prior of covariance function parameters
%
% Description
% LP = GPCF_RQ_LP(GPCF, X, T) takes a covariance function
% structure GPCF and returns log(p(th)), where th collects the
% parameters. This is a mandatory subfunction used for example
% in energy computations.
%
% See also
% GPCF_RQ_PAK, GPCF_RQ_UNPAK, GPCF_RQ_LPG, GP_E
% Evaluate the prior contribution to the error. The sampled
% parameters are transformed, e.g., W = log(w), where w collects
% the "real" parameters. Since the energies are evaluated in the
% W-space, we also need to take into account the Jacobian of the
% transformation W -> w = exp(W). See Gelman et al. (2004),
% Bayesian Data Analysis, second edition, p. 24.
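% A worked instance of the Jacobian correction used below: for
% th = log(w), p(th) = p(w)*|dw/dth| = p(w)*w, so
%   log p(th) = log p(w) + log(w),
% which is the "+log(gpcf.magnSigma2)" term added to the prior log
% density. For alpha the transformation is th = log(log(alpha)),
% giving |dalpha/dth| = alpha*log(alpha) and hence the
% "+log(gpcf.alpha) +log(log(gpcf.alpha))" terms.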
lp = 0;
gpp=gpcf.p;
if ~isempty(gpcf.p.magnSigma2)
lp = lp +gpp.magnSigma2.fh.lp(gpcf.magnSigma2, ...
gpp.magnSigma2) +log(gpcf.magnSigma2);
end
if isfield(gpcf,'metric')
lp = lp +gpcf.metric.fh.lp(gpcf.metric);
elseif ~isempty(gpp.lengthScale)
lp = lp +gpp.lengthScale.fh.lp(gpcf.lengthScale, ...
gpp.lengthScale) +sum(log(gpcf.lengthScale));
end
if ~isempty(gpcf.p.alpha)
lp = lp +gpp.alpha.fh.lp(gpcf.alpha, gpp.alpha) ...
+log(gpcf.alpha) +log(log(gpcf.alpha));
end
end
function lpg = gpcf_rq_lpg(gpcf)
%GPCF_RQ_LPG Evaluate gradient of the log prior with respect
% to the parameters
%
% Description
% LPG = GPCF_RQ_LPG(GPCF) takes a covariance function
% structure GPCF and returns LPG = d log (p(th))/dth, where th
% is the vector of parameters. This is a mandatory subfunction
% used for example in energy and gradient computations.
%
% See also
% GPCF_RQ_PAK, GPCF_RQ_UNPAK, GPCF_RQ_LP, GP_G
lpg = [];
gpp=gpcf.p;
if ~isempty(gpcf.p.magnSigma2)
lpgs = gpp.magnSigma2.fh.lpg(gpcf.magnSigma2, gpp.magnSigma2);
lpg = [lpg lpgs(1).*gpcf.magnSigma2+1 lpgs(2:end)];
end
if isfield(gpcf,'metric')
lpg_dist = gpcf.metric.fh.lpg(gpcf.metric);
lpg=[lpg lpg_dist];
else
if ~isempty(gpcf.p.lengthScale)
lll = length(gpcf.lengthScale);
lpgs = gpp.lengthScale.fh.lpg(gpcf.lengthScale, gpp.lengthScale);
lpg = [lpg lpgs(1:lll).*gpcf.lengthScale+1 lpgs(lll+1:end)];
end
end
if ~isempty(gpcf.p.alpha)
lpgs = gpp.alpha.fh.lpg(gpcf.alpha, gpp.alpha);
lpg = [lpg lpgs(1).*gpcf.alpha.*log(gpcf.alpha)+log(gpcf.alpha)+1 lpgs(2:end)];
end
end
function DKff = gpcf_rq_cfg(gpcf, x, x2, mask, i1)
%GPCF_RQ_CFG Evaluate gradient of covariance function
% with respect to the parameters
%
% Description
% DKff = GPCF_RQ_CFG(GPCF, X) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X) with
% respect to th (cell array with matrix elements). This is a
% mandatory subfunction used in gradient computations.
%
% DKff = GPCF_RQ_CFG(GPCF, X, X2) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X2) with
% respect to th (cell array with matrix elements). This subfunction
% is needed when using sparse approximations (e.g. FIC).
%
% DKff = GPCF_RQ_CFG(GPCF, X, [], MASK) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the diagonal of gradients of covariance matrix
% Kff = k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_RQ_CFG(GPCF, X, X2, [], i) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X2), or
% k(X,X) if X2 is empty, with respect to ith hyperparameter. This
% subfunction is needed when using memory save option in gp_set.
%
% See also
% GPCF_RQ_PAK, GPCF_RQ_UNPAK, GPCF_RQ_LP, GP_G
gpp=gpcf.p;
a=(gpcf.alpha+1)/gpcf.alpha;
i2=1;
DKff = {};
if nargin==5
% Use memory save option
savememory=1;
if i1==0
% Return number of hyperparameters
i=0;
if ~isempty(gpcf.p.magnSigma2)
i=i+1;
end
if ~isempty(gpcf.p.lengthScale)
i=i+length(gpcf.lengthScale);
end
if ~isempty(gpcf.p.alpha)
i=i+1;
end
DKff=i;
return
end
else
savememory=0;
end
% Evaluate: DKff{1}    = d Kff / d magnSigma2
%           DKff{2...} = d Kff / d lengthScale and d Kff / d alpha
%                        (the exact order depends on the branch below)
% NOTE! Here we have already taken into account that the parameters
% are transformed through log() and thus dK/dlog(p) = p * dK/dp
% (or through log(log()) for alpha)
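% A concrete instance of the log-transform rule: the RQ covariance
% is linear in magnSigma2, K = magnSigma2*r(x,x'), so
%   dK/dlog(magnSigma2) = magnSigma2 * dK/dmagnSigma2 = K,
% which is why DKff{1} below is simply the covariance matrix Cdm.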
% evaluate the gradient for training covariance
if nargin == 2 || (isempty(x2) && isempty(mask))
Cdm = gpcf_rq_trcov(gpcf, x);
ii1=0;
if ~isempty(gpcf.p.magnSigma2)
ii1 = ii1 +1;
DKff{ii1} = Cdm;
end
ma2=gpcf.magnSigma2;
if isfield(gpcf,'metric')
dist = gpcf.metric.fh.dist(gpcf.metric, x);
distg = gpcf.metric.fh.distg(gpcf.metric, x);
gprior_dist = gpcf.metric.fh.lpg(gpcf.metric);
% dalpha
ii1=ii1+1;
DKff{ii1} = (ma2.^(1-a).*.5.*dist.^2.*Cdm.^a - gpcf.alpha.*log(Cdm.^(-1/gpcf.alpha)./ma2.^(-1/gpcf.alpha)).*Cdm).*log(gpcf.alpha);
% dlengthscale
for i=1:length(distg)
ii1=ii1+1;
DKff{ii1} = Cdm.*-dist./(1+dist.^2./(2*gpcf.alpha)).*distg{i};
end
else
if isfield(gpcf, 'selectedVariables')
x = x(:,gpcf.selectedVariables);
end
[n, m] =size(x);
if ~savememory
i1=1:m;
i2=1:m;
else
if i1==1
DKff=DKff{1};
return
end
i1=i1-1;
if i1 > length(gpcf.lengthScale)
i2=1:m;
else
i2=i1;
end
ii1=ii1-1;
end
% loop over all the lengthScales
if length(gpcf.lengthScale) == 1
% Isotropic = no ARD
s = 1./(gpcf.lengthScale^2);
dist2 = 0;
for i=1:m
dist2 = dist2 + (bsxfun(@minus,x(:,i),x(:,i)')).^2;
end
if ~isempty(gpcf.p.lengthScale) && (~savememory || i1==1)
% dlengthscale
ii1 = ii1+1;
DKff{ii1} = Cdm.^a.*s.*dist2.*gpcf.magnSigma2^(-a+1);
end
if ~isempty(gpcf.p.alpha) && (~savememory || length(DKff) == 1)
% dalpha
ii1=ii1+1;
DKff{ii1} = (ma2^(1-a).*.5.*dist2.*s.*Cdm.^a - gpcf.alpha.*log(Cdm.^(-1/gpcf.alpha)./ma2^(-1/gpcf.alpha)).*Cdm).*log(gpcf.alpha);
end
else
% ARD
s = 1./(gpcf.lengthScale.^2);
D=zeros(size(Cdm));
for i=i2
dist2 =(bsxfun(@minus,x(:,i),x(:,i)')).^2;
% sum distance for the dalpha
D=D+dist2.*s(i);
% dlengthscale
if ~isempty(gpcf.p.lengthScale) && all(i1 <= m)
ii1 = ii1+1;
DKff{ii1}=Cdm.^a.*s(i).*dist2.*gpcf.magnSigma2.^(-a+1);
end
end
if ~isempty(gpcf.p.alpha) && (~savememory || isvector(i2))
% dalpha
ii1=ii1+1;
DKff{ii1} = (ma2^(1-a).*.5.*D.*Cdm.^a - gpcf.alpha.*log(Cdm.^(-1/gpcf.alpha)./ma2^(-1/gpcf.alpha)).*Cdm).*log(gpcf.alpha);
end
end
end
% Evaluate the gradient of non-symmetric covariance (e.g. K_fu)
elseif nargin == 3 || isempty(mask)
if size(x,2) ~= size(x2,2)
error('gpcf_rq -> _ghyper: The number of columns in x and x2 has to be the same.')
end
ii1=0;
K = gpcf.fh.cov(gpcf, x, x2);
if ~isempty(gpcf.p.magnSigma2)
ii1=ii1+1;
DKff{ii1} = K;
end
if isfield(gpcf,'metric')
dist = gpcf.metric.fh.dist(gpcf.metric, x, x2);
distg = gpcf.metric.fh.distg(gpcf.metric, x, x2);
gprior_dist = gpcf.metric.fh.lpg(gpcf.metric);
for i=1:length(distg)
ii1 = ii1+1;
DKff{ii1} = -K.*distg{i};
end
else
if isfield(gpcf, 'selectedVariables')
x = x(:,gpcf.selectedVariables);
x2 = x2(:,gpcf.selectedVariables);
end
[n, m] =size(x);
if ~savememory
i1=1:m;
i2=1:m;
else
if i1==1
DKff=DKff{1};
return
end
i1=i1-1;
if i1 > length(gpcf.lengthScale)
i2=1:m;
else
i2=i1;
end
ii1=ii1-1;
end
% Evaluate help matrix for calculations of derivatives with respect to the lengthScale
if length(gpcf.lengthScale) == 1
% In the case of an isotropic RQ
s = 1/gpcf.lengthScale^2;
dist = 0;
for i=1:m
dist = dist + (bsxfun(@minus,x(:,i),x2(:,i)')).^2;
end
DK_l = s.*K.^a.*dist.*gpcf.magnSigma2^(1-a);
ii1=ii1+1;
DKff{ii1} = DK_l;
if ~isempty(gpcf.p.alpha) && (~savememory || length(DKff) == 1)
% dalpha
ii1=ii1+(1-savememory);
DKff{ii1} = (gpcf.magnSigma2^(1-a).*.5.*dist.*s.*K.^a - gpcf.alpha.*log(K.^(-1/gpcf.alpha)./gpcf.magnSigma2^(-1/gpcf.alpha)).*K).*log(gpcf.alpha);
end
else
% In the case ARD is used
s = 1./gpcf.lengthScale.^2; % inverse squared length scales (ARD)
D=zeros(size(K));
for i=i2
dist2 =(bsxfun(@minus,x(:,i),x2(:,i)')).^2;
% sum distance for the dalpha
D=D+dist2.*s(i);
if ~isempty(gpcf.p.lengthScale) && all(i1 <= m)
D1 = s(i).*K.^a.*dist2.*gpcf.magnSigma2^(1-a);
ii1=ii1+1;
DKff{ii1} = D1;
end
end
if ~isempty(gpcf.p.alpha) && (~savememory || isvector(i2))
% dalpha
ii1=ii1+1;
DKff{ii1} = (gpcf.magnSigma2^(1-a).*.5.*D.*K.^a - gpcf.alpha.*log(K.^(-1/gpcf.alpha)./gpcf.magnSigma2^(-1/gpcf.alpha)).*K).*log(gpcf.alpha);
end
end
end
% Evaluate: DKff{1} = d mask(Kff,I) / d magnSigma2
% DKff{2...} = d mask(Kff,I) / d lengthScale
elseif nargin == 4 || nargin == 5
if isfield(gpcf,'metric')
ii1=1;
[n, m] =size(x);
DKff{ii1} = gpcf.fh.trvar(gpcf, x); % d mask(Kff,I) / d magnSigma2
dist = 0;
distg = gpcf.metric.fh.distg(gpcf.metric, x, [], 1);
gprior_dist = gpcf.metric.fh.lpg(gpcf.metric);
for i=1:length(distg)
ii1 = ii1+1;
DKff{ii1} = 0;
end
else
ii1=0;
if ~isempty(gpcf.p.magnSigma2) && (~savememory || all(i1==1))
ii1=ii1+1;
DKff{ii1} = gpcf.fh.trvar(gpcf, x); % d mask(Kff,I) / d magnSigma2
end
for i2=1:length(gpcf.lengthScale)
ii1 = ii1+1;
DKff{ii1} = 0; % d mask(Kff,I) / d lengthScale
end
end
end
if savememory
DKff=DKff{1};
end
end
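% A finite-difference sanity check sketch for the gradients above
% (illustrative only; x is any valid input matrix, e a small step):
%   DKff = gpcf_rq_cfg(gpcf, x);
%   e = 1e-6;
%   gpcf2 = gpcf; gpcf2.magnSigma2 = exp(log(gpcf.magnSigma2) + e);
%   DKnum = (gpcf_rq_trcov(gpcf2, x) - gpcf_rq_trcov(gpcf, x))./e;
%   % max(abs(DKnum(:) - DKff{1}(:))) should vanish as e -> 0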
function DKff = gpcf_rq_ginput(gpcf, x, x2, i1)
%GPCF_RQ_GINPUT Evaluate gradient of covariance function with
% respect to x
%
% Description
% DKff = GPCF_RQ_GINPUT(GPCF, X) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X) with
% respect to X (cell array with matrix elements). This
% subfunction is needed when computing gradients with respect
% to inducing inputs in sparse approximations.
%
% DKff = GPCF_RQ_GINPUT(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors
% and returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_RQ_GINPUT(GPCF, X, X2, i) takes a covariance
% function structure GPCF, a matrix X of input vectors
% and returns DKff, the gradients of covariance matrix Kff =
% k(X,X2), or k(X,X) if X2 is empty, with respect to ith
% covariate in X (cell array with matrix elements). This
% subfunction is needed when using memory save option in
% gp_set.
%
% See also
% GPCF_RQ_PAK, GPCF_RQ_UNPAK, GPCF_RQ_LP, GP_G
a=(gpcf.alpha+1)/gpcf.alpha;
[n, m] =size(x);
if nargin<4
i1=1:m;
else
% Use memory save option
if i1==0
% Return number of covariates
if isfield(gpcf,'selectedVariables')
DKff=length(gpcf.selectedVariables);
else
DKff=m;
end
return
end
end
if nargin == 2 || isempty(x2)
K = gpcf.fh.trcov(gpcf, x);
ii1 = 0;
if isfield(gpcf,'metric')
dist = gpcf.metric.fh.dist(gpcf.metric, x);
[gdist, gprior_dist] = gpcf.metric.fh.ginput(gpcf.metric, x);
for i=1:length(gdist)
ii1 = ii1+1;
DKff{ii1} = -K.*gdist{ii1};
gprior(ii1) = gprior_dist(ii1);
end
else
if length(gpcf.lengthScale) == 1
% In the case of an isotropic RQ
s = repmat(1./gpcf.lengthScale.^2, 1, m);
else
s = 1./gpcf.lengthScale.^2;
end
for i=i1
for j = 1:n
DK = zeros(size(K));
DK(j,:) = -s(i).*bsxfun(@minus,x(j,i),x(:,i)');
DK = DK + DK';
DK = DK.*K.^a.*gpcf.magnSigma2^(1-a);
ii1 = ii1 + 1;
DKff{ii1} = DK;
gprior(ii1) = 0;
end
end
end
elseif nargin == 3 || nargin == 4
[n2, m2] =size(x2);
K = gpcf.fh.cov(gpcf, x, x2);
ii1 = 0;
if isfield(gpcf,'metric')
dist = gpcf.metric.fh.dist(gpcf.metric, x, x2);
[gdist, gprior_dist] = gpcf.metric.fh.ginput(gpcf.metric, x, x2);
for i=1:length(gdist)
ii1 = ii1+1;
DKff{ii1} = -K.*gdist{ii1};
gprior(ii1) = gprior_dist(ii1);
end
else
if length(gpcf.lengthScale) == 1
% In the case of an isotropic RQ
s = repmat(1./gpcf.lengthScale.^2, 1, m);
else
s = 1./gpcf.lengthScale.^2;
end
ii1 = 0;
for i=i1
for j = 1:n
DK= zeros(size(K));
DK(j,:) = -s(i).*bsxfun(@minus,x(j,i),x2(:,i)');
DK = DK.*K.^a.*gpcf.magnSigma2^(1-a);
ii1 = ii1 + 1;
DKff{ii1} = DK;
gprior(ii1) = 0;
end
end
end
end
end
function C = gpcf_rq_cov(gpcf, x1, x2)
% GP_RQ_COV Evaluate covariance matrix between two input vectors
%
% Description
% C = GP_RQ_COV(GP, TX, X) takes in covariance function of a
% Gaussian process GP and two matrixes TX and X that contain
% input vectors to GP. Returns covariance matrix C. Every
% element ij of C contains covariance between inputs i in TX
% and j in X. This is a mandatory subfunction used for example in
% prediction and energy computations.
%
% See also
% GPCF_RQ_TRCOV, GPCF_RQ_TRVAR, GP_COV, GP_TRCOV
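% For reference, the rational quadratic covariance evaluated below is
%   k(x,x') = magnSigma2 * (1 + sum_j (x_j - x'_j)^2
%                               ./ (2*alpha*lengthScale_j^2))^(-alpha),
% with a single shared lengthScale when ARD is not used.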
if isempty(x2)
x2=x1;
end
if size(x1,2)~=size(x2,2)
error('The number of columns of X1 and X2 has to be the same')
end
if isfield(gpcf,'metric')
dist = gpcf.metric.fh.dist(gpcf.metric, x1, x2).^2;
dist(dist<eps) = 0;
C = gpcf.magnSigma2.*(1+dist./(2*gpcf.alpha)).^(-gpcf.alpha);
else
if isfield(gpcf, 'selectedVariables')
x1 = x1(:,gpcf.selectedVariables);
x2 = x2(:,gpcf.selectedVariables);
end
[n1,m1]=size(x1);
[n2,m2]=size(x2);
C=zeros(n1,n2);
ma2 = gpcf.magnSigma2;
% Evaluate the covariance
if ~isempty(gpcf.lengthScale)
s2 = 1./(2.*gpcf.alpha.*gpcf.lengthScale.^2);
% If ARD is not used make s a vector of
% equal elements
if numel(s2)==1
s2 = repmat(s2,1,m1);
end
dist=zeros(n1,n2);
for j=1:m1
dist = dist + s2(j).*(bsxfun(@minus,x1(:,j),x2(:,j)')).^2;
end
dist(dist<eps) = 0;
C = ma2.*(1+dist).^(-gpcf.alpha);
end
end
end
function C = gpcf_rq_trcov(gpcf, x)
%GP_RQ_TRCOV Evaluate training covariance matrix of inputs
%
% Description
% C = GP_RQ_TRCOV(GP, TX) takes in covariance function of a
% Gaussian process GP and matrix TX that contains training
% input vectors. Returns covariance matrix C. Every element ij
% of C contains covariance between inputs i and j in TX. This
% is a mandatory subfunction used for example in prediction and
% energy computations.
%
% See also
% GPCF_RQ_COV, GPCF_RQ_TRVAR, GP_COV, GP_TRCOV
if isfield(gpcf,'metric')
% If other than scaled euclidean metric
[n, m] =size(x);
ma2 = gpcf.magnSigma2;
C = zeros(n,n);
for ii1=1:n-1
d = zeros(n-ii1,1);
col_ind = ii1+1:n;
d = gpcf.metric.fh.dist(gpcf.metric, x(col_ind,:), x(ii1,:)).^2;
C(col_ind,ii1) = d;
end
C(C<eps) = 0;
C = C+C';
C = ma2.*(1+C./(2*gpcf.alpha)).^(-gpcf.alpha);
else
% If scaled euclidean metric
% Try to use the C-implementation
C=trcov(gpcf, x);
if isnan(C)
% If there wasn't C-implementation do here
if isfield(gpcf, 'selectedVariables')
x = x(:,gpcf.selectedVariables);
end
[n, m] =size(x);
s2 = 1./(2*gpcf.alpha.*gpcf.lengthScale.^2);
if size(s2)==1
s2 = repmat(s2,1,m);
end
ma2 = gpcf.magnSigma2;
C = zeros(n,n);
for ii1=1:n-1
d = zeros(n-ii1,1);
col_ind = ii1+1:n;
for ii2=1:m
d = d+s2(ii2).*(x(col_ind,ii2)-x(ii1,ii2)).^2;
end
C(col_ind,ii1) = d;
end
C(C<eps) = 0;
C = C+C';
C = ma2.*(1+C).^(-gpcf.alpha);
end
end
end
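% Consistency sketch: whichever implementation path is taken, the
% training covariance should match the generic cross covariance at
% identical inputs (illustrative, for any valid x):
%   C1 = gpcf_rq_trcov(gpcf, x);
%   C2 = gpcf_rq_cov(gpcf, x, x);
%   % norm(C1 - C2, 'fro') should be numerically negligible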
function C = gpcf_rq_trvar(gpcf, x)
%GP_RQ_TRVAR Evaluate training variance vector
%
% Description
% C = GP_RQ_TRVAR(GPCF, TX) takes in covariance function of a
% Gaussian process GPCF and matrix TX that contains training
% inputs. Returns variance vector C. Every element i of C
% contains variance of input i in TX. This is a mandatory
% subfunction used for example in prediction and energy computations.
%
% See also
% GPCF_RQ_COV, GP_COV, GP_TRCOV
[n, m] =size(x);
C = ones(n,1).*gpcf.magnSigma2;
C(C<eps)=0;
end
function reccf = gpcf_rq_recappend(reccf, ri, gpcf)
%RECAPPEND Record append
%
% Description
% RECCF = GPCF_RQ_RECAPPEND(RECCF, RI, GPCF) takes a
% covariance function record structure RECCF, record index RI
% and covariance function structure GPCF with the current MCMC
% samples of the parameters. Returns RECCF which contains all
% the old samples and the current samples from GPCF. This
% subfunction is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC and GP_MC -> RECAPPEND
if nargin == 2
% Initialize the record
reccf.type = 'gpcf_rq';
% Initialize parameters
reccf.lengthScale= [];
reccf.magnSigma2 = [];
reccf.gpcf.alpha = [];
% Set the function handles
reccf.fh.pak = @gpcf_rq_pak;
reccf.fh.unpak = @gpcf_rq_unpak;
reccf.fh.e = @gpcf_rq_lp;
reccf.fh.g = @gpcf_rq_g;
reccf.fh.cov = @gpcf_rq_cov;
reccf.fh.trcov = @gpcf_rq_trcov;
reccf.fh.trvar = @gpcf_rq_trvar;
reccf.fh.recappend = @gpcf_rq_recappend;
reccf.p=[];
reccf.p.lengthScale=[];
reccf.p.magnSigma2=[];
if isfield(ri.p,'lengthScale') && ~isempty(ri.p.lengthScale)
reccf.p.lengthScale = ri.p.lengthScale;
end
if ~isempty(ri.p.magnSigma2)
reccf.p.magnSigma2 = ri.p.magnSigma2;
end
if ~isempty(ri.p.alpha)
reccf.p.alpha = ri.p.alpha;
end
else
% Append to the record
gpp = gpcf.p;
if ~isfield(gpcf,'metric')
% record lengthScale
reccf.lengthScale(ri,:)=gpcf.lengthScale;
if isfield(gpp,'lengthScale') && ~isempty(gpp.lengthScale)
reccf.p.lengthScale = gpp.lengthScale.fh.recappend(reccf.p.lengthScale, ri, gpcf.p.lengthScale);
end
end
% record magnSigma2
reccf.magnSigma2(ri,:)=gpcf.magnSigma2;
if isfield(gpp,'magnSigma2') && ~isempty(gpp.magnSigma2)
reccf.p.magnSigma2 = gpp.magnSigma2.fh.recappend(reccf.p.magnSigma2, ri, gpcf.p.magnSigma2);
end
reccf.alpha(ri,:)=gpcf.alpha;
if isfield(gpp,'alpha') && ~isempty(ri.p.alpha)
reccf.p.alpha = gpp.alpha.fh.recappend(reccf.p.alpha, ri, gpcf.p.alpha);
end
end
end
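% Record usage sketch (illustrative; normally driven by gp_mc, which
% reaches this subfunction through the gpcf.fh.recappend handle):
%   reccf = gpcf.fh.recappend([], gpcf);        % initialize record
%   reccf = gpcf.fh.recappend(reccf, 1, gpcf);  % append sample 1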
|
github
|
lcnhappe/happe-master
|
lgcp.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/lgcp.m
| 7,612 |
utf_8
|
85edf686f047863fe47af6dbc611d045
|
function [l,lq,xt,gp] = lgcp(x,varargin)
% LGCP - Log Gaussian Cox Process intensity estimate for 1D and 2D data
%
% LGCP(X)
% [P,PQ,XT,GP] = LGCP(X,XT,OPTIONS)
%
% X is 1D or 2D point data
% XT is optional test points
% OPTIONS are optional parameter-value pairs
% 'gridn' is optional number of grid points used in each axis direction
% default is 100 for 1D, 15 for grid 2D
% 'range' tells the estimation range, default is data range
% for 1D [XMIN XMAX]
% for 2D [XMIN XMAX YMIN YMAX]
% 'gpcf' is optional function handle of a GPstuff covariance function
% (default is @gpcf_sexp)
% 'latent_method' is optional 'EP' (default) or 'Laplace'
% 'int_method' is optional 'mode' (default), 'CCD' or 'grid'
%
% P is the estimated intensity
% PQ contains the 5% and 95% percentiles of the intensity estimate
% XT contains the test points used
% GP is the Gaussian process formed. As the grid is scaled to
%    unit range or unit square, an additional field 'scale' is
%    included, which stores the range of the grid in the
%    original x space.
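%
% A minimal usage sketch (illustrative; assumes GPstuff is on the path):
%   x = randn(500,1);                  % 1D point data
%   lgcp(x);                           % no outputs -> plot with 90% band
%   [l,lq,xt] = lgcp(x,'gridn',50,'latent_method','Laplace');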
% Copyright (c) 2009-2012 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'LGCP';
ip.addRequired('x', @(x) isnumeric(x) && (size(x,2)==1 || size(x,2)==2));
ip.addOptional('xt',NaN, @(x) isnumeric(x) && (size(x,2)==1 || size(x,2)==2));
ip.addParamValue('gridn',[], @(x) isscalar(x) && x>0 && mod(x,1)==0);
ip.addParamValue('range',[], @(x) isreal(x)&&(length(x)==2||length(x)==4));
ip.addParamValue('gpcf',@gpcf_sexp,@(x) ischar(x) || isa(x,'function_handle'));
ip.addParamValue('latent_method','EP', @(x) ismember(x,{'EP' 'Laplace'}))
ip.addParamValue('int_method','mode', @(x) ismember(x,{'mode' 'CCD' 'grid'}))
ip.addParamValue('normalize',false, @islogical);
ip.parse(x,varargin{:});
x=ip.Results.x;
xt=ip.Results.xt;
gridn=ip.Results.gridn;
xrange=ip.Results.range;
gpcf=ip.Results.gpcf;
latent_method=ip.Results.latent_method;
int_method=ip.Results.int_method;
normalize=ip.Results.normalize;
[n,m]=size(x);
switch m
case 1 % 1D
% Parameters for a grid
if isempty(gridn)
% number of points
gridn=100;
end
xmin=min(x);xmax=max(x);
if ~isempty(xrange)
xmin=min(xmin,xrange(1));
xmax=max(xmax,xrange(2));
end
% Discretize the data
xx=linspace(xmin,xmax,gridn)';
yy=hist(x,xx)';
ye=ones(gridn,1)./gridn.*n;
% Test points
if isnan(xt)
xt=linspace(xmin,xmax,max(gridn,200))';
end
% normalise to unit range, so that same prior is ok for different scales
xxn=(xx-min(xx))./range(xx)-0.5;
xtn=(xt-min(xx))./range(xx)-0.5;
% smooth...
[Ef,Varf,gp]=gpsmooth(xxn,yy,ye,xtn,gpcf,latent_method,int_method);
gp.scale=range(xx);
% compute mean and quantiles
A=range(xx);
lm=exp(Ef+Varf/2)./A.*n;
lq5=exp(Ef-sqrt(Varf)*1.645)./A*n;
lq95=exp(Ef+sqrt(Varf)*1.645)./A*n;
lq=[lq5 lq95];
if nargout<1
% no output, do the plot thing
newplot
hp=patch([xt; xt(end:-1:1)],[lq(:,1); lq(end:-1:1,2)],[.9 .9 .9]);
set(hp,'edgecolor',[.9 .9 .9])
xlim([xmin xmax])
line(xt,lm,'linewidth',2);
else
l=lm;
end
case 2 % 2D
% Find unique points
[xu,I,J]=unique(x,'rows');
% and count number of repeated x's
counts=crosstab(J);
nu=length(xu);
% Parameters for a grid
if isempty(gridn)
% number of points in direction
gridn=15;
end
x1min=min(x(:,1));x1max=max(x(:,1));
x2min=min(x(:,2));x2max=max(x(:,2));
if ~isempty(xrange)
% range extension
x1min=min(x1min,xrange(1));
x1max=max(x1max,xrange(2));
x2min=min(x2min,xrange(3));
x2max=max(x2max,xrange(4));
end
% Form regular grid to discretize the data
zz1=linspace(x1min,x1max,gridn)';
zz2=linspace(x2min,x2max,gridn)';
[z1,z2]=meshgrid(zz1,zz2);
z=[z1(:),z2(:)];
nz=size(z,1);
% form data for GP (xx,yy,ye)
xx=z;
yy=zeros(nz,1);
zi=interp2(z1,z2,reshape(1:nz,gridn,gridn),xu(:,1),xu(:,2),'nearest');
for i1=1:nu
yy(zi(i1),1)=yy(zi(i1),1)+counts(i1);
end
ye=ones(nz,1)./nz.*n;
% Default test points
if isnan(xt)
[xt1,xt2]=meshgrid(linspace(x1min,x1max,max(100,gridn)),...
linspace(x2min,x2max,max(100,gridn)));
xt=[xt1(:) xt2(:)];
end
% normalise to unit square, so that same prior is ok for different scales
xxn=bsxfun(@rdivide,bsxfun(@minus,xx,min(xx,[],1)),range(xx,1))-.5;
xtn=bsxfun(@rdivide,bsxfun(@minus,xt,min(xx,[],1)),range(xx,1))-.5;
% smooth...
[Ef,Varf,gp]=gpsmooth(xxn,yy,ye,xtn,gpcf,latent_method,int_method);
gp.scale=[range(xx(:,1)) range(xx(:,2))];
% compute mean
A = range(xx(:,1)).*range(xx(:,2));
lm=exp(Ef+Varf/2)./A.*n;
lq5=exp(Ef-sqrt(Varf)*1.645)./A.*n;
lq95=exp(Ef+sqrt(Varf)*1.645)./A.*n;
lq=[lq5 lq95];
if nargout<1
% no output, do the plot thing
G=zeros(size(xt1));
G(:)=lm;
pcolor(xt1,xt2,G);
shading flat
colormap('jet')
cx=caxis;
cx(1)=0;
caxis(cx);
colorbar
else
l=lm;
end
otherwise
error('X has to be Nx1 or Nx2')
end
end
function [Ef,Varf,gp] = gpsmooth(xx,yy,ye,xt,gpcf,latent_method,int_method)
% Make inference with log Gaussian process and EP or Laplace approximation
nin = size(xx,2);
% init gp
if ~isempty(strfind(func2str(gpcf),'ppcs'))
% ppcs still have nin parameter...
gpcf1 = gpcf('nin',nin);
else
gpcf1 = gpcf();
end
% default vague prior
pm = prior_sqrtt('s2', 1^2, 'nu', 4);
pl = prior_t('s2', 2^2, 'nu', 4);
%pm = prior_logunif();
%pl = prior_logunif();
pa = prior_t('s2', 10^2, 'nu', 4);
% different covariance functions have different parameters
if isfield(gpcf1,'magnSigma2')
gpcf1 = gpcf(gpcf1, 'magnSigma2', .1, 'magnSigma2_prior', pm);
end
if isfield(gpcf1,'lengthScale')
gpcf1 = gpcf(gpcf1, 'lengthScale', .1, 'lengthScale_prior', pl);
end
if isfield(gpcf1,'alpha')
gpcf1 = gpcf(gpcf1, 'alpha', 20, 'alpha_prior', pa);
end
if isfield(gpcf1,'weightSigma2')
gpcf1 = gpcf(gpcf1, 'weightSigma2_prior', prior_logunif(), 'biasSigma2_prior', prior_logunif());
end
% Create the GP structure
gp = gp_set('lik', lik_poisson(), 'cf', {gpcf1}, 'jitterSigma2', 1e-4);
% Set the approximate inference method
gp = gp_set(gp, 'latent_method', latent_method);
% Optimize hyperparameters
opt=optimset('TolX', 1e-3, 'Display', 'off');
if exist('fminunc','file')
gp = gp_optim(gp, xx, yy, 'z', ye, 'optimf', @fminunc, 'opt', opt);
else
gp = gp_optim(gp, xx, yy, 'z', ye, 'optimf', @fminlbfgs, 'opt', opt);
end
% Make prediction for the test points
if strcmpi(int_method,'mode')
% point estimate for the hyperparameters
[Ef,Varf] = gp_pred(gp, xx, yy, xt, 'z', ye);
else
% integrate over the hyperparameters
%[~, ~, ~, Ef, Varf] = gp_ia(opt, gp, xx, yy, xt, param);
[notused, notused, notused, Ef, Varf]=...
gp_ia(gp, xx, yy, xt, 'z', ye, 'int_method', int_method);
end
end
|
github
|
lcnhappe/happe-master
|
gpep_e.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpep_e.m
| 106,791 |
UNKNOWN
|
adc9d7d1debecd4cbbbb4f948a6cb72b
|
function [e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta] = gpep_e(w, gp, varargin)
%GPEP_E Do Expectation propagation and return marginal log posterior estimate
%
% Description
% E = GPEP_E(W, GP, X, Y, OPTIONS) takes a GP structure GP
% together with a matrix X of input vectors and a matrix Y of
% target vectors, and finds the EP approximation for the
% conditional posterior p(Y | X, th), where th is the
% parameters. Returns the energy at th (see below). Each row of
% X corresponds to one input vector and each row of Y
% corresponds to one target vector.
%
% [E, EDATA, EPRIOR] = GPEP_E(W, GP, X, Y, OPTIONS) returns also
% the data and prior components of the total energy.
%
% The energy is minus log posterior cost function for th:
% E = EDATA + EPRIOR
% = - log p(Y|X, th) - log p(th),
% where th represents the parameters (lengthScale,
% magnSigma2...), X is inputs and Y is observations.
%
% OPTIONS is optional parameter-value pair
% z - optional observed quantity in triplet (x_i,y_i,z_i)
% Some likelihoods may use this. For example, in case of
% Poisson likelihood we have z_i=E_i, that is, expected
% value for ith case.
%
% See also
% GP_SET, GP_E, GPEP_G, GPEP_PRED
% Description 2
% Additional properties meant only for internal use.
%
% GP = GPEP_E('init', GP) takes a GP structure GP and
% initializes required fields for the EP algorithm.
%
% GPEP_E('clearcache', GP) takes a GP structure GP and clears
% the internal cache stored in the nested function workspace
%
% [e, edata, eprior, site_tau, site_nu, L, La2, b, muvec_i, sigm2vec_i]
% = GPEP_E(w, gp, x, y, options)
% returns many useful quantities produced by EP algorithm.
%
% Copyright (c) 2007 Jaakko Riihimäki
% Copyright (c) 2007-2010 Jarno Vanhatalo
% Copyright (c) 2010 Heikki Peura
% Copyright (c) 2010-2012 Aki Vehtari
% Copyright (c) 2011 Pasi Jylänki
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
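% A minimal usage sketch (illustrative; gpep_e is normally reached
% through gp_e once EP has been selected as the latent method):
%   gp = gp_set('lik', lik_probit(), 'cf', {gpcf_sexp()}, ...
%               'latent_method', 'EP');
%   w = gp_pak(gp);
%   [e, edata, eprior] = gpep_e(w, gp, x, y);  % y in {-1,+1} for probit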
% parse inputs
ip=inputParser;
ip.FunctionName = 'GPEP_E';
ip.addRequired('w', @(x) ...
isempty(x) || ...
(ischar(x) && ismember(x, {'init' 'clearcache'})) || ...
(isvector(x) && isreal(x) && all(isfinite(x))) || ...
all(isnan(x)));
ip.addRequired('gp',@isstruct);
ip.addOptional('x', [], @(x) isnumeric(x) && isreal(x) && all(isfinite(x(:))))
ip.addOptional('y', [], @(x) isnumeric(x) && isreal(x) && all(isfinite(x(:))))
ip.addParamValue('z', [], @(x) isnumeric(x) && isreal(x) && all(isfinite(x(:))))
ip.parse(w, gp, varargin{:});
x=ip.Results.x;
y=ip.Results.y;
z=ip.Results.z;
if strcmp(w, 'init')
% initialize cache
ch = [];
% return function handle to the nested function ep_algorithm
% this way each gp has its own persistent memory for EP
gp.fh.ne = @ep_algorithm;
% set other function handles
gp.fh.e=@gpep_e;
gp.fh.g=@gpep_g;
gp.fh.pred=@gpep_pred;
gp.fh.jpred=@gpep_jpred;
gp.fh.looe=@gpep_looe;
gp.fh.loog=@gpep_loog;
gp.fh.loopred=@gpep_loopred;
e = gp;
% remove clutter from the nested workspace
clear w gp varargin ip x y z
elseif strcmp(w, 'clearcache')
% clear the cache
gp.fh.ne('clearcache');
else
% call ep_algorithm using the function handle to the nested function
% this way each gp has its own persistent memory for EP
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta] = gp.fh.ne(w, gp, x, y, z);
end
function [e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta] = ep_algorithm(w, gp, x, y, z)
if strcmp(w, 'clearcache')
ch=[];
return
end
switch gp.latent_opt.optim_method
case 'basic-EP'
% check whether saved values can be used
if isempty(z)
datahash=hash_sha512([x y]);
else
datahash=hash_sha512([x y z]);
end
if ~isempty(ch) && all(size(w)==size(ch.w)) && all(abs(w-ch.w)<1e-8) && isequal(datahash,ch.datahash)
% The covariance function parameters or data haven't changed
% so we can return the energy and the site parameters that are saved
e = ch.e;
edata = ch.edata;
eprior = ch.eprior;
tautilde = ch.tautilde;
nutilde = ch.nutilde;
L = ch.L;
La2 = ch.La2;
b = ch.b;
muvec_i = ch.muvec_i;
sigm2vec_i = ch.sigm2vec_i;
logZ_i = ch.logZ_i;
eta = ch.eta;
else
% The parameters or data have changed since
% the last call for gpep_e. In this case we need to
% re-evaluate the EP approximation
gp=gp_unpak(gp, w);
ncf = length(gp.cf);
n = size(x,1);
% EP iteration parameters
iter=1;
maxiter = gp.latent_opt.maxiter;
tol = gp.latent_opt.tol;
df = gp.latent_opt.df;
nutilde = zeros(size(y));
tautilde = zeros(size(y));
muvec_i=zeros(size(y));
sigm2vec_i=zeros(size(y));
logZep_old=0; logZep=Inf;
if ~isfield(gp,'meanf')
mf = zeros(size(y));
else
[H,b_m,B_m]=mean_prep(gp,x,[]);
mf = H'*b_m;
end
logM0 = zeros(n,1);
muhat = zeros(n,1);
sigm2hat = zeros(n,1);
% =================================================
% First Evaluate the data contribution to the error
switch gp.type
% ============================================================
% FULL
% ============================================================
case 'FULL' % A full GP
[K,C] = gp_trcov(gp, x);
if ~issparse(C)
% The EP algorithm for full support covariance function
if ~isfield(gp,'meanf')
Sigm = C;
meanfp=false;
else
Sigm = C + H'*B_m*H;
meanfp=true;
end
% The EP -algorithm
convergence=false;
while iter<=maxiter && ~convergence
logZep_old=logZep;
logM0_old=logM0;
if isequal(gp.latent_opt.init_prev, 'on') && iter==1 && ~isempty(ch) && all(size(w)==size(ch.w)) && all(abs(w-ch.w)<1) && isequal(datahash,ch.datahash)
tautilde=ch.tautilde;
nutilde=ch.nutilde;
else
if isequal(gp.latent_opt.parallel,'on')
% parallel-EP
% compute marginal and cavity parameters
dSigm=diag(Sigm);
tau=1./dSigm-tautilde;
nu = 1./dSigm.*mf-nutilde;
muvec_i=nu./tau;
sigm2vec_i=1./tau;
% compute moments of tilted distributions
[logM0, muhat, sigm2hat] = gp.lik.fh.tiltedMoments(gp.lik, y, 1:n, sigm2vec_i, muvec_i, z);
if any(isnan(logM0))
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
% update site parameters
deltatautilde=1./sigm2hat-tau-tautilde;
tautilde=tautilde+df.*deltatautilde;
deltanutilde=1./sigm2hat.*muhat-nu-nutilde;
nutilde=nutilde+df.*deltanutilde;
else
% sequential-EP
muvec_i = zeros(n,1); sigm2vec_i = zeros(n,1);
for i1=1:n
% Algorithm utilizing Cholesky updates
% This is numerically more stable but slower
% $$$ % approximate cavity parameters
% $$$ S11 = sum(Ls(:,i1).^2);
% $$$ S1 = Ls'*Ls(:,i1);
% $$$ tau_i=S11^-1-tautilde(i1);
% $$$ nu_i=S11^-1*mf(i1)-nutilde(i1);
% $$$
% $$$ mu_i=nu_i/tau_i;
% $$$ sigm2_i=tau_i^-1;
% $$$
% $$$ if sigm2_i < 0
% $$$ [ii i1]
% $$$ end
% $$$
% $$$ % marginal moments
% $$$ [M0(i1), muhat, sigm2hat] = feval(gp.lik.fh.tiltedMoments, gp.lik, y, i1, sigm2_i, mu_i, z);
% $$$
% $$$ % update site parameters
% $$$ deltatautilde = sigm2hat^-1-tau_i-tautilde(i1);
% $$$ tautilde(i1) = tautilde(i1)+deltatautilde;
% $$$ nutilde(i1) = sigm2hat^-1*muhat-nu_i;
% $$$
% $$$ upfact = 1./(deltatautilde^-1+S11);
% $$$ if upfact > 0
% $$$ Ls = cholupdate(Ls, S1.*sqrt(upfact), '-');
% $$$ else
% $$$ Ls = cholupdate(Ls, S1.*sqrt(-upfact));
% $$$ end
% $$$ Sigm = Ls'*Ls;
% $$$ mf=Sigm*nutilde;
% $$$
% $$$ muvec_i(i1,1)=mu_i;
% $$$ sigm2vec_i(i1,1)=sigm2_i;
% Algorithm as in Rasmussen and Williams 2006
% approximate cavity parameters
Sigmi=Sigm(:,i1);
Sigmii=Sigmi(i1);
tau_i=1/Sigmii-tautilde(i1);
nu_i = 1/Sigmii*mf(i1)-nutilde(i1);
mu_i=nu_i/tau_i;
sigm2_i=1/tau_i;
% marginal moments
[logM0(i1), muhat(i1), sigm2hat(i1)] = gp.lik.fh.tiltedMoments(gp.lik, y, i1, sigm2_i, mu_i, z);
if isnan(logM0(i1))
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
% update site parameters
deltatautilde=sigm2hat(i1)^-1-tau_i-tautilde(i1);
tautilde(i1)=tautilde(i1)+df*deltatautilde;
deltanutilde=sigm2hat(i1)^-1*muhat(i1)-nu_i-nutilde(i1);
nutilde(i1)=nutilde(i1)+df*deltanutilde;
% Update mean and variance after each site update (standard EP)
ds = deltatautilde/(1+deltatautilde*Sigmii);
Sigm = Sigm - ((ds*Sigmi)*Sigmi');
%Sigm = Sigm - ((ds*Sigm(:,i1))*Sigm(:,i1)');
% The below is how Rasmussen and Williams
% (2006) do the update. The above version is
% more robust.
%ds = deltatautilde^-1+Sigm(i1,i1);
%ds = (Sigm(:,i1)/ds)*Sigm(:,i1)';
%Sigm = Sigm - ds;
%Sigm=Sigm-(deltatautilde^-1+Sigm(i1,i1))^-1*(Sigm(:,i1)*Sigm(:,i1)');
if ~meanfp
mf=Sigm*nutilde;
else
mf=Sigm*(C\(H'*b_m)+nutilde);
end
muvec_i(i1)=mu_i;
sigm2vec_i(i1)=sigm2_i;
end
end
end
% Recompute the approximate posterior parameters
% parallel- and sequential-EP
Stilde=tautilde;
Stildesqr=sqrt(Stilde);
if ~meanfp % zero mean function used
% NOTE: cf. line 13 of Algorithm 3.5 (p. 58) in Rasmussen and
% Williams (2006), which uses an upper triangular Cholesky factor.
%B=eye(n)+Stildesqr*C*Stildesqr;
B=bsxfun(@times,bsxfun(@times,Stildesqr,C),Stildesqr');
B(1:n+1:end)=B(1:n+1:end)+1;
[L,notpositivedefinite] = chol(B,'lower');
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
%V=(L\Stildesqr)*C;
V=L\bsxfun(@times,Stildesqr,C);
Sigm=C-V'*V;
mf=Sigm*nutilde;
% Compute the marginal likelihood
% Direct formula (3.65):
% Sigmtilde=diag(1./tautilde);
% mutilde=inv(Stilde)*nutilde;
%
% logZep=-0.5*log(det(Sigmtilde+K))-0.5*mutilde'*inv(K+Sigmtilde)*mutilde+
% sum(log(normcdf(y.*muvec_i./sqrt(1+sigm2vec_i))))+
% 0.5*sum(log(sigm2vec_i+1./tautilde))+
% sum((muvec_i-mutilde).^2./(2*(sigm2vec_i+1./tautilde)))
% 4. term & 1. term
term41=0.5*sum(log(1+tautilde.*sigm2vec_i))-sum(log(diag(L)));
% 5. term (1/2 element) & 2. term
T=1./sigm2vec_i;
Cnutilde = C*nutilde;
L2 = V*nutilde;
term52 = nutilde'*Cnutilde - L2'*L2 - (nutilde'./(T+Stilde)')*nutilde;
term52 = term52.*0.5;
% 5. term (2/2 element)
term5=0.5*muvec_i'.*(T./(Stilde+T))'*(Stilde.*muvec_i-2*nutilde);
% 3. term
term3 = sum(logM0);
logZep = -(term41+term52+term5+term3);
iter=iter+1;
else
% mean function used
% help variables
hBh = H'*B_m*H;
C_t = C + hBh;
CHb = C\H'*b_m;
S = diag(Stildesqr.^2);
%B = eye(n)+Stildesqroot*C*Stildesqroot;
B=bsxfun(@times,bsxfun(@times,Stildesqr,C),Stildesqr');
B(1:n+1:end)=B(1:n+1:end)+1;
%B_h = eye(n) + Stildesqroot*C_t*Stildesqroot;
B_h=bsxfun(@times,bsxfun(@times,Stildesqr,C_t),Stildesqr');
B_h(1:n+1:end)=B_h(1:n+1:end)+1;
% L to return, without the hBh term
[L,notpositivedefinite]=chol(B,'lower');
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
% L for the calculation with mean term
[L_m,notpositivedefinite]=chol(B_h,'lower');
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
% Recompute the approximate posterior parameters
% parallel- and sequential-EP
%V=(L_m\Stildesqroot)*C_t;
V=L_m\bsxfun(@times,Stildesqr,C_t);
Sigm=C_t-V'*V;
mf=Sigm*(CHb+nutilde);
T=1./sigm2vec_i;
Cnutilde = (C_t - S^-1)*(S*H'*b_m-nutilde);
L2 = V*(S*H'*b_m-nutilde);
Stildesqroot = diag(Stildesqr);
zz = Stildesqroot*(L'\(L\(Stildesqroot*C)));
% inv(K + S^-1)*S^-1
Ks = eye(size(zz)) - zz;
% 5. term (1/2 element)
term5_1 = 0.5.*((nutilde'*S^-1)./(T.^-1+Stilde.^-1)')*(S^-1*nutilde);
% 2. term
term2 = 0.5.*((S*H'*b_m-nutilde)'*Cnutilde - L2'*L2);
% 4. term
term4 = 0.5*sum(log(1+tautilde.*sigm2vec_i));
% 1. term
term1 = -1.*sum(log(diag(L_m)));
% 3. term
term3 = sum(logM0);
% 5. term (2/2 element)
term5 = 0.5*muvec_i'.*(T./(Stilde+T))'*(Stilde.*muvec_i-2*nutilde);
logZep = -(term4+term1+term5_1+term5+term2+term3);
iter=iter+1;
end
convergence=max(abs(logM0_old-logM0))<tol && abs(logZep_old-logZep)<tol;
end
else
% EP algorithm for compactly supported covariance function
% (C is a sparse matrix)
p = analyze(K);
r(p) = 1:n;
if ~isempty(z)
z = z(p,:);
end
y = y(p);
K = K(p,p);
Inn = sparse(1:n,1:n,1,n,n);
sqrtS = sparse(1:n,1:n,0,n,n);
mf = zeros(size(y));
sigm2 = zeros(size(y));
dSigm=full(diag(K));
gamma = zeros(size(y));
VD = sparse(1:n,1:n,1,n,n);
% The EP -algorithm
convergence=false;
while iter<=maxiter && ~convergence
logZep_old=logZep;
logM0_old=logM0;
if isequal(gp.latent_opt.parallel,'on')
% parallel-EP
% approximate cavity parameters
sqrtSK = ssmult(sqrtS, K);
tttt = ldlsolve(VD,sqrtSK);
sigm2 = full(diag(K) - sum(sqrtSK.*tttt)');
mf = gamma - tttt'*sqrtS*gamma;
tau=1./sigm2-tautilde;
nu = 1./sigm2.*mf-nutilde;
muvec_i=nu./tau;
sigm2vec_i=1./tau;
% compute moments of tilted distributions
for i1=1:n
[logM0(i1), muhat(i1), sigm2hat(i1)] = gp.lik.fh.tiltedMoments(gp.lik, y, i1, sigm2vec_i(i1), muvec_i(i1), z);
end
if any(isnan(logM0))
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
% update site parameters
deltatautilde=1./sigm2hat-tau-tautilde;
tautilde=tautilde+df*deltatautilde;
deltanutilde=1./sigm2hat.*muhat-nu-nutilde;
nutilde=nutilde+df*deltanutilde;
gamma = gamma + sum(bsxfun(@times,K,df.*deltanutilde'),2);
else
% sequential-EP
muvec_i = zeros(n,1); sigm2vec_i = zeros(n,1);
for i1=1:n
% approximate cavity parameters
Ki1 = K(:,i1);
sqrtSKi1 = ssmult(sqrtS, Ki1);
tttt = ldlsolve(VD,sqrtSKi1);
sigm2(i1) = Ki1(i1) - sqrtSKi1'*tttt;
mf(i1) = gamma(i1) - tttt'*sqrtS*gamma;
tau_i=sigm2(i1)^-1-tautilde(i1);
nu_i=sigm2(i1)^-1*mf(i1)-nutilde(i1);
mu_i=nu_i/tau_i;
sigm2_i=tau_i^-1;
% marginal moments
[logM0(i1), muhat(i1), sigm2hat(i1)] = gp.lik.fh.tiltedMoments(gp.lik, y, i1, sigm2_i, mu_i, z);
% update site parameters
tautilde_old = tautilde(i1);
deltatautilde=sigm2hat(i1)^-1-tau_i-tautilde(i1);
tautilde(i1)=tautilde(i1)+df*deltatautilde;
deltanutilde=sigm2hat(i1)^-1*muhat(i1)-nu_i-nutilde(i1);
nutilde(i1)=nutilde(i1)+df*deltanutilde;
gamma = gamma + Ki1.*df*deltanutilde;
% Update the LDL decomposition
sqrtS(i1,i1) = sqrt(tautilde(i1));
sqrtSKi1(i1) = sqrt(tautilde(i1)).*Ki1(i1);
D2_n = sqrtSKi1.*sqrtS(i1,i1) + Inn(:,i1);
if tautilde_old == 0
VD = ldlrowupdate(i1,VD,VD(:,i1),'-');
VD = ldlrowupdate(i1,VD,D2_n,'+');
else
VD = ldlrowmodify(VD, D2_n, i1);
end
muvec_i(i1,1)=mu_i;
sigm2vec_i(i1,1)=sigm2_i;
end
end
% Recompute the approximate posterior parameters
% parallel- and sequential-EP
sqrtS = sparse(1:n,1:n,sqrt(tautilde),n,n);
KsqrtS = ssmult(K,sqrtS);
B = ssmult(sqrtS,KsqrtS) + Inn;
[VD, notpositivedefinite] = ldlchol(B);
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
Knutilde = K*nutilde;
mf = Knutilde - KsqrtS*ldlsolve(VD,sqrtS*Knutilde);
% Compute the marginal likelihood
% 4. term & 1. term
term41=0.5*sum(log(1+tautilde.*sigm2vec_i)) - 0.5.*sum(log(diag(VD)));
% 5. term (1/2 element) & 2. term
T=1./sigm2vec_i;
term52 = nutilde'*mf - (nutilde'./(T+tautilde)')*nutilde;
term52 = term52.*0.5;
% 5. term (2/2 element)
term5=0.5*muvec_i'.*(T./(tautilde+T))'*(tautilde.*muvec_i-2*nutilde);
% 3. term
term3 = sum(logM0);
logZep = -(term41+term52+term5+term3);
iter=iter+1;
convergence=max(abs(logM0_old-logM0))<tol && abs(logZep_old-logZep)<tol;
%[iter-1 max(abs(muhat-mf)./abs(mf)) max(abs(sqrt(sigm2hat)-s)./abs(s)) max(abs(logM0_old-logM0)) abs(logZep_old-logZep)]
%[iter-1 max(abs(muhat-mf)./abs(mf)) max(abs(logM0_old-logM0)) abs(logZep_old-logZep)]
end
% Reorder all the returned and stored values
B = B(r,r);
nutilde = nutilde(r);
tautilde = tautilde(r);
muvec_i = muvec_i(r);
sigm2vec_i = sigm2vec_i(r);
logM0 = logM0(r);
mf = mf(r);
y = y(r);
if ~isempty(z)
z = z(r,:);
end
[L, notpositivedefinite] = ldlchol(B);
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
end
edata = logZep;
% Store B in La2 for the output
La2 = B;
b = 0;
% ============================================================
% FIC
% ============================================================
case 'FIC'
u = gp.X_u;
m = size(u,1);
% First evaluate needed covariance matrices
% v defines that parameter is a vector
[Kv_ff, Cv_ff] = gp_trvar(gp, x); % f x 1 vector
K_fu = gp_cov(gp, x, u); % f x u
K_uu = gp_trcov(gp, u); % u x u, noiseless covariance K_uu
K_uu = (K_uu+K_uu')./2; % ensure the symmetry of K_uu
[Luu, notpositivedefinite] = chol(K_uu, 'lower');
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
% Evaluate the Lambda (La)
% Q_ff = K_fu*inv(K_uu)*K_fu'
% Here we need only the diag(Q_ff), which is evaluated below
B=Luu\(K_fu'); % u x f
Qv_ff=sum(B.^2)';
Lav = Cv_ff-Qv_ff; % f x 1, Vector of diagonal elements
% iLaKfu = diag(iLav)*K_fu = inv(La)*K_fu
% First some helper parameters
iLaKfu = zeros(size(K_fu)); % f x u,
for i=1:n
iLaKfu(i,:) = K_fu(i,:)./Lav(i); % f x u
end
A = K_uu+K_fu'*iLaKfu; A = (A+A')./2; % Ensure symmetry
[A, notpositivedefinite] = chol(A);
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
L = iLaKfu/A;
Lahat = 1./Lav;
I = eye(size(K_uu));
[R0, notpositivedefinite] = chol(inv(K_uu));
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
R = R0;
P = K_fu;
mf = zeros(size(y));
eta = zeros(size(y));
gamma = zeros(size(K_uu,1),1);
D_vec = Lav;
Ann=0;
% The EP -algorithm
convergence=false;
while iter<=maxiter && ~convergence
logZep_old=logZep;
logM0_old=logM0;
if isequal(gp.latent_opt.parallel,'on')
% parallel-EP
% approximate cavity parameters
Ann = D_vec+sum((P*R').^2,2);
mf = eta + sum(bsxfun(@times,P,gamma'),2);
tau = 1./Ann-tautilde;
nu = 1./Ann.*mf-nutilde;
muvec_i=nu./tau;
sigm2vec_i=1./tau;
% compute moments of tilted distributions
for i1=1:n
[logM0(i1), muhat(i1), sigm2hat(i1)] = gp.lik.fh.tiltedMoments(gp.lik, y, i1, sigm2vec_i(i1), muvec_i(i1), z);
end
if any(isnan(logM0))
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
% update site parameters
deltatautilde=1./sigm2hat-tau-tautilde;
tautilde=tautilde+df*deltatautilde;
deltanutilde=1./sigm2hat.*muhat-nu-nutilde;
nutilde=nutilde+df*deltanutilde;
else
% sequential-EP
muvec_i = zeros(n,1); sigm2vec_i = zeros(n,1);
for i1=1:n
% approximate cavity parameters
pn = P(i1,:)';
Ann = D_vec(i1) + sum((R*pn).^2);
tau_i = Ann^-1-tautilde(i1);
mf(i1) = eta(i1) + pn'*gamma;
nu_i = Ann^-1*mf(i1)-nutilde(i1);
mu_i=nu_i/tau_i;
sigm2_i=tau_i^-1;
% marginal moments
[logM0(i1), muhat(i1), sigm2hat(i1)] = gp.lik.fh.tiltedMoments(gp.lik, y, i1, sigm2_i, mu_i, z);
% update site parameters
deltatautilde = sigm2hat(i1)^-1-tau_i-tautilde(i1);
tautilde(i1) = tautilde(i1)+df*deltatautilde;
deltanutilde = sigm2hat(i1)^-1*muhat(i1)-nu_i - nutilde(i1);
nutilde(i1) = nutilde(i1)+df*deltanutilde;
% Update the parameters
dn = D_vec(i1);
D_vec(i1) = D_vec(i1) - deltatautilde.*D_vec(i1).^2 ./ (1+deltatautilde.*D_vec(i1));
P(i1,:) = pn' - (deltatautilde.*dn ./ (1+deltatautilde.*dn)).*pn';
updfact = deltatautilde./(1 + deltatautilde.*Ann);
if updfact > 0
RtRpnU = R'*(R*pn).*sqrt(updfact);
R = cholupdate(R, RtRpnU, '-');
elseif updfact < 0
RtRpnU = R'*(R*pn).*sqrt(abs(updfact));
R = cholupdate(R, RtRpnU, '+');
end
eta(i1) = eta(i1) + (deltanutilde - deltatautilde.*eta(i1)).*dn./(1+deltatautilde.*dn);
gamma = gamma + (deltanutilde - deltatautilde.*mf(i1))./(1+deltatautilde.*dn) * R'*(R*pn);
% mf = eta + P*gamma;
% Store cavity parameters
muvec_i(i1,1)=mu_i;
sigm2vec_i(i1,1)=sigm2_i;
end
end
% Recompute the approximate posterior parameters
% parallel- and sequential-EP
temp1 = (1+Lav.*tautilde).^(-1);
D_vec = temp1.*Lav;
R0P0t = R0*K_fu';
temp2 = zeros(size(R0P0t));
% for i2 = 1:length(temp1)
% P(i2,:) = temp1(i2).*K_fu(i2,:);
% temp2(:,i2) = R0P0t(:,i2).*tautilde(i2).*temp1(i2);
% end
% R = chol(inv(eye(size(R0)) + temp2*R0P0t')) * R0;
P=bsxfun(@times,temp1,K_fu);
temp2=bsxfun(@times,(tautilde.*temp1)',R0P0t);
temp2=temp2*R0P0t';
temp2(1:m+1:end)=temp2(1:m+1:end)+1;
R = chol(inv(temp2)) * R0;
eta = D_vec.*nutilde;
gamma = R'*(R*(P'*nutilde));
mf = eta + P*gamma;
% Compute the marginal likelihood, see FULL model for
% details about equations
Lahat = 1./Lav + tautilde;
Lhat = bsxfun(@rdivide,L,Lahat);
H = I-L'*Lhat;
B = H\L';
Bhat = B./repmat(Lahat',m,1);
% 4. term & 1. term
Stildesqroot=sqrt(tautilde);
D = Stildesqroot.*Lav.*Stildesqroot + 1;
SsqrtKfu = K_fu.*repmat(Stildesqroot,1,m);
AA = K_uu + (SsqrtKfu'./repmat(D',m,1))*SsqrtKfu; AA = (AA+AA')/2;
[AA, notpositivedefinite] = chol(AA,'lower');
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
term41 = - 0.5*sum(log(1+tautilde.*sigm2vec_i)) - sum(log(diag(Luu))) + sum(log(diag(AA))) + 0.5.*sum(log(D));
% 5. term (1/2 element) & 2. term
T=1./sigm2vec_i;
term52 = -0.5*( (nutilde./Lahat)'*nutilde + (nutilde'*Lhat)*(Bhat*nutilde) - (nutilde./(T+tautilde))'*nutilde);
% 5. term (2/2 element)
term5 = - 0.5*muvec_i'.*(T./(tautilde+T))'*(tautilde.*muvec_i-2*nutilde);
% 3. term
term3 = -sum(logM0);
logZep = term41+term52+term5+term3;
iter=iter+1;
convergence=max(abs(logM0_old-logM0))<tol && abs(logZep_old-logZep)<tol;
end
edata = logZep;
%L = iLaKfu;
% b' = (La + Kfu*iKuu*Kuf + 1./S)*1./S * nutilde
% = (S - S * (iLa - L*L' + S)^(-1) * S) * 1./S
% = I - S * (Lahat - L*L')^(-1)
% L = S*Kfu * (Lav + 1./S)^(-1) / chol(K_uu + SsqrtKfu'*(Lav + 1./S)^(-1)*SsqrtKfu)
% La2 = D./S = Lav + 1./S,
%
% The way evaluations are done is numerically more stable
% See equations (3.71) and (3.72) in Rasmussen and Williams (2006)
b = nutilde'.*(1 - Stildesqroot./Lahat.*Stildesqroot)' - (nutilde'*Lhat)*Bhat.*tautilde'; % part of eq. (3.71)
L = ((repmat(Stildesqroot,1,m).*SsqrtKfu)./repmat(D',m,1)')/AA'; % part of eq. (3.72)
La2 = 1./(Stildesqroot./D.*Stildesqroot); % part of eq. (3.72)
D = D_vec;
% ============================================================
% PIC
% ============================================================
case {'PIC' 'PIC_BLOCK'}
ind = gp.tr_index;
u = gp.X_u;
m = length(u);
% First evaluate needed covariance matrices
% v defines that parameter is a vector
K_fu = gp_cov(gp, x, u); % f x u
K_uu = gp_trcov(gp, u); % u x u, noiseless covariance K_uu
K_uu = (K_uu+K_uu')./2; % ensure the symmetry of K_uu
[Luu, notpositivedefinite] = chol(K_uu, 'lower');
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
% Evaluate the Lambda (La)
% Q_ff = K_fu*inv(K_uu)*K_fu'
% Here we need only the diag(Q_ff), which is evaluated below
B=Luu\(K_fu'); % u x f
% First some helper parameters
iLaKfu = zeros(size(K_fu)); % f x u
for i=1:length(ind)
Qbl_ff = B(:,ind{i})'*B(:,ind{i});
[Kbl_ff, Cbl_ff] = gp_trcov(gp, x(ind{i},:));
Labl{i} = Cbl_ff - Qbl_ff;
[Llabl, notpositivedefinite] = chol(Labl{i});
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
iLaKfu(ind{i},:) = Llabl\(Llabl'\K_fu(ind{i},:));
end
A = K_uu+K_fu'*iLaKfu;
A = (A+A')./2; % Ensure symmetry
[A, notpositivedefinite] = chol(A);
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
L = iLaKfu/A;
I = eye(size(K_uu));
[R0, notpositivedefinite] = chol(inv(K_uu));
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
R = R0;
P = K_fu;
R0P0t = R0*K_fu';
mf = zeros(size(y));
eta = zeros(size(y));
gamma = zeros(size(K_uu,1),1);
D = Labl;
Ann=0;
% The EP -algorithm
convergence=false;
while iter<=maxiter && ~convergence
logZep_old=logZep;
logM0_old=logM0;
if isequal(gp.latent_opt.parallel,'on')
% parallel-EP
% approximate cavity parameters
for bl=1:length(ind)
bl_ind = ind{bl};
Pbl=P(bl_ind,:);
Ann = diag(D{bl}) +sum((Pbl*R').^2,2);
tau(bl_ind,1) = 1./Ann-tautilde(bl_ind);
mf(bl_ind,1) = eta(bl_ind) + sum(bsxfun(@times,Pbl,gamma'),2);
nu(bl_ind,1) = 1./Ann.*mf(bl_ind)-nutilde(bl_ind);
end
muvec_i=nu./tau;
sigm2vec_i=1./tau;
% compute moments of tilted distributions
for i1=1:n
[logM0(i1), muhat(i1), sigm2hat(i1)] = gp.lik.fh.tiltedMoments(gp.lik, y, i1, sigm2vec_i(i1), muvec_i(i1), z);
end
if any(isnan(logM0))
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
% update site parameters
deltatautilde = 1./sigm2hat-tau-tautilde;
tautilde = tautilde+df*deltatautilde;
deltanutilde = 1./sigm2hat.*muhat-nu-nutilde;
nutilde = nutilde+df*deltanutilde;
else
muvec_i = zeros(n,1); sigm2vec_i = zeros(n,1);
for bl=1:length(ind)
bl_ind = ind{bl};
for in=1:length(bl_ind)
i1 = bl_ind(in);
% approximate cavity parameters
Dbl = D{bl}; dn = Dbl(in,in); pn = P(i1,:)';
Ann = dn + sum((R*pn).^2);
tau_i = Ann^-1-tautilde(i1);
mf(i1) = eta(i1) + pn'*gamma;
nu_i = Ann^-1*mf(i1)-nutilde(i1);
mu_i=nu_i/tau_i;
sigm2_i=tau_i^-1;
% marginal moments
[logM0(i1), muhat(i1), sigm2hat(i1)] = gp.lik.fh.tiltedMoments(gp.lik, y, i1, sigm2_i, mu_i, z);
% update site parameters
deltatautilde = sigm2hat(i1)^-1-tau_i-tautilde(i1);
tautilde(i1) = tautilde(i1)+df*deltatautilde;
deltanutilde = sigm2hat(i1)^-1*muhat(i1)-nu_i - nutilde(i1);
nutilde(i1) = nutilde(i1) + df*deltanutilde;
% Update the parameters
Dblin = Dbl(:,in);
Dbl = Dbl - deltatautilde ./ (1+deltatautilde.*dn) * Dblin*Dblin';
%Dbl = inv(inv(Dbl) + diag(tautilde(bl_ind)));
P(bl_ind,:) = P(bl_ind,:) - ((deltatautilde ./ (1+deltatautilde.*dn)).* Dblin)*pn';
updfact = deltatautilde./(1 + deltatautilde.*Ann);
if updfact > 0
RtRpnU = R'*(R*pn).*sqrt(updfact);
R = cholupdate(R, RtRpnU, '-');
elseif updfact < 0
RtRpnU = R'*(R*pn).*sqrt(abs(updfact));
R = cholupdate(R, RtRpnU, '+');
end
eta(bl_ind) = eta(bl_ind) + (deltanutilde - deltatautilde.*eta(i1))./(1+deltatautilde.*dn).*Dblin;
gamma = gamma + (deltanutilde - deltatautilde.*mf(i1))./(1+deltatautilde.*dn) * (R'*(R*pn));
%mf = eta + P*gamma;
D{bl} = Dbl;
% Store cavity parameters
muvec_i(i1,1)=mu_i;
sigm2vec_i(i1,1)=sigm2_i;
end
end
end
% Recompute the approximate posterior parameters
% parallel- and sequential-EP
temp2 = zeros(size(R0P0t));
Stildesqroot=sqrt(tautilde);
for i=1:length(ind)
sdtautilde = diag(Stildesqroot(ind{i}));
Dhat = sdtautilde*Labl{i}*sdtautilde + eye(size(Labl{i}));
[Ldhat{i}, notpositivedefinite] = chol(Dhat);
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
D{i} = Labl{i} - Labl{i}*sdtautilde*(Ldhat{i}\(Ldhat{i}'\sdtautilde*Labl{i}));
P(ind{i},:) = D{i}*(Labl{i}\K_fu(ind{i},:));
temp2(:,ind{i}) = R0P0t(:,ind{i})*sdtautilde/Dhat*sdtautilde;
eta(ind{i}) = D{i}*nutilde(ind{i});
end
R = chol(inv(eye(size(R0)) + temp2*R0P0t')) * R0;
gamma = R'*(R*(P'*nutilde));
mf = eta + P*gamma;
% Compute the marginal likelihood, see FULL model for
% details about equations
%
% First some helper parameters
for i = 1:length(ind)
Lhat(ind{i},:) = D{i}*L(ind{i},:);
end
H = I-L'*Lhat;
B = H\L';
% Compute the marginal likelihood, see FULL model for
% details about equations
term41 = 0; term52 = 0;
for i=1:length(ind)
Bhat(:,ind{i}) = B(:,ind{i})*D{i};
SsqrtKfu(ind{i},:) = bsxfun(@times,K_fu(ind{i},:),Stildesqroot(ind{i}));
%SsqrtKfu(ind{i},:) = gtimes(K_fu(ind{i},:),Stildesqroot(ind{i}));
iDSsqrtKfu(ind{i},:) = Ldhat{i}\(Ldhat{i}'\SsqrtKfu(ind{i},:));
term41 = term41 + sum(log(diag(Ldhat{i})));
term52 = term52 + nutilde(ind{i})'*(D{i}*nutilde(ind{i}));
end
AA = K_uu + SsqrtKfu'*iDSsqrtKfu; AA = (AA+AA')/2;
[AA, notpositivedefinite] = chol(AA,'lower');
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
term41 = term41 - 0.5*sum(log(1+tautilde.*sigm2vec_i)) - sum(log(diag(Luu))) + sum(log(diag(AA)));
% 5. term (1/2 element) & 2. term
T=1./sigm2vec_i;
term52 = -0.5*( term52 + (nutilde'*Lhat)*(Bhat*nutilde) - (nutilde./(T+tautilde))'*nutilde);
% 5. term (2/2 element)
term5 = - 0.5*muvec_i'.*(T./(tautilde+T))'*(tautilde.*muvec_i-2*nutilde);
% 3. term
term3 = -sum(logM0);
logZep = term41+term52+term5+term3;
iter=iter+1;
convergence=max(abs(logM0_old-logM0))<tol && abs(logZep_old-logZep)<tol;
end
edata = logZep;
b = zeros(1,n);
for i=1:length(ind)
b(ind{i}) = nutilde(ind{i})'*D{i};
La2{i} = inv(diag(Stildesqroot(ind{i}))*(Ldhat{i}\(Ldhat{i}'\diag(Stildesqroot(ind{i})))));
end
b = nutilde' - ((b + (nutilde'*Lhat)*Bhat).*tautilde');
L = (repmat(Stildesqroot,1,m).*iDSsqrtKfu)/AA';
% ============================================================
% CS+FIC
% ============================================================
case 'CS+FIC'
u = gp.X_u;
m = length(u);
cf_orig = gp.cf;
cf1 = {};
cf2 = {};
j = 1;
k = 1;
for i = 1:ncf
if ~isfield(gp.cf{i},'cs')
cf1{j} = gp.cf{i};
j = j + 1;
else
cf2{k} = gp.cf{i};
k = k + 1;
end
end
gp.cf = cf1;
% First evaluate needed covariance matrices
% v defines that parameter is a vector
[Kv_ff, Cv_ff] = gp_trvar(gp, x); % f x 1 vector
K_fu = gp_cov(gp, x, u); % f x u
K_uu = gp_trcov(gp, u); % u x u, noiseless covariance K_uu
K_uu = (K_uu+K_uu')./2; % ensure the symmetry of K_uu
[Luu, notpositivedefinite] = chol(K_uu, 'lower');
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
% Evaluate the Lambda (La)
% Q_ff = K_fu*inv(K_uu)*K_fu'
B=Luu\(K_fu'); % u x f
Qv_ff=sum(B.^2)';
Lav = Cv_ff-Qv_ff; % f x 1, Vector of diagonal elements
gp.cf = cf2;
K_cs = gp_trcov(gp,x);
La = sparse(1:n,1:n,Lav,n,n) + K_cs;
gp.cf = cf_orig;
% clear unnecessary variables
clear K_cs; clear Qv_ff; clear Kv_ff; clear Cv_ff; clear Lav;
% Find fill reducing permutation and permute all the
% matrices
p = analyze(La);
r(p) = 1:n;
if ~isempty(z)
z = z(p,:);
end
y = y(p);
La = La(p,p);
K_fu = K_fu(p,:);
[VD, notpositivedefinite] = ldlchol(La);
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
iLaKfu = ldlsolve(VD,K_fu);
A = K_uu+K_fu'*iLaKfu; A = (A+A')./2; % Ensure symmetry
[A, notpositivedefinite] = chol(A);
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
L = iLaKfu/A;
I = eye(size(K_uu));
Inn = sparse(1:n,1:n,1,n,n);
sqrtS = sparse(1:n,1:n,0,n,n);
[R0, notpositivedefinite] = chol(inv(K_uu));
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
R = R0;
P = K_fu;
R0P0t = R0*K_fu';
mf = zeros(size(y));
eta = zeros(size(y));
gamma = zeros(size(K_uu,1),1);
Ann=0;
LasqrtS = La*sqrtS;
[VD, notpositivedefinite] = ldlchol(Inn);
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
% The EP -algorithm
convergence=false;
while iter<=maxiter && ~convergence
logZep_old=logZep;
logM0_old=logM0;
if isequal(gp.latent_opt.parallel,'on')
% parallel-EP
% approximate cavity parameters
tttt = ldlsolve(VD,ssmult(sqrtS,La));
D_vec = full(diag(La) - sum(LasqrtS'.*tttt)');
Ann = D_vec+sum((P*R').^2,2);
mf = eta + sum(bsxfun(@times,P,gamma'),2);
tau = 1./Ann-tautilde;
nu = 1./Ann.*mf-nutilde;
muvec_i=nu./tau;
sigm2vec_i= 1./tau;
% compute moments of tilted distributions
for i1=1:n
[logM0(i1), muhat(i1), sigm2hat(i1)] = gp.lik.fh.tiltedMoments(gp.lik, y, i1, sigm2vec_i(i1), muvec_i(i1), z);
end
if any(isnan(logM0))
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
% update site parameters
deltatautilde=1./sigm2hat-tau-tautilde;
tautilde=tautilde+df*deltatautilde;
deltanutilde=1./sigm2hat.*muhat-nu-nutilde;
nutilde=nutilde+df*deltanutilde;
else
% sequential-EP
muvec_i = zeros(n,1); sigm2vec_i = zeros(n,1);
for i1=1:n
% approximate cavity parameters
tttt = ldlsolve(VD,ssmult(sqrtS,La(:,i1)));
Di1 = La(:,i1) - ssmult(LasqrtS,tttt);
dn = Di1(i1);
pn = P(i1,:)';
Ann = dn + sum((R*pn).^2);
tau_i = Ann^-1-tautilde(i1);
mf(i1) = eta(i1) + pn'*gamma;
nu_i = Ann^-1*mf(i1)-nutilde(i1);
mu_i=nu_i/tau_i;
sigm2_i= tau_i^-1; % 1./tau_i; %
% marginal moments
[logM0(i1), muhat(i1), sigm2hat(i1)] = gp.lik.fh.tiltedMoments(gp.lik, y, i1, sigm2_i, mu_i, z);
% update site parameters
deltatautilde = sigm2hat(i1)^-1-tau_i-tautilde(i1);
tautilde(i1) = tautilde(i1)+df*deltatautilde;
deltanutilde = sigm2hat(i1)^-1*muhat(i1)-nu_i - nutilde(i1);
nutilde(i1) = nutilde(i1) + df*deltanutilde;
% Update the parameters
P = P - ((deltatautilde ./ (1+deltatautilde.*dn)).* Di1)*pn';
updfact = deltatautilde./(1 + deltatautilde.*Ann);
if updfact > 0
RtRpnU = R'*(R*pn).*sqrt(updfact);
R = cholupdate(R, RtRpnU, '-');
elseif updfact < 0
RtRpnU = R'*(R*pn).*sqrt(abs(updfact));
R = cholupdate(R, RtRpnU, '+');
end
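% A minimal sketch of the rank-one logic above (A_toy is hypothetical): if
% R = chol(A), then cholupdate(R,u,'+') returns chol(A + u*u') and
% cholupdate(R,u,'-') returns chol(A - u*u'), so a single-site precision
% change is absorbed without refactorizing the full matrix.
% A_toy = eye(3); R_toy = chol(A_toy); u = [0.5; 0; 0];
% Rp = cholupdate(R_toy, u, '+');
% norm(Rp'*Rp - (A_toy + u*u')) % ~0 up to rounding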
eta = eta + (deltanutilde - deltatautilde.*eta(i1))./(1+deltatautilde.*dn).*Di1;
gamma = gamma + (deltanutilde - deltatautilde.*mf(i1))./(1+deltatautilde.*dn) * (R'*(R*pn));
% Store cavity parameters
muvec_i(i1,1)=mu_i;
sigm2vec_i(i1,1)=sigm2_i;
D2_o = ssmult(sqrtS,LasqrtS(:,i1)) + Inn(:,i1);
sqrtS(i1,i1) = sqrt(tautilde(i1));
LasqrtS(:,i1) = La(:,i1).*sqrtS(i1,i1);
D2_n = ssmult(sqrtS,LasqrtS(:,i1)) + Inn(:,i1);
if tautilde(i1) - deltatautilde == 0
VD = ldlrowupdate(i1,VD,VD(:,i1),'-');
VD = ldlrowupdate(i1,VD,D2_n,'+');
else
VD = ldlrowmodify(VD, D2_n, i1);
end
end
end
% Recompute the approximate posterior parameters
% parallel- and sequential-EP
sqrtS = sparse(1:n,1:n,sqrt(tautilde),n,n);
sqrtSLa = ssmult(sqrtS,La);
D2 = ssmult(sqrtSLa,sqrtS) + Inn;
LasqrtS = ssmult(La,sqrtS);
[VD, notpositivedefinite] = ldlchol(D2);
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
SsqrtKfu = sqrtS*K_fu;
iDSsqrtKfu = ldlsolve(VD,SsqrtKfu);
P = K_fu - sqrtSLa'*iDSsqrtKfu;
R = chol(inv( eye(size(R0)) + R0P0t*sqrtS*ldlsolve(VD,sqrtS*R0P0t'))) * R0;
eta = La*nutilde - sqrtSLa'*ldlsolve(VD,sqrtSLa*nutilde);
gamma = R'*(R*(P'*nutilde));
mf = eta + P*gamma;
% Compute the marginal likelihood,
Lhat = La*L - sqrtSLa'*ldlsolve(VD,sqrtSLa*L);
H = I-L'*Lhat;
B = H\L';
Bhat = B*La - ldlsolve(VD,sqrtSLa*B')'*sqrtSLa;
% 4. term & 1. term
AA = K_uu + SsqrtKfu'*iDSsqrtKfu; AA = (AA+AA')/2;
[AA, notpositivedefinite] = chol(AA,'lower');
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
term41 = - 0.5*sum(log(1+tautilde.*sigm2vec_i)) - sum(log(diag(Luu))) + sum(log(diag(AA))) + 0.5*sum(log(diag(VD)));
% 5. term (1/2 element) & 2. term
T=1./sigm2vec_i;
term52 = -0.5*( nutilde'*(eta) + (nutilde'*Lhat)*(Bhat*nutilde) - (nutilde./(T+tautilde))'*nutilde);
% 5. term (2/2 element)
term5 = - 0.5*muvec_i'.*(T./(tautilde+T))'*(tautilde.*muvec_i-2*nutilde);
% 3. term
term3 = -sum(logM0);
logZep = term41+term52+term5+term3;
iter=iter+1;
convergence=max(abs(logM0_old-logM0))<tol && abs(logZep_old-logZep)<tol;
end
edata = logZep;
% b' = (K_fu/K_uu*K_fu' + La + diag(1./tautilde)) \ (tautilde.\nutilde)
% L = S*Kfu * (Lav + 1./S)^(-1) / chol(K_uu + SsqrtKfu'*(Lav + 1./S)^(-1)*SsqrtKfu)
% La2 = D./S = Lav + 1./S,
%
% The evaluations above are done in a way that is numerically more stable
% than direct inversion of S (tautilde);
% see equations (3.71) and (3.72) in Rasmussen and Williams (2006)
b = nutilde' - ((eta' + (nutilde'*Lhat)*Bhat).*tautilde');
L = (sqrtS*iDSsqrtKfu)/AA';
La2 = sqrtS\D2/sqrtS;
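% Illustrative sanity check (commented out; toy values are hypothetical, not
% part of the algorithm): since D2 = sqrtS*La*sqrtS + I, the line above gives
% La2 = inv(S)*D2*inv(S) = La + inv(S), i.e. the noise matrix plus the
% inverse site precisions.
% La_toy = diag([0.5 0.8]); sqrtS_toy = diag(sqrt([2 3]));
% D2_toy = sqrtS_toy*La_toy*sqrtS_toy + eye(2);
% La2_toy = sqrtS_toy\D2_toy/sqrtS_toy;
% norm(La2_toy - (La_toy + inv(sqrtS_toy^2))) % ~0 up to rounding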
% Reorder all the returned and stored values
b = b(r);
L = L(r,:);
La2 = La2(r,r);
D = La(r,r);
nutilde = nutilde(r);
tautilde = tautilde(r);
logM0 = logM0(r);
muvec_i = muvec_i(r);
sigm2vec_i = sigm2vec_i(r);
mf = mf(r);
P = P(r,:);
y = y(r);
if ~isempty(z)
z = z(r,:);
end
% ============================================================
% DTC,VAR
% ============================================================
case {'DTC' 'VAR' 'SOR'}
% First evaluate needed covariance matrices
% v defines that parameter is a vector
u = gp.X_u;
m = size(u,1);
[Kv_ff, Cv_ff] = gp_trvar(gp, x); % f x 1 vector
K_fu = gp_cov(gp, x, u); % f x u
K_uu = gp_trcov(gp, u); % u x u, noiseless covariance K_uu
K_uu = (K_uu+K_uu')./2; % ensure the symmetry of K_uu
[Luu, notpositivedefinite] = chol(K_uu, 'lower');
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
% Evaluate the Lambda (La)
% Q_ff = K_fu*inv(K_uu)*K_fu'
% Here we need only the diag(Q_ff), which is evaluated below
B=Luu\(K_fu'); % u x f
Phi = B';
m = size(Phi,2);
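% Low-rank representation used below: with B = Luu\K_fu' we have
% Q_ff = K_fu*inv(K_uu)*K_fu' = Phi*Phi' for Phi = B', so all n x n algebra
% reduces to m-dimensional operations, e.g. diag(Q_ff) = sum(Phi.^2,2).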
R = eye(m,m);
P = Phi;
mf = zeros(size(y));
gamma = zeros(m,1);
Ann=0;
% The EP algorithm
convergence=false;
while iter<=maxiter && ~convergence
logZep_old=logZep;
logM0_old=logM0;
if isequal(gp.latent_opt.parallel,'on')
% parallel-EP
% approximate cavity parameters
Ann = sum((P*R').^2,2);
mf = sum(bsxfun(@times,Phi,gamma'),2);%phi'*gamma;
tau = 1./Ann-tautilde;
nu = 1./Ann.*mf-nutilde;
muvec_i=nu./tau;
sigm2vec_i= 1./tau;
% compute moments of tilted distributions
for i1=1:n
[logM0(i1), muhat(i1), sigm2hat(i1)] = gp.lik.fh.tiltedMoments(gp.lik, y, i1, sigm2vec_i(i1), muvec_i(i1), z);
end
if any(isnan(logM0))
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
% update site parameters
deltatautilde=1./sigm2hat-tau-tautilde;
tautilde=tautilde+df*deltatautilde;
deltanutilde=1./sigm2hat.*muhat-nu-nutilde;
nutilde=nutilde+df*deltanutilde;
else
% sequential-EP
muvec_i = zeros(n,1); sigm2vec_i = zeros(n,1);
for i1=1:n
% approximate cavity parameters
phi = Phi(i1,:)';
Ann = sum((R*phi).^2);
tau_i = Ann^-1-tautilde(i1);
mf(i1) = phi'*gamma;
nu_i = Ann^-1*mf(i1)-nutilde(i1);
mu_i=nu_i/tau_i;
sigm2_i=tau_i^-1;
% marginal moments
[logM0(i1), muhat(i1), sigm2hat(i1)] = gp.lik.fh.tiltedMoments(gp.lik, y, i1, sigm2_i, mu_i, z);
% update site parameters
deltatautilde = sigm2hat(i1)^-1-tau_i-tautilde(i1);
tautilde(i1) = tautilde(i1)+df*deltatautilde;
deltanutilde = sigm2hat(i1)^-1*muhat(i1)-nu_i - nutilde(i1);
nutilde(i1) = nutilde(i1) + df*deltanutilde;
% Update the parameters
lnn = sum((R*phi).^2);
updfact = deltatautilde/(1 + deltatautilde*lnn);
if updfact > 0
RtLphiU = R'*(R*phi).*sqrt(updfact);
R = cholupdate(R, RtLphiU, '-');
elseif updfact < 0
RtLphiU = R'*(R*phi).*sqrt(abs(updfact)); % abs() needed since updfact < 0 here
R = cholupdate(R, RtLphiU, '+');
end
gamma = gamma - R'*(R*phi)*(deltatautilde*mf(i1)-deltanutilde);
% Store cavity parameters
muvec_i(i1,1)=mu_i;
sigm2vec_i(i1,1)=sigm2_i;
end
end
% Recompute the approximate posterior parameters
% parallel- and sequential-EP
R = chol(inv(eye(m,m) + Phi'*(repmat(tautilde,1,m).*Phi)));
gamma = R'*(R*(Phi'*nutilde));
mf = Phi*gamma;
% Compute the marginal likelihood, see FULL model for
% details about equations
% 4. term & 1. term
Stildesqroot=sqrt(tautilde);
SsqrtPhi = Phi.*repmat(Stildesqroot,1,m);
AA = eye(m,m) + SsqrtPhi'*SsqrtPhi; AA = (AA+AA')/2;
[AA, notpositivedefinite] = chol(AA,'lower');
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
term41 = - 0.5*sum(log(1+tautilde.*sigm2vec_i)) + sum(log(diag(AA)));
% 5. term (1/2 element) & 2. term
T=1./sigm2vec_i;
bb = nutilde'*Phi;
bb2 = bb*SsqrtPhi';
bb3 = bb2*SsqrtPhi/AA';
term52 = -0.5*( bb*bb' - bb2*bb2' + bb3*bb3' - (nutilde./(T+tautilde))'*nutilde);
% 5. term (2/2 element)
term5 = - 0.5*muvec_i'.*(T./(tautilde+T))'*(tautilde.*muvec_i-2*nutilde);
% 3. term
term3 = -sum(logM0);
logZep = term41+term52+term5+term3;
iter=iter+1;
convergence=max(abs(logM0_old-logM0))<tol && abs(logZep_old-logZep)<tol;
end
edata = logZep;
%L = iLaKfu;
if strcmp(gp.type,'VAR')
Qv_ff = sum(B.^2)';
edata = edata + 0.5*sum((Kv_ff-Qv_ff).*tautilde);
end
temp = Phi*(SsqrtPhi'*(SsqrtPhi*bb'));
% b = Phi*bb' - temp + Phi*(SsqrtPhi'*(SsqrtPhi*(AA'\(AA\temp))));
b = nutilde - bb2'.*Stildesqroot + repmat(tautilde,1,m).*Phi*(AA'\bb3');
b = b';
% StildeKfu = zeros(size(K_fu)); % f x u,
% for i=1:n
% StildeKfu(i,:) = K_fu(i,:).*tautilde(i); % f x u
% end
% A = K_uu+K_fu'*StildeKfu; A = (A+A')./2; % Ensure symmetry
% A = chol(A);
% L = StildeKfu/A;
L = repmat(tautilde,1,m).*Phi/AA';
%L = repmat(tautilde,1,m).*K_fu/AA';
mu=nutilde./tautilde;
%b = nutilde - mu'*L*L'*mu;
%b=b';
La2 = 1./tautilde;
D = 0;
otherwise
error('Unknown type of Gaussian process!')
end
% ==================================================
% Evaluate the prior contribution to the error from
% covariance functions and likelihood
% ==================================================
% Evaluate the prior contribution to the error from covariance
% functions
eprior = 0;
for i=1:ncf
gpcf = gp.cf{i};
eprior = eprior - gpcf.fh.lp(gpcf);
end
% Evaluate the prior contribution to the error from likelihood
% functions
if isfield(gp.lik, 'p')
lik = gp.lik;
eprior = eprior - lik.fh.lp(lik);
end
% The last things to do
if isfield(gp.latent_opt, 'display') && ismember(gp.latent_opt.display,{'final','iter'})
fprintf('GPEP_E: Number of iterations in EP: %d \n', iter-1)
end
e = edata + eprior;
logZ_i = logM0(:);
eta = [];
% store values to the cache
ch.w = w;
ch.e = e;
ch.edata = edata;
ch.eprior = eprior;
ch.tautilde = tautilde;
ch.nutilde = nutilde;
ch.L = L;
ch.La2 = La2;
ch.b = b;
ch.muvec_i = muvec_i;
ch.sigm2vec_i = sigm2vec_i;
ch.logZ_i = logZ_i;
ch.eta = eta;
ch.datahash=datahash;
global iter_lkm
iter_lkm=iter;
end
case 'robust-EP'
% function [e,edata,eprior,tau_q,nu_q,L, La2, b, muvec_i,sigm2vec_i,Z_i, eta] = ep_algorithm2(w,gp,x,y,z)
%
% if strcmp(w, 'clearcache')
% ch=[];
% return
% end
% check whether saved values can be used
if isempty(z)
datahash=hash_sha512([x y]);
else
datahash=hash_sha512([x y z]);
end
if ~isempty(ch) && all(size(w)==size(ch.w)) && all(abs(w-ch.w) < 1e-8) && isequal(datahash,ch.datahash)
% The covariance function parameters haven't changed so just
% return the Energy and the site parameters that are saved
e = ch.e;
edata = ch.edata;
eprior = ch.eprior;
L = ch.L;
La2 = ch.La2;
b = ch.b;
nutilde = ch.nu_q;
tautilde = ch.tau_q;
eta = ch.eta;
muvec_i = ch.muvec_i;
sigm2vec_i = ch.sigm2vec_i;
logZ_i = ch.logZ_i;
else
% The parameters or data have changed since
% the last call for gpep_e. In this case we need to
% re-evaluate the EP approximation
% preparations
ninit=gp.latent_opt.ninit; % max number of initial parallel iterations
maxiter=gp.latent_opt.maxiter; % max number of double-loop iterations
max_ninner=gp.latent_opt.max_ninner; % max number of inner loop iterations in the double-loop algorithm
tolStop=gp.latent_opt.tolStop; % converge tolerance
tolUpdate=gp.latent_opt.tolUpdate; % tolerance for the EP site updates
tolInner=gp.latent_opt.tolInner; % inner loop energy tolerance
tolGrad=gp.latent_opt.tolGrad; % minimum gradient (g) decrease in the search direction, abs(g_new)<tolGrad*abs(g)
Vc_lim=gp.latent_opt.cavity_var_lim; % limit for the cavity variance Vc, Vc < Vc_lim*diag(K)
df0=gp.latent_opt.df; % the initial damping factor
eta1=gp.latent_opt.eta; % the initial fraction parameter
eta2=gp.latent_opt.eta2; % the secondary fraction parameter
display=gp.latent_opt.display; % control the display
gp=gp_unpak(gp,w);
likelih=gp.lik;
ncf = length(gp.cf);
n=length(y);
pvis=0;
eta=repmat(eta1,n,1); % the initial vector of fraction parameters
fh_tm=@(si,m_c,V_c,eta) likelih.fh.tiltedMoments2(likelih,y,si,V_c,m_c,z,eta);
switch gp.type
case 'FULL'
% prior covariance
K = gp_trcov(gp, x);
case 'FIC'
% Sparse
u = gp.X_u;
m = size(u,1);
K_uu = gp_trcov(gp,u);
K_uu = (K_uu + K_uu')./2;
K_fu = gp_cov(gp,x,u);
[Kv_ff, Cv_ff] = gp_trvar(gp,x);
[Luu, notpositivedefinite] = chol(K_uu, 'lower');
if notpositivedefinite
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
B=Luu\(K_fu');
Qv_ff=sum(B.^2)';
Sf = [];
Sf2 = [];
L2 = [];
end
% prior (zero) initialization
[nu_q,tau_q]=deal(zeros(n,1));
% initialize the q-distribution (the multivariate Gaussian posterior approximation)
switch gp.type
case 'FULL'
[mf,Sf,lnZ_q]=evaluate_q(nu_q,tau_q,K,display);
Vf = diag(Sf);
case 'FIC'
[mf,Vf,lnZ_q]=evaluate_q2(nu_q,tau_q,Luu, K_fu, Kv_ff, Qv_ff, display);
otherwise
error('Robust-EP not implemented for this type of GP!');
end
% initialize the surrogate distribution (the independent Gaussian marginal approximations)
nu_s=mf./Vf;
tau_s=1./Vf;
lnZ_s=0.5*sum( (-log(tau_s) +nu_s.^2 ./tau_s)./eta ); % minus 0.5*log(2*pi)./eta
% initialize r-distribution (the tilted distributions)
[lnZ_r,lnZ_i,m_r,V_r]=evaluate_r(nu_q,tau_q,eta,fh_tm,nu_s,tau_s,display);
% initial energy (lnZ_ep)
e = lnZ_q + lnZ_r -lnZ_s;
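% Expectation-consistent free energy (Opper & Winther, 2005): the marginal
% likelihood approximation is log Z_ep ~ lnZ_q + lnZ_r - lnZ_s, where q is
% the Gaussian approximation, r the product of tilted distributions and s
% the Gaussian surrogate; at a fixed point the three distributions share
% their first and second moments.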
if ismember(display,{'final','iter'})
fprintf('\nInitial energy: e=%.4f, hyperparameters:\n',e)
fprintf('Cov:%s \n',sprintf(' %.2g,',gp_pak(gp,'covariance')))
fprintf('Lik:%s \n',sprintf(' %.2g,',gp_pak(gp,'likelihood')))
end
if isfinite(e) % do not run the algorithm if the prior energy is not defined
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% initialize with ninit rounds of parallel EP
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% EP search direction
up_mode='ep'; % choose the moment matching
[dnu_q,dtau_q]=ep_update_dir(mf,Vf,m_r,V_r,eta,up_mode,tolUpdate);
convergence=false; % convergence indicator
df=df0; % initial damping factor
tol_m=zeros(1,2); % absolute moment tolerances
switch gp.type
case 'FULL'
tauc_min=1./(Vc_lim*diag(K)); % minimum cavity precision
case 'FIC'
tauc_min=1./(Vc_lim*Cv_ff);
end
% Adjust damping by setting an upper limit (Vf_mult) to the increase
% of the marginal variance
Vf_mult=2;
i1=0;
while i1<ninit
i1=i1+1;
%%%%%%%%%%%%%%%%%%%
% the damped update
dfi=df(ones(n,1));
temp=(1/Vf_mult-1)./Vf;
ii2=df*dtau_q<temp;
if any(ii2)
dfi(ii2)=temp(ii2)./dtau_q(ii2);
end
% proposal site parameters
nu_q2=nu_q+dfi.*dnu_q;
tau_q2=tau_q+dfi.*dtau_q;
%%%%%%%%%%%%%%%%%%%%%%%%%%%
% a proposal q-distribution
switch gp.type
case 'FULL'
[mf2,Sf2,lnZ_q2,L1,L2]=evaluate_q(nu_q2,tau_q2,K,display);
Vf2 = diag(Sf2);
case 'FIC'
[mf2,Vf2,lnZ_q2,L1,L2]=evaluate_q2(nu_q2,tau_q2,Luu, K_fu, Kv_ff, Qv_ff, display);
otherwise
error('Robust-EP not implemented for this type of GP!');
end
% check that the new cavity variances do not exceed the limit
tau_s2=1./Vf2;
pcavity=all( (tau_s2-eta.*tau_q2 )>=tauc_min);
if isempty(L2) || ~pcavity
% In case of too small cavity precisions, half the step size
df=df*0.5;
if df<0.1,
% If mediocre damping is not sufficient, proceed to
% the double-loop algorithm
break
else
if ismember(display,{'iter'})
fprintf('%d, e=%.6f, dm=%.4f, dV=%.4f, increasing damping to df=%g.\n',i1,e,tol_m(1),tol_m(2),df)
end
continue
end
end
% a proposal surrogate distribution
nu_s2=mf2./Vf2;
lnZ_s2=0.5*sum( (-log(tau_s2) +nu_s2.^2 ./tau_s2)./eta );
%%%%%%%%%%%%%%%%%%%%%%%%%%%
% a proposal r-distribution
[lnZ_r2,lnZ_i2,m_r2,V_r2,p]=evaluate_r(nu_q2,tau_q2,eta,fh_tm,nu_s2,tau_s2,display);
% the new energy
e2 = lnZ_q2 + lnZ_r2 -lnZ_s2;
% check that the energy is defined and that the tilted moments are proper
if ~all(p) || ~isfinite(e2)
df=df*0.5;
if df<0.1,
break
else
if ismember(display,{'iter'})
fprintf('%d, e=%.6f, dm=%.4f, dV=%.4f, increasing damping to df=%g.\n',i1,e,tol_m(1),tol_m(2),df)
end
continue
end
end
% accept the new state
[nu_q,tau_q,mf,Vf,Sf,lnZ_q]=deal(nu_q2,tau_q2,mf2,Vf2,Sf2,lnZ_q2);
[lnZ_r,lnZ_i,m_r,V_r,lnZ_s,nu_s,tau_s]=deal(lnZ_r2,lnZ_i2,m_r2,V_r2,lnZ_s2,nu_s2,tau_s2);
% EP search direction (moment matching)
[dnu_q,dtau_q]=ep_update_dir(mf,Vf,m_r,V_r,eta,up_mode,tolUpdate);
% Check for convergence
% the difference between the marginal moments
% Vf=diag(Sf);
tol_m=[abs(mf-m_r) abs(Vf-V_r)];
% measure the convergence by the moment difference
convergence=all(tol_m(:,1)<tolStop*abs(mf)) && all(tol_m(:,2)<tolStop*abs(Vf));
% measure the convergence by the change of energy
%convergence=abs(e2-e)<tolStop;
tol_m=max(tol_m);
e=e2;
if ismember(display,{'iter'})
fprintf('%d, e=%.6f, dm=%.4f, dV=%.4f, df=%g.\n',i1,e,tol_m(1),tol_m(2),df)
end
if convergence
if ismember(display,{'final','iter'})
fprintf('Convergence with parallel EP, iter %d, e=%.6f, dm=%.4f, dV=%.4f, df=%g.\n',i1,e,tol_m(1),tol_m(2),df)
end
break
end
end
end % end of initial rounds of parallel EP
if isfinite(e) && ~convergence
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% if no convergence with the parallel EP
% start double-loop iterations
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
up_mode=gp.latent_opt.up_mode; % update mode in double-loop iterations
%up_mode='ep'; % choose the moment matching
%up_mode='grad'; % choose the gradients
df_lim=gp.latent_opt.df_lim; % step size limit (1 suitable for ep updates)
tol_e=inf; % the energy difference for measuring convergence (tol_e < tolStop)
ninner=0; % counter for the inner loop iterations
df=df0; % initial step size (damping factor)
% the initial gradient in the search direction
g = sum( (mf -m_r).*dnu_q ) +0.5*sum( (V_r +m_r.^2 -Vf -mf.^2).*dtau_q );
sdir_reset=false;
rec_sadj=[0 e g]; % record for step size adjustment
for i1=1:maxiter
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% calculate a new proposal state
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Limit the step size separately for each site so that the cavity variances
% do not exceed the upper limit (this will change the search direction)
% this should not happen after step size adjustment
ii1=tau_s-eta.*(tau_q+df*dtau_q)<tauc_min;
if any(ii1)
%ii1=dtau_q>0; df1=min( ( (tau_s(ii1)-tauc_min(ii1))./eta(ii1)-tau_q(ii1) )./dtau_q(ii1)/df ,1);
df1=( (tau_s(ii1)-tauc_min(ii1))./eta(ii1) -tau_q(ii1) )./dtau_q(ii1)/df;
dnu_q(ii1)=dnu_q(ii1).*df1;
dtau_q(ii1)=dtau_q(ii1).*df1;
% the initial gradient in the search direction
g = sum( (mf -m_r).*dnu_q ) +0.5*sum( (V_r +m_r.^2 -Vf -mf.^2).*dtau_q );
% re-init the step size adjustment record
rec_sadj=[0 e g];
end
% proposal
nu_q2=nu_q+df*dnu_q;
tau_q2=tau_q+df*dtau_q;
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% energy for the proposal state
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% update the q-distribution
% [mf2,Sf2,lnZ_q2,L1,L2]=evaluate_q(nu_q2,tau_q2,K,display,K_uu, K_fu, Kv_ff, Qv_ff);
switch gp.type
case 'FULL'
[mf2,Sf2,lnZ_q2,L1,L2]=evaluate_q(nu_q2,tau_q2,K,display);
Vf2 = diag(Sf2);
case 'FIC'
[mf2,Vf2,lnZ_q2,L1,L2]=evaluate_q2(nu_q2,tau_q2,Luu, K_fu, Kv_ff, Qv_ff, display);
otherwise
error('Robust-EP not implemented for this type of GP!');
end
% check cavity
pcavity=all( (1./Vf2-eta.*tau_q2 )>=tauc_min);
g2=NaN;
if isempty(L2)
% the q-distribution not defined (the posterior covariance
% not positive definite)
e2=inf;
elseif pcavity
% the tilted distribution
[lnZ_r2,lnZ_i2,m_r2,V_r2]=evaluate_r(nu_q2,tau_q2,eta,fh_tm,nu_s,tau_s,display);
% the new energy
e2 = lnZ_q2 + lnZ_r2 -lnZ_s;
% gradients in the search direction
g2 = sum( (mf2 -m_r2).*dnu_q ) +0.5*sum( (V_r2 +m_r2.^2 -Vf2 -mf2.^2).*dtau_q );
if ismember(display,{'iter'})
% ratio of the gradients
fprintf('dg=%6.3f, ',min(abs(g2)/abs(g),99))
end
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% check if the energy decreases
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if ~isfinite(e2) || ( pcavity && g2>10*abs(g) )
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% ill-conditioned q-distribution or very large increase
% in the gradient
% => half the step size
df=df*0.5;
if ismember(display,{'iter'})
fprintf('decreasing step size, ')
end
elseif ~pcavity && ~pvis
% The cavity distributions resulting from the proposal distribution
% are not well defined, reset the site parameters by doing
% one parallel update with a zero initialization and continue
% with double loop iterations
if ismember(display,{'iter'})
fprintf('re-init the posterior due to ill-conditioned cavity distributions, ')
end
% Do resetting only once
pvis=1;
up_mode='ep';
nu_q=zeros(size(y));tau_q=zeros(size(y));
mf=zeros(size(y));
switch gp.type
case 'FULL'
Sf=K;Vf=diag(K);
case 'FIC'
Vf=Cv_ff;
end
nu_s=mf./Vf;
tau_s=1./Vf;
% lnZ_s=0.5*sum( (-log(tau_s) +nu_s.^2 ./tau_s)./eta ); % minus 0.5*log(2*pi)./eta
[lnZ_r,lnZ_i,m_r,V_r]=evaluate_r(nu_q,tau_q,eta,fh_tm,nu_s,tau_s,display);
% e = lnZ_q + lnZ_r -lnZ_s;
[dnu_q,dtau_q]=ep_update_dir(mf,Vf,m_r,V_r,eta,up_mode,tolUpdate);
%nu_q=dnu_q; tau_q=dtau_q;
nu_q=0.9.*dnu_q; tau_q=0.9.*dtau_q;
switch gp.type
case 'FULL'
[mf,Sf,lnZ_q]=evaluate_q(nu_q,tau_q,K,display);
Vf = diag(Sf);
case 'FIC'
[mf,Vf,lnZ_q]=evaluate_q2(nu_q,tau_q,Luu, K_fu, Kv_ff, Qv_ff, display);
otherwise
error('Robust-EP not implemented for this type of GP!');
end
nu_s=mf./Vf; tau_s=1./Vf;
lnZ_s=0.5*sum( (-log(tau_s) +nu_s.^2 ./tau_s)./eta ); % minus 0.5*log(2*pi)./eta
[lnZ_r,lnZ_i,m_r,V_r]=evaluate_r(nu_q,tau_q,eta,fh_tm,nu_s,tau_s,display);
e = lnZ_q + lnZ_r -lnZ_s;
[dnu_q,dtau_q]=ep_update_dir(mf,Vf,m_r,V_r,eta,up_mode,tolUpdate);
df=0.8;
g = sum( (mf -m_r).*dnu_q ) +0.5*sum( (V_r +m_r.^2 -Vf -mf.^2).*dtau_q );
rec_sadj=[0 e g];
elseif size(rec_sadj,1)<=1 && ( e2>e || abs(g2)>abs(g)*tolGrad )
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% no decrease in energy or the new gradient exceeds the
% pre-defined limit
% => adjust the step size
if ismember(display,{'iter'})
fprintf('adjusting step size, ')
end
% update the record for step size adjustment
ii1=find(df>rec_sadj(:,1),1,'last');
ii2=find(df<rec_sadj(:,1),1,'first');
rec_sadj=[rec_sadj(1:ii1,:); df e2 g2; rec_sadj(ii2:end,:)];
df_new=0;
if size(rec_sadj,1)>1
if exist('csape','file')==2
if g2>0
% adjust the step size with spline interpolation
pp=csape(rec_sadj(:,1)',[rec_sadj(1,3) rec_sadj(:,2)' rec_sadj(end,3)],[1 1]);
[tmp,df_new]=fnmin(pp,[0 df]);
elseif isfinite(g2)
% extrapolate with Hessian end-conditions
H=(rec_sadj(end,3)-rec_sadj(end-1,3))/(rec_sadj(end,1)-rec_sadj(end-1,1));
pp=csape(rec_sadj(:,1)',[rec_sadj(1,3) rec_sadj(:,2)' H],[1 2]);
% extrapolate at most by 50% at a time
[tmp,df_new]=fnmin(pp,[df df*1.5]);
end
else
% if curvefit toolbox does not exist, use a simple Hessian
% approximation
[tmp,ind]=sort(rec_sadj(:,2),'ascend');
ind=ind(1:2);
H=(rec_sadj(ind(1),3)-rec_sadj(ind(2),3))/(rec_sadj(ind(1),1)-rec_sadj(ind(2),1));
df_new=rec_sadj(ind(1),1) -rec_sadj(ind(1),3)/H;
if g2>0
% interpolate
df_new=max(min(df_new,df),0);
else
% extrapolate at most by 50%
df_new=max(min(df_new,1.5*df),df);
end
end
df_new=min(df_new,df_lim);
end
if df_new==0
% the spline approximation fails or there is no record of the previous gradients
if g2>0
df=df*0.9; % too long step since the gradient is positive
else
df=df*1.1; % too short step since the gradient is negative
end
else
df=df_new;
end
% prevent too small cavity-variances after the step-size adjustment
ii1=dtau_q>0;
if any(ii1)
df_max=min( ( (tau_s(ii1)-tauc_min(ii1)-1e-8)./eta(ii1) -tau_q(ii1) )./dtau_q(ii1) );
df=min(df,df_max);
end
elseif e2>e+tolInner || (abs(g2)>abs(g)*tolGrad && strcmp(up_mode,'ep'))
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% No decrease in energy despite the step size adjustments.
% In some difficult cases the EP search direction may not
% result in decrease of the energy or the gradient
% despite the step size adjustment. One reason for this
% may be the parallel EP search direction
% => try the negative gradient as the search direction
%
% or if the problem persists
% => try resetting the search direction
if abs(g2)>abs(g)*tolGrad && strcmp(up_mode,'ep')
% try switching to gradient based updates
up_mode='grad';
df_lim=1e3;
df=0.1;
if ismember(display,{'iter'})
fprintf('switch to gradient updates, ')
end
elseif ~sdir_reset
if ismember(display,{'iter'})
fprintf('reset the search direction, ')
end
sdir_reset=true;
elseif g2<0 && abs(g2)<abs(g) && e2>e
if ismember(display,{'final','iter'})
fprintf('Unable to continue: gradients of the inner-loop objective are inconsistent\n')
end
break;
else
df=df*0.1;
end
% the new search direction
[dnu_q,dtau_q]=ep_update_dir(mf,Vf,m_r,V_r,eta,up_mode,tolUpdate);
% the initial gradient in the search direction
g = sum( (mf -m_r).*dnu_q ) +0.5*sum( (V_r +m_r.^2 -Vf -mf.^2).*dtau_q );
% re-init the step size adjustment record
rec_sadj=[0 e g];
else
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% decrease of energy => accept the new state
dInner=abs(e-e2); % the inner loop energy change
% accept the new site parameters (nu_q,tau_q)
[mf,Vf,Sf,nu_q,tau_q,lnZ_q]=deal(mf2,Vf2,Sf2,nu_q2,tau_q2,lnZ_q2);
% accept also the new tilted distributions
[lnZ_r,lnZ_i,m_r,V_r,e]=deal(lnZ_r2,lnZ_i2,m_r2,V_r2,e2);
% check that the new cavity variances are positive and not too large
tau_s2=1./Vf;
pcavity=all( (tau_s2-eta.*tau_q )>=tauc_min);
supdate=false;
if pcavity && (dInner<tolInner || ninner>=max_ninner)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% try to update the surrogate distribution on the condition that
% - the cavity variances are positive and not too large
% - the new tilted moments are proper
% - sufficient tolerance or the maximum number of inner
% loop updates is exceeded
% update the surrogate distribution
nu_s2=mf.*tau_s2;
lnZ_s2=0.5*sum( (-log(tau_s2) +nu_s2.^2 ./tau_s2)./eta );
% update the tilted distribution
[lnZ_r2,lnZ_i2,m_r2,V_r2]=evaluate_r(nu_q,tau_q,eta,fh_tm,nu_s2,tau_s2,display);
% evaluate the new energy
e2 = lnZ_q + lnZ_r2 -lnZ_s2;
if isfinite(e2)
% a successful surrogate update
supdate=true;
ninner=0; % reset the inner loop iteration counter
% update the convergence criteria
tol_e=abs(e2-e);
% accept the new state
[lnZ_r,lnZ_i,m_r,V_r,lnZ_s,nu_s,tau_s,e]=deal(lnZ_r2,lnZ_i2,m_r2,V_r2,lnZ_s2,nu_s2,tau_s2,e2);
if ismember(display,{'iter'})
fprintf('surrogate update, ')
end
else
% Improper tilted moments even though the cavity variances are
% positive. This is an indication of numerically unstable
% tilted moment integrations but fractional updates usually help
% => try switching to fractional updates
pcavity=false;
if ismember(display,{'iter'})
fprintf('surrogate update failed, ')
end
end
end
if all(eta==eta1) && ~pcavity && (dInner<tolInner || ninner>=max_ninner)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% If the inner loop moments (within tolerance) are matched
% but the new cavity variances are negative or the tilted moment
% integrations fail after the surrogate update
% => switch to fractional EP.
%
% This is a rare situation and most likely the
% hyperparameters are such that the approximating family
% is not flexible enough, i.e., the hyperparameters are
% unsuitable for the data.
%
% One can also try to reduce the lower limit for the
% cavity precisions tauc_min=1./(Vc_lim*diag(K)), i.e.
% increase the maximum cavity variance Vc_lim.
% try switching to fractional updates
eta=repmat(eta2,n,1);
% correct the surrogate normalization accordingly
% the surrogate distribution is not updated
lnZ_s2=0.5*sum( (-log(tau_s) +nu_s.^2 ./tau_s)./eta );
% update the tilted distribution
[lnZ_r2,lnZ_i2,m_r2,V_r2]=evaluate_r(nu_q,tau_q,eta,fh_tm,nu_s,tau_s,display);
% evaluate the new energy
e2 = lnZ_q + lnZ_r2 -lnZ_s2;
if isfinite(e2)
% successful switch to fractional energy
supdate=true;
pcavity=true;
ninner=0; % reset the inner loop iteration counter
% accept the new state
[lnZ_r,lnZ_i,m_r,V_r,lnZ_s,e]=deal(lnZ_r2,lnZ_i2,m_r2,V_r2,lnZ_s2,e2);
% start with ep search direction
up_mode='ep';
df_lim=0.9;
df=0.1;
if ismember(display,{'iter'})
fprintf('switching to fractional EP, ')
end
else
% Improper tilted moments even with fractional updates
% This is very unlikely to happen because decreasing the
% fraction parameter (eta2<eta1) stabilizes the
% tilted moment integrations
% revert back to the previous fraction parameter
eta=repmat(eta1,n,1);
if ismember(display,{'final','iter'})
fprintf('Unable to switch to the fractional EP, check that eta2<eta1\n')
end
break;
end
end
if all(eta==eta2) && ~pcavity && (dInner<tolInner || ninner>=10)
% Surrogate updates do not result into positive cavity variances
% even with fractional updates with eta2 => terminate iterations
if ismember(display,{'final','iter'})
fprintf('surrogate update failed with fractional updates, try decreasing eta2\n')
end
break
end
if ~supdate
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% no successful surrogate update, no sufficient tolerance,
% or the maximum number of inner loop updates is not yet exceeded
% => continue with the same surrogate distribution
ninner=ninner+1; % increase inner loop iteration counter
if ismember(display,{'iter'})
fprintf('inner-loop update, ')
end
end
% the new search direction
[dnu_q,dtau_q]=ep_update_dir(mf,Vf,m_r,V_r,eta,up_mode,tolUpdate);
% the initial gradient in the search direction
g = sum( (mf -m_r).*dnu_q ) +0.5*sum( (V_r +m_r.^2 -Vf -mf.^2).*dtau_q );
% re-init step size adjustment record
rec_sadj=[0 e g];
end
if ismember(display,{'iter'})
% maximum difference of the marginal moments
tol_m=[max(abs(mf-m_r)) max(abs(Vf-V_r))];
fprintf('%d, e=%.6f, dm=%.4f, dV=%.4f, df=%6f, eta=%.2f\n',i1,e,tol_m(1),tol_m(2),df,eta(1))
end
%%%%%%%%%%%%%%%%%%%%%%%
% check for convergence
convergence = tol_e<=tolStop;
if convergence
if ismember(display,{'final','iter'})
% maximum difference of the marginal moments
tol_m=[max(abs(mf-m_r)) max(abs(Vf-V_r))];
fprintf('Convergence, iter %d, e=%.6f, dm=%.4f, dV=%.4f, df=%6f, eta=%.2f\n',i1,e,tol_m(1),tol_m(2),df,eta(1))
end
break
end
end % end of the double-loop updates
end
% the current energy is not finite or no convergence
if ~isfinite(e)
fprintf('GPEP_E: Initial energy not defined, check the hyperparameters\n')
elseif ~convergence
fprintf('GPEP_E: No convergence, %d iter, e=%.6f, dm=%.4f, dV=%.4f, df=%6f, eta=%.2f\n',i1,e,tol_m(1),tol_m(2),df,eta(1))
fprintf('GPEP_E: Check the hyperparameters, increase maxiter and/or max_ninner, or decrease tolInner\n')
end
edata=-e; % the data contribution to the marginal posterior density
% =====================================================================================
% Evaluate the prior contribution to the error from covariance functions and likelihood
% =====================================================================================
% Evaluate the prior contribution to the error from covariance functions
eprior = 0;
for i=1:ncf
gpcf = gp.cf{i};
eprior = eprior - gpcf.fh.lp(gpcf);
% eprior = eprior - feval(gpcf.fh.lp, gpcf, x, y);
end
% Evaluate the prior contribution to the error from likelihood functions
if isfield(gp, 'lik') && isfield(gp.lik, 'p')
likelih = gp.lik;
eprior = eprior - likelih.fh.lp(likelih);
end
% the total energy
e = edata + eprior;
sigm2vec_i = 1./(tau_s-eta.*tau_q); % vector of cavity variances
muvec_i = (nu_s-eta.*nu_q).*sigm2vec_i; % vector of cavity means
logZ_i = lnZ_i; % vector of tilted normalization factors
% check that the posterior covariance is positive definite and
% calculate its Cholesky decomposition
switch gp.type
case 'FULL'
[L, notpositivedefinite] = chol(Sf);
b = [];
La2 = [];
if notpositivedefinite || ~isfinite(e)
[e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite();
return
end
case 'FIC'
La2 = Luu;
L = L2;
b = Kv_ff - Qv_ff;
end
nutilde = nu_q;
tautilde = tau_q;
% store values to the cache
ch.w = w;
ch.e = e;
ch.edata = edata;
ch.eprior = eprior;
ch.L = L;
ch.nu_q = nu_q;
ch.tau_q = tau_q;
ch.La2 = La2;
ch.b = b;
ch.eta = eta;
ch.logZ_i = logZ_i;
ch.sigm2vec_i = sigm2vec_i;
ch.muvec_i = muvec_i;
ch.datahash = datahash;
end
otherwise
error('Unknown optim method!');
end
end
function [e, edata, eprior, tautilde, nutilde, L, La2, b, muvec_i, sigm2vec_i, logZ_i, eta, ch] = set_output_for_notpositivedefinite()
% Instead of stopping at a chol error, return NaN values
e = NaN;
edata = NaN;
eprior = NaN;
tautilde = NaN;
nutilde = NaN;
L = NaN;
La2 = NaN;
b = NaN;
muvec_i = NaN;
sigm2vec_i = NaN;
logZ_i = NaN;
datahash = NaN;
eta = NaN;
ch.e = e;
ch.edata = edata;
ch.eprior = eprior;
ch.tautilde = tautilde;
ch.nutilde = nutilde;
ch.L = L;
ch.La2 = La2;
ch.b = b;
ch.muvec_i = muvec_i;
ch.sigm2vec_i = sigm2vec_i;
ch.logZ_i = logZ_i;
ch.eta = eta;
ch.datahash=datahash;
ch.w = NaN;
end
end
function [m_q,S_q,lnZ_q,L1,L2]=evaluate_q(nu_q,tau_q,K,display)
% function for determining the parameters of the q-distribution
% when site variances tau_q may be negative
%
% q(f) = N(f|0,K)*exp( -0.5*f'*diag(tau_q)*f + nu_q'*f )/Z_q = N(f|m_q,S_q)
%
% S_q = inv(inv(K)+diag(tau_q))
% m_q = S_q*nu_q;
%
% det(eye(n)+K*diag(tau_q)) = det(L1)^2 * det(L2)^2
% where L1 and L2 are upper triangular
%
% see Expectation consistent approximate inference (Opper & Winther, 2005)
n=length(nu_q);
ii1=find(tau_q>0); n1=length(ii1); W1=sqrt(tau_q(ii1));
ii2=find(tau_q<0); n2=length(ii2); W2=sqrt(abs(tau_q(ii2)));
L=zeros(n);
S_q=K;
if ~isempty(ii1)
% Cholesky decomposition for the positive sites
L1=(W1*W1').*K(ii1,ii1);
L1(1:n1+1:end)=L1(1:n1+1:end)+1;
L1=chol(L1);
L(:,ii1) = bsxfun(@times,K(:,ii1),W1')/L1;
S_q=S_q-L(:,ii1)*L(:,ii1)';
else
L1=1;
end
if ~isempty(ii2)
% Cholesky decomposition for the negative sites
V=bsxfun(@times,K(ii2,ii1),W1')/L1;
L2=(W2*W2').*(V*V'-K(ii2,ii2));
L2(1:n2+1:end)=L2(1:n2+1:end)+1;
[L2,pd]=chol(L2);
if pd==0
L(:,ii2)=bsxfun(@times,K(:,ii2),W2')/L2 -L(:,ii1)*(bsxfun(@times,V,W2)'/L2);
S_q=S_q+L(:,ii2)*L(:,ii2)';
else
L2=[];
if ismember(display,{'iter'})
fprintf('Negative definite q-distribution.\n')
end
end
else
L2=1;
end
%V_q=diag(S_q);
m_q=S_q*nu_q;
% log normalization
lnZ_q = -sum(log(diag(L1))) -sum(log(diag(L2))) +0.5*sum(m_q.*nu_q);
end
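% Example usage sketch for evaluate_q (toy values are hypothetical):
% K_toy = [1 0.5; 0.5 1]; nu_toy = [0.1; -0.2]; tau_toy = [2; -0.1];
% [m_q,S_q] = evaluate_q(nu_toy, tau_toy, K_toy, 'off');
% % S_q should match inv(inv(K_toy)+diag(tau_toy)) even though tau_toy(2)<0,
% % because positive and negative sites are factorized separately above.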
function [m_q,S_q,lnZ_q,L1,L2]=evaluate_q2(nu_q,tau_q,LK_uu, K_fu, Kv_ff, Qv_ff, display)
% function for determining the parameters of the q-distribution
% when site variances tau_q may be negative
%
% q(f) = N(f|0,K)*exp( -0.5*f'*diag(tau_q)*f + nu_q'*f )/Z_q = N(f|m_q,S_q)
%
% S_q = inv(inv(K)+diag(tau_q)) where K is sparse approximation for prior
% covariance
% m_q = S_q*nu_q;
%
% det(eye(n)+K*diag(tau_q)) = det(L1)^2 * det(L2)^2
% where L1 and L2 are upper triangular
%
% see Expectation consistent approximate inference (Opper & Winther, 2005)
n=length(nu_q);
S_q = Kv_ff;
m_q = nu_q;
D = Kv_ff - Qv_ff;
L1 = sqrt(1 + D.*tau_q);
L = [];
if any(~isreal(L1))
if ismember(display,{'iter'})
fprintf('Negative definite q-distribution.\n')
end
else
U = K_fu;
WDtilde = tau_q./(1+tau_q.*D);
% Evaluate diagonal of S_q
ii1=find(WDtilde>0); n1=length(ii1); W1=sqrt(WDtilde(ii1)); % WS^-1
ii2=find(WDtilde<0); n2=length(ii2); W2=sqrt(abs(WDtilde(ii2))); % WS^-1
if ~isempty(ii2) || ~isempty(ii1)
if ~isempty(ii1)
UWS(:,ii1) = bsxfun(@times, U(ii1,:)', W1');
end
if ~isempty(ii2)
UWS(:,ii2) = bsxfun(@times, U(ii2,:)', W2');
end
[L, p] = chol(LK_uu*LK_uu' + UWS(:,ii1)*UWS(:,ii1)' - UWS(:,ii2)*UWS(:,ii2)', 'lower');
if p~=0
L=[];
if ismember(display,{'iter'})
fprintf('Negative definite q-distribution.\n')
end
else
S = 1 + D.*tau_q;
% S_q = diag(D./S) + diag(1./S)*U*inv(L*L')*U'*diag(1./S);
S_q = D./S + sum((bsxfun(@times, 1./S, U)/L').^2,2);
m_q = D.*nu_q./S + (U*(L'\(L\(U'*(nu_q./S)))))./S;
end
end
end
% log normalization
L2 = L;
lnZ_q = -0.5*sum(log(L1.^2)) - sum(log(diag(L))) + sum(log(diag(LK_uu))) +0.5*sum(m_q.*nu_q);
end
function [lnZ_r,lnZ_i,m_r,V_r,p]=evaluate_r(nu_q,tau_q,eta,fh_tm,nu_s,tau_s,display)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% function for determining the parameters of the r-distribution
% (the product of the tilted distributions)
%
% r(f) = exp(-lnZ_r) * prod_i p(y(i)|f(i)) * exp( -0.5*f(i)^2 tau_r(i) + nu_r(i)*f(i) )
% ~ prod_i N(f(i)|m_r(i),V_r(i))
%
% tau_r = tau_s - tau_q
% nu_r = nu_s - nu_q
%
% lnZ_i(i) = log int p(y(i)|f(i)) * N(f(i)|nu_r(i)/tau_r(i),1/tau_r(i)) df(i)
%
% see Expectation consistent approximate inference (Opper & Winther, 2005)
n=length(nu_q);
[lnZ_i,m_r,V_r,nu_r,tau_r]=deal(zeros(n,1));
p=false(n,1);
for si=1:n
% cavity distribution
tau_r_si=tau_s(si)-eta(si)*tau_q(si);
if tau_r_si<=0
% if ismember(display,{'iter'})
% %fprintf('Negative cavity precision at site %d\n',si)
% end
continue
end
nu_r_si=nu_s(si)-eta(si)*nu_q(si);
% tilted moments
[lnZ_si,m_r_si,V_r_si] = fh_tm(si, nu_r_si/tau_r_si, 1/tau_r_si, eta(si));
if ~isfinite(lnZ_si) || V_r_si<=0
% if ismember(display,{'iter'})
% fprintf('Improper normalization or tilted variance at site %d\n',si)
% end
continue
end
% store the new parameters
[nu_r(si),tau_r(si),lnZ_i(si),m_r(si),V_r(si)]=deal(nu_r_si,tau_r_si,lnZ_si,m_r_si,V_r_si);
p(si)=true;
end
lnZ_r=sum(lnZ_i./eta) +0.5*sum((-log(tau_r) +nu_r.^2 ./tau_r)./eta);
end
function [dnu_q,dtau_q]=ep_update_dir(m_q,V_q,m_r,V_r,eta,up_mode,tolUpdate)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% update direction for double-loop EP
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% V_q=diag(S_q);
switch up_mode
case 'ep'
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% site updates by moment matching
[dnu_q,dtau_q]=deal(zeros(size(m_q)));
%ind_up=V_r>0 & max(abs(V_r-V_q),abs(m_r-m_q))>tolUpdate;
ind_up=V_r>0 & (abs(V_r-V_q) > tolUpdate*abs(V_q) | abs(m_r-m_q) > tolUpdate*abs(m_q));
dnu_q(ind_up) = ( m_r(ind_up)./V_r(ind_up) - m_q(ind_up)./V_q(ind_up) ) ./ eta(ind_up);
dtau_q(ind_up) = ( 1./V_r(ind_up) - 1./V_q(ind_up) )./ eta(ind_up);
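% Moment matching written out: the undamped parallel-EP update would set
%   tau_q <- tau_q + (1./V_r - 1./V_q)./eta,
%   nu_q  <- nu_q  + (m_r./V_r - m_q./V_q)./eta,
% so (dnu_q,dtau_q) above is the full step and the caller applies the
% damping factor df on top of it.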
case 'grad'
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% gradient descend
% Not used at the moment!
% evaluate the gradients wrt nu_q and tau_q
gnu_q = m_q - m_r;
gtau_q = 0.5*(V_r + m_r.^2 - V_q - m_q.^2);
% the search direction
dnu_q=-gnu_q;
dtau_q=-gtau_q;
end
end
| github | lcnhappe/happe-master | lik_gaussian.m | .m | happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/lik_gaussian.m | 11092 | utf_8 | 3a8dfb3827eaef0a74a1d280cc4cbf20 |
function lik = lik_gaussian(varargin)
%LIK_GAUSSIAN Create a Gaussian likelihood structure
%
% Description
% LIK = LIK_GAUSSIAN('PARAM1',VALUE1,'PARAM2,VALUE2,...)
% creates a Gaussian likelihood structure in which the named
% parameters have the specified values. Any unspecified
% parameters are set to default values.
%
% LIK = LIK_GAUSSIAN(LIK,'PARAM1',VALUE1,'PARAM2,VALUE2,...)
% modifies a likelihood function structure with the named
% parameters altered with the specified values.
%
% Parameters for Gaussian likelihood function [default]
% sigma2 - variance [0.1]
% sigma2_prior - prior for sigma2 [prior_logunif]
% n - number of observations per input (See using average
% observations below)
%
% Note! If the prior is 'prior_fixed' then the parameter in
% question is considered fixed and it is not handled in
% optimization, grid integration, MCMC etc.
%
% Using average observations
% The lik_gaussian can be used to model data where each input vector is
% attached to an average of a varying number of observations. That is, we
% have input vectors x_i, average observations y_i and sample sizes n_i.
% Each observation is distributed
%
% y_i ~ N(f(x_i), sigma2/n_i)
%
% The model is constructed as lik_gaussian('n', n), where n is the same
% length as y and collects the sample sizes.
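% For example (a hypothetical setup), if each y_i is the mean of n_i
% replicate measurements:
%   n = [5 5 10 2]';
%   lik = lik_gaussian('sigma2', 0.2, 'n', n);
% gives per-input observation variances sigma2./n = [0.04 0.04 0.02 0.1]'.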
%
% See also
% GP_SET, PRIOR_*, LIK_*
% Internal note: Because Gaussian noise can be combined
% analytically into the covariance matrix, lik_gaussian internally
% sits somewhere between the lik_* and gpcf_* functions.
% Copyright (c) 2007-2010 Jarno Vanhatalo
% Copyright (c) 2010 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'LIK_GAUSSIAN';
ip.addOptional('lik', [], @(x) isstruct(x) || isempty(x));
ip.addParamValue('sigma2',0.1, @(x) isscalar(x) && x>0);
ip.addParamValue('sigma2_prior',prior_logunif(), @(x) isstruct(x) || isempty(x));
ip.addParamValue('n',[], @(x) isreal(x) && all(x>0));
ip.parse(varargin{:});
lik=ip.Results.lik;
if isempty(lik)
init=true;
lik.type = 'Gaussian';
else
if ~isfield(lik,'type') || ~isequal(lik.type,'Gaussian')
error('First argument does not seem to be a valid likelihood function structure')
end
init=false;
end
% Initialize parameters
if init || ~ismember('sigma2',ip.UsingDefaults)
lik.sigma2 = ip.Results.sigma2;
end
if init || ~ismember('n',ip.UsingDefaults)
lik.n = ip.Results.n;
end
% Initialize prior structure
if init
lik.p=[];
end
if init || ~ismember('sigma2_prior',ip.UsingDefaults)
lik.p.sigma2=ip.Results.sigma2_prior;
end
if init
% Set the function handles to the subfunctions
lik.fh.pak = @lik_gaussian_pak;
lik.fh.unpak = @lik_gaussian_unpak;
lik.fh.lp = @lik_gaussian_lp;
lik.fh.lpg = @lik_gaussian_lpg;
lik.fh.cfg = @lik_gaussian_cfg;
lik.fh.trcov = @lik_gaussian_trcov;
lik.fh.trvar = @lik_gaussian_trvar;
lik.fh.recappend = @lik_gaussian_recappend;
end
end
function [w s] = lik_gaussian_pak(lik)
%LIK_GAUSSIAN_PAK Combine likelihood parameters into one vector.
%
% Description
% W = LIK_GAUSSIAN_PAK(LIK) takes a likelihood structure LIK
% and combines the parameters into a single row vector W.
% This is a mandatory subfunction used for example in energy
% and gradient computations.
%
% w = [ log(lik.sigma2)
% (hyperparameters of lik.sigma2)]'
%
% See also
% LIK_GAUSSIAN_UNPAK
w = []; s = {};
if ~isempty(lik.p.sigma2)
w = [w log(lik.sigma2)];
s = [s; 'log(gaussian.sigma2)'];
% Hyperparameters of sigma2
[wh sh] = lik.p.sigma2.fh.pak(lik.p.sigma2);
w = [w wh];
s = [s; sh];
end
end
function [lik, w] = lik_gaussian_unpak(lik, w)
%LIK_GAUSSIAN_UNPAK Extract likelihood parameters from the vector.
%
% Description
% [LIK, W] = LIK_GAUSSIAN_UNPAK(LIK, W) takes a likelihood structure
% LIK and extracts the parameters from the vector W to the LIK
% structure. This is a mandatory subfunction used for example
% in energy and gradient computations.
%
% Assignment is inverse of
% w = [ log(lik.sigma2)
% (hyperparameters of lik.sigma2)]'
%
% See also
% LIK_GAUSSIAN_PAK
if ~isempty(lik.p.sigma2)
lik.sigma2 = exp(w(1));
w = w(2:end);
% Hyperparameters of sigma2
[p, w] = lik.p.sigma2.fh.unpak(lik.p.sigma2, w);
lik.p.sigma2 = p;
end
end
function lp = lik_gaussian_lp(lik)
%LIK_GAUSSIAN_LP Evaluate the log prior of likelihood parameters
%
% Description
% LP = LIK_GAUSSIAN_LP(LIK) takes a likelihood structure LIK and
% returns log(p(th)), where th collects the parameters.
% This subfunction is needed when there are likelihood
% parameters.
%
% See also
% LIK_GAUSSIAN_PAK, LIK_GAUSSIAN_UNPAK, LIK_GAUSSIAN_G, GP_E
lp = 0;
if ~isempty(lik.p.sigma2)
likp=lik.p;
lp = likp.sigma2.fh.lp(lik.sigma2, likp.sigma2) + log(lik.sigma2);
end
end
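% Note on the "+ log(lik.sigma2)" term above: optimization and sampling are
% done in the transformed parameter w = log(sigma2), so the prior density
% picks up the Jacobian |d sigma2/d w| = sigma2, i.e.
% log p(w) = log p(sigma2) + log(sigma2).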
function lpg = lik_gaussian_lpg(lik)
%LIK_GAUSSIAN_LPG Evaluate gradient of the log prior with respect
% to the parameters.
%
% Description
% LPG = LIK_GAUSSIAN_LPG(LIK) takes a Gaussian likelihood
% function structure LIK and returns LPG = d log (p(th))/dth,
% where th is the vector of parameters. This subfunction is
% needed when there are likelihood parameters.
%
% See also
% LIK_GAUSSIAN_PAK, LIK_GAUSSIAN_UNPAK, LIK_GAUSSIAN_E, GP_G
lpg = [];
if ~isempty(lik.p.sigma2)
likp=lik.p;
lpgs = likp.sigma2.fh.lpg(lik.sigma2, likp.sigma2);
lpg = lpgs(1).*lik.sigma2 + 1;
if length(lpgs) > 1
lpg = [lpg lpgs(2:end)];
end
end
end
function DKff = lik_gaussian_cfg(lik, x, x2)
%LIK_GAUSSIAN_CFG Evaluate gradient of covariance with respect to
% Gaussian noise
%
% Description
% Gaussian likelihood is a special case since it can be
% analytically combined with covariance functions and thus we
% compute gradient of covariance instead of gradient of likelihood.
%
% DKff = LIK_GAUSSIAN_CFG(LIK, X) takes a Gaussian likelihood
% function structure LIK, a matrix X of input vectors and
% returns DKff, the gradients of Gaussian noise covariance
% matrix Kff = k(X,X) with respect to th (cell array with
% matrix elements). This subfunction is needed only in Gaussian
% likelihood.
%
% DKff = LIK_GAUSSIAN_CFG(LIK, X, X2) takes a Gaussian
% likelihood function structure LIK, matrices X and X2 of input
% vectors and returns DKff, the gradients of the Gaussian noise
% covariance matrix Kff = k(X,X2) with respect to th (cell
% array with matrix elements). This subfunction is needed
% only in Gaussian likelihood.
%
% See also
% LIK_GAUSSIAN_PAK, LIK_GAUSSIAN_UNPAK, LIK_GAUSSIAN_E, GP_G
DKff = {};
if ~isempty(lik.p.sigma2)
if isempty(lik.n)
DKff{1}=lik.sigma2;
else
n=size(x,1);
DKff{1} = sparse(1:n, 1:n, lik.sigma2./lik.n, n, n);
end
end
end
function DKff = lik_gaussian_ginput(lik, x, t, g_ind, gdata_ind, gprior_ind, varargin)
%LIK_GAUSSIAN_GINPUT Evaluate gradient of likelihood function with
% respect to x.
%
% Description
% DKff = LIK_GAUSSIAN_GINPUT(LIK, X) takes a likelihood
% function structure LIK, a matrix X of input vectors and
% returns DKff, the gradients of likelihood matrix Kff =
% k(X,X) with respect to X (cell array with matrix elements).
% This subfunction is needed only in Gaussian likelihood.
%
% DKff = LIK_GAUSSIAN_GINPUT(LIK, X, X2) takes a likelihood
% function structure LIK, a matrix X of input vectors and
% returns DKff, the gradients of likelihood matrix Kff =
% k(X,X2) with respect to X (cell array with matrix elements).
% This subfunction is needed only in Gaussian likelihood.
%
% See also
% LIK_GAUSSIAN_PAK, LIK_GAUSSIAN_UNPAK, LIK_GAUSSIAN_E, GP_G
end
function C = lik_gaussian_trcov(lik, x)
%LIK_GAUSSIAN_TRCOV Evaluate training covariance matrix
% corresponding to Gaussian noise
%
% Description
% C = LIK_GAUSSIAN_TRCOV(GP, TX) takes in covariance function
% of a Gaussian process GP and matrix TX that contains
% training input vectors. Returns covariance matrix C. Every
% element ij of C contains covariance between inputs i and j
% in TX. This subfunction is needed only in Gaussian likelihood.
%
% See also
% LIK_GAUSSIAN_COV, LIK_GAUSSIAN_TRVAR, GP_COV, GP_TRCOV
[n, m] = size(x);
if isempty(lik.n)
C = sparse(1:n,1:n,ones(n,1).*lik.sigma2,n,n);
else
C = sparse(1:n, 1:n, lik.sigma2./lik.n, n, n);
end
end
function C = lik_gaussian_trvar(lik, x)
%LIK_GAUSSIAN_TRVAR Evaluate training variance vector
% corresponding to Gaussian noise
%
% Description
% C = LIK_GAUSSIAN_TRVAR(LIK, TX) takes in covariance function
% of a Gaussian process LIK and matrix TX that contains
% training inputs. Returns variance vector C. Every element i
% of C contains variance of input i in TX. This subfunction is
% needed only in Gaussian likelihood.
%
%
% See also
% LIK_GAUSSIAN_COV, GP_COV, GP_TRCOV
[n, m] = size(x);
if isempty(lik.n)
C=repmat(lik.sigma2,n,1);
else
C=lik.sigma2./lik.n(:);
end
end
function reclik = lik_gaussian_recappend(reclik, ri, lik)
%RECAPPEND Record append
%
% Description
% RECLIK = LIK_GAUSSIAN_RECAPPEND(RECLIK, RI, LIK) takes a
% likelihood function record structure RECLIK, record index RI
% and likelihood function structure LIK with the current MCMC
% samples of the parameters. Returns RECLIK which contains all
% the old samples and the current samples from LIK. This
% subfunction is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC and GP_MC -> RECAPPEND
if nargin == 2
% Initialize the record
reclik.type = 'lik_gaussian';
% Initialize the parameters
reclik.sigma2 = [];
reclik.n = [];
% Set the function handles
reclik.fh.pak = @lik_gaussian_pak;
reclik.fh.unpak = @lik_gaussian_unpak;
reclik.fh.lp = @lik_gaussian_lp;
reclik.fh.lpg = @lik_gaussian_lpg;
reclik.fh.cfg = @lik_gaussian_cfg;
reclik.fh.trcov = @lik_gaussian_trcov;
reclik.fh.trvar = @lik_gaussian_trvar;
reclik.fh.recappend = @lik_gaussian_recappend;
reclik.p=[];
reclik.p.sigma2=[];
if ~isempty(ri.p.sigma2)
reclik.p.sigma2 = ri.p.sigma2;
end
else
% Append to the record
likp = lik.p;
% record sigma2
reclik.sigma2(ri,:)=lik.sigma2;
if isfield(likp,'sigma2') && ~isempty(likp.sigma2)
reclik.p.sigma2 = likp.sigma2.fh.recappend(reclik.p.sigma2, ri, likp.sigma2);
end
% record n if given
if isfield(lik,'n') && ~isempty(lik.n)
reclik.n(ri,:)=lik.n(:)';
end
end
end
| github | lcnhappe/happe-master | surrogate_sls.m | .m | happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/surrogate_sls.m | 20578 | utf_8 | fc29ee001fe374a168e9aa872d77d472 |
function [samples,samplesf,diagn] = surrogate_sls(f, x, opt, gp, xx, yy, z, varargin)
%SURROGATE_SLS Markov Chain Monte Carlo sampling using Surrogate data Slice Sampling
%
% Description
% SAMPLES = SURROGATE_SLS(F, X, OPTIONS) uses slice sampling to sample
% from the distribution P ~ EXP(-F), where F is the first
% argument to SURROGATE_SLS. The Markov chain starts from point X and
% sampling from a multivariate distribution is implemented by
% sampling each variable at a time either using overrelaxation
% or not. See SLS_OPT for details. A simple multivariate scheme
% using hyperrectangles is utilized when method is defined 'multi'.
%
% SAMPLES = SURROGATE_SLS(F, X, OPTIONS, [], P1, P2, ...) allows additional
% arguments to be passed to F(). The fourth argument is ignored,
% but included for compatibility with HMC and the optimisers.
%
% [SAMPLES, SAMPLESF, DIAGN] = SURROGATE_SLS(F, X, OPTIONS) also returns
% the latent-value samples SAMPLESF and some additional diagnostics DIAGN.
%
% See SSLS_OPT and SLS_OPT for the optional parameters in the OPTIONS structure.
%
% See also
% METROP2, HMC2, SLS_OPT, SLS
% Based on "Slice Sampling" by Radford M. Neal in "The Annals of Statistics"
% 2003, Vol. 31, No. 3, 705-767, (c) Institute of Mathematical Statistics, 2003
% "Slice sampling covariance hyperparameters of latent Gaussian models"
% by Iain Murray and Ryan P. Adams, 2010, Arxiv preprint arXiv:1006.0868
% Copyright (c) Toni Auranen, 2003-2006
% Copyright (c) Ville Tolvanen, 2012
% This software is distributed under the GNU General Public
% Licence (version 3 or later); please refer to the file
% Licence.txt, included with the software, for details.
% Set empty options to default values
opt = ssls_opt(opt);
%if opt.display, disp(opt); end
if opt.display == 1
opt.display = 2; % verbose
elseif opt.display == 2
opt.display = 1; % all
end
% Forces x to be a row vector
x = x(:)';
% Set up some variables
nparams = length(x);
n = size(gp.latentValues,1);
samples = zeros(opt.nsamples,nparams);
samplesf = zeros(n,opt.nsamples);
if nargout >= 2
save_energies = 1;
energies = zeros(opt.nsamples,1);
else
save_energies = 0;
end
if nargout >= 3
save_diagnostics = 1;
else
save_diagnostics = 0;
end
if nparams == 1
multivariate = 0;
if strcmp(opt.method,'multi')
opt.method = 'stepping';
end
end
if nparams > 1
multivariate = 1;
end
rej = 0;
rej_step = 0;
rej_old = 0;
x_0 = x;
f_0 = f;
ncf = length(gp.cf);
umodal = opt.unimodal;
nomit = opt.nomit;
nsamples = opt.nsamples;
display_info = opt.display;
method = opt.method;
overrelaxation = opt.overrelaxation;
overrelaxation_info = ~isempty(find(overrelaxation));
w = opt.wsize;
maxiter = opt.maxiter;
m = opt.mlimit;
p = opt.plimit;
a = opt.alimit;
mmin = opt.mmlimits(1,:);
mmax = opt.mmlimits(2,:);
if isfield(opt, 'scale')
scale = opt.scale;
else
scale = 5;
end
if multivariate
if length(w) == 1
w = w.*ones(1,nparams);
end
if length(m) == 1
m = m.*ones(1,nparams);
end
if length(p) == 1
p = p.*ones(1,nparams);
end
if length(overrelaxation) == 1
overrelaxation = overrelaxation.*ones(1,nparams);
end
if length(a) == 1
a = a.*ones(1,nparams);
end
if length(mmin) == 1
mmin = mmin.*ones(1,nparams);
end
if length(mmax) == 1
mmax = mmax.*ones(1,nparams);
end
end
if overrelaxation_info
nparams_or = length(find(overrelaxation));
end
if ~isempty(find(w<=0))
error('Parameter ''wsize'' must be positive.');
end
if (strcmp(method,'stepping') || strcmp(method,'doubling')) && isempty(find(mmax-mmin>2*w))
error('Check parameter ''mmlimits''. The interval is too small in comparison to parameter ''wsize''.');
end
if strcmp(method,'stepping') && ~isempty(find(m<1))
error('Parameter ''mlimit'' must be >0.');
end
if overrelaxation_info && ~isempty(find(a<1))
error('Parameter ''alimit'' must be >0.');
end
if strcmp(method,'doubling') && ~isempty(find(p<1))
error('Parameter ''plimit'' must be >0.');
end
ind_umodal = 0;
j = 0;
% y_new = -f(x_0,varargin{:});
% S = diag(ones(size(f)));
% The main loop of slice sampling
for i = 1-nomit:1:nsamples
% Threshold
[y, tmp, eta, g] = getY(gp, xx, yy, z, f_0, x_0, []);
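% getY (a helper defined elsewhere in this file) evaluates the target of
% surrogate-data slice sampling (Murray & Adams, 2010): auxiliary "data" g
% for the latents and a whitened representation eta are drawn so that f can
% be reconstructed deterministically from (eta, g) at any proposed
% hyperparameter value; here y is used as the slice threshold, presumably
% including the random slice-height draw made inside getY.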
% fprintf('\n')
% fprintf('Treshold: %g\n',y);
switch method
% Multivariate rectangle sampling step
case 'multi'
x_new = x_0;
L = max(x_0 - w.*rand(1,length(x_0)),mmin);
R = min(L + w,mmax);
x_new = L + rand(1,length(x_new)).*(R-L);
[y_new, f_new] = getY(gp,xx,yy,z,[], x_new, eta, g);
while y >= y_new
% disp(y_new)
L(x_new < x_0) = x_new(x_new < x_0);
R(x_new >= x_0) = x_new(x_new >= x_0);
if sum(abs(L-R))<1e-8
error('BUG DETECTED: Shrunk to minimum position and still not acceptable.');
end
x_new = L + rand(1,length(x_new)).*(R-L);
[y_new, f_new] = getY(gp, xx, yy, z, [], x_new, eta, g);
end % while
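% The loop above is the shrinkage rule of Neal (2003) adapted to a
% hyperrectangle: every rejected proposal pulls the box faces of [L,R]
% towards x_0 coordinate-wise, so the box shrinks around x_0 and the loop
% terminates for a continuous target.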
% Save sampling step and set up the new 'old' sample
x_0 = x_new;
f_0 = f_new;
if i > 0
samples(i,:) = x_new;
latent_opt = esls(opt.latent_opt);
gp = gp_unpak(gp, x_new);
for ii=1:opt.fsamples-1
f_new = esls(f_new, latent_opt, gp, xx, yy, z);
end
samplesf(:,i) = f_new;
f_0 = samplesf(:,end);
end
% Save energies
% if save_energies && i > 0
% energies(i) = -y_new;
% end
% Display energy information
if display_info == 1
fprintf('Finished multi-step %4d Energy: %g\n',i,-y_new);
end
case 'multimm'
x_new = x_0;
% if isinf(y)
% x_new = mmin + (mmax-mmin).*rand(1,length(x_new));
% y_new = -f(x_new,varargin{:});
% else
L = mmin;
R = mmax;
x_new = L + rand(1,length(x_new)).*(R-L);
[y_new, f_new] = getY(gp, xx, yy, z, [], x_new, eta, g);
while y >= y_new
L(x_new < x_0) = x_new(x_new < x_0);
R(x_new >= x_0) = x_new(x_new >= x_0);
x_new = L + rand(1,length(x_new)).*(R-L);
[y_new, f_new] = getY(gp, xx, yy, z, [], x_new, eta, g);
end % while
% end % isinf(y)
% Save sampling step and set up the new 'old' sample
% fprintf('Accepted: %g\n',y_new);
x_0 = x_new;
f_0 = f_new;
if i > 0
samples(i,:) = x_new;
latent_opt = esls(opt.latent_opt);
gp = gp_unpak(gp, x_new);
for ii=1:opt.fsamples-1
f_new = esls(f_new, latent_opt, gp, xx, yy, z);
end
samplesf(:,i) = f_new;
f_0 = samplesf(:,end);
end
% Save energies
% if save_energies && i > 0
% energies(i) = -y_new;
% end
%
% Display energy information
if display_info == 1
fprintf('Finished multimm-step %4d Energy: %g\n',i,-y_new);
end
% Other sampling steps
otherwise
ind_umodal = ind_umodal + 1;
x_new = x_0;
f_new = f_0;
for j = 1:nparams
L = x_new;
R = x_new;
switch method
case 'stepping'
[L, R] = stepping_out(f_new,y,x_new,L,R,w,m,j,mmin,mmax,display_info,umodal,xx,yy,gp,z,eta,g,varargin{:});
case 'doubling'
[L, R] = doubling(f_new,y,x_new,L,R,w,m,j,mmin,mmax,display_info,umodal,xx,yy,gp,z,eta,g,varargin{:});
case 'minmax'
L(j) = mmin(j);
R(j) = mmax(j);
otherwise
error('unknown method');
end % switch
if overrelaxation(j)
[x_new, f_new, rej_step, rej_old, y_new] = bisection(f_new,y,x_new,L,R,w,a,rej_step,j,umodal,xx,yy,gp,z,eta,g);
else
[x_new, f_new] = shrinkage(f_new,y,x_new,w,L,R,method,j,maxiter,umodal,xx,yy,gp,z,eta,g);
end % if overrelaxation
if umodal % adjust the slice if the distribution is known to be unimodal
w(j) = (w(j)*ind_umodal + abs(x_0(j)-x_new(j)))/(ind_umodal+1);
end % if umodal
% end % if isinf(y)
end % j:nparams
if overrelaxation_info && multivariate
rej = rej + rej_step/nparams_or;
elseif overrelaxation_info && ~multivariate
rej = rej + rej_step;
end
% Save sampling step and set up the new 'old' sample
x_0 = x_new;
f_0 = f_new;
if i > 0
samples(i,:) = x_new;
latent_opt = esls(opt.latent_opt);
gp = gp_unpak(gp, x_new);
for ii=1:opt.fsamples-1
f_new = esls(f_new, latent_opt, gp, xx, yy, z);
end
samplesf(:,i) = f_new;
f_0 = f_new;
end
% Save energies
% if save_energies && i > 0
% energies(i) = -y_new;
% end
% Display information and keep track of rejections (overrelaxation)
if display_info == 1
if ~multivariate && overrelaxation_info && rej_old
fprintf(' Sample %4d rejected (overrelaxation).\n',i);
rej_old = 0;
rej_step = 0;
elseif multivariate && overrelaxation_info
fprintf('Finished step %4d (RR: %1.1f, %d/%d) Energy: %g\n',i,100*rej_step/nparams_or,nparams_or,nparams,-y_new);
rej_step = 0;
rej_old = 0;
else
fprintf('Finished step %4d Energy: %g\n',i,-y_new);
end
else
rej_old = 0;
rej_step = 0;
end
end % switch
end % i:nsamples
% Save diagnostics
if save_diagnostics
diagn.opt = opt;
end
% Display rejection information after slice sampling is complete (overrelaxation)
if overrelaxation_info && nparams == 1 && display_info == 1
fprintf('\nRejected samples due to overrelaxation (percentage): %1.1f\n',100*rej/nsamples);
elseif overrelaxation_info && nparams > 1 && display_info == 1
fprintf('\nAverage rejections per step due to overrelaxation (percentage): %1.1f\n',100*rej/nsamples);
end
% Display the elapsed time
%if display_info == 1
% if (cputime-t)/60 < 4
% fprintf('\nElapsed cputime (seconds): %1.1f\n\n',cputime-t);
% else
% fprintf('\nElapsed cputime (minutes): %1.1f\n\n',(cputime-t)/60);
% end
%end
%disp(w);
%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%
function [x_new, f_new, rej, rej_old, y_new] = bisection(f,y,x_0,L,R,w,a,rej,j,um,xx,yy,gp,z,eta,g);
%function [x_new, y_new, rej, rej_old] = bisection(f,y,x_0,L,R,w,a,rej,j,um,varargin);
%
% Bisection for overrelaxation (stepping-out needs to be used)
x_new = x_0;
f_new = f(:,end);
M = (L + R) / 2;
l = L;
r = R;
q = w(j);
s = a(j);
if (R(j) - L(j)) < 1.1*w(j)
while 1
M(j) = (l(j) + r(j))/2;
[y_new, f_new] = getY(gp,xx,yy,z, f_new, M, eta, g);
if s == 0 || y < y_new
break;
end
if x_0(j) > M(j)
l(j) = M(j);
else
r(j) = M(j);
end
s = s - 1;
q = q / 2;
end % while
end % if
ll = l;
rr = r;
while s > 0
s = s - 1;
q = q / 2;
tmp_ll = ll;
tmp_ll(j) = tmp_ll(j) + q;
tmp_rr = rr;
tmp_rr(j) = tmp_rr(j) - q;
[y_new_ll] = getY(gp,xx,yy,z,f_new,tmp_ll, eta, g);
[y_new_rr] = getY(gp,xx,yy,z,f_new,tmp_rr, eta, g);
if y >= y_new_ll
ll(j) = ll(j) + q;
end
if y >= y_new_rr
rr(j) = rr(j) - q;
end
end % while
x_new(j) = ll(j) + rr(j) - x_0(j);
[y_new, f_new] = getY(gp,xx,yy,z,f_new, x_new, eta, g);
% y_new = -f(x_new,varargin{:});
if x_new(j) < l(j) || x_new(j) > r(j) || y >= y_new
x_new(j) = x_0(j);
rej = rej + 1;
rej_old = 1;
else
rej_old = 0;
f(:,end+1) = f_new;
end
%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%
function [x_new, f_new] = shrinkage(f,y,x_0,w,L,R,method,j,maxiter,um,xx,yy,gp,z,eta,g,varargin);
%function [x_new, y_new] = shrinkage(f,y,x_0,w,L,R,method,j,maxiter,um,varargin);
%
% Shrinkage with acceptance-check for doubling scheme
% - acceptance-check is skipped if the distribution is defined
% to be unimodal by the user
iter = 0;
x_new = x_0;
l = L(j);
r = R(j);
f_new = f(:,end);
% [y, tmp, eta, g] = getY(gp, xx, yy, z, f_new, x_0, []);
while 1
x_new(j) = l + (r-l).*rand;
[y_new, f_new] = getY(gp, xx, yy, z, [], x_new, eta, g);
if strcmp(method,'doubling')
if y < y_new && (um || accept(f,y,x_0,x_new,w,L,R,j,varargin{:}))
break;
end
else
if y < y_new
% f(:,end+1) = f_new;
break;
end
end % if strcmp
if x_new(j) < x_0(j)
l = x_new(j);
else
r = x_new(j);
end % if
if abs(l-r) < 1e-8
error('shrinkage: sampling interval shrank to zero width without acceptance')
end
iter = iter + 1;
if iter > maxiter
fprintf('Maximum number (%d) of iterations reached for parameter %d during shrinkage.\n',maxiter,j);
if strcmp(method,'minmax')
error('Check function F, decrease the interval ''mmlimits'' or increase the value of ''maxiter''.');
else
error('Check function F or increase the value of ''maxiter''.');
end
end
end % while
%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%
function [L,R] = stepping_out(f,y,x_0,L,R,w,m,j,mmin,mmax,di,um,xx,yy,gp,z,eta,g,varargin);
%function [L,R] = stepping_out(f,y,x_0,L,R,w,m,j,mmin,mmax,di,um,varargin);
%
% Stepping-out procedure
f_new = f(:,end);
x_new = x_0;
if um % if the user defines the distribution to be unimodal
L(j) = x_0(j) - w(j).*rand;
if L(j) < mmin(j)
L(j) = mmin(j);
if di
fprintf('Underflow! (L:%d)\n',j);
end
end
R(j) = L(j) + w(j);
if R(j) > mmax(j)
R(j) = mmax(j);
if di
fprintf('Overflow! (R:%d)\n',j);
end
end
y_new = getY(gp,xx,yy,z,f_new, L, eta, g);
while y < y_new
% while y < -f(L,varargin{:})
L(j) = L(j) - w(j);
if L(j) < mmin(j)
L(j) = mmin(j);
if di
fprintf('Underflow! (L:%d)\n',j);
end
break;
else
y_new = getY(gp,xx,yy,z,f_new, L, eta, g);
end
end
y_new = getY(gp,xx,yy,z,f_new, R, eta, g);
while y < y_new
% while y < -f(R,varargin{:})
R(j) = R(j) + w(j);
if R(j) > mmax(j)
R(j) = mmax(j);
if di
fprintf('Overflow! (R:%d)\n',j);
end
break;
else
y_new = getY(gp,xx,yy,z,f_new, R, eta, g);
end
end
else % if the distribution is not defined to be unimodal
L(j) = x_0(j) - w(j).*rand;
J = floor(m(j).*rand);
if L(j) < mmin(j)
L(j) = mmin(j);
if di
fprintf('Underflow! (L:%d)\n',j);
end
J = 0;
end
R(j) = L(j) + w(j);
K = (m(j)-1) - J;
if R(j) > mmax(j)
R(j) = mmax(j);
if di
fprintf('Overflow! (R:%d)\n',j);
end
K = 0;
end
y_new = getY(gp,xx,yy,z,f_new, L, eta, g);
while J > 0 && y < y_new
% while J > 0 && y < -f(L,varargin{:})
L(j) = L(j) - w(j);
if L(j) < mmin(j)
L(j) = mmin(j);
if di
fprintf('Underflow! (L:%d)\n',j);
end
break;
end
y_new = getY(gp,xx,yy,z,f_new, L, eta, g);
J = J - 1;
end
y_new = getY(gp,xx,yy,z,f_new, R, eta, g);
while K > 0 && y < y_new
% while K > 0 && y < -f(R,varargin{:})
R(j) = R(j) + w(j);
if R(j) > mmax(j)
R(j) = mmax(j);
if di
fprintf('Overflow! (R:%d)\n',j);
end
break;
end
y_new = getY(gp,xx,yy,z,f_new, R, eta, g);
K = K - 1;
end
end
%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%
function [L,R] = doubling(f,y,x_0,L,R,w,m,j,mmin,mmax,di,um,xx,yy,gp,z,eta,g,varargin)
%function [L,R] = doubling(f,y,x_0,L,R,w,p,j,mmin,mmax,di,um,varargin);
%
% Doubling scheme for slice sampling
f_new = f(:,end);
x_new = x_0;
if um % if the user defines the distribution to be unimodal
L(j) = x_0(j) - w(j).*rand;
if L(j) < mmin(j)
L(j) = mmin(j);
if di
fprintf('Underflow! (L:%d)\n',j);
end
Ao = 1;
else
Ao = 0;
end
R(j) = L(j) + w(j);
if R(j) > mmax(j)
R(j) = mmax(j);
if di
fprintf('Overflow! (R:%d)\n',j);
end
Bo = 1;
else
Bo = 0;
end
AL = getY(gp,xx,yy,z,f_new, L, eta, g);
AR = getY(gp,xx,yy,z,f_new, R, eta, g);
while (Ao == 0 && y < AL) || (Bo == 0 && y < AR)
if rand < 1/2
L(j) = L(j) - (R(j)-L(j));
if L(j) < mmin(j)
L(j) = mmin(j);
if di
fprintf('Underflow! (L:%d)\n',j);
end
Ao = 1;
else
Ao = 0;
end
AL = getY(gp,xx,yy,z,f_new, L, eta, g);
else
R(j) = R(j) + (R(j)-L(j));
if R(j) > mmax(j)
R(j) = mmax(j);
if di
fprintf('Overflow! (R:%d)\n',j);
end
Bo = 1;
else
Bo = 0;
end
AR = getY(gp,xx,yy,z,f_new, R, eta, g);
end
end % while
else % if the distribution is not defined to be unimodal
L(j) = x_0(j) - w(j).*rand;
if L(j) < mmin(j)
L(j) = mmin(j);
if di
fprintf('Underflow! (L:%d)\n',j);
end
end
R(j) = L(j) + w(j);
if R(j) > mmax(j)
R(j) = mmax(j);
if di
fprintf('Overflow! (R:%d)\n',j);
end
end
K = m(j); % number of doublings left (the commented-out signature above calls this argument p)
AL = getY(gp,xx,yy,z,f_new, L, eta, g);
AR = getY(gp,xx,yy,z,f_new, R, eta, g);
while K > 0 && (y < AL || y < AR)
if rand < 1/2
L(j) = L(j) - (R(j)-L(j));
if L(j) < mmin(j)
L(j) = mmin(j);
if di
fprintf('Underflow! (L:%d)\n',j);
end
end
AL = getY(gp,xx,yy,z,f_new, L, eta, g);
else
R(j) = R(j) + (R(j)-L(j));
if R(j) > mmax(j)
R(j) = mmax(j);
if di
fprintf('Overflow! (R:%d)\n',j);
end
end
AR = getY(gp,xx,yy,z,f_new, R, eta, g);
end
K = K - 1;
end % while
end
%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%
function out = accept(f,y,x_0,x_new,w,L,R,j,varargin)
%
% Acceptance check for doubling scheme
out = [];
l = L;
r = R;
d = 0;
while r(j)-l(j) > 1.1*w(j)
m = (l(j)+r(j))/2;
if (x_0(j) < m && x_new(j) >= m) || (x_0(j) >= m && x_new(j) < m)
d = 1;
end
if x_new(j) < m
r(j) = m;
else
l(j) = m;
end
if d && y >= -f(l,varargin{:}) && y >= -f(r,varargin{:})
out = 0;
break;
end
end % while
if isempty(out)
out = 1;
end
function [y, f_new, eta, g] = getY(gp, xx, yy, z, f, w, eta, g)
if isempty(f) && (isempty(eta) || isempty(g))
error('Must provide either current latent values f to get the threshold or eta & g to get new latent values')
end
gp = gp_unpak(gp, w);
if ~isfield(gp.lik, 'nondiagW') || ismember(gp.lik.type, {'LGP' 'LGPC'})
[K, C] = gp_trcov(gp, xx);
else
if ~isfield(gp.lik,'xtime')
nl=[0 repmat(size(yy,1), 1, length(gp.comp_cf))];
else
xtime=gp.lik.xtime;
nl=[0 size(gp.lik.xtime,1) size(yy,1)];
end
nl=cumsum(nl);
nlp=length(nl)-1;
K = zeros(nl(end));
for i1=1:nlp
if i1==1 && isfield(gp.lik, 'xtime')
K((1+nl(i1)):nl(i1+1),(1+nl(i1)):nl(i1+1)) = gp_trcov(gp, xtime, gp.comp_cf{i1});
else
K((1+nl(i1)):nl(i1+1),(1+nl(i1)):nl(i1+1)) = gp_trcov(gp, xx, gp.comp_cf{i1});
end
end
C=K;
end
% for ii=1:size(yy,1)
% [tmp,tmp, m2(ii,:)] = gp.lik.fh.tiltedMoments(gp.lik, yy, ii, C(ii,ii), 0, z);
% end
% S = diag(1./(1./m2 - 1./diag(C)));
S = 10*eye(size(K));
if isempty(eta) || isempty(g)
g = mvnrnd(f,S)';
end
R = S-S*((S+K)\S);
R = (R+R')./2;
LR = chol(R,'lower');
m = R*(S\g);
if isempty(eta) || isempty(g)
eta = LR\(f-m);
f_new = [];
tr = 1; % return threshold
else
f_new = LR*eta + m;
tr = 0; % return y for threshold comparison
end
% Log prior for proposed hyperparameters
lp = 0;
for i3=1:length(gp.cf)
gpcf = gp.cf{i3};
lp = lp + gpcf.fh.lp(gpcf);
end
if isfield(gp, 'lik') && isfield(gp.lik, 'p')
likelih = gp.lik;
lp = lp + likelih.fh.lp(likelih);
end
if tr
% return threshold
y = log(rand(1)) + gp.lik.fh.ll(gp.lik, yy, f, z) + mnorm_lpdf(g', 0, C + S) + lp;
else
% return comparison value with proposed parameters
y = gp.lik.fh.ll(gp.lik, yy, f_new, z) + mnorm_lpdf(g', 0, C + S) + lp;
end
function opt = ssls_opt(opt)
% Default opt for surrogate sls.
% fsamples - number of latent samples per hyperparameter sample
if nargin < 1
opt=[];
end
if ~isfield(opt, 'fsamples')
opt.fsamples = 2;
end
if ~isfield(opt,'nsamples')
opt.nsamples = 1;
end
if ~isfield(opt,'nomit')
opt.nomit = 0;
end
if ~isfield(opt,'display')
opt.display = 0;
end
if ~isfield(opt,'method')
opt.method = 'multi';
end
if ~isfield(opt,'overrelaxation')
opt.overrelaxation = 0;
elseif opt.overrelaxation == 1 && (strcmp(opt.method,'doubling') || strcmp(opt.method,'minmax'))
opt.method = 'stepping';
end
if ~isfield(opt,'alimit')
opt.alimit = 4;
end
if ~isfield(opt,'wsize')
opt.wsize = 2;
end
if ~isfield(opt,'mlimit')
opt.mlimit = 4;
end
if ~isfield(opt,'maxiter')
opt.maxiter = 50;
end
if ~isfield(opt,'plimit')
opt.plimit = 2;
end
if ~isfield(opt,'unimodal')
opt.unimodal = 0;
end
if ~isfield(opt,'mmlimits')
opt.mmlimits = [opt.wsize-(opt.wsize*opt.mlimit); opt.wsize+(opt.wsize*opt.mlimit)];
end
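% Usage sketch (hedged): ssls_opt fills in defaults for the surrogate
% slice sampler above; the entry-point name and argument order below
% are assumptions for illustration only:
%   opt = ssls_opt();
%   opt.nsamples = 200;   % number of hyperparameter samples
%   opt.fsamples = 5;     % latent samples per hyperparameter sample
%   opt.display  = 1;     % print progress and energies
%   % [samples, samplesf] = surrogate_sls(f0, opt, gp, x, y);  % hypothetical call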
| github | lcnhappe/happe-master | lik_lgpc.m | .m | happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/lik_lgpc.m | 15,380 | windows_1250 | 8b88129ba333f69e6d189e704cecc43b |
function lik = lik_lgpc(varargin)
%LIK_LGPC Create a logistic Gaussian process likelihood structure for
% conditional density estimation
%
% Description
% LIK = LIK_LGPC creates a logistic Gaussian process likelihood
% structure for conditional density estimation
%
% The likelihood contribution for the $k$th conditional slice
% is defined as follows:
%                __ n
%   p(y_k|f_k) = || i=1 ( exp(f_ki) / Sum_{j=1}^n exp(f_kj) )^y_ki,
%
% where y_ki are the observed counts and f_ki the latent values of
% the kth conditional slice.
%
% See also
% LGPCDENS, GP_SET, LIK_*
%
% Copyright (c) 2012 Jaakko Riihimäki and Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'LIK_LGPC';
ip.addOptional('lik', [], @isstruct);
ip.parse(varargin{:});
lik=ip.Results.lik;
if isempty(lik)
init=true;
lik.type = 'LGPC';
lik.nondiagW = true;
else
if ~isfield(lik,'type') || ~isequal(lik.type,'LGPC')
error('First argument does not seem to be a valid likelihood function structure')
end
init=false;
end
if init
% Set the function handles to the subfunctions
lik.fh.pak = @lik_lgpc_pak;
lik.fh.unpak = @lik_lgpc_unpak;
lik.fh.ll = @lik_lgpc_ll;
lik.fh.llg = @lik_lgpc_llg;
lik.fh.llg2 = @lik_lgpc_llg2;
lik.fh.llg3 = @lik_lgpc_llg3;
lik.fh.tiltedMoments = @lik_lgpc_tiltedMoments;
lik.fh.predy = @lik_lgpc_predy;
lik.fh.invlink = @lik_lgpc_invlink;
lik.fh.recappend = @lik_lgpc_recappend;
end
end
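% Construction sketch (hedged): lik_lgpc has no parameters of its own,
% but the subfunctions below assume a field 'gridn' with the grid
% dimensions of the conditional slices, set by the calling code
% (e.g. lgpcdens):
%   lik = lik_lgpc();
%   lik.gridn = [2 3];   % hypothetical: 2 slices of 3 grid points each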
function [w,s] = lik_lgpc_pak(lik)
%LIK_LGPC_PAK Combine likelihood parameters into one vector.
%
% Description
% W = LIK_LGPC_PAK(LIK) takes a likelihood structure LIK
% and returns an empty vector W. If the LGPC likelihood had
% parameters this would combine them into a single row vector
% W (see e.g. lik_negbin). This is a mandatory subfunction
% used for example in energy and gradient computations.
%
% See also
% LIK_LGPC_UNPAK, GP_PAK
w = []; s = {};
end
function [lik, w] = lik_lgpc_unpak(lik, w)
%LIK_LGPC_UNPAK Extract likelihood parameters from the vector.
%
% Description
% W = LIK_LGPC_UNPAK(W, LIK) Doesn't do anything.
%
% If the LGPC likelihood had parameters this would extract them
% from the vector W to the LIK structure. This is
% a mandatory subfunction used for example in energy and
% gradient computations.
%
% See also
% LIK_LGPC_PAK, GP_UNPAK
lik=lik;
w=w;
end
function logLik = lik_lgpc_ll(lik, y, f, z)
%LIK_LGPC_LL Log likelihood
%
% Description
% E = LIK_LGPC_LL(LIK, Y, F, Z) takes a likelihood data
% structure LIK, incidence counts Y, expected counts Z, and
% latent values F. Returns the log likelihood, log p(y|f,z).
% This subfunction is needed when using Laplace approximation
% or MCMC for inference with non-Gaussian likelihoods. This
% subfunction is also used in information criteria (DIC, WAIC)
% computations.
%
% See also
% LIK_LGPC_LLG, LIK_LGPC_LLG3, LIK_LGPC_LLG2, GPLA_E
y2=reshape(y,fliplr(lik.gridn));
f2=reshape(f,fliplr(lik.gridn));
n2=sum(y2);
qj2=exp(f2);
logLik=sum(sum(f2.*y2)-n2.*log(sum(qj2)));
end
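% Quick numeric check (hedged example, hypothetical 2x3 grid):
%   lik.gridn = [2 3]; y = ones(6,1); f = zeros(6,1);
%   lik_lgpc_ll(lik, y, f)   % = -6*log(3), uniform multinomial slices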
function deriv = lik_lgpc_llg(lik, y, f, param, z)
%LIK_LGPC_LLG Gradient of the log likelihood
%
% Description
% G = LIK_LGPC_LLG(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, incidence counts Y, expected counts Z
% and latent values F. Returns the gradient of the log
% likelihood with respect to PARAM. At the moment PARAM can be
% 'param' or 'latent'. This subfunction is needed when using Laplace
% approximation or MCMC for inference with non-Gaussian likelihoods.
%
% See also
% LIK_LGPC_LL, LIK_LGPC_LLG2, LIK_LGPC_LLG3, GPLA_E
switch param
case 'latent'
y2=reshape(y,fliplr(lik.gridn));
f2=reshape(f,fliplr(lik.gridn));
n2=sum(y2);
qj2=exp(f2);
pj2=bsxfun(@rdivide,qj2,sum(qj2));
deriv2=y2-bsxfun(@times,n2,pj2);
deriv=deriv2(:);
end
end
function g2 = lik_lgpc_llg2(lik, y, f, param, z)
%LIK_LGPC_LLG2 Second gradients of the log likelihood
%
% Description
% G2 = LIK_LGPC_LLG2(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, incidence counts Y, expected counts Z,
% and latent values F. At the moment PARAM can be only
% 'latent'. Note that G2 is not the actual Hessian of the log
% likelihood; it is a vector used to form the exact gradient
% term in the gpla_nd_e, gpla_nd_g and gpla_nd_pred functions
% (see the comment in the code below). This subfunction is
% needed when using Laplace approximation for inference with
% non-Gaussian likelihoods.
%
% See also
% LIK_LGPC_LL, LIK_LGPC_LLG, LIK_LGPC_LLG3, GPLA_E
switch param
case 'latent'
% g2 is not the second gradient of the log likelihood but only a
% vector to form the exact gradient term in gpla_nd_e, gpla_nd_g and
% gpla_nd_pred functions
f2=reshape(f,fliplr(lik.gridn));
qj2=exp(f2);
pj2=bsxfun(@rdivide,qj2,sum(qj2));
g2=pj2(:);
end
end
function g3 = lik_lgpc_llg3(lik, y, f, param, z)
%LIK_LGPC_LLG3 Third gradients of the log likelihood
%
% Description
% G3 = LIK_LGPC_LLG3(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, incidence counts Y, expected counts Z
% and latent values F and returns the third gradients of the
% log likelihood with respect to PARAM. At the moment PARAM
% can be only 'latent'. G3 is a vector with third gradients.
% This subfunction is needed when using Laplace approximation
% for inference with non-Gaussian likelihoods.
%
% See also
% LIK_LGPC_LL, LIK_LGPC_LLG, LIK_LGPC_LLG2, GPLA_E, GPLA_G
switch param
case 'latent'
f2=reshape(f,fliplr(lik.gridn));
qj2=exp(f2);
pj2=bsxfun(@rdivide,qj2,sum(qj2));
g3=pj2(:);
end
end
function [logM_0, m_1, sigm2hati1] = lik_lgpc_tiltedMoments(lik, y, i1, sigm2_i, myy_i, z)
%LIK_LGPC_TILTEDMOMENTS Returns the marginal moments for EP algorithm
%
% Description
% [M_0, M_1, M2] = LIK_LGPC_TILTEDMOMENTS(LIK, Y, I, S2,
% MYY, Z) takes a likelihood structure LIK, incidence counts
% Y, expected counts Z, index I and cavity variance S2 and
% mean MYY. Returns the zeroth moment M_0, mean M_1 and
% variance M_2 of the posterior marginal (see Rasmussen and
% Williams (2006): Gaussian processes for Machine Learning,
% page 55). This subfunction is needed when using EP for
% inference with non-Gaussian likelihoods.
%
% See also
% GPEP_E
if isempty(z)
error(['lik_lgpc -> lik_lgpc_tiltedMoments: missing z!'...
'LGPC likelihood needs the expected number of '...
'occurrences as an extra input z. See, for '...
'example, lik_lgpc and gpla_e. ']);
end
yy = y(i1);
avgE = z(i1);
logM_0=zeros(size(yy));
m_1=zeros(size(yy));
sigm2hati1=zeros(size(yy));
for i=1:length(i1)
% get a function handle of an unnormalized tilted distribution
% (likelihood * cavity = LGPC * Gaussian)
% and useful integration limits
[tf,minf,maxf]=init_lgpc_norm(yy(i),myy_i(i),sigm2_i(i),avgE(i));
% Integrate with quadrature
RTOL = 1.e-6;
ATOL = 1.e-10;
[m_0, m_1(i), m_2] = quad_moments(tf, minf, maxf, RTOL, ATOL);
sigm2hati1(i) = m_2 - m_1(i).^2;
% If the second central moment is less than cavity variance
% integrate more precisely. Theoretically for log-concave
% likelihood should be sigm2hati1 < sigm2_i.
if sigm2hati1(i) >= sigm2_i(i)
ATOL = ATOL.^2;
RTOL = RTOL.^2;
[m_0, m_1(i), m_2] = quad_moments(tf, minf, maxf, RTOL, ATOL);
sigm2hati1(i) = m_2 - m_1(i).^2;
if sigm2hati1(i) >= sigm2_i(i)
error('lik_lgpc_tilted_moments: sigm2hati1 >= sigm2_i');
end
end
logM_0(i) = log(m_0);
end
end
function [lpy, Ey, Vary] = lik_lgpc_predy(lik, Ef, Varf, yt, zt)
%LIK_LGPC_PREDY Returns the predictive mean, variance and density of y
%
% Description
% LPY = LIK_LGPC_PREDY(LIK, EF, VARF, YT, ZT)
% Returns also the predictive density of YT, that is
% p(yt | y,zt) = \int p(yt | f, zt) p(f|y) df.
% This requires also the incidence counts YT, expected counts ZT.
% This subfunction is needed when computing posterior predictive
% distributions for future observations.
%
% [LPY, EY, VARY] = LIK_LGPC_PREDY(LIK, EF, VARF) takes a
% likelihood structure LIK, posterior mean EF and posterior
% Variance VARF of the latent variable and returns the
% posterior predictive mean EY and variance VARY of the
% observations related to the latent variables. This subfunction
% is needed when computing posterior predictive distributions for
% future observations.
%
%
% See also
% GPLA_PRED, GPEP_PRED, GPMC_PRED
if isempty(zt)
error(['lik_lgpc -> lik_lgpc_predy: missing zt!'...
'LGPC likelihood needs the expected number of '...
'occurrences as an extra input zt. See, for '...
'example, lik_lgpc and gpla_e. ']);
end
avgE = zt;
lpy = zeros(size(Ef));
Ey = zeros(size(Ef));
EVary = zeros(size(Ef));
VarEy = zeros(size(Ef));
if nargout > 1
% Evaluate Ey and Vary
for i1=1:length(Ef)
%%% With quadrature
myy_i = Ef(i1);
sigm_i = sqrt(Varf(i1));
minf=myy_i-6*sigm_i;
maxf=myy_i+6*sigm_i;
F = @(f) exp(log(avgE(i1))+f+norm_lpdf(f,myy_i,sigm_i));
Ey(i1) = quadgk(F,minf,maxf);
EVary(i1) = Ey(i1);
F3 = @(f) exp(2*log(avgE(i1))+2*f+norm_lpdf(f,myy_i,sigm_i));
VarEy(i1) = quadgk(F3,minf,maxf) - Ey(i1).^2;
end
Vary = EVary + VarEy;
end
% Evaluate the posterior predictive densities of the given observations
for i1=1:length(Ef)
% get a function handle of the likelihood times posterior
% (likelihood * posterior = LGPC * Gaussian)
% and useful integration limits
[pdf,minf,maxf]=init_lgpc_norm(...
yt(i1),Ef(i1),Varf(i1),avgE(i1));
% integrate over the f to get posterior predictive distribution
lpy(i1) = log(quadgk(pdf, minf, maxf));
end
end
function [df,minf,maxf] = init_lgpc_norm(yy,myy_i,sigm2_i,avgE)
%INIT_LGPC_NORM
%
% Description
% Return a function handle to a function evaluating LGPC *
% Gaussian, which is used for evaluating (likelihood * cavity)
% or (likelihood * posterior). Returns also useful limits for
% the integration. This is a private function for lik_lgpc. This
% subfunction is needed by the subfunctions tiltedMoments,
% siteDeriv and predy.
%
% See also
% LIK_LGPC_TILTEDMOMENTS, LIK_LGPC_PREDY
% avoid repetitive evaluation of constant part
ldconst = -gammaln(yy+1) - log(sigm2_i)/2 - log(2*pi)/2;
% Create function handle for the function to be integrated
df = @lgpc_norm;
% use log to avoid underflow, and derivatives for faster search
ld = @log_lgpc_norm;
ldg = @log_lgpc_norm_g;
ldg2 = @log_lgpc_norm_g2;
% Set the limits for integration
% LGPC likelihood is log-concave so the lgpc_norm
% function is unimodal, which makes things easier
if yy==0
% with yy==0, the mode of the likelihood is not defined
% use the mode of the Gaussian (cavity or posterior) as a first guess
modef = myy_i;
else
% use precision weighted mean of the Gaussian approximation
% of the LGPC likelihood and Gaussian
mu=log(yy/avgE);
s2=1./(yy+1./sigm2_i);
modef = (myy_i/sigm2_i + mu/s2)/(1/sigm2_i + 1/s2);
end
% find the mode of the integrand using Newton iterations
% a few iterations are enough, since the first guess is in the right direction
niter=3; % number of Newton iterations
mindelta=1e-6; % tolerance in stopping Newton iterations
for ni=1:niter
g=ldg(modef);
h=ldg2(modef);
delta=-g/h;
modef=modef+delta;
if abs(delta)<mindelta
break
end
end
% integrand limits based on Gaussian approximation at mode
modes=sqrt(-1/h);
minf=modef-8*modes;
maxf=modef+8*modes;
modeld=ld(modef);
iter=0;
% check that density at end points is low enough
lddiff=20; % min difference in log-density between mode and end-points
minld=ld(minf);
step=1;
while minld>(modeld-lddiff)
minf=minf-step*modes;
minld=ld(minf);
iter=iter+1;
step=step*2;
if iter>100
error(['lik_lgpc -> init_lgpc_norm: ' ...
'integration interval minimum not found ' ...
'even after looking hard!'])
end
end
maxld=ld(maxf);
step=1;
while maxld>(modeld-lddiff)
maxf=maxf+step*modes;
maxld=ld(maxf);
iter=iter+1;
step=step*2;
if iter>100
error(['lik_lgpc -> init_lgpc_norm: ' ...
'integration interval maximum not found ' ...
'even after looking hard!'])
end
end
function integrand = lgpc_norm(f)
% LGPC * Gaussian
mu = avgE.*exp(f);
integrand = exp(ldconst ...
-mu+yy.*log(mu) ...
-0.5*(f-myy_i).^2./sigm2_i);
end
function log_int = log_lgpc_norm(f)
% log(LGPC * Gaussian)
% log_lgpc_norm is used to avoid underflow when searching
% integration interval
mu = avgE.*exp(f);
log_int = ldconst ...
-mu+yy.*log(mu) ...
-0.5*(f-myy_i).^2./sigm2_i;
end
function g = log_lgpc_norm_g(f)
% d/df log(LGPC * Gaussian)
% derivative of log_lgpc_norm
mu = avgE.*exp(f);
g = -mu+yy...
+ (myy_i - f)./sigm2_i;
end
function g2 = log_lgpc_norm_g2(f)
% d^2/df^2 log(LGPC * Gaussian)
% second derivative of log_lgpc_norm
mu = avgE.*exp(f);
g2 = -mu...
-1/sigm2_i;
end
end
function mu = lik_lgpc_invlink(lik, f, z)
%LIK_LGPC_INVLINK Returns values of inverse link function
%
% Description
% P = LIK_LGPC_INVLINK(LIK, F) takes a likelihood structure LIK and
% latent values F and returns the values MU of inverse link function.
% This subfunction is needed when using function gp_predprctmu.
%
% See also
% LIK_LGPC_LL, LIK_LGPC_PREDY
mu = z.*exp(f);
end
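% Example: mu = z.*exp(f), so
%   lik_lgpc_invlink(lik, [0; log(2)], [5; 5])   % returns [5; 10]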
function reclik = lik_lgpc_recappend(reclik, ri, lik)
%RECAPPEND Append the parameters to the record
%
% Description
% RECLIK = LIK_LGPC_RECAPPEND(RECLIK, RI, LIK) takes a
% likelihood record structure RECLIK, record index RI and
% likelihood structure LIK with the current MCMC samples of
% the parameters. Returns RECLIK which contains all the old
% samples and the current samples from LIK. This subfunction
% is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC
if nargin == 2
reclik.type = 'LGPC';
% Set the function handles
reclik.fh.pak = @lik_lgpc_pak;
reclik.fh.unpak = @lik_lgpc_unpak;
reclik.fh.ll = @lik_lgpc_ll;
reclik.fh.llg = @lik_lgpc_llg;
reclik.fh.llg2 = @lik_lgpc_llg2;
reclik.fh.llg3 = @lik_lgpc_llg3;
reclik.fh.tiltedMoments = @lik_lgpc_tiltedMoments;
reclik.fh.predy = @lik_lgpc_predy;
reclik.fh.invlink = @lik_lgpc_invlink;
reclik.fh.recappend = @lik_lgpc_recappend;
return
end
end
| github | lcnhappe/happe-master | lik_softmax.m | .m | happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/lik_softmax.m | 10,116 | UNKNOWN | f393d5bbd44c08a0eaf0996ae3fb72f7 |
function lik = lik_softmax(varargin)
%LIK_SOFTMAX Create a softmax likelihood structure
%
% Description
% LIK = LIK_SOFTMAX creates Softmax likelihood for multi-class
% classification problem. The observed class label with C
% classes is given as 1xC vector where C-1 entries are 0 and the
% observed class label is 1.
%
% The likelihood is defined as follows:
%                          __ n
%   p(y|f^1, ..., f^C) =  || i=1 exp(f_i^{c_i}) / (sum_{c=1}^C exp(f_i^c)),
%
% where c_i is the observed class of the ith observation, f^c is the
% latent variable corresponding to the cth class and C is the number
% of classes.
%
% See also
% GP_SET, LIK_*
% Copyright (c) 2010 Jaakko Riihimäki, Pasi Jylänki
% Copyright (c) 2010 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'LIK_SOFTMAX';
ip.addOptional('lik', [], @isstruct);
ip.parse(varargin{:});
lik=ip.Results.lik;
if isempty(lik)
init=true;
lik.type = 'Softmax';
lik.nondiagW=true;
else
if ~isfield(lik,'type') || ~isequal(lik.type,'Softmax')
error('First argument does not seem to be a valid likelihood function structure')
end
init=false;
end
if init
% Set the function handles to the subfunctions
lik.fh.pak = @lik_softmax_pak;
lik.fh.unpak = @lik_softmax_unpak;
lik.fh.ll = @lik_softmax_ll;
lik.fh.llg = @lik_softmax_llg;
lik.fh.llg2 = @lik_softmax_llg2;
lik.fh.llg3 = @lik_softmax_llg3;
lik.fh.tiltedMoments = @lik_softmax_tiltedMoments;
lik.fh.predy = @lik_softmax_predy;
lik.fh.recappend = @lik_softmax_recappend;
end
end
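% Usage sketch (hedged): for a C-class problem the softmax likelihood
% is paired with one latent process per class; the gp_set call below
% is an assumed interface shown for illustration only:
%   lik = lik_softmax();
%   % gp = gp_set('lik', lik, 'cf', {gpcf1 gpcf2 gpcf3}, ...
%   %             'comp_cf', {1 2 3});   % one covariance per class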
function [w,s] = lik_softmax_pak(lik)
%LIK_SOFTMAX_PAK Combine likelihood parameters into one vector.
%
% Description
% W = LIK_SOFTMAX_PAK(LIK) takes a likelihood structure LIK and
% returns an empty vector W. If the Softmax likelihood had
% parameters this would combine them into a single row vector
% W (see e.g. lik_negbin). This is a mandatory subfunction used
% for example in energy and gradient computations.
%
%
% See also
% LIK_SOFTMAX_UNPAK, GP_PAK
w = []; s = {};
end
function [lik, w] = lik_softmax_unpak(lik, w)
%LIK_SOFTMAX_UNPAK Extract likelihood parameters from the vector.
%
% Description
% W = LIK_SOFTMAX_UNPAK(W, LIK) Doesn't do anything.
%
% If the Softmax likelihood had parameters this would extract
% them from the vector W to the LIK structure. This is a
% mandatory subfunction used for example in energy and gradient
% computations.
%
%
% See also
% LIK_SOFTMAX_PAK, GP_UNPAK
lik=lik;
w=w;
end
function ll = lik_softmax_ll(lik, y, f2, z)
%LIK_SOFTMAX_LL Log likelihood
%
% Description
% LL = LIK_SOFTMAX_LL(LIK, Y, F) takes a likelihood structure
% LIK, class labels Y (NxC matrix), and latent values F (NxC
% matrix). Returns the log likelihood, log p(y|f,z). This
% subfunction is needed when using Laplace approximation or
% MCMC for inference with non-Gaussian likelihoods. This
% subfunction is also used in information criteria (DIC, WAIC)
% computations.
%
% See also
% LIK_SOFTMAX_LLG, LIK_SOFTMAX_LLG3, LIK_SOFTMAX_LLG2, GPLA_E
if ~isempty(find(y~=1 & y~=0))
error('lik_softmax: The class labels have to be {0,1}')
end
% Reshape to NxC matrix
f2=reshape(f2,size(y));
% softmax:
ll = y(:)'*f2(:) - sum(log(sum(exp(f2),2)));
end
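% Quick numeric check (hedged): with all latent values zero the class
% probabilities are uniform, so for n=2 observations and C=3 classes
%   y = [1 0 0; 0 1 0]; f2 = zeros(6,1);
%   lik_softmax_ll([], y, f2)   % = -2*log(3) ~ -2.1972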
function llg = lik_softmax_llg(lik, y, f2, param, z)
%LIK_SOFTMAX_LLG Gradient of the log likelihood
%
% Description
% LLG = LIK_SOFTMAX_LLG(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, class labels Y, and latent values F. Returns
% the gradient of the log likelihood with respect to PARAM. At
% the moment PARAM can be 'param' or 'latent'. This subfunction
% is needed when using Laplace approximation or MCMC for inference
% with non-Gaussian likelihoods.
%
% See also
% LIK_SOFTMAX_LL, LIK_SOFTMAX_LLG2, LIK_SOFTMAX_LLG3, GPLA_E
if ~isempty(find(y~=1 & y~=0))
error('lik_softmax: The class labels have to be {0,1}')
end
% Reshape to NxC matrix
f2=reshape(f2,size(y));
expf2 = exp(f2);
pi2 = expf2./(sum(expf2, 2)*ones(1,size(y,2)));
pi_vec=pi2(:);
llg = y(:)-pi_vec;
end
function [pi_vec, pi_mat] = lik_softmax_llg2(lik, y, f2, param, z)
%LIK_SOFTMAX_LLG2 Second gradients of the log likelihood
%
% Description
% [PI_VEC, PI_MAT] = LIK_SOFTMAX_LLG2(LIK, Y, F, PARAM) takes a
% likelihood structure LIK, class labels Y, and latent values F.
% At the moment PARAM can be only 'latent'. Instead of the
% Hessian itself, it returns the class probabilities PI_VEC and
% the matrix PI_MAT from which the Hessian
% -diag(PI_VEC) + PI_MAT*PI_MAT' can be formed (see the
% commented-out lines at the end of the function). This
% subfunction is needed when using Laplace approximation for
% inference with non-Gaussian likelihoods.
%
% See also
% LIK_SOFTMAX_LL, LIK_SOFTMAX_LLG, LIK_SOFTMAX_LLG3, GPLA_E
% softmax:
% Reshape to NxC matrix
f2=reshape(f2,size(y));
expf2 = exp(f2);
pi2 = expf2./(sum(expf2, 2)*ones(1,size(y,2)));
pi_vec=pi2(:);
[n,nout]=size(y);
pi_mat=zeros(nout*n, n);
for i1=1:nout
pi_mat((1+(i1-1)*n):(nout*n+1):end)=pi2(:,i1);
end
% D=diag(pi_vec);
% llg2=-D+pi_mat*pi_mat';
end
function dw_mat = lik_softmax_llg3(lik, y, f, param, z)
%LIK_SOFTMAX_LLG3 Third gradients of the log likelihood
%
% Description
% LLG3 = LIK_SOFTMAX_LLG3(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, class labels Y, and latent values F and
% returns the third gradients of the log likelihood with
% respect to PARAM. At the moment PARAM can be only 'latent'.
% LLG3 is a vector with third gradients. This subfunction is
% needed when using Laplace approximation for inference with
% non-Gaussian likelihoods.
%
% See also
% LIK_SOFTMAX_LL, LIK_SOFTMAX_LLG, LIK_SOFTMAX_LLG2, GPLA_E, GPLA_G
if ~isempty(find(y~=1 & y~=0))
error('lik_softmax: The class labels have to be {0,1}')
end
[n,nout] = size(y);
f2 = reshape(f,n,nout);
expf2 = exp(f2);
pi2 = expf2./(sum(expf2, 2)*ones(1,nout));
pi_vec=pi2(:);
dw_mat=zeros(nout,nout,nout,n);
for cc3=1:nout
for ii1=1:n
pic=pi_vec(ii1:n:(nout*n));
for cc1=1:nout
for cc2=1:nout
% multinom third derivatives
cc_sum_tmp=0;
if cc1==cc2 && cc1==cc3 && cc2==cc3
cc_sum_tmp=cc_sum_tmp+pic(cc1);
end
if cc1==cc2
cc_sum_tmp=cc_sum_tmp-pic(cc1)*pic(cc3);
end
if cc2==cc3
cc_sum_tmp=cc_sum_tmp-pic(cc1)*pic(cc2);
end
if cc1==cc3
cc_sum_tmp=cc_sum_tmp-pic(cc1)*pic(cc2);
end
cc_sum_tmp=cc_sum_tmp+2*pic(cc1)*pic(cc2)*pic(cc3);
dw_mat(cc1,cc2,cc3,ii1)=cc_sum_tmp;
end
end
end
end
end
function [logM_0, m_1, sigm2hati1] = lik_softmax_tiltedMoments(lik, y, i1, sigm2_i, myy_i, z)
% Tilted moments are not implemented for the softmax likelihood
% (EP inference is not supported); this empty stub only keeps the
% likelihood subfunction interface complete.
end
function [lpy, Ey, Vary] = lik_softmax_predy(lik, Ef, Varf, yt, zt)
%LIK_SOFTMAX_PREDY Returns the predictive mean, variance and density of
%y
%
% Description
% LPY = LIK_SOFTMAX_PREDY(LIK, EF, VARF, YT, ZT)
% Returns logarithm of the predictive density PY of YT, that is
% p(yt | y, zt) = \int p(yt | f, zt) p(f|y) df.
% This requires also the class labels YT.
% This subfunction is needed when computing posterior predictive
% distributions for future observations.
%
% [EY, VARY] = LIK_SOFTMAX_PREDY(LIK, EF, VARF) takes a
% likelihood structure LIK, posterior mean EF and posterior
% Variance VARF of the latent variable and returns the
% posterior predictive mean EY and variance VARY of the
% observations related to the latent variables. This
% subfunction is needed when computing posterior predictive
% distributions for future observations.
%
%
% See also
% GPEP_PRED, GPLA_PRED, GPMC_PRED
if ~isempty(find(yt~=1 & yt~=0))
error('lik_softmax: The class labels have to be {0,1}')
end
S=10000;
[ntest,nout]=size(yt);
pi=zeros(ntest,nout);
lpy=zeros(ntest,nout);
Ef=reshape(Ef(:),ntest,nout);
[notused,notused,c] =size(Varf);
if c>1
mcmc=false;
else
mcmc=true;
Varf=reshape(Varf(:),ntest,nout);
end
for i1=1:ntest
if mcmc
Sigm_tmp = (Varf(i1,:));
f_star=bsxfun(@plus, Ef(i1,:), bsxfun(@times, sqrt(Sigm_tmp), ...
randn(S,nout)));
else
Sigm_tmp=(Varf(:,:,i1)'+Varf(:,:,i1))./2;
f_star=mvnrnd(Ef(i1,:), Sigm_tmp, S);
end
tmp = exp(f_star);
tmp = tmp./(sum(tmp, 2)*ones(1,size(tmp,2)));
pi(i1,:)=mean(tmp);
ytmp = repmat(yt(i1,:),S,1);
lpy(i1,:) = log(mean(tmp.^(ytmp).*(1-tmp).^(1-ytmp)));
end
if nargout > 1
Ey = 2*pi-1;
Vary = 1-(2*pi-1).^2;
Ey=Ey(:);
end
lpy=lpy(:);
end
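% Design note: the predictive quantities above are Monte Carlo
% estimates based on S=10000 draws from the latent posterior
% (independent Gaussian marginals for MCMC output, the full
% covariance otherwise); increasing S reduces the MC error at a
% linear cost in time.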
function reclik = lik_softmax_recappend(reclik, ri, lik)
%RECAPPEND Append the parameters to the record
%
% Description
% RECLIK = LIK_SOFTMAX_RECAPPEND(RECLIK, RI, LIK) takes a
% likelihood record structure RECLIK, record index RI and
% likelihood structure LIK with the current MCMC samples of
% the parameters. Returns RECLIK which contains all the old
% samples and the current samples from LIK. This subfunction
% is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC
if nargin == 2
reclik.type = 'Softmax';
reclik.nondiagW = true;
% Set the function handles
reclik.fh.pak = @lik_softmax_pak;
reclik.fh.unpak = @lik_softmax_unpak;
reclik.fh.ll = @lik_softmax_ll;
reclik.fh.llg = @lik_softmax_llg;
reclik.fh.llg2 = @lik_softmax_llg2;
reclik.fh.llg3 = @lik_softmax_llg3;
reclik.fh.tiltedMoments = @lik_softmax_tiltedMoments;
reclik.fh.predy = @lik_softmax_predy;
reclik.fh.recappend = @lik_softmax_recappend;
end
end
| github | lcnhappe/happe-master | gpcf_noise.m | .m | happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpcf_noise.m | 12,126 | utf_8 | 70defae513090d936fda63764c2de76c |
function gpcf = gpcf_noise(varargin)
%GPCF_NOISE Create a independent noise covariance function
%
% Description
% GPCF = GPCF_NOISE('PARAM1',VALUE1,'PARAM2,VALUE2,...) creates
% independent noise covariance function structure in which the
% named parameters have the specified values. Any unspecified
% parameters are set to default values.
%
% GPCF = GPCF_NOISE(GPCF,'PARAM1',VALUE1,'PARAM2,VALUE2,...)
% modify a covariance function structure with the named
% parameters altered with the specified values.
%
% Parameters for independent noise covariance function [default]
% noiseSigma2 - variance of the independent noise [0.1]
% noiseSigma2_prior - prior for noiseSigma2 [prior_logunif]
%
% Note! If the prior is 'prior_fixed' then the parameter in
% question is considered fixed and it is not handled in
% optimization, grid integration, MCMC etc.
%
% See also
% GP_SET, GPCF_*, PRIOR_*
% Copyright (c) 2007-2010 Jarno Vanhatalo
% Copyright (c) 2010 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'GPCF_NOISE';
ip.addOptional('gpcf', [], @isstruct);
ip.addParamValue('noiseSigma2',0.1, @(x) isscalar(x) && x>0);
ip.addParamValue('noiseSigma2_prior',prior_logunif, @(x) isstruct(x) || isempty(x));
ip.parse(varargin{:});
gpcf=ip.Results.gpcf;
if isempty(gpcf)
init=true;
gpcf.type = 'gpcf_noise';
else
if ~isfield(gpcf,'type') || ~isequal(gpcf.type,'gpcf_noise')
error('First argument does not seem to be a valid covariance function structure')
end
init=false;
end
% Initialize parameter
if init || ~ismember('noiseSigma2',ip.UsingDefaults)
gpcf.noiseSigma2=ip.Results.noiseSigma2;
end
% Initialize prior structure
if init
gpcf.p=[];
end
if init || ~ismember('noiseSigma2_prior',ip.UsingDefaults)
gpcf.p.noiseSigma2=ip.Results.noiseSigma2_prior;
end
if init
% Set the function handles to the subfunctions
gpcf.fh.pak = @gpcf_noise_pak;
gpcf.fh.unpak = @gpcf_noise_unpak;
gpcf.fh.lp = @gpcf_noise_lp;
gpcf.fh.lpg = @gpcf_noise_lpg;
gpcf.fh.cfg = @gpcf_noise_cfg;
gpcf.fh.ginput = @gpcf_noise_ginput;
gpcf.fh.cov = @gpcf_noise_cov;
gpcf.fh.trcov = @gpcf_noise_trcov;
gpcf.fh.trvar = @gpcf_noise_trvar;
gpcf.fh.recappend = @gpcf_noise_recappend;
end
end
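% Construction sketch (hedged; parameter names as documented above):
%   gpcf = gpcf_noise('noiseSigma2', 0.01, ...
%                     'noiseSigma2_prior', prior_logunif());
%   gpcf = gpcf_noise(gpcf, 'noiseSigma2', 0.05);   % modify an existing structure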
function [w, s] = gpcf_noise_pak(gpcf)
%GPCF_NOISE_PAK Combine GP covariance function parameters into
% one vector.
%
% Description
% W = GPCF_NOISE_PAK(GPCF) takes a covariance function data
% structure GPCF and combines the covariance function
% parameters and their hyperparameters into a single row
% vector W. This is a mandatory subfunction used for example
% in energy and gradient computations.
%
% w = [ log(gpcf.noiseSigma2)
% (hyperparameters of gpcf.noiseSigma2)]'
%
%
% See also
% GPCF_NOISE_UNPAK
w = []; s = {};
if ~isempty(gpcf.p.noiseSigma2)
w(1) = log(gpcf.noiseSigma2);
s = [s 'log(noise.noiseSigma2)'];
% Hyperparameters of noiseSigma2
[wh sh] = gpcf.p.noiseSigma2.fh.pak(gpcf.p.noiseSigma2);
w = [w wh];
s = [s sh];
end
end
function [gpcf, w] = gpcf_noise_unpak(gpcf, w)
%GPCF_NOISE_UNPAK Sets the covariance function parameters
% into the structure
%
% Description
% [GPCF, W] = GPCF_NOISE_UNPAK(GPCF, W) takes a covariance
% function data structure GPCF and a hyper-parameter vector W,
% and returns a covariance function data structure identical
% to the input, except that the covariance hyper-parameters
% have been set to the values in W. Deletes the values set to
% GPCF from W and returns the modified W. This is a mandatory
% subfunction used for example in energy and gradient computations.
%
% Assignment is inverse of
% w = [ log(gpcf.noiseSigma2)
% (hyperparameters of gpcf.noiseSigma2)]'
%
% See also
% GPCF_NOISE_PAK
if ~isempty(gpcf.p.noiseSigma2)
gpcf.noiseSigma2 = exp(w(1));
w = w(2:end);
% Hyperparameters of noiseSigma2
[p, w] = gpcf.p.noiseSigma2.fh.unpak(gpcf.p.noiseSigma2, w);
gpcf.p.noiseSigma2 = p;
end
end
function lp = gpcf_noise_lp(gpcf)
%GPCF_NOISE_LP Evaluate the log prior of covariance function parameters
%
% Description
% LP = GPCF_NOISE_LP(GPCF) takes a covariance function
% structure GPCF and returns log(p(th)), where th collects the
% parameters. This is a mandatory subfunction used for example
% in energy computations.
%
% See also
% GPCF_NOISE_PAK, GPCF_NOISE_UNPAK, GPCF_NOISE_G, GP_E
% Evaluate the prior contribution to the error. The parameters
% that are sampled live in the space W = log(w), where w are the
% "real" parameters. Since the errors are evaluated in the
% W-space, we need to take into account also the Jacobian of the
% transformation W -> w = exp(W). See Gelman et al. (2004),
% Bayesian Data Analysis, second edition, p. 24.
lp = 0;
gpp=gpcf.p;
if ~isempty(gpcf.p.noiseSigma2)
% Evaluate the prior contribution to the error.
lp = gpp.noiseSigma2.fh.lp(gpcf.noiseSigma2, gpp.noiseSigma2) +log(gpcf.noiseSigma2);
end
end
function lpg = gpcf_noise_lpg(gpcf)
%GPCF_NOISE_LPG Evaluate gradient of the log prior with respect
% to the parameters.
%
% Description
% LPG = GPCF_NOISE_LPG(GPCF) takes a covariance function
% structure GPCF and returns LPG = d log (p(th))/dth, where th
% is the vector of parameters. This is a mandatory subfunction
% used for example in gradient computations.
%
% See also
% GPCF_NOISE_PAK, GPCF_NOISE_UNPAK, GPCF_NOISE_LP, GP_G
lpg = [];
gpp=gpcf.p;
if ~isempty(gpcf.p.noiseSigma2)
lpgs = gpp.noiseSigma2.fh.lpg(gpcf.noiseSigma2, gpp.noiseSigma2);
lpg = [lpg lpgs(1).*gpcf.noiseSigma2+1 lpgs(2:end)];
end
end
function DKff = gpcf_noise_cfg(gpcf, x, x2, mask, i1)
%GPCF_NOISE_CFG Evaluate gradient of covariance function
% with respect to the parameters
%
% Description
% DKff = GPCF_NOISE_CFG(GPCF, X) takes a covariance function
% data structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X) with
% respect to th (cell array with matrix elements). This is a
% mandatory subfunction used in gradient computations.
%
% DKff = GPCF_NOISE_CFG(GPCF, X, X2) takes a covariance
% function data structure GPCF, a matrix X of input vectors
% and returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_NOISE_CFG(GPCF, X, [], MASK) takes a covariance
% function data structure GPCF, a matrix X of input vectors
% and returns DKff, the diagonal of gradients of covariance
% matrix Kff = k(X,X2) with respect to th (cell array with
% matrix elements). This subfunction is needed when using
% sparse approximations (e.g. FIC).
%
% See also
% GPCF_NOISE_PAK, GPCF_NOISE_UNPAK, GPCF_NOISE_E, GP_G
DKff = {};
if ~isempty(gpcf.p.noiseSigma2)
gpp=gpcf.p;
DKff{1}=gpcf.noiseSigma2;
end
if nargin==5
% Use memory save option
if i1==0
% Return number of hyperparameters
DKff=1;
return
end
DKff=DKff{1};
end
end
function DKff = gpcf_noise_ginput(gpcf, x, t, i1)
%GPCF_NOISE_GINPUT Evaluate gradient of covariance function with
% respect to x
%
% Description
% DKff = GPCF_NOISE_GINPUT(GPCF, X) takes a covariance
% function data structure GPCF, a matrix X of input vectors
% and returns DKff, the gradients of covariance matrix Kff =
% k(X,X) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_NOISE_GINPUT(GPCF, X, X2) takes a covariance
% function data structure GPCF, a matrix X of input vectors
% and returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% See also
% GPCF_NOISE_PAK, GPCF_NOISE_UNPAK, GPCF_NOISE_E, GP_G
end
function C = gpcf_noise_cov(gpcf, x1, x2)
% GP_NOISE_COV Evaluate covariance matrix between two input vectors
%
% Description
% C = GP_NOISE_COV(GP, TX, X) takes in covariance function of
% a Gaussian process GP and two matrixes TX and X that contain
% input vectors to GP. Returns covariance matrix C. Every
% element ij of C contains covariance between inputs i in TX
% and j in X. This is a mandatory subfunction used for example in
% prediction and energy computations.
%
% See also
% GPCF_NOISE_TRCOV, GPCF_NOISE_TRVAR, GP_COV, GP_TRCOV
if isempty(x2)
x2=x1;
end
[n1,m1]=size(x1);
[n2,m2]=size(x2);
if m1~=m2
error('the number of columns of X1 and X2 has to be same')
end
C = sparse([],[],[],n1,n2,0);
end
function C = gpcf_noise_trcov(gpcf, x)
%GP_NOISE_TRCOV Evaluate training covariance matrix of inputs
%
% Description
% C = GP_NOISE_TRCOV(GP, TX) takes in covariance function of a
% Gaussian process GP and matrix TX that contains training
% input vectors. Returns covariance matrix C. Every element ij
% of C contains covariance between inputs i and j in TX. This is
% a mandatory subfunction used for example in prediction and
% energy computations.
%
%
% See also
% GPCF_NOISE_COV, GPCF_NOISE_TRVAR, GP_COV, GP_TRCOV
[n, m] =size(x);
n1=n+1;
C = sparse([],[],[],n,n,0);
C(1:n1:end)=C(1:n1:end)+gpcf.noiseSigma2;
end
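% Sanity check (hedged): the training covariance is a sparse diagonal
% matrix with noiseSigma2 on the diagonal, independent of the inputs:
%   gpcf = gpcf_noise('noiseSigma2', 0.05);
%   C = gpcf.fh.trcov(gpcf, randn(4,2));   % 4x4 sparse, equals 0.05*eye(4)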
function C = gpcf_noise_trvar(gpcf, x)
% GP_NOISE_TRVAR Evaluate training variance vector
%
% Description
% C = GP_NOISE_TRVAR(GPCF, TX) takes in covariance function
% of a Gaussian process GPCF and matrix TX that contains
% training inputs. Returns variance vector C. Every
% element i of C contains variance of input i in TX. This is
% a mandatory subfunction used for example in prediction and
% energy computations.
%
%
% See also
% GPCF_NOISE_COV, GP_COV, GP_TRCOV
[n, m] =size(x);
C=ones(n,1)*gpcf.noiseSigma2;
end
function reccf = gpcf_noise_recappend(reccf, ri, gpcf)
%RECAPPEND Record append
%
% Description
% RECCF = GPCF_NOISE_RECAPPEND(RECCF, RI, GPCF) takes a
% covariance function record structure RECCF, record index RI
% and covariance function structure GPCF with the current MCMC
% samples of the hyperparameters. Returns RECCF which contains
% all the old samples and the current samples from GPCF.
% This subfunction is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC and GP_MC -> RECAPPEND
if nargin == 2
% Initialize the record
reccf.type = 'gpcf_noise';
% Initialize parameters
reccf.noiseSigma2 = [];
% Set the function handles
reccf.fh.pak = @gpcf_noise_pak;
reccf.fh.unpak = @gpcf_noise_unpak;
reccf.fh.e = @gpcf_noise_lp;
reccf.fh.lpg = @gpcf_noise_lpg;
reccf.fh.cfg = @gpcf_noise_cfg;
reccf.fh.cov = @gpcf_noise_cov;
reccf.fh.trcov = @gpcf_noise_trcov;
reccf.fh.trvar = @gpcf_noise_trvar;
% gpcf.fh.sampling = @hmc2;
reccf.sampling_opt = hmc2_opt;
reccf.fh.recappend = @gpcf_noise_recappend;
reccf.p=[];
reccf.p.noiseSigma2=[];
if ~isempty(ri.p.noiseSigma2)
reccf.p.noiseSigma2 = ri.p.noiseSigma2;
end
else
% Append to the record
gpp = gpcf.p;
% record noiseSigma2
reccf.noiseSigma2(ri,:)=gpcf.noiseSigma2;
if isfield(gpp,'noiseSigma2') && ~isempty(gpp.noiseSigma2)
reccf.p.noiseSigma2 = gpp.noiseSigma2.fh.recappend(reccf.p.noiseSigma2, ri, gpcf.p.noiseSigma2);
end
end
end
| github | lcnhappe/happe-master | gpcf_scaled.m | .m | happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpcf_scaled.m | 14,371 | utf_8 | e5089583dd57a67a67f52a293560482a |
function gpcf = gpcf_scaled(varargin)
%GPCF_SCALED Create a scaled covariance function
%
% Description
% GPCF = GPCF_scaled('cf', {GPCF_1, GPCF_2, ...})
% creates a scaled version of a covariance function as follows
% GPCF_scaled = diag(x(:,scaler))*GPCF*diag(x(:,scaler))
% where x is the matrix of inputs (see, e.g. gp_trcov).
%
% Parameters for the scaled covariance function are [default]
% cf - covariance function to be scaled (compulsory)
% scaler - the input that is used for scaling [1]
%
% See also
% GP_SET, GPCF_*
% For more information on models leading to scaled covariance function see,
% for example:
%
% GELFAND, KIM, SIRMANS, and BANERJEE (2003). Spatial Modeling With
% Spatially Varying Coefficient Processes. Journal of the American
% Statistical Association June 2003, Vol. 98, No. 462
%
% Copyright (c) 2009-2012 Jarno Vanhatalo
% Copyright (c) 2010 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 2 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'GPCF_SCALED';
ip.addOptional('gpcf', [], @isstruct);
ip.addParamValue('cf',[], @isstruct);
ip.addParamValue('scaler',1, @(x) isscalar(x) && x>0);
ip.parse(varargin{:});
gpcf=ip.Results.gpcf;
if isempty(gpcf)
init=true;
gpcf.type = 'gpcf_scaled';
else
if ~isfield(gpcf,'type') || ~isequal(gpcf.type,'gpcf_scaled')
error('First argument does not seem to be a valid covariance function structure')
end
init=false;
end
if init || ~ismember('cf',ip.UsingDefaults)
% Initialize parameters
gpcf.cf = {};
cfs=ip.Results.cf;
if ~isempty(cfs)
gpcf.cf{1} = cfs;
else
error('A covariance function has to be given in cf');
end
end
if init || ~ismember('scaler',ip.UsingDefaults)
gpcf.scaler = ip.Results.scaler;
end
if init
% Set the function handles to the subfunctions
gpcf.fh.pak = @gpcf_scaled_pak;
gpcf.fh.unpak = @gpcf_scaled_unpak;
gpcf.fh.lp = @gpcf_scaled_lp;
gpcf.fh.lpg = @gpcf_scaled_lpg;
gpcf.fh.cfg = @gpcf_scaled_cfg;
gpcf.fh.ginput = @gpcf_scaled_ginput;
gpcf.fh.cov = @gpcf_scaled_cov;
gpcf.fh.trcov = @gpcf_scaled_trcov;
gpcf.fh.trvar = @gpcf_scaled_trvar;
gpcf.fh.recappend = @gpcf_scaled_recappend;
end
end
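% Construction sketch (hedged; gpcf_sexp is assumed to be available in
% the same toolbox): scale a squared-exponential covariance by the
% second input column,
%   gpcf = gpcf_scaled('cf', gpcf_sexp(), 'scaler', 2);
%   % K_scaled(x,x2) = x(:,2) .* K_sexp(x,x2) .* x2(:,2)'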
function [w, s] = gpcf_scaled_pak(gpcf)
%GPCF_scaled_PAK Combine GP covariance function parameters into one vector
%
% Description
% W = GPCF_scaled_PAK(GPCF, W) loops through all the covariance
% functions and packs their parameters into one vector as
% described in the respective functions. This is a mandatory
% subfunction used for example in energy and gradient computations.
%
% See also
% GPCF_scaled_UNPAK
w = []; s = {};
cf = gpcf.cf{1};
[wi si] = feval(cf.fh.pak, cf);
w = [w wi];
s = [s; si];
end
function [gpcf, w] = gpcf_scaled_unpak(gpcf, w)
%GPCF_scaled_UNPAK Sets the covariance function parameters into
% the structures
%
% Description
% [GPCF, W] = GPCF_scaled_UNPAK(GPCF, W) loops through all the
% covariance functions and unpacks their parameters from W to
% each covariance function structure. This is a mandatory
% subfunction used for example in energy and gradient computations.
%
% See also
% GPCF_scaled_PAK
%
cf = gpcf.cf{1};
[cf, w] = feval(cf.fh.unpak, cf, w);
gpcf.cf{1} = cf;
end
function lp = gpcf_scaled_lp(gpcf)
%GPCF_scaled_LP Evaluate the log prior of covariance function parameters
%
% Description
% LP = GPCF_scaled_LP(GPCF, X, T) takes a covariance function
% structure GPCF and returns log(p(th)), where th collects the
% parameters. This is a mandatory subfunction used for example
% in energy computations.
%
% See also
% GPCF_scaled_PAK, GPCF_scaled_UNPAK, GPCF_scaled_LPG, GP_E
lp = 0;
cf = gpcf.cf{1};
lp = lp + feval(cf.fh.lp, cf);
end
function lpg = gpcf_scaled_lpg(gpcf)
%GPCF_scaled_LPG Evaluate gradient of the log prior with respect
% to the parameters.
%
% Description
% LPG = GPCF_scaled_LPG(GPCF) takes a covariance function
% structure GPCF and returns LPG = d log (p(th))/dth, where th
% is the vector of parameters. This is a mandatory subfunction
% used for example in gradient computations.
%
% See also
% GPCF_scaled_PAK, GPCF_scaled_UNPAK, GPCF_scaled_LP, GP_G
lpg = [];
% Evaluate the gradients
cf = gpcf.cf{1};
lpg=[lpg cf.fh.lpg(cf)];
end
function DKff = gpcf_scaled_cfg(gpcf, x, x2, mask, i1)
%GPCF_scaled_CFG Evaluate gradient of covariance function
% with respect to the parameters.
%
% Description
% DKff = GPCF_scaled_CFG(GPCF, X) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X) with
% respect to th (cell array with matrix elements). This is a
% mandatory subfunction used in gradient computations.
%
% DKff = GPCF_scaled_CFG(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_scaled_CFG(GPCF, X, [], MASK) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the diagonal of gradients of covariance matrix
% Kff = k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_scaled_CFG(GPCF, X, X2, [], i) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2), or k(X,X) if X2 is empty, with respect to ith
% hyperparameter. This subfunction is needed when using memory
% save option in gp_set.
%
% See also
% GPCF_scaled_PAK, GPCF_scaled_UNPAK, GPCF_scaled_LP, GP_G
[n, m] =size(x);
if nargin==5
% Use memory save option
savememory=1;
if i1==0
% Return number of hyperparameters
DKff=gpcf.cf{1}.fh.cfg(gpcf.cf{1},[],[],[],0);
return
end
else
savememory=0;
end
DKff = {};
% Evaluate: DKff{1} = d Kff / d magnSigma2
% DKff{2} = d Kff / d lengthScale
% NOTE! Here we have already taken into account that the parameters are transformed
% through log() and thus dK/dlog(p) = p * dK/dp
% evaluate the gradient for training covariance
if nargin == 2 || (isempty(x2) && isempty(mask))
scale = sparse(1:n,1:n,x(:,gpcf.scaler),n,n);
% Evaluate the gradients
DKff = {};
cf = gpcf.cf{1};
if ~savememory
DK = cf.fh.cfg(cf, x);
else
DK = {cf.fh.cfg(cf,x,[],[],i1)};
end
for j = 1:length(DK)
DKff{end+1} = scale*DK{j}*scale;
end
% Evaluate the gradient of non-symmetric covariance (e.g. K_fu)
elseif nargin == 3 || isempty(mask)
if size(x,2) ~= size(x2,2)
error('gpcf_scaled -> _ghyper: The number of columns in x and x2 has to be the same. ')
end
scale = sparse(1:n,1:n,x(:,gpcf.scaler),n,n);
n2 = length(x2);
scale2 = sparse(1:n2,1:n2,x2(:,gpcf.scaler),n2,n2);
% Evaluate the gradients
DKff = {};
cf = gpcf.cf{1};
if ~savememory
DK = cf.fh.cfg(cf, x, x2);
else
DK = {cf.fh.cfg(cf,x, x2, [], i1)};
end
for j = 1:length(DK)
DKff{end+1} = scale*DK{j}*scale2;
end
% Evaluate: DKff{1} = d mask(Kff,I) / d magnSigma2
% DKff{2...} = d mask(Kff,I) / d lengthScale
elseif nargin == 4 || nargin == 5
% Evaluate the gradients
DKff = {};
scale = x(:,gpcf.scaler);
cf = gpcf.cf{1};
if ~savememory
DK = cf.fh.cfg(cf, x, [], 1);
else
DK = cf.fh.cfg(cf, x, [], 1, i1);
end
for j = 1:length(DK)
DKff{end+1} = scale.*DK{j}.*scale;
end
end
if savememory
DKff=DKff{1};
end
end
function DKff = gpcf_scaled_ginput(gpcf, x, x2,i1)
%GPCF_scaled_GINPUT Evaluate gradient of covariance function with
% respect to x
%
% Description
% DKff = GPCF_scaled_GINPUT(GPCF, X) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X) with
% respect to X (cell array with matrix elements). This subfunction
% is needed when computing gradients with respect to inducing
% inputs in sparse approximations.
%
% DKff = GPCF_scaled_GINPUT(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_scaled_GINPUT(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2), or k(X,X) if X2 is empty, with respect to ith
% covariate in X (cell array with matrix elements). This
% subfunction is needed when using memory save option in
% gp_set.
%
% See also
% GPCF_scaled_PAK, GPCF_scaled_UNPAK, GPCF_scaled_LP, GP_G
[n, m] =size(x);
if nargin==4
% Use memory save option
savememory=1;
if i1==0
% Return number of covariates
if isfield(gpcf,'selectedVariables')
DKff=length(gpcf.selectedVariables);
else
DKff=m;
end
return
end
else
savememory=0;
end
% evaluate the gradient for training covariance
if nargin == 2 || isempty(x2)
scale = sparse(1:n,1:n,x(:,gpcf.scaler),n,n);
DKff = {};
cf = gpcf.cf{1};
if ~savememory
DK = cf.fh.ginput(cf, x);
else
DK = cf.fh.ginput(cf,x,[],i1);
end
for j = 1:length(DK)
DKff{end+1} = scale*DK{j}*scale;
end
% Evaluate the gradient of non-symmetric covariance (e.g. K_fu)
elseif nargin == 3 || nargin == 4
if size(x,2) ~= size(x2,2)
error('gpcf_scaled -> _ghyper: The number of columns in x and x2 has to be the same. ')
end
DKff = {};  % initialize the output cell; DKff{end+1} below requires it to exist
scale = sparse(1:n,1:n,x(:,gpcf.scaler),n,n);
n2 = length(x2);
scale2 = sparse(1:n2,1:n2,x2(:,gpcf.scaler),n2,n2);
cf = gpcf.cf{1};
if ~savememory
DK = cf.fh.ginput(cf, x, x2);
else
DK = cf.fh.ginput(cf,x,x2,i1);
end
for j = 1:length(DK)
DKff{end+1} = scale*DK{j}*scale2;
end
end
end
function C = gpcf_scaled_cov(gpcf, x1, x2)
%GP_scaled_COV Evaluate covariance matrix between two input vectors
%
% Description
% C = GP_scaled_COV(GP, TX, X) takes in covariance function of a
% Gaussian process GP and two matrixes TX and X that contain
% input vectors to GP. Returns covariance matrix C. Every
% element ij of C contains covariance between inputs i in TX
% and j in X. This is a mandatory subfunction used for example in
% prediction and energy computations.
%
%
% See also
% GPCF_scaled_TRCOV, GPCF_scaled_TRVAR, GP_COV, GP_TRCOV
if isempty(x2)
x2=x1;
end
[n1,m1]=size(x1);
[n2,m2]=size(x2);
scale = sparse(1:n1,1:n1,x1(:,gpcf.scaler),n1,n1);
scale2 = sparse(1:n2,1:n2,x2(:,gpcf.scaler),n2,n2);
if m1~=m2
error('the number of columns of X1 and X2 has to be same')
end
cf = gpcf.cf{1};
C = scale*feval(cf.fh.cov, cf, x1, x2)*scale2;
end
function C = gpcf_scaled_trcov(gpcf, x)
%GP_scaled_TRCOV Evaluate training covariance matrix of inputs
%
% Description
% C = GP_scaled_TRCOV(GP, TX) takes in covariance function of a
% Gaussian process GP and matrix TX that contains training
% input vectors. Returns covariance matrix C. Every element ij
% of C contains covariance between inputs i and j in TX. This
% is a mandatory subfunction used for example in prediction
% and energy computations.
%
% See also
% GPCF_scaled_COV, GPCF_scaled_TRVAR, GP_COV, GP_TRCOV
n = length(x);
scale = sparse(1:n,1:n,x(:,gpcf.scaler),n,n);
cf = gpcf.cf{1};
C = scale*feval(cf.fh.trcov, cf, x)*scale;
end
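% Identity check (hedged): the scaled training covariance equals
% D*K*D with D = diag(x(:,scaler)):
%   cf0 = gpcf_sexp(); gpcf = gpcf_scaled('cf', cf0, 'scaler', 1);
%   x = 1 + rand(5,1);  D = diag(x(:,1));
%   norm(gpcf.fh.trcov(gpcf,x) - D*cf0.fh.trcov(cf0,x)*D)   % ~ 0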
function C = gpcf_scaled_trvar(gpcf, x)
% GP_scaled_TRVAR Evaluate training variance vector
%
% Description
% C = GP_scaled_TRVAR(GPCF, TX) takes in covariance function of
% a Gaussian process GPCF and matrix TX that contains training
% inputs. Returns variance vector C. Every element i of C
% contains variance of input i in TX. This is a mandatory
% subfunction used for example in prediction and energy computations.
%
% See also
% GPCF_scaled_COV, GP_COV, GP_TRCOV
cf = gpcf.cf{1};
C = x(:,gpcf.scaler).*feval(cf.fh.trvar, cf, x).*x(:,gpcf.scaler);
end
function reccf = gpcf_scaled_recappend(reccf, ri, gpcf)
%RECAPPEND Record append
%
% Description
% RECCF = GPCF_scaled_RECAPPEND(RECCF, RI, GPCF) takes a
% covariance function record structure RECCF, record index RI
% and covariance function structure GPCF with the current MCMC
% samples of the parameters. Returns RECCF which contains all
% the old samples and the current samples from GPCF. This
% subfunction is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC, GP_MC->RECAPPEND
% Initialize record
if nargin == 2
reccf.type = 'gpcf_scaled';
cf = ri.cf{1};
reccf.cf{1} = feval(cf.fh.recappend, [], ri.cf{1});
reccf.scaler = ri.scaler;
% Set the function handles
reccf.fh.pak = @gpcf_scaled_pak;
reccf.fh.unpak = @gpcf_scaled_unpak;
reccf.fh.e = @gpcf_scaled_lp;
reccf.fh.lpg = @gpcf_scaled_lpg;
reccf.fh.cfg = @gpcf_scaled_cfg;
reccf.fh.cov = @gpcf_scaled_cov;
reccf.fh.trcov = @gpcf_scaled_trcov;
reccf.fh.trvar = @gpcf_scaled_trvar;
reccf.fh.recappend = @gpcf_scaled_recappend;
return
end
%loop over all of the covariance functions
cf = gpcf.cf{1};
reccf.cf{1} = feval(cf.fh.recappend, reccf.cf{1}, ri, cf);
end
| github | lcnhappe/happe-master | gpcf_ppcs2.m | .m | happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpcf_ppcs2.m | 39,334 | utf_8 | edb49e8f28a995e0555b3b9deb81b3e9 |
function gpcf = gpcf_ppcs2(varargin)
%GPCF_PPCS2 Create a piece wise polynomial (q=2) covariance function
%
% Description
% GPCF = GPCF_PPCS2('nin',nin,'PARAM1',VALUE1,'PARAM2,VALUE2,...)
% creates piece wise polynomial (q=2) covariance function
% structure in which the named parameters have the specified
% values. Any unspecified parameters are set to default values.
% Obligatory parameter is 'nin', which tells the dimension
% of input space.
%
% GPCF = GPCF_PPCS2(GPCF,'PARAM1',VALUE1,'PARAM2,VALUE2,...)
% modify a covariance function structure with the named
% parameters altered with the specified values.
%
% Parameters for piece wise polynomial (q=2) covariance function [default]
% magnSigma2 - magnitude (squared) [0.1]
% lengthScale - length scale for each input. [1]
% This can be either scalar corresponding
% to an isotropic function or vector
% defining own length-scale for each input
% direction.
% l_nin - order of the polynomial [floor(nin/2) + 3]
% Has to be greater than or equal to nin.
% magnSigma2_prior - prior for magnSigma2 [prior_logunif]
% lengthScale_prior - prior for lengthScale [prior_t]
% metric - metric structure used by the covariance function []
% selectedVariables - vector defining which inputs are used [all]
% selectedVariables is shorthand for using
% metric_euclidean with corresponding components
%
% Note! If the prior is 'prior_fixed' then the parameter in
% question is considered fixed and it is not handled in
% optimization, grid integration, MCMC etc.
%
% The piecewise polynomial function is the following:
%
% k_pp2(x_i, x_j) = ma2*cs^(l+2)*((l^2+4*l+3)*r^2 + (3*l+6)*r + 3)/3
%
% where r = sqrt( sum_d (x_i,d - x_j,d)^2/l^2_d )
% l = l_nin (by default floor(nin/2) + 3)
% cs = max(0,1-r)
% and l_nin must be greater than or equal to gpcf.nin
%
% NOTE! Use of gpcf_ppcs2 requires that you have installed
% GPstuff with SuiteSparse.
%
% See also
% GP_SET, GPCF_*, PRIOR_*, METRIC_*
% Copyright (c) 2007-2010 Jarno Vanhatalo, Jouni Hartikainen
% Copyright (c) 2010 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
if nargin>0 && ischar(varargin{1}) && ismember(varargin{1},{'init' 'set'})
% remove init and set
varargin(1)=[];
end
ip=inputParser;
ip.FunctionName = 'GPCF_PPCS2';
ip.addOptional('gpcf', [], @isstruct);
ip.addParamValue('nin',[], @(x) isscalar(x) && x>0 && mod(x,1)==0);
ip.addParamValue('magnSigma2',0.1, @(x) isscalar(x) && x>0);
ip.addParamValue('lengthScale',1, @(x) isvector(x) && all(x>0));
ip.addParamValue('l_nin',[], @(x) isscalar(x) && x>0 && mod(x,1)==0);
ip.addParamValue('metric',[], @isstruct);
ip.addParamValue('magnSigma2_prior', prior_logunif(), ...
@(x) isstruct(x) || isempty(x));
ip.addParamValue('lengthScale_prior',prior_t(), ...
@(x) isstruct(x) || isempty(x));
ip.addParamValue('selectedVariables',[], @(x) isempty(x) || ...
(isvector(x) && all(x>0)));
ip.parse(varargin{:});
gpcf=ip.Results.gpcf;
if isempty(gpcf)
% Check that SuiteSparse is available
if ~exist('ldlchol','file')
error('SuiteSparse is not installed (or it is not in the path). gpcf_ppcs2 cannot be used!')
end
init=true;
gpcf.nin=ip.Results.nin;
if isempty(gpcf.nin)
error('nin has to be given for ppcs: gpcf_ppcs2(''nin'',NIN,...)')
end
gpcf.type = 'gpcf_ppcs2';
% cf is compactly supported
gpcf.cs = 1;
else
if ~isfield(gpcf,'type') || ~isequal(gpcf.type,'gpcf_ppcs2')
error('First argument does not seem to be a valid covariance function structure')
end
init=false;
end
if init
% Set the function handles to the subfunctions
gpcf.fh.pak = @gpcf_ppcs2_pak;
gpcf.fh.unpak = @gpcf_ppcs2_unpak;
gpcf.fh.lp = @gpcf_ppcs2_lp;
gpcf.fh.lpg = @gpcf_ppcs2_lpg;
gpcf.fh.cfg = @gpcf_ppcs2_cfg;
gpcf.fh.ginput = @gpcf_ppcs2_ginput;
gpcf.fh.cov = @gpcf_ppcs2_cov;
gpcf.fh.trcov = @gpcf_ppcs2_trcov;
gpcf.fh.trvar = @gpcf_ppcs2_trvar;
gpcf.fh.recappend = @gpcf_ppcs2_recappend;
end
% Initialize parameters
if init || ~ismember('l_nin',ip.UsingDefaults)
gpcf.l=ip.Results.l_nin;
if isempty(gpcf.l)
gpcf.l = floor(gpcf.nin/2) + 3;
end
if gpcf.l < gpcf.nin
error('The l_nin has to be greater than or equal to the number of inputs!')
end
end
if init || ~ismember('lengthScale',ip.UsingDefaults)
gpcf.lengthScale = ip.Results.lengthScale;
end
if init || ~ismember('magnSigma2',ip.UsingDefaults)
gpcf.magnSigma2 = ip.Results.magnSigma2;
end
% Initialize prior structure
if init
gpcf.p=[];
end
if init || ~ismember('lengthScale_prior',ip.UsingDefaults)
gpcf.p.lengthScale=ip.Results.lengthScale_prior;
end
if init || ~ismember('magnSigma2_prior',ip.UsingDefaults)
gpcf.p.magnSigma2=ip.Results.magnSigma2_prior;
end
%Initialize metric
if ~ismember('metric',ip.UsingDefaults)
if ~isempty(ip.Results.metric)
gpcf.metric = ip.Results.metric;
gpcf = rmfield(gpcf, 'lengthScale');
gpcf.p = rmfield(gpcf.p, 'lengthScale');
elseif isfield(gpcf,'metric')
if ~isfield(gpcf,'lengthScale')
gpcf.lengthScale = gpcf.metric.lengthScale;
end
if ~isfield(gpcf.p,'lengthScale')
gpcf.p.lengthScale = gpcf.metric.p.lengthScale;
end
gpcf = rmfield(gpcf, 'metric');
end
end
% selectedVariables options implemented using metric_euclidean
if ~ismember('selectedVariables',ip.UsingDefaults)
if ~isfield(gpcf,'metric')
if ~isempty(ip.Results.selectedVariables)
gpcf.selectedVariables = ip.Results.selectedVariables;
% gpcf.metric=metric_euclidean('components',...
% num2cell(ip.Results.selectedVariables),...
% 'lengthScale',gpcf.lengthScale,...
% 'lengthScale_prior',gpcf.p.lengthScale);
% gpcf = rmfield(gpcf, 'lengthScale');
% gpcf.p = rmfield(gpcf.p, 'lengthScale');
end
elseif isfield(gpcf,'metric')
if ~isempty(ip.Results.selectedVariables)
gpcf.metric=metric_euclidean(gpcf.metric,...
'components',...
num2cell(ip.Results.selectedVariables));
if ~ismember('lengthScale',ip.UsingDefaults)
gpcf.metric.lengthScale=ip.Results.lengthScale;
gpcf = rmfield(gpcf, 'lengthScale');
end
if ~ismember('lengthScale_prior',ip.UsingDefaults)
gpcf.metric.p.lengthScale=ip.Results.lengthScale_prior;
gpcf.p = rmfield(gpcf.p, 'lengthScale');
end
else
if ~isfield(gpcf,'lengthScale')
gpcf.lengthScale = gpcf.metric.lengthScale;
end
if ~isfield(gpcf.p,'lengthScale')
gpcf.p.lengthScale = gpcf.metric.p.lengthScale;
end
gpcf = rmfield(gpcf, 'metric');
end
end
end
end
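% A hedged usage sketch (not part of GPstuff; assumes GPstuff and
% SuiteSparse are on the path). The compact support makes the training
% covariance sparse, which is the point of this covariance function:
%   x  = rand(200,2);
%   cf = gpcf_ppcs2('nin', 2, 'lengthScale', 0.1, 'magnSigma2', 1);
%   K  = cf.fh.trcov(cf, x);           % sparse 200 x 200 matrix
%   fprintf('density %.1f%%\n', 100*nnz(K)/numel(K));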
function [w,s] = gpcf_ppcs2_pak(gpcf)
%GPCF_PPCS2_PAK Combine GP covariance function parameters into
% one vector
%
% Description
% W = GPCF_PPCS2_PAK(GPCF) takes a covariance function
% structure GPCF and combines the covariance function
% parameters and their hyperparameters into a single row
% vector W. This is a mandatory subfunction used for
% example in energy and gradient computations.
%
% w = [ log(gpcf.magnSigma2)
% (hyperparameters of gpcf.magnSigma2)
% log(gpcf.lengthScale(:))
% (hyperparameters of gpcf.lengthScale)]'
%
% See also
% GPCF_PPCS2_UNPAK
w = []; s = {};
if ~isempty(gpcf.p.magnSigma2)
w = [w log(gpcf.magnSigma2)];
s = [s; 'log(ppcs2.magnSigma2)'];
% Hyperparameters of magnSigma2
[wh sh] = gpcf.p.magnSigma2.fh.pak(gpcf.p.magnSigma2);
w = [w wh];
s = [s; sh];
end
if isfield(gpcf,'metric')
[wh sh]=gpcf.metric.fh.pak(gpcf.metric);
w = [w wh];
s = [s; sh];
else
if ~isempty(gpcf.p.lengthScale)
w = [w log(gpcf.lengthScale)];
if numel(gpcf.lengthScale)>1
s = [s; sprintf('log(ppcs2.lengthScale x %d)',numel(gpcf.lengthScale))];
else
s = [s; 'log(ppcs2.lengthScale)'];
end
% Hyperparameters of lengthScale
[wh sh] = gpcf.p.lengthScale.fh.pak(gpcf.p.lengthScale);
w = [w wh];
s = [s; sh];
end
end
end
function [gpcf, w] = gpcf_ppcs2_unpak(gpcf, w)
%GPCF_PPCS2_UNPAK Sets the covariance function parameters into
% the structure
%
% Description
% [GPCF, W] = GPCF_PPCS2_UNPAK(GPCF, W) takes a covariance
% function structure GPCF and a hyper-parameter vector W,
% and returns a covariance function structure identical
% to the input, except that the covariance hyper-parameters
% have been set to the values in W. Deletes the values set to
% GPCF from W and returns the modified W. This is a mandatory
% subfunction used for example in energy and gradient computations.
%
% Assignment is inverse of
% w = [ log(gpcf.magnSigma2)
% (hyperparameters of gpcf.magnSigma2)
% log(gpcf.lengthScale(:))
% (hyperparameters of gpcf.lengthScale)]'
%
% See also
% GPCF_PPCS2_PAK
gpp=gpcf.p;
if ~isempty(gpp.magnSigma2)
gpcf.magnSigma2 = exp(w(1));
w = w(2:end);
% Hyperparameters of magnSigma2
[p, w] = gpcf.p.magnSigma2.fh.unpak(gpcf.p.magnSigma2, w);
gpcf.p.magnSigma2 = p;
end
if isfield(gpcf,'metric')
[metric, w] = gpcf.metric.fh.unpak(gpcf.metric, w);
gpcf.metric = metric;
else
if ~isempty(gpp.lengthScale)
i1=1;
i2=length(gpcf.lengthScale);
gpcf.lengthScale = exp(w(i1:i2));
w = w(i2+1:end);
% Hyperparameters of lengthScale
[p, w] = gpcf.p.lengthScale.fh.unpak(gpcf.p.lengthScale, w);
gpcf.p.lengthScale = p;
end
end
end
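% A hedged round-trip sketch (not a shipped GPstuff test): unpak should
% invert pak on the log-transformed parameters, as implied by the code
% above:
%   cf  = gpcf_ppcs2('nin', 2, 'lengthScale', [0.3 0.7]);
%   w   = cf.fh.pak(cf);
%   cf2 = cf.fh.unpak(cf, w);
%   max(abs(cf2.lengthScale - cf.lengthScale))   % expected to be ~0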
function lp = gpcf_ppcs2_lp(gpcf)
%GPCF_PPCS2_LP Evaluate the log prior of covariance function parameters
%
% Description
% LP = GPCF_PPCS2_LP(GPCF) takes a covariance function
% structure GPCF and returns log(p(th)), where th collects the
% parameters. This is a mandatory subfunction used for example
% in energy computations.
%
% See also
% GPCF_PPCS2_PAK, GPCF_PPCS2_UNPAK, GPCF_PPCS2_LPG, GP_E
% Evaluate the prior contribution to the error. The parameters that
% are sampled are transformed, e.g., W = log(w) where w is all
% the "real" samples. On the other hand errors are evaluated in
% the W-space so we need take into account also the Jacobian of
% transformation, e.g., W -> w = exp(W). See Gelman et.al., 2004,
% Bayesian data Analysis, second edition, p24.
lp = 0;
gpp=gpcf.p;
if ~isempty(gpcf.p.magnSigma2)
lp = lp +gpp.magnSigma2.fh.lp(gpcf.magnSigma2, ...
gpp.magnSigma2) +log(gpcf.magnSigma2);
end
if isfield(gpcf,'metric')
lp = lp +gpcf.metric.fh.lp(gpcf.metric);
elseif ~isempty(gpp.lengthScale)
lp = lp +gpp.lengthScale.fh.lp(gpcf.lengthScale, ...
gpp.lengthScale) +sum(log(gpcf.lengthScale));
end
end
function lpg = gpcf_ppcs2_lpg(gpcf)
%GPCF_PPCS2_LPG Evaluate gradient of the log prior with respect
% to the parameters.
%
% Description
% LPG = GPCF_PPCS2_LPG(GPCF) takes a covariance function
% structure GPCF and returns LPG = d log (p(th))/dth, where th
% is the vector of parameters. This is a mandatory subfunction
% used for example in gradient computations.
%
% See also
% GPCF_PPCS2_PAK, GPCF_PPCS2_UNPAK, GPCF_PPCS2_LP, GP_G
lpg = [];
gpp=gpcf.p;
if ~isempty(gpcf.p.magnSigma2)
lpgs = gpp.magnSigma2.fh.lpg(gpcf.magnSigma2, gpp.magnSigma2);
lpg = [lpg lpgs(1).*gpcf.magnSigma2+1 lpgs(2:end)];
end
if isfield(gpcf,'metric')
lpg_dist = gpcf.metric.fh.lpg(gpcf.metric);
lpg=[lpg lpg_dist];
else
if ~isempty(gpcf.p.lengthScale)
lll = length(gpcf.lengthScale);
lpgs = gpp.lengthScale.fh.lpg(gpcf.lengthScale, gpp.lengthScale);
lpg = [lpg lpgs(1:lll).*gpcf.lengthScale+1 lpgs(lll+1:end)];
end
end
end
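% For reference, the "+1" terms above come from the log transform: with
% W = log(th) the prior density in W-space picks up a factor th, so
% d/dW [log p(th) + log th] = th * dlog p(th)/dth + 1, which is exactly
% lpgs(1).*th + 1 for th = magnSigma2 or an element of lengthScale.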
function DKff = gpcf_ppcs2_cfg(gpcf, x, x2, mask, i1)
%GPCF_PPCS2_CFG Evaluate gradient of covariance function
% with respect to the parameters
%
% Description
% DKff = GPCF_PPCS2_CFG(GPCF, X) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X) with
% respect to th (cell array with matrix elements). This is a
% mandatory subfunction used in gradient computations.
%
% DKff = GPCF_PPCS2_CFG(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_PPCS2_CFG(GPCF, X, [], MASK) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the diagonal of gradients of covariance matrix
% Kff = k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_PPCS2_CFG(GPCF, X, X2, [], i) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2), or k(X,X) if X2 is empty, with respect to ith
% hyperparameter. This subfunction is needed when using
% memory save option in gp_set.
%
% See also
% GPCF_PPCS2_PAK, GPCF_PPCS2_UNPAK, GPCF_PPCS2_LP, GP_G
gpp=gpcf.p;
i2=1;
DKff = {};
gprior = [];
if nargin==5
% Use memory save option
savememory=1;
if i1==0
% Return number of hyperparameters
i=0;
if ~isempty(gpcf.p.magnSigma2)
i=i+1;
end
if ~isempty(gpcf.p.lengthScale)
i=i+length(gpcf.lengthScale);
end
DKff=i;
return
end
else
savememory=0;
end
% Evaluate: DKff{1} = d Kff / d magnSigma2
% DKff{2} = d Kff / d lengthScale
% NOTE! Here we have already taken into account that the parameters
% are transformed through log() and thus dK/dlog(p) = p * dK/dp
% evaluate the gradient for training covariance
if nargin == 2 || (isempty(x2) && isempty(mask))
Cdm = gpcf_ppcs2_trcov(gpcf, x);
ii1=0;
if ~isempty(gpcf.p.magnSigma2)
ii1 = ii1 +1;
DKff{ii1} = Cdm;
end
l = gpcf.l;
[I,J] = find(Cdm);
if isfield(gpcf,'metric')
% Compute the sparse distance matrix and its gradient.
[n, m] =size(x);
ntriplets = (nnz(Cdm)-n)./2;
I = zeros(ntriplets,1);
J = zeros(ntriplets,1);
dist = zeros(ntriplets,1);
for jj = 1:length(gpcf.metric.components)
gdist{jj} = zeros(ntriplets,1);
end
ntriplets = 0;
for ii=1:n-1
col_ind = ii + find(Cdm(ii+1:n,ii));
d = gpcf.metric.fh.dist(gpcf.metric, x(col_ind,:), x(ii,:));
gd = gpcf.metric.fh.distg(gpcf.metric, x(col_ind,:), x(ii,:));
ntrip_prev = ntriplets;
ntriplets = ntriplets + length(d);
ind_tr = ntrip_prev+1:ntriplets;
I(ind_tr) = col_ind;
J(ind_tr) = ii;
dist(ind_tr) = d;
for jj = 1:length(gd)
gdist{jj}(ind_tr) = gd{jj};
end
end
ma2 = gpcf.magnSigma2;
cs = 1-dist;
const1 = 2.*l^2+8.*l+6;
const2 = (l+2)*0.5*const1;
const3 = ma2/3.*cs.^(l+1);
Dd = const3.*(cs.*(const1.*dist+3*l+6) - (const2.*dist.^2 + (l+2)*(3*l+6).*dist+(l+2)*3));
for i=1:length(gdist)
ii1 = ii1+1;
D = Dd.*gdist{i};
D = sparse(I,J,D,n,n);
DKff{ii1} = D + D';
end
else
if isfield(gpcf, 'selectedVariables')
x = x(:,gpcf.selectedVariables);
end
[n, m] =size(x);
if ~savememory
i1=1:m;
else
if i1==1
DKff=DKff{1};
return
end
i1=i1-1;
ii1=ii1-1;
end
if ~isempty(gpcf.p.lengthScale)
% loop over all the lengthScales
if length(gpcf.lengthScale) == 1
% In the case of isotropic PPCS2
s2 = 1./gpcf.lengthScale.^2;
ma2 = gpcf.magnSigma2;
% Calculate the sparse distance (lower triangle) matrix
d2 = 0;
for i = 1:m
d2 = d2 + s2.*(x(I,i) - x(J,i)).^2;
end
d = sqrt(d2);
% Create the 'compact support' matrix, that is, (1-R)_+,
% where ()_+ truncates all non-positive inputs to zero.
cs = 1-d;
% Calculate the gradient matrix
const1 = 2.*l^2+8.*l+6;
const2 = l^2+4.*l+3;
D = -ma2.*cs.^(l+1).*d.*(cs.*(const1.*d+3.*l+6)-(l+2).*(const2.*d2+(3.*l+6).*d+3))/3;
D = sparse(I,J,D,n,n);
ii1 = ii1+1;
DKff{ii1} = D;
else
% In the case ARD is used
s2 = 1./gpcf.lengthScale.^2;
ma2 = gpcf.magnSigma2;
% Calculate the sparse distance (lower triangle) matrix
% and the distance matrix for each component
d2 = 0;
d_l2 = [];
for i = 1:m
d_l2(:,i) = s2(i).*(x(I,i) - x(J,i)).^2;
d2 = d2 + d_l2(:,i);
end
d = sqrt(d2);
d_l = d_l2;
% Create the 'compact support' matrix, that is, (1-R)_+,
% where ()_+ truncates all non-positive inputs to zero.
cs = 1-d;
const1 = 2.*l^2+8.*l+6;
const2 = (l+2)*0.5*const1;
const3 = -ma2/3.*cs.^(l+1);
Dd = const3.*(cs.*(const1.*d+3*l+6)-(const2.*d2+(l+2)*(3*l+6).*d+(l+2)*3));
int = d ~= 0;
for i = i1
% Calculate the gradient matrix
D = d_l(:,i).*Dd;
% Divide by r in cases where r is non-zero
D(int) = D(int)./d(int);
D = sparse(I,J,D,n,n);
ii1 = ii1+1;
DKff{ii1} = D;
end
end
end
end
% Evaluate the gradient of non-symmetric covariance (e.g. K_fu)
elseif nargin == 3 || isempty(mask)
if size(x,2) ~= size(x2,2)
error('gpcf_ppcs2 -> _cfg: The number of columns in x and x2 has to be the same. ')
end
ii1=0;
K = gpcf.fh.cov(gpcf, x, x2);
if ~isempty(gpcf.p.magnSigma2)
ii1 = ii1 +1;
DKff{ii1} = K;
end
l = gpcf.l;
if isfield(gpcf,'metric')
% If other than scaled euclidean metric
[n1,m1]=size(x);
[n2,m2]=size(x2);
ma = gpcf.magnSigma2;
% Compute the sparse distance matrix.
ntriplets = nnz(K);
I = zeros(ntriplets,1);
J = zeros(ntriplets,1);
R = zeros(ntriplets,1);
dist = zeros(ntriplets,1);
for jj = 1:length(gpcf.metric.components)
gdist{jj} = zeros(ntriplets,1);
end
ntriplets = 0;
for ii=1:n2
d = zeros(n1,1);
d = gpcf.metric.fh.dist(gpcf.metric, x, x2(ii,:));
gd = gpcf.metric.fh.distg(gpcf.metric, x, x2(ii,:));
gprior_dist = gpcf.metric.fh.lpg(gpcf.metric, x, x2(ii,:));
I0t = find(d==0);
d(d >= 1) = 0;
[I2,J2,R2] = find(d);
len = length(R);
ntrip_prev = ntriplets;
ntriplets = ntriplets + length(R2);
ind_tr = ntrip_prev+1:ntriplets;
I(ind_tr) = I2;
J(ind_tr) = ii;
dist(ind_tr) = R2;
for jj = 1:length(gd)
gdist{jj}(ind_tr) = gd{jj}(I2);
end
end
ma2 = gpcf.magnSigma2;
cs = 1-dist;
const1 = 2.*l^2+8.*l+6;
const2 = (l+2)*0.5*const1;
const3 = ma2/3.*cs.^(l+1);
Dd = const3.*(cs.*(const1.*dist+3*l+6) - (const2.*dist.^2 + (l+2)*(3*l+6).*dist+(l+2)*3));
for i=1:length(gdist)
ii1 = ii1+1;
D = Dd.*gdist{i};
D = sparse(I,J,D,n1,n2);
DKff{ii1} = D;
end
else
if isfield(gpcf, 'selectedVariables')
x = x(:,gpcf.selectedVariables);
x2 = x2(:,gpcf.selectedVariables);
end
[n, m] =size(x);
if ~savememory
i1=1:m;
else
if i1==1
DKff=DKff{1};
return
end
i1=i1-1;
ii1=ii1-1;
end
if ~isempty(gpcf.p.lengthScale)
% loop over all the lengthScales
if length(gpcf.lengthScale) == 1
% In the case of isotropic PPCS2
s2 = 1./gpcf.lengthScale.^2;
ma2 = gpcf.magnSigma2;
% Calculate the sparse distance (lower triangle) matrix
dist1 = 0;
for i=1:m
dist1 = dist1 + s2.*(bsxfun(@minus,x(:,i),x2(:,i)')).^2;
end
d1 = sqrt(dist1);
cs1 = max(1-d1,0);
const1 = 2.*l^2+8.*l+6;
const2 = l^2+4.*l+3;
DK_l = -ma2.*cs1.^(l+1).*d1.*(cs1.*(const1.*d1+3.*l+6)-(l+2).*(const2.*dist1+(3.*l+6).*d1+3))/3;
ii1=ii1+1;
DKff{ii1} = DK_l;
else
% In the case ARD is used
s2 = 1./gpcf.lengthScale.^2;
ma2 = gpcf.magnSigma2;
% Calculate the sparse distance (lower triangle) matrix
% and the distance matrix for each component
dist1 = 0;
d_l1 = [];
for i = 1:m
dist1 = dist1 + s2(i).*bsxfun(@minus,x(:,i),x2(:,i)').^2;
d_l1{i} = s2(i).*(bsxfun(@minus,x(:,i),x2(:,i)')).^2;
end
d1 = sqrt(dist1);
cs1 = max(1-d1,0);
const1 = l^2+4.*l+3;
const2 = 3.*l+6;
for i = i1
% Calculate the gradient matrix
DK_l = ma2.*(l+2).*d_l1{i}.*cs1.^(l+1).*(const1.*dist1 + const2.*d1 + 3)./3;
DK_l = DK_l - ma2.*cs1.^(l+2).*d_l1{i}.*(2.*const1.*d1 + const2)./3;
% Divide by r in cases where r is non-zero
DK_l(d1 ~= 0) = DK_l(d1 ~= 0)./d1(d1 ~= 0);
ii1=ii1+1;
DKff{ii1} = DK_l;
end
end
end
end
% Evaluate: DKff{1} = d mask(Kff,I) / d magnSigma2
% DKff{2...} = d mask(Kff,I) / d lengthScale
elseif nargin == 4 || nargin == 5
ii1=0;
[n, m] =size(x);
if ~isempty(gpcf.p.magnSigma2) && (~savememory || all(i1==1))
ii1 = ii1+1;
DKff{ii1} = gpcf.fh.trvar(gpcf, x); % d mask(Kff,I) / d magnSigma2
end
if isfield(gpcf,'metric')
dist = 0;
distg = gpcf.metric.fh.distg(gpcf.metric, x, [], 1);
gprior_dist = gpcf.metric.fh.lpg(gpcf.metric);
for i=1:length(distg)
ii1 = ii1+1;
DKff{ii1} = 0;
end
else
if ~isempty(gpcf.p.lengthScale)
for i2=1:length(gpcf.lengthScale)
ii1 = ii1+1;
DKff{ii1} = 0; % d mask(Kff,I) / d lengthScale
end
end
end
end
if savememory
DKff=DKff{1};
end
end
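% A hedged finite-difference check for the gradients above (not a
% GPstuff test; assumes the default priors contribute no extra entries
% to the packed vector w, so DKff{k} pairs with w(k)):
%   x  = rand(40,2); cf = gpcf_ppcs2('nin', 2);
%   DK = cf.fh.cfg(cf, x); w = cf.fh.pak(cf); h = 1e-6;
%   for k = 1:numel(w)
%     wp = w; wp(k) = wp(k)+h; wm = w; wm(k) = wm(k)-h;
%     Kp = cf.fh.trcov(cf.fh.unpak(cf, wp), x);
%     Km = cf.fh.trcov(cf.fh.unpak(cf, wm), x);
%     fprintf('parameter %d: max abs error %g\n', k, ...
%             full(max(max(abs((Kp-Km)/(2*h) - DK{k})))));
%   end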
function DKff = gpcf_ppcs2_ginput(gpcf, x, x2, i1)
%GPCF_PPCS2_GINPUT Evaluate gradient of covariance function with
% respect to x
%
% Description
% DKff = GPCF_PPCS2_GINPUT(GPCF, X) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_PPCS2_GINPUT(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_PPCS2_GINPUT(GPCF, X, X2, i) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2), or k(X,X) if X2 is empty, with respect to ith
% covariate in X (cell array with matrix elements). This
% subfunction is needed when using memory save option in
% gp_set.
%
% See also
% GPCF_PPCS2_PAK, GPCF_PPCS2_UNPAK, GPCF_PPCS2_LP, GP_G
[n, m] =size(x);
ii1=0;
if nargin<4
i1=1:m;
else
% Use memory save option
if i1==0
% Return number of covariates
if isfield(gpcf,'selectedVariables')
DKff=length(gpcf.selectedVariables);
else
DKff=m;
end
return
end
end
% evaluate the gradient for training covariance
if nargin == 2 || isempty(x2)
K = gpcf_ppcs2_trcov(gpcf, x);
l = gpcf.l;
[I,J] = find(K);
if isfield(gpcf,'metric')
% Compute the sparse distance matrix and its gradient.
ntriplets = (nnz(K)-n)./2;
I = zeros(ntriplets,1);
J = zeros(ntriplets,1);
dist = zeros(ntriplets,1);
for jj = 1:length(gpcf.metric.components)
gdist{jj} = zeros(ntriplets,1);
end
ntriplets = 0;
for ii=1:n-1
col_ind = ii + find(K(ii+1:n,ii));
d = zeros(length(col_ind),1);
d = gpcf.metric.fh.dist(gpcf.metric, x(col_ind,:), x(ii,:));
[gd, gprior_dist] = gpcf.metric.fh.ginput(gpcf.metric, x(col_ind,:), x(ii,:));
ntrip_prev = ntriplets;
ntriplets = ntriplets + length(d);
ind_tr = ntrip_prev+1:ntriplets;
I(ind_tr) = col_ind;
J(ind_tr) = ii;
dist(ind_tr) = d;
for jj = 1:length(gd)
gdist{jj}(ind_tr) = gd{jj};
end
end
ma2 = gpcf.magnSigma2;
cs = 1-dist;
const1 = 2.*l^2+8.*l+6;
const2 = (l+2)*0.5*const1;
const3 = ma2/3.*cs.^(l+1);
Dd = const3.*(cs.*(const1.*dist+3*l+6) - (const2.*dist.^2 + (l+2)*(3*l+6).*dist+(l+2)*3));
for i=1:length(gdist)
ii1 = ii1+1;
D = Dd.*gdist{i};
D = sparse(I,J,D,n,n);
DKff{ii1} = D + D';
end
else
if length(gpcf.lengthScale) == 1
% In the case of an isotropic PPCS2
s2 = repmat(1./gpcf.lengthScale.^2, 1, m);
else
s2 = 1./gpcf.lengthScale.^2;
end
ma2 = gpcf.magnSigma2;
% Calculate the sparse distance (lower triangle) matrix
% and the distance matrix for each component
d2 = 0;
for i = 1:m
d2 = d2 + s2(i).*(x(I,i) - x(J,i)).^2;
end
d = sqrt(d2);
% Create the 'compact support' matrix, that is, (1-R)_+,
% where ()_+ truncates all non-positive inputs to zero.
cs = 1-d;
const1 = 2.*l^2+8.*l+6;
const2 = (l+2)*0.5*const1;
const3 = ma2/3.*cs.^(l+1);
Dd = const3.*(cs.*(const1.*d+3*l+6) - (const2.*d.^2 + (l+2)*(3*l+6).*d+(l+2)*3));
Dd = sparse(I,J,Dd,n,n);
d = sparse(I,J,d,n,n);
row = ones(n,1);
cols = 1:n;
for i = i1
for j = 1:n
% Calculate the gradient matrix
ind = find(d(:,j));
apu = full(Dd(:,j)).*s2(i).*(x(j,i)-x(:,i));
apu(ind) = apu(ind)./d(ind,j);
D = sparse(row*j, cols, apu, n, n);
D = D+D';
ii1 = ii1+1;
DKff{ii1} = D;
end
end
end
% Evaluate the gradient of non-symmetric covariance (e.g. K_fu)
elseif nargin == 3 || nargin == 4
if size(x,2) ~= size(x2,2)
error('gpcf_ppcs2 -> _ginput: The number of columns in x and x2 has to be the same. ')
end
ii1=0;
K = gpcf.fh.cov(gpcf, x, x2);
n2 = size(x2,1);
l = gpcf.l;
if isfield(gpcf,'metric')
% If other than scaled euclidean metric
[n1,m1]=size(x);
[n2,m2]=size(x2);
ma = gpcf.magnSigma2;
% Compute the sparse distance matrix.
ntriplets = nnz(K);
I = zeros(ntriplets,1);
J = zeros(ntriplets,1);
R = zeros(ntriplets,1);
dist = zeros(ntriplets,1);
for jj = 1:length(gpcf.metric.components)
gdist{jj} = zeros(ntriplets,1);
end
ntriplets = 0;
for ii=1:n2
d = zeros(n1,1);
d = gpcf.metric.fh.dist(gpcf.metric, x, x2(ii,:));
[gd, gprior_dist] = gpcf.metric.fh.ginput(gpcf.metric, x, x2(ii,:));
I0t = find(d==0);
d(d >= 1) = 0;
[I2,J2,R2] = find(d);
len = length(R);
ntrip_prev = ntriplets;
ntriplets = ntriplets + length(R2);
ind_tr = ntrip_prev+1:ntriplets;
I(ind_tr) = I2;
J(ind_tr) = ii;
dist(ind_tr) = R2;
for jj = 1:length(gd)
gdist{jj}(ind_tr) = gd{jj}(I2);
end
end
ma2 = gpcf.magnSigma2;
cs = 1-dist;
const1 = 2.*l^2+8.*l+6;
const2 = (l+2)*0.5*const1;
const3 = ma2/3.*cs.^(l+1);
Dd = const3.*(cs.*(const1.*dist+3*l+6) - (const2.*dist.^2 + (l+2)*(3*l+6).*dist+(l+2)*3));
for i=1:length(gdist)
ii1 = ii1+1;
D = Dd.*gdist{i};
D = sparse(I,J,D,n1,n2);
DKff{ii1} = D;
end
else
if length(gpcf.lengthScale) == 1
% In the case of an isotropic PPCS2
s2 = repmat(1./gpcf.lengthScale.^2, 1, m);
else
s2 = 1./gpcf.lengthScale.^2;
end
ma2 = gpcf.magnSigma2;
% Calculate the sparse distance (lower triangle) matrix
% and the distance matrix for each component
d2 = 0;
for i = 1:m
d2 = d2 + s2(i).*bsxfun(@minus,x(:,i),x2(:,i)').^2;
end
d = sqrt(d2);
cs = max(1-d,0);
const1 = 2.*l^2+8.*l+6;
const2 = (l+2)*0.5*const1;
const3 = ma2/3.*cs.^(l+1);
Dd = const3.*(cs.*(const1.*d+3*l+6) - (const2.*d.^2 + (l+2)*(3*l+6).*d+(l+2)*3));
row = ones(n2,1);
cols = 1:n2;
for i = i1
for j = 1:n
% Calculate the gradient matrix
ind = find(d(j,:));
apu = Dd(j,:).*s2(i).*(x(j,i)-x2(:,i))';
apu(ind) = apu(ind)./d(j,ind);
D = sparse(row*j, cols, apu, n, n2);
ii1 = ii1+1;
DKff{ii1} = D;
end
end
end
end
end
function C = gpcf_ppcs2_cov(gpcf, x1, x2, varargin)
%GP_PPCS2_COV Evaluate covariance matrix between two input vectors
%
% Description
% C = GP_PPCS2_COV(GP, TX, X) takes in covariance function of
% a Gaussian process GP and two matrixes TX and X that contain
% input vectors to GP. Returns covariance matrix C. Every
% element ij of C contains covariance between inputs i in TX
% and j in X. This is a mandatory subfunction used for example in
% prediction and energy computations.
%
% See also
% GPCF_PPCS2_TRCOV, GPCF_PPCS2_TRVAR, GP_COV, GP_TRCOV
if isfield(gpcf,'metric')
% If other than scaled euclidean metric
[n1,m1]=size(x1);
[n2,m2]=size(x2);
else
% If scaled euclidean metric
if isfield(gpcf, 'selectedVariables')
x1 = x1(:,gpcf.selectedVariables);
x2 = x2(:,gpcf.selectedVariables);
end
[n1,m1]=size(x1);
[n2,m2]=size(x2);
s = 1./(gpcf.lengthScale);
s2 = s.^2;
if numel(s)==1
s2 = repmat(s2,1,m1);
end
end
ma2 = gpcf.magnSigma2;
l = gpcf.l;
% Compute the sparse distance matrix.
ntriplets = max(1,floor(0.03*n1*n2));
I = zeros(ntriplets,1);
J = zeros(ntriplets,1);
R = zeros(ntriplets,1);
ntriplets = 0;
I0=zeros(ntriplets,1);
J0=zeros(ntriplets,1);
nn0=0;
for ii1=1:n2
d = zeros(n1,1);
if isfield(gpcf, 'metric')
d = gpcf.metric.fh.dist(gpcf.metric, x1, x2(ii1,:));
else
for j=1:m1
d = d + s2(j).*(x1(:,j)-x2(ii1,j)).^2;
end
end
%d = sqrt(d);
I0t = find(d==0);
d(d >= 1) = 0;
[I2,J2,R2] = find(d);
R2 = sqrt(R2);
%len = length(R);
ntrip_prev = ntriplets;
ntriplets = ntriplets + length(R2);
I(ntrip_prev+1:ntriplets) = I2;
J(ntrip_prev+1:ntriplets) = ii1;
R(ntrip_prev+1:ntriplets) = R2;
I0(nn0+1:nn0+length(I0t)) = I0t;
J0(nn0+1:nn0+length(I0t)) = ii1;
nn0 = nn0+length(I0t);
end
r = sparse(I(1:ntriplets),J(1:ntriplets),R(1:ntriplets));
[I,J,r] = find(r);
cs = max(0, 1-r);
C = ma2.*cs.^(l+2).*((l^2+4*l+3).*r.^2+(3*l+6).*r+3)/3;
C = sparse(I,J,C,n1,n2) + sparse(I0,J0,ma2,n1,n2);
end
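% A hedged dense cross-check of the closed form quoted in the file
% header (illustrative values only):
%   x1 = rand(15,2); x2 = rand(10,2);
%   cf = gpcf_ppcs2('nin', 2, 'lengthScale', 0.5, 'magnSigma2', 2);
%   C  = full(cf.fh.cov(cf, x1, x2));
%   xs1 = x1/0.5; xs2 = x2/0.5;      % inputs scaled by the length scale
%   r  = sqrt(max(0, bsxfun(@plus, sum(xs1.^2,2), sum(xs2.^2,2)') ...
%             - 2*(xs1*xs2')));
%   l  = cf.l; cs = max(0, 1-r);
%   Cd = 2*cs.^(l+2).*((l^2+4*l+3)*r.^2 + (3*l+6)*r + 3)/3;
%   max(abs(C(:) - Cd(:)))           % expected to be ~1e-15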
function C = gpcf_ppcs2_trcov(gpcf, x)
%GP_PPCS2_TRCOV Evaluate training covariance matrix of inputs
%
% Description
% C = GP_PPCS2_TRCOV(GP, TX) takes in covariance function of a
% Gaussian process GP and matrix TX that contains training
% input vectors. Returns covariance matrix C. Every element ij
% of C contains covariance between inputs i and j in TX. This
% is a mandatory subfunction used for example in prediction and
% energy computations.
%
% See also
% GPCF_PPCS2_COV, GPCF_PPCS2_TRVAR, GP_COV, GP_TRCOV
if isfield(gpcf,'metric')
% If other than scaled euclidean metric
[n, m] =size(x);
else
% If a scaled euclidean metric, try the mex implementation first,
% and if it is not available...
C = trcov(gpcf,x);
% ... evaluate the covariance here.
if isnan(C)
if isfield(gpcf, 'selectedVariables')
x = x(:,gpcf.selectedVariables);
end
[n, m] =size(x);
s = 1./(gpcf.lengthScale);
s2 = s.^2;
if numel(s)==1
s2 = repmat(s2,1,m);
end
else
return
end
end
ma2 = gpcf.magnSigma2;
l = gpcf.l;
% Compute the sparse distance matrix.
ntriplets = max(1,floor(0.03*n*n));
I = zeros(ntriplets,1);
J = zeros(ntriplets,1);
R = zeros(ntriplets,1);
ntriplets = 0;
ntripletsz = max(1,floor(0.03.^2*n*n));
Iz = zeros(ntripletsz,1);
Jz = zeros(ntripletsz,1);
ntripletsz = 0;
for ii1=1:n-1
d = zeros(n-ii1,1);
col_ind = ii1+1:n;
if isfield(gpcf, 'metric')
d = gpcf.metric.fh.dist(gpcf.metric, x(col_ind,:), x(ii1,:));
else
for ii2=1:m
d = d+s2(ii2).*(x(col_ind,ii2)-x(ii1,ii2)).^2;
end
end
%d = sqrt(d);
% store zero distance index
[I2z,J2z] = find(d==0);
% create triplets for distances 0<d<1
d(d >= 1) = 0;
[I2,J2,R2] = find(d);
len = length(R);
ntrip_prev = ntriplets;
ntriplets = ntriplets + length(R2);
if (ntriplets > len)
I(2*len) = 0;
J(2*len) = 0;
R(2*len) = 0;
end
ind_tr = ntrip_prev+1:ntriplets;
I(ind_tr) = ii1+I2;
J(ind_tr) = ii1;
R(ind_tr) = sqrt(R2);
% create triplets for distances d==0 (i~=j)
lenz = length(Iz);
ntrip_prevz = ntripletsz;
ntripletsz = ntripletsz + length(I2z);
if (ntripletsz > lenz)
Iz(2*lenz) = 0;
Jz(2*lenz) = 0;
end
ind_trz = ntrip_prevz+1:ntripletsz;
Iz(ind_trz) = ii1+I2z;
Jz(ind_trz) = ii1;
end
% create a lower triangular sparse distance matrix from the triplets for distances 0<d<1
R = sparse(I(1:ntriplets),J(1:ntriplets),R(1:ntriplets),n,n);
% create a lower triangular sparse covariance matrix from the
% triplets for distances d==0 (i~=j)
Rz = sparse(Iz(1:ntripletsz),Jz(1:ntripletsz),repmat(ma2,1,ntripletsz),n,n);
% Find the non-zero elements of R.
[I,J,rn] = find(R);
% Compute covariances for distances 0<d<1
const1 = l^2+4*l+3;
const2 = 3*l+6;
cs = max(0,1-rn);
C = ma2.*cs.^(l+2).*(const1.*rn.^2+const2.*rn+3)/3;
% create a lower triangular sparse covariance matrix from the triplets for distances 0<d<1
C = sparse(I,J,C,n,n);
% add the lower triangular covariance matrix for distances d==0 (i~=j)
C = C + Rz;
% form a square covariance matrix and add the covariance matrix for i==j (d==0)
C = C + C' + sparse(1:n,1:n,ma2,n,n);
end
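% A hedged sanity sketch for the assembly above: the result should be a
% symmetric positive definite sparse matrix (plain MATLAB check, with a
% small jitter for numerical safety):
%   x = rand(100,3); cf = gpcf_ppcs2('nin', 3);
%   K = cf.fh.trcov(cf, x);
%   assert(isequal(K, K'));
%   [~, p] = chol(full(K) + 1e-10*eye(100)); assert(p == 0);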
function C = gpcf_ppcs2_trvar(gpcf, x)
%GP_PPCS2_TRVAR Evaluate training variance vector
%
% Description
% C = GP_PPCS2_TRVAR(GPCF, TX) takes in covariance function of
% a Gaussian process GPCF and matrix TX that contains training
% inputs. Returns variance vector C. Every element i of C
% contains variance of input i in TX. This is a mandatory
% subfunction used for example in prediction and energy computations.
%
% See also
% GPCF_PPCS2_COV, GP_COV, GP_TRCOV
[n, m] =size(x);
C = ones(n,1).*gpcf.magnSigma2;
C(C<eps)=0;
end
function reccf = gpcf_ppcs2_recappend(reccf, ri, gpcf)
%RECAPPEND Record append
%
% Description
% RECCF = GPCF_PPCS2_RECAPPEND(RECCF, RI, GPCF) takes a
% covariance function record structure RECCF, record index RI
% and covariance function structure GPCF with the current MCMC
% samples of the parameters. Returns RECCF which contains all
% the old samples and the current samples from GPCF. This
% subfunction is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC and GP_MC -> RECAPPEND
if nargin == 2
% Initialize the record
reccf.type = 'gpcf_ppcs2';
reccf.nin = ri.nin;
reccf.l = floor(reccf.nin/2)+3;
% cf is compactly supported
reccf.cs = 1;
% Initialize parameters
reccf.lengthScale= [];
reccf.magnSigma2 = [];
% Set the function handles
reccf.fh.pak = @gpcf_ppcs2_pak;
reccf.fh.unpak = @gpcf_ppcs2_unpak;
reccf.fh.e = @gpcf_ppcs2_lp;
reccf.fh.lpg = @gpcf_ppcs2_lpg;
reccf.fh.cfg = @gpcf_ppcs2_cfg;
reccf.fh.cov = @gpcf_ppcs2_cov;
reccf.fh.trcov = @gpcf_ppcs2_trcov;
reccf.fh.trvar = @gpcf_ppcs2_trvar;
reccf.fh.recappend = @gpcf_ppcs2_recappend;
reccf.p=[];
reccf.p.lengthScale=[];
reccf.p.magnSigma2=[];
if isfield(ri.p,'lengthScale') && ~isempty(ri.p.lengthScale)
reccf.p.lengthScale = ri.p.lengthScale;
end
if ~isempty(ri.p.magnSigma2)
reccf.p.magnSigma2 = ri.p.magnSigma2;
end
if isfield(ri, 'selectedVariables')
reccf.selectedVariables = ri.selectedVariables;
end
else
% Append to the record
gpp = gpcf.p;
if ~isfield(gpcf,'metric')
% record lengthScale
reccf.lengthScale(ri,:)=gpcf.lengthScale;
if isfield(gpp,'lengthScale') && ~isempty(gpp.lengthScale)
reccf.p.lengthScale = gpp.lengthScale.fh.recappend(reccf.p.lengthScale, ri, gpcf.p.lengthScale);
end
end
% record magnSigma2
reccf.magnSigma2(ri,:)=gpcf.magnSigma2;
if isfield(gpp,'magnSigma2') && ~isempty(gpp.magnSigma2)
reccf.p.magnSigma2 = gpp.magnSigma2.fh.recappend(reccf.p.magnSigma2, ri, gpcf.p.magnSigma2);
end
end
end
|
github
|
lcnhappe/happe-master
|
gpcf_sum.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpcf_sum.m
| 14,153 |
utf_8
|
b881f1be2b12d89b06edcbbfe5dbad0c
|
function gpcf = gpcf_sum(varargin)
%GPCF_SUM Create a sum form covariance function
%
% Description
% GPCF = GPCF_SUM('cf', {GPCF_1, GPCF_2, ...})
% creates a sum form covariance function
% GPCF = GPCF_1 + GPCF_2 + ... + GPCF_N
%
% See also
% GP_SET, GPCF_*
% Copyright (c) 2009-2010 Jarno Vanhatalo
% Copyright (c) 2010 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'GPCF_SUM';
ip.addOptional('gpcf', [], @isstruct);
ip.addParamValue('cf',[], @iscell);
ip.parse(varargin{:});
gpcf=ip.Results.gpcf;
if isempty(gpcf)
init=true;
gpcf.type = 'gpcf_sum';
else
if ~isfield(gpcf,'type') || ~isequal(gpcf.type,'gpcf_sum')
error('First argument does not seem to be a valid covariance function structure')
end
init=false;
end
if init || ~ismember('cf',ip.UsingDefaults)
% Initialize parameters
gpcf.cf = {};
cfs=ip.Results.cf;
if ~isempty(cfs)
for i = 1:length(cfs)
gpcf.cf{i} = cfs{i};
end
else
error('At least one covariance function has to be given in cf');
end
end
if init
% Set the function handles to the subfunctions
gpcf.fh.pak = @gpcf_sum_pak;
gpcf.fh.unpak = @gpcf_sum_unpak;
gpcf.fh.lp = @gpcf_sum_lp;
gpcf.fh.lpg = @gpcf_sum_lpg;
gpcf.fh.cfg = @gpcf_sum_cfg;
gpcf.fh.ginput = @gpcf_sum_ginput;
gpcf.fh.cov = @gpcf_sum_cov;
gpcf.fh.trcov = @gpcf_sum_trcov;
gpcf.fh.trvar = @gpcf_sum_trvar;
gpcf.fh.recappend = @gpcf_sum_recappend;
end
end
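% A hedged usage sketch (the names cf1, cf2, cfs are illustrative):
%   cf1 = gpcf_matern32('lengthScale', 0.5);
%   cf2 = gpcf_ppcs2('nin', 3);
%   cfs = gpcf_sum('cf', {cf1, cf2});
%   % cfs now represents k(x,x') = k_matern32(x,x') + k_ppcs2(x,x')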
function [w, s] = gpcf_sum_pak(gpcf)
%GPCF_SUM_PAK Combine GP covariance function parameters into one vector
%
% Description
% W = GPCF_SUM_PAK(GPCF, W) loops through all the covariance
% functions and packs their parameters into one vector as
% described in the respective functions. This is a mandatory
% subfunction used for example in energy and gradient computations.
%
% See also
% GPCF_SUM_UNPAK
ncf = length(gpcf.cf);
w = []; s = {};
for i=1:ncf
cf = gpcf.cf{i};
[wi si] = cf.fh.pak(cf);
w = [w wi];
s = [s; si];
end
end
function [gpcf, w] = gpcf_sum_unpak(gpcf, w)
%GPCF_SUM_UNPAK Sets the covariance function parameters into
% the structures
%
% Description
% [GPCF, W] = GPCF_SUM_UNPAK(GPCF, W) loops through all the
% covariance functions and unpacks their parameters from W to
% each covariance function structure. This is a mandatory
% subfunction used for example in energy and gradient computations.
%
% See also
% GPCF_SUM_PAK
%
ncf = length(gpcf.cf);
for i=1:ncf
cf = gpcf.cf{i};
[cf, w] = cf.fh.unpak(cf, w);
gpcf.cf{i} = cf;
end
end
function lp = gpcf_sum_lp(gpcf)
%GPCF_SUM_LP Evaluate the log prior of covariance function parameters
%
% Description
% LP = GPCF_SUM_LP(GPCF) takes a covariance function
% structure GPCF and returns log(p(th)), where th collects the
% parameters. This is a mandatory subfunction used for example
% in energy computations.
%
% See also
% GPCF_SUM_PAK, GPCF_SUM_UNPAK, GPCF_SUM_LPG, GP_E
lp = 0;
ncf = length(gpcf.cf);
for i=1:ncf
cf = gpcf.cf{i};
lp = lp + cf.fh.lp(cf);
end
end
function lpg = gpcf_sum_lpg(gpcf)
%GPCF_SUM_LPG Evaluate gradient of the log prior with respect
% to the parameters.
%
% Description
% LPG = GPCF_SUM_LPG(GPCF) takes a covariance function
% structure GPCF and returns LPG = d log (p(th))/dth, where th
% is the vector of parameters. This is a mandatory subfunction
% used for example in gradient computations.
%
% See also
% GPCF_SUM_PAK, GPCF_SUM_UNPAK, GPCF_SUM_LP, GP_G
lpg = [];
ncf = length(gpcf.cf);
% Evaluate the gradients
for i=1:ncf
cf = gpcf.cf{i};
lpg=[lpg cf.fh.lpg(cf)];
end
end
function DKff = gpcf_sum_cfg(gpcf, x, x2, mask, i1)
%GPCF_SUM_CFG Evaluate gradient of covariance function
% with respect to the parameters.
%
% Description
% DKff = GPCF_SUM_CFG(GPCF, X) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X) with
% respect to th (cell array with matrix elements). This is a
% mandatory subfunction used in gradient computations.
%
% DKff = GPCF_SUM_CFG(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_SUM_CFG(GPCF, X, [], MASK) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the diagonal of gradients of covariance matrix
% Kff = k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_SUM_CFG(GPCF, X, X2, [], i) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2), or k(X,X) if X2 is empty, with respect to ith
% hyperparameter. This subfunction is needed when using
% memory save option in gp_set.
%
% See also
% GPCF_SUM_PAK, GPCF_SUM_UNPAK, GPCF_SUM_LP, GP_G
[n, m] =size(x);
ncf = length(gpcf.cf);
DKff = {};
if nargin==5
% Use memory save option
savememory=1;
i3=0;
for k=1:ncf
% Number of hyperparameters for each covariance funtion
cf=gpcf.cf{k};
i3(k)=cf.fh.cfg(cf,[],[],[],0);
end
if i1==0
% Return number of hyperparameters
DKff=sum(i3);
return
end
% Help indices
i3=cumsum(i3);
ind=find(cumsum(i3 >= i1)==1);
if ind>1
i1=[ind i1-i3(ind-1)];
else
i1=[ind i1];
end
i2=i1(1);
else
savememory=0;
i2=1:ncf;
end
% Evaluate: DKff{1} = d Kff / d magnSigma2
% DKff{2} = d Kff / d lengthScale
% NOTE! Here we have already taken into account that the parameters are transformed
% through log() and thus dK/dlog(p) = p * dK/dp
% evaluate the gradient for training covariance
if nargin == 2 || (isempty(x2) && isempty(mask))
% % evaluate the individual covariance functions
% for i=1:ncf
% cf = gpcf.cf{i};
% C{i} = cf.fh.trcov(cf, x);
% end
% Evaluate the gradients
ind = 1:ncf;
DKff = {};
for i=i2
cf = gpcf.cf{i};
if ~savememory
DK = cf.fh.cfg(cf, x);
else
DK{1} = cf.fh.cfg(cf,x,[],[],i1(2));
end
DKff = [DKff DK];
end
% Evaluate the gradient of non-symmetric covariance (e.g. K_fu)
elseif nargin == 3 || isempty(mask)
if size(x,2) ~= size(x2,2)
error('gpcf_sum -> _cfg: The number of columns in x and x2 has to be the same. ')
end
% Evaluate the gradients
ind = 1:ncf;
DKff = {};
for i=i2
cf = gpcf.cf{i};
if ~savememory
DK = cf.fh.cfg(cf, x, x2);
else
DK{1} = cf.fh.cfg(cf,x,x2,[],i1(2));
end
DKff = [DKff DK];
end
% Evaluate: DKff{1} = d mask(Kff,I) / d magnSigma2
% DKff{2...} = d mask(Kff,I) / d lengthScale
elseif nargin == 4 || nargin == 5
% Evaluate the gradients
ind = 1:ncf;
DKff = {};
for i=i2
cf = gpcf.cf{i};
if savememory
DK = cf.fh.cfg(cf, x, [], 1, i1(2));
else
DK = cf.fh.cfg(cf, x, [], 1);
end
DKff = [DKff DK];
end
end
if savememory
DKff=DKff{1};
end
end
function DKff = gpcf_sum_ginput(gpcf, x, x2, i1)
%GPCF_SUM_GINPUT Evaluate gradient of covariance function with
% respect to x
%
% Description
% DKff = GPCF_SUM_GINPUT(GPCF, X) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X) with
% respect to X (cell array with matrix elements). This subfunction
% is needed when computing gradients with respect to inducing
% inputs in sparse approximations.
%
% DKff = GPCF_SUM_GINPUT(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_SUM_GINPUT(GPCF, X, X2, i) takes a covariance
% function structure GPCF, a matrix X of input vectors
% and returns DKff, the gradients of covariance matrix Kff =
% k(X,X2), or k(X,X) if X2 is empty, with respect to ith
% covariate in X. This subfunction is needed when using
% memory save option in gp_set.
%
% See also
% GPCF_SUM_PAK, GPCF_SUM_UNPAK, GPCF_SUM_LP, GP_G
[n, m] =size(x);
ncf = length(gpcf.cf);
if nargin==4
% Use memory save option
savememory=1;
if i1==0
% Return number of covariates
if isfield(gpcf,'selectedVariables')
DKff=length(gpcf.selectedVariables);
else
DKff=m;
end
return
end
else
savememory=0;
end
% initialize the gradient accumulators with the correct output size
if nargin == 2 || isempty(x2)
n2 = n;
else
n2 = size(x2,1);
end
if ~savememory
DKff=cellfun(@(a) zeros(n,n2), cell(1,numel(x)), 'UniformOutput', 0);
else
DKff=cellfun(@(a) zeros(n,n2), cell(1,n), 'UniformOutput', 0);
end
% evaluate the gradient for training covariance
if nargin == 2 || isempty(x2)
% Evaluate the gradients
ind = 1:ncf;
for i=1:ncf
cf = gpcf.cf{i};
if ~savememory
DK = cf.fh.ginput(cf, x);
else
DK = cf.fh.ginput(cf,x,[],i1);
end
for j=1:length(DK)
DKff{j} = DKff{j} + DK{j};
end
end
% Evaluate the gradient of non-symmetric covariance (e.g. K_fu)
elseif nargin == 3 || nargin == 4
if size(x,2) ~= size(x2,2)
error('gpcf_sum -> _ginput: The number of columns in x and x2 has to be the same. ')
end
% Evaluate the gradients
for i=1:ncf
cf = gpcf.cf{i};
if ~savememory
DK = cf.fh.ginput(cf, x, x2);
else
DK = cf.fh.ginput(cf,x,x2,i1);
end
for j=1:length(DK)
DKff{j} = DKff{j} + DK{j};
end
end
end
end
function C = gpcf_sum_cov(gpcf, x1, x2)
%GP_SUM_COV Evaluate covariance matrix between two input vectors
%
% Description
% C = GP_SUM_COV(GP, TX, X) takes in covariance function of a
% Gaussian process GP and two matrixes TX and X that contain
% input vectors to GP. Returns covariance matrix C. Every
% element ij of C contains covariance between inputs i in TX
% and j in X. This is a mandatory subfunction used for example in
% prediction and energy computations.
%
%
% See also
% GPCF_SUM_TRCOV, GPCF_SUM_TRVAR, GP_COV, GP_TRCOV
if nargin < 3 || isempty(x2)
x2=x1;
end
[n1,m1]=size(x1);
[n2,m2]=size(x2);
if m1~=m2
error('the number of columns of X1 and X2 has to be the same')
end
ncf = length(gpcf.cf);
% evaluate the individual covariance functions
C = 0;
for i=1:ncf
cf = gpcf.cf{i};
C = C + cf.fh.cov(cf, x1, x2);
end
end
function C = gpcf_sum_trcov(gpcf, x)
%GP_SUM_TRCOV Evaluate training covariance matrix of inputs
%
% Description
% C = GP_SUM_TRCOV(GP, TX) takes in covariance function of a
% Gaussian process GP and matrix TX that contains training
% input vectors. Returns covariance matrix C. Every element ij
% of C contains covariance between inputs i and j in TX. This
% is a mandatory subfunction used for example in prediction and
% energy computations.
%
% See also
% GPCF_SUM_COV, GPCF_SUM_TRVAR, GP_COV, GP_TRCOV
ncf = length(gpcf.cf);
% evaluate the individual covariance functions
C = 0;
for i=1:ncf
cf = gpcf.cf{i};
C = C + cf.fh.trcov(cf, x);
end
end
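% A hedged additivity check following directly from the loop above
% (cf1, cf2, cfs as in the constructor sketch earlier in this file):
%   x = rand(20,3);
%   K = cfs.fh.trcov(cfs, x);
%   norm(full(K - (cf1.fh.trcov(cf1,x) + cf2.fh.trcov(cf2,x))))   % ~0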
function C = gpcf_sum_trvar(gpcf, x)
% GP_SUM_TRVAR Evaluate training variance vector
%
% Description
% C = GP_SUM_TRVAR(GPCF, TX) takes in covariance function of
% a Gaussian process GPCF and matrix TX that contains training
% inputs. Returns variance vector C. Every element i of C
% contains variance of input i in TX. This is a mandatory
% subfunction used for example in prediction and energy computations.
%
% See also
% GPCF_SUM_COV, GP_COV, GP_TRCOV
ncf = length(gpcf.cf);
% evaluate the individual covariance functions
C = 0;
for i=1:ncf
cf = gpcf.cf{i};
C = C + cf.fh.trvar(cf, x);
end
end
function reccf = gpcf_sum_recappend(reccf, ri, gpcf)
%RECAPPEND Record append
%
% Description
% RECCF = GPCF_SUM_RECAPPEND(RECCF, RI, GPCF) takes a
% covariance function record structure RECCF, record index RI
% and covariance function structure GPCF with the current MCMC
% samples of the parameters. Returns RECCF which contains all
% the old samples and the current samples from GPCF. This
% subfunction is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC, GP_MC->RECAPPEND
if nargin == 2
% Initialize the record
reccf.type = 'gpcf_sum';
% Initialize parameters
ncf = length(ri.cf);
for i=1:ncf
cf = ri.cf{i};
reccf.cf{i} = cf.fh.recappend([], ri.cf{i});
end
% Set the function handles
reccf.fh.pak = @gpcf_sum_pak;
reccf.fh.unpak = @gpcf_sum_unpak;
reccf.fh.e = @gpcf_sum_lp;
reccf.fh.lpg = @gpcf_sum_lpg;
reccf.fh.cfg = @gpcf_sum_cfg;
reccf.fh.cov = @gpcf_sum_cov;
reccf.fh.trcov = @gpcf_sum_trcov;
reccf.fh.trvar = @gpcf_sum_trvar;
reccf.fh.recappend = @gpcf_sum_recappend;
else
% Append to the record
% Loop over all of the covariance functions
ncf = length(gpcf.cf);
for i=1:ncf
cf = gpcf.cf{i};
reccf.cf{i} = cf.fh.recappend(reccf.cf{i}, ri, cf);
end
end
end
|
github
|
lcnhappe/happe-master
|
gpcf_matern32.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpcf_matern32.m
| 27,919 |
utf_8
|
578c80a98a7b515abf2ed96d9a055e23
|
function gpcf = gpcf_matern32(varargin)
%GPCF_MATERN32 Create a Matern nu=3/2 covariance function
%
% Description
% GPCF = GPCF_MATERN32('PARAM1',VALUE1,'PARAM2,VALUE2,...)
% creates Matern nu=3/2 covariance function structure in which
% the named parameters have the specified values. Any
% unspecified parameters are set to default values.
%
% GPCF = GPCF_MATERN32(GPCF,'PARAM1',VALUE1,'PARAM2,VALUE2,...)
% modify a covariance function structure with the named
% parameters altered with the specified values.
%
% Parameters for Matern nu=3/2 covariance function [default]
% magnSigma2 - magnitude (squared) [0.1]
% lengthScale - length scale for each input. [1]
% This can be either scalar corresponding
% to an isotropic function or vector
% defining own length-scale for each input
% direction.
% magnSigma2_prior - prior for magnSigma2 [prior_logunif]
% lengthScale_prior - prior for lengthScale [prior_t]
% metric - metric structure used by the covariance function []
% selectedVariables - vector defining which inputs are used [all]
% selectedVariables is shorthand for using
% metric_euclidean with corresponding components
%
% Note! If the prior is 'prior_fixed' then the parameter in
% question is considered fixed and it is not handled in
% optimization, grid integration, MCMC etc.
%
% See also
% GP_SET, GPCF_*, PRIOR_*, METRIC_*
% Copyright (c) 2007-2010 Jarno Vanhatalo
% Copyright (c) 2010 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
if nargin>0 && ischar(varargin{1}) && ismember(varargin{1},{'init' 'set'})
% remove init and set
varargin(1)=[];
end
ip=inputParser;
ip.FunctionName = 'GPCF_MATERN32';
ip.addOptional('gpcf', [], @isstruct);
ip.addParamValue('magnSigma2',0.1, @(x) isscalar(x) && x>0);
ip.addParamValue('lengthScale',1, @(x) isvector(x) && all(x>0));
ip.addParamValue('metric',[], @isstruct);
ip.addParamValue('magnSigma2_prior', prior_logunif(), ...
@(x) isstruct(x) || isempty(x));
ip.addParamValue('lengthScale_prior',prior_t(), ...
@(x) isstruct(x) || isempty(x));
ip.addParamValue('selectedVariables',[], @(x) isempty(x) || ...
(isvector(x) && all(x>0)));
ip.parse(varargin{:});
gpcf=ip.Results.gpcf;
if isempty(gpcf)
init=true;
gpcf.type = 'gpcf_matern32';
else
if ~isfield(gpcf,'type') || ~isequal(gpcf.type,'gpcf_matern32')
error('First argument does not seem to be a valid covariance function structure')
end
init=false;
end
if init
% Set the function handles to the subfunctions
gpcf.fh.pak = @gpcf_matern32_pak;
gpcf.fh.unpak = @gpcf_matern32_unpak;
gpcf.fh.lp = @gpcf_matern32_lp;
gpcf.fh.lpg = @gpcf_matern32_lpg;
gpcf.fh.cfg = @gpcf_matern32_cfg;
gpcf.fh.ginput = @gpcf_matern32_ginput;
gpcf.fh.cov = @gpcf_matern32_cov;
gpcf.fh.trcov = @gpcf_matern32_trcov;
gpcf.fh.trvar = @gpcf_matern32_trvar;
gpcf.fh.recappend = @gpcf_matern32_recappend;
end
% Initialize parameters
if init || ~ismember('lengthScale',ip.UsingDefaults)
gpcf.lengthScale = ip.Results.lengthScale;
end
if init || ~ismember('magnSigma2',ip.UsingDefaults)
gpcf.magnSigma2 = ip.Results.magnSigma2;
end
% Initialize prior structure
if init
gpcf.p=[];
end
if init || ~ismember('lengthScale_prior',ip.UsingDefaults)
gpcf.p.lengthScale=ip.Results.lengthScale_prior;
end
if init || ~ismember('magnSigma2_prior',ip.UsingDefaults)
gpcf.p.magnSigma2=ip.Results.magnSigma2_prior;
end
%Initialize metric
if ~ismember('metric',ip.UsingDefaults)
if ~isempty(ip.Results.metric)
gpcf.metric = ip.Results.metric;
gpcf = rmfield(gpcf, 'lengthScale');
gpcf.p = rmfield(gpcf.p, 'lengthScale');
elseif isfield(gpcf,'metric')
if ~isfield(gpcf,'lengthScale')
gpcf.lengthScale = gpcf.metric.lengthScale;
end
if ~isfield(gpcf.p,'lengthScale')
gpcf.p.lengthScale = gpcf.metric.p.lengthScale;
end
gpcf = rmfield(gpcf, 'metric');
end
end
% selectedVariables options implemented using metric_euclidean
if ~ismember('selectedVariables',ip.UsingDefaults)
if ~isfield(gpcf,'metric')
if ~isempty(ip.Results.selectedVariables)
gpcf.selectedVariables = ip.Results.selectedVariables;
% gpcf.metric=metric_euclidean('components',...
% num2cell(ip.Results.selectedVariables),...
% 'lengthScale',gpcf.lengthScale,...
% 'lengthScale_prior',gpcf.p.lengthScale);
% gpcf = rmfield(gpcf, 'lengthScale');
% gpcf.p = rmfield(gpcf.p, 'lengthScale');
end
elseif isfield(gpcf,'metric')
if ~isempty(ip.Results.selectedVariables)
gpcf.metric=metric_euclidean(gpcf.metric,...
'components',...
num2cell(ip.Results.selectedVariables));
if ~ismember('lengthScale',ip.UsingDefaults)
gpcf.metric.lengthScale=ip.Results.lengthScale;
gpcf = rmfield(gpcf, 'lengthScale');
end
if ~ismember('lengthScale_prior',ip.UsingDefaults)
gpcf.metric.p.lengthScale=ip.Results.lengthScale_prior;
gpcf.p = rmfield(gpcf.p, 'lengthScale');
end
else
if ~isfield(gpcf,'lengthScale')
gpcf.lengthScale = gpcf.metric.lengthScale;
end
if ~isfield(gpcf.p,'lengthScale')
gpcf.p.lengthScale = gpcf.metric.p.lengthScale;
end
gpcf = rmfield(gpcf, 'metric');
end
end
end
end
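% A hedged usage sketch. For scaled distance r the Matern nu=3/2 form
% implemented below is k(r) = magnSigma2*(1+sqrt(3)*r)*exp(-sqrt(3)*r):
%   x  = randn(50,2);
%   cf = gpcf_matern32('lengthScale', [0.5 2], 'magnSigma2', 0.7);
%   K  = cf.fh.trcov(cf, x);           % dense 50 x 50 covariance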
function [w,s] = gpcf_matern32_pak(gpcf, w)
%GPCF_MATERN32_PAK Combine GP covariance function hyper-parameters
% into one vector.
%
% Description
% W = GPCF_MATERN32_PAK(GPCF) takes a covariance function
% structure GPCF and combines the covariance function
% parameters and their hyperparameters into a single row
% vector W. This is a mandatory subfunction used for example
% in energy and gradient computations.
%
% w = [ log(gpcf.magnSigma2)
% (hyperparameters of gpcf.magnSigma2)
% log(gpcf.lengthScale(:))
% (hyperparameters of gpcf.lengthScale)]'
%
% See also
% GPCF_MATERN32_UNPAK
w = []; s = {};
if ~isempty(gpcf.p.magnSigma2)
w = [w log(gpcf.magnSigma2)];
s = [s; 'log(matern32.magnSigma2)'];
% Hyperparameters of magnSigma2
[wh sh] = gpcf.p.magnSigma2.fh.pak(gpcf.p.magnSigma2);
w = [w wh];
s = [s; sh];
end
if isfield(gpcf,'metric')
[wm sm] = gpcf.metric.fh.pak(gpcf.metric);
w = [w wm];
s = [s; sm];
else
if ~isempty(gpcf.p.lengthScale)
w = [w log(gpcf.lengthScale)];
if numel(gpcf.lengthScale)>1
s = [s; sprintf('log(matern32.lengthScale x %d)',numel(gpcf.lengthScale))];
else
s = [s; 'log(matern32.lengthScale)'];
end
% Hyperparameters of lengthScale
[wh sh] = gpcf.p.lengthScale.fh.pak(gpcf.p.lengthScale);
w = [w wh];
s = [s; sh];
end
end
end
function [gpcf, w] = gpcf_matern32_unpak(gpcf, w)
%GPCF_MATERN32_UNPAK Sets the covariance function parameters
% into the structure
%
% Description
% [GPCF, W] = GPCF_MATERN32_UNPAK(GPCF, W) takes a covariance
% function structure GPCF and a hyper-parameter vector W,
% and returns a covariance function structure identical to
% the input, except that the covariance hyper-parameters have
% been set to the values in W. Deletes the values set to GPCF
% from W and returns the modified W. This is a mandatory
% subfunction used for example in energy and gradient computations.
%
% Assignment is inverse of
% w = [ log(gpcf.magnSigma2)
% (hyperparameters of gpcf.magnSigma2)
% log(gpcf.lengthScale(:))
% (hyperparameters of gpcf.lengthScale)]'
%
% See also
% GPCF_MATERN32_PAK
gpp=gpcf.p;
if ~isempty(gpp.magnSigma2)
gpcf.magnSigma2 = exp(w(1));
w = w(2:end);
% Hyperparameters of magnSigma2
[p, w] = gpcf.p.magnSigma2.fh.unpak(gpcf.p.magnSigma2, w);
gpcf.p.magnSigma2 = p;
end
if isfield(gpcf,'metric')
[metric, w] = gpcf.metric.fh.unpak(gpcf.metric, w);
gpcf.metric = metric;
else
if ~isempty(gpp.lengthScale)
i1=1;
i2=length(gpcf.lengthScale);
gpcf.lengthScale = exp(w(i1:i2));
w = w(i2+1:end);
% Hyperparameters of lengthScale
[p, w] = gpcf.p.lengthScale.fh.unpak(gpcf.p.lengthScale, w);
gpcf.p.lengthScale = p;
end
end
end
function lp = gpcf_matern32_lp(gpcf)
%GPCF_MATERN32_LP Evaluate the log prior of covariance function parameters
%
% Description
% LP = GPCF_MATERN32_LP(GPCF) takes a covariance function
% structure GPCF and evaluates log p(th) x J, where th is a
% vector of MATERN32 parameters and J is the Jacobian of the
% transformation exp(w) = th. (Note that the parameters are
% log transformed when packed.) This is a mandatory
% subfunction used for example in energy computations.
%
% See also
% GPCF_MATERN32_PAK, GPCF_MATERN32_UNPAK, GPCF_MATERN32_LPG, GP_E
%
% Evaluate the prior contribution to the error. The parameters that
% are sampled are transformed, e.g., W = log(w) where w is all
% the "real" samples. On the other hand errors are evaluated in
% the W-space so we also need to take into account the Jacobian of
% the transformation, e.g., W -> w = exp(W). See Gelman et al., 2004,
% Bayesian Data Analysis, second edition, p. 24.
lp = 0;
gpp=gpcf.p;
if ~isempty(gpcf.p.magnSigma2)
lp = lp +gpp.magnSigma2.fh.lp(gpcf.magnSigma2, ...
gpp.magnSigma2) +log(gpcf.magnSigma2);
end
if isfield(gpcf,'metric')
lp = lp +gpcf.metric.fh.lp(gpcf.metric);
elseif ~isempty(gpp.lengthScale)
lp = lp +gpp.lengthScale.fh.lp(gpcf.lengthScale, ...
gpp.lengthScale) +sum(log(gpcf.lengthScale));
end
end
function lpg = gpcf_matern32_lpg(gpcf)
%GPCF_MATERN32_LPG Evaluate gradient of the log prior with respect
% to the parameters.
%
% Description
% LPG = GPCF_MATERN32_LPG(GPCF) takes a covariance function
% structure GPCF and returns LPG = d log (p(th))/dth, where th
% is the vector of parameters. This is a mandatory subfunction
% used for example in gradient computations.
%
% See also
% GPCF_MATERN32_PAK, GPCF_MATERN32_UNPAK, GPCF_MATERN32_LP, GP_G
lpg = [];
gpp=gpcf.p;
if ~isempty(gpcf.p.magnSigma2)
lpgs = gpp.magnSigma2.fh.lpg(gpcf.magnSigma2, gpp.magnSigma2);
lpg = [lpg lpgs(1).*gpcf.magnSigma2+1 lpgs(2:end)];
end
if isfield(gpcf,'metric')
lpg_dist = gpcf.metric.fh.lpg(gpcf.metric);
lpg=[lpg lpg_dist];
else
if ~isempty(gpcf.p.lengthScale)
lll = length(gpcf.lengthScale);
lpgs = gpp.lengthScale.fh.lpg(gpcf.lengthScale, gpp.lengthScale);
lpg = [lpg lpgs(1:lll).*gpcf.lengthScale+1 lpgs(lll+1:end)];
end
end
end
function DKff = gpcf_matern32_cfg(gpcf, x, x2, mask,i1)
%GPCF_MATERN32_CFG Evaluate gradient of covariance function
% hyper-prior with respect to the parameters.
%
% Description
% DKff = GPCF_MATERN32_CFG(GPCF, X) takes a
% covariance function structure GPCF, a matrix X of input
% vectors and returns DKff, the gradients of covariance matrix
% Kff = k(X,X) with respect to th (cell array with matrix
% elements). This is a mandatory subfunction used for example
% in gradient computations.
%
% DKff = GPCF_MATERN32_CFG(GPCF, X, X2) takes a
% covariance function structure GPCF, a matrix X of input
% vectors and returns DKff, the gradients of covariance matrix
% Kff = k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_MATERN32_CFG(GPCF, X, [], MASK)
% takes a covariance function structure GPCF, a matrix X
% of input vectors and returns DKff, the diagonal of gradients
% of covariance matrix Kff = k(X,X2) with respect to th (cell
% array with matrix elements). This subfunction is needed when
% using sparse approximations (e.g. FIC).
%
% DKff = GPCF_MATERN32_CFG(GPCF, X, X2, [], i) takes a
% covariance function structure GPCF, a matrix X of input
% vectors and returns DKff, the gradient of covariance matrix
% Kff = k(X,X2) with respect to ith hyperparameter (matrix).
% 5th input can also be used without X2. This subfunction is
% needed when using memory save option in gp_set.
%
% See also
% GPCF_MATERN32_PAK, GPCF_MATERN32_UNPAK, GPCF_MATERN32_LP, GP_G
gpp=gpcf.p;
i2=1;
DKff = {};
gprior = [];
if nargin==5
% Use memory save option
savememory=1;
if i1==0
% Return number of hyperparameters
i=0;
if ~isempty(gpcf.p.magnSigma2)
i=i+1;
end
if ~isempty(gpcf.p.lengthScale)
i=i+length(gpcf.lengthScale);
end
DKff=i;
return
end
else
savememory=0;
end
% Evaluate: DKff{1} = d Kff / d magnSigma2
% DKff{2} = d Kff / d lengthScale
% NOTE! Here we have already taken into account that the parameters
% are transformed through log() and thus dK/dlog(p) = p * dK/dp
% evaluate the gradient for training covariance
if nargin == 2 || (isempty(x2) && isempty(mask))
Cdm = gpcf_matern32_trcov(gpcf, x);
ii1=0;
if ~isempty(gpcf.p.magnSigma2)
ii1 = ii1 +1;
DKff{ii1} = Cdm;
end
if isfield(gpcf,'metric')
dist = gpcf.metric.fh.dist(gpcf.metric, x);
distg = gpcf.metric.fh.distg(gpcf.metric, x);
gprior_dist = gpcf.metric.fh.lpg(gpcf.metric);
for i=1:length(distg)
ii1 = ii1+1;
DKff{ii1} = -gpcf.magnSigma2.*3.*dist.*distg{i}.*exp(-sqrt(3).*dist);
end
else
if isfield(gpcf,'selectedVariables')
x = x(:,gpcf.selectedVariables);
end
[n, m] =size(x);
if savememory
if i1==1
DKff=DKff{ii1};
return
else
ii1=ii1-1;
i1=i1-1;
end
else
i1=1:m;
end
if ~isempty(gpcf.p.lengthScale)
ma2 = gpcf.magnSigma2;
% loop over all the lengthScales
if length(gpcf.lengthScale) == 1
% In the case of isotropic MATERN32
s = 1./gpcf.lengthScale;
dist = 0;
for i=1:m
D = bsxfun(@minus,x(:,i),x(:,i)');
dist = dist + D.^2;
end
D = ma2.*3.*dist.*s.^2.*exp(-sqrt(3.*dist).*s);
ii1 = ii1+1;
DKff{ii1} = D;
else
% In the case ARD is used
s = 1./gpcf.lengthScale.^2;
dist = 0;
for i=1:m
dist = dist + s(i).*(bsxfun(@minus,x(:,i),x(:,i)')).^2;
end
dist=sqrt(dist);
for i=i1
D = 3.*ma2.*s(i).*(bsxfun(@minus,x(:,i),x(:,i)')).^2.*exp(-sqrt(3).*dist);
ii1 = ii1+1;
DKff{ii1} = D;
end
end
end
end
% Evaluate the gradient of non-symmetric covariance (e.g. K_fu)
elseif nargin == 3 || isempty(mask)
if size(x,2) ~= size(x2,2)
error('gpcf_matern32 -> _ghyper: The number of columns in x and x2 has to be the same. ')
end
ii1=0;
K = gpcf.fh.cov(gpcf, x, x2);
if ~isempty(gpcf.p.magnSigma2)
ii1 = ii1 +1;
DKff{ii1} = K;
end
if isfield(gpcf,'metric')
dist = gpcf.metric.fh.dist(gpcf.metric, x, x2);
distg = gpcf.metric.fh.distg(gpcf.metric, x, x2);
gprior_dist = gpcf.metric.fh.lpg(gpcf.metric);
for i=1:length(distg)
ii1 = ii1+1;
DKff{ii1} = -gpcf.magnSigma2.*3.*dist.*distg{i}.*exp(-sqrt(3).*dist);
end
else
if isfield(gpcf,'selectedVariables')
x = x(:,gpcf.selectedVariables);
x2 = x2(:,gpcf.selectedVariables);
end
[n, m] =size(x);
if savememory
if i1==1
DKff=DKff{ii1};
return
else
ii1=ii1-1;
i1=i1-1;
end
else
i1=1:m;
end
if ~isempty(gpcf.p.lengthScale)
% Evaluate help matrix for calculations of derivatives with respect
% to the lengthScale
if length(gpcf.lengthScale) == 1
% In the case of an isotropic matern32
s = 1./gpcf.lengthScale;
ma2 = gpcf.magnSigma2;
dist = 0;
for i=1:m
dist = dist + (bsxfun(@minus,x(:,i),x2(:,i)')).^2;
end
DK_l = 3.*ma2.*s.^2.*dist.*exp(-s.*sqrt(3.*dist));
ii1=ii1+1;
DKff{ii1} = DK_l;
else
% In the case ARD is used
s = 1./gpcf.lengthScale.^2;
ma2 = gpcf.magnSigma2;
dist = 0;
for i=1:m
dist = dist + s(i).*(bsxfun(@minus,x(:,i),x2(:,i)')).^2;
end
for i=i1
DK_l = 3.*ma2.*s(i).*(bsxfun(@minus,x(:,i),x2(:,i)')).^2.*exp(-sqrt(3.*dist));
ii1=ii1+1;
DKff{ii1} = DK_l;
end
end
end
end
% Evaluate: DKff{1} = d mask(Kff,I) / d magnSigma2
% DKff{2...} = d mask(Kff,I) / d lengthScale
elseif nargin == 4 || nargin == 5
ii1=0;
if ~isempty(gpcf.p.magnSigma2) && (~savememory || all(i1==1))
ii1 = ii1+1;
DKff{ii1} = gpcf.fh.trvar(gpcf, x); % d mask(Kff,I) / d magnSigma2
end
if isfield(gpcf,'metric')
dist = 0;
distg = gpcf.metric.fh.distg(gpcf.metric, x, [], 1);
gprior_dist = gpcf.metric.fh.lpg(gpcf.metric);
for i=1:length(distg)
ii1 = ii1+1;
DKff{ii1} = 0;
end
else
if ~isempty(gpcf.p.lengthScale)
for i2=1:length(gpcf.lengthScale)
ii1 = ii1+1;
DKff{ii1} = 0; % d mask(Kff,I) / d lengthScale
end
end
end
end
if savememory
DKff=DKff{1};
end
end
function DKff = gpcf_matern32_ginput(gpcf, x, x2, i1)
%GPCF_MATERN32_GINPUT Evaluate gradient of covariance function with
% respect to x.
%
% Description
% DKff = GPCF_MATERN32_GINPUT(GPCF, X) takes a covariance
% function structure GPCF, a matrix X of input vectors
% and returns DKff, the gradients of covariance matrix Kff =
% k(X,X) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_MATERN32_GINPUT(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors
% and returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_MATERN32_GINPUT(GPCF, X, X2, i) takes a covariance
% function structure GPCF, a matrix X of input vectors
% and returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to ith covariate in X (matrix). This
% subfunction is needed when using memory save option in gp_set.
%
% See also
% GPCF_MATERN32_PAK, GPCF_MATERN32_UNPAK, GPCF_MATERN32_LP, GP_G
[n, m] =size(x);
ma2 = gpcf.magnSigma2;
ii1 = 0;
if nargin==4
% Use memory save option
savememory=1;
if i1==0
% Return number of covariates
if isfield(gpcf,'selectedVariables')
DKff=length(gpcf.selectedVariables);
else
DKff=m;
end
return
end
else
savememory=0;
end
if nargin == 2 || isempty(x2)
if isfield(gpcf,'metric')
K = gpcf.fh.trcov(gpcf, x);
dist = gpcf.metric.fh.dist(gpcf.metric, x);
gdist = gpcf.metric.fh.ginput(gpcf.metric, x);
for i=1:length(gdist)
ii1 = ii1+1;
DKff{ii1} = -K./(1+sqrt(3)*dist).*3.*dist.*gdist{ii1};
end
else
if length(gpcf.lengthScale) == 1
% In the case of an isotropic
s = repmat(1./gpcf.lengthScale.^2, 1, m);
else
s = 1./gpcf.lengthScale.^2;
end
dist=0;
for i2=1:m
dist = dist + s(i2).*(bsxfun(@minus,x(:,i2),x(:,i2)')).^2;
end
if ~savememory
i1=1:m;
end
for i=i1
for j = 1:n
D1 = zeros(n,n);
D1(j,:) = (s(i)).*bsxfun(@minus,x(j,i),x(:,i)');
D1 = D1 + D1';
DK = -3.*ma2.*exp(-sqrt(3.*dist)).*D1;
ii1 = ii1 + 1;
DKff{ii1} = DK;
end
end
end
elseif nargin == 3 || nargin == 4
if isfield(gpcf,'metric')
K = gpcf.fh.cov(gpcf, x, x2);
dist = gpcf.metric.fh.dist(gpcf.metric, x, x2);
gdist = gpcf.metric.fh.ginput(gpcf.metric, x, x2);
for i=1:length(gdist)
ii1 = ii1+1;
DKff{ii1} = -K./(1+sqrt(3)*dist).*3.*dist.*gdist{ii1};
end
else
[n2, m2] =size(x2);
if length(gpcf.lengthScale) == 1
s = repmat(1./gpcf.lengthScale.^2, 1, m);
else
s = 1./gpcf.lengthScale.^2;
end
dist=0;
for i2=1:m
dist = dist + s(i2).*(bsxfun(@minus,x(:,i2),x2(:,i2)')).^2;
end
if ~savememory
i1=1:m;
end
ii1 = 0;
for i=i1
for j = 1:n
D1 = zeros(n,n2);
D1(j,:) = (s(i)).*bsxfun(@minus,x(j,i),x2(:,i)');
DK = -3.*ma2.*exp(-sqrt(3.*dist)).*D1;
ii1 = ii1 + 1;
DKff{ii1} = DK;
end
end
end
end
end
function C = gpcf_matern32_cov(gpcf, x1, x2)
%GP_MATERN32_COV Evaluate covariance matrix between two input vectors
%
% Description
% C = GP_MATERN32_COV(GP, TX, X) takes in covariance function
% of a Gaussian process GP and two matrixes TX and X that
% contain input vectors to GP. Returns covariance matrix C.
% Every element ij of C contains covariance between inputs i
% in TX and j in X. This is a mandatory subfunction used for
% example in prediction and energy computations.
%
%
% See also
% GPCF_MATERN32_TRCOV, GPCF_MATERN32_TRVAR, GP_COV, GP_TRCOV
if isempty(x2)
x2=x1;
end
if size(x1,2)~=size(x2,2)
    error('the number of columns of X1 and X2 has to be the same')
end
if isfield(gpcf,'metric')
dist = gpcf.metric.fh.dist(gpcf.metric, x1, x2);
dist(dist<eps) = 0;
C = gpcf.magnSigma2.*(1+sqrt(3).*dist).*exp(-sqrt(3).*dist);
else
if isfield(gpcf,'selectedVariables')
x1 = x1(:,gpcf.selectedVariables);
x2 = x2(:,gpcf.selectedVariables);
end
[n1,m1]=size(x1);
[n2,m2]=size(x2);
C=zeros(n1,n2);
ma2 = gpcf.magnSigma2;
% Evaluate the covariance
if ~isempty(gpcf.lengthScale)
s2 = 1./gpcf.lengthScale.^2;
% If ARD is not used make s a vector of
% equal elements
if size(s2)==1
s2 = repmat(s2,1,m1);
end
dist=zeros(n1,n2);
for j=1:m1
dist = dist + s2(j).*(bsxfun(@minus,x1(:,j),x2(:,j)')).^2;
end
dist = sqrt(dist);
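      % Matern 3/2 in terms of the (ARD-)scaled distance accumulated above:
      % k(r) = magnSigma2*(1+sqrt(3)*r)*exp(-sqrt(3)*r)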
C = ma2.*(1+sqrt(3).*dist).*exp(-sqrt(3).*dist);
end
C(C<eps)=0;
end
end
function C = gpcf_matern32_trcov(gpcf, x)
%GP_MATERN32_TRCOV Evaluate training covariance matrix of inputs
%
% Description
% C = GP_MATERN32_TRCOV(GP, TX) takes in covariance function
% of a Gaussian process GP and matrix TX that contains
% training input vectors. Returns covariance matrix C. Every
% element ij of C contains covariance between inputs i and j
% in TX. This is a mandatory subfunction used for example in
% prediction and energy computations.
%
% See also
% GPCF_MATERN32_COV, GPCF_MATERN32_TRVAR, GP_COV, GP_TRCOV
if isfield(gpcf,'metric')
ma2 = gpcf.magnSigma2;
dist = gpcf.metric.fh.dist(gpcf.metric, x);
C = ma2.*(1+sqrt(3).*dist).*exp(-sqrt(3).*dist);
else
% Try to use the C-implementation
C = trcov(gpcf,x);
if isnan(C)
% If there wasn't C-implementation do here
if isfield(gpcf, 'selectedVariables')
x = x(:,gpcf.selectedVariables);
end
[n, m] =size(x);
s2 = 1./(gpcf.lengthScale).^2;
if size(s2)==1
s2 = repmat(s2,1,m);
end
ma2 = gpcf.magnSigma2;
% Here we take advantage of the
% symmetry of covariance matrix
C=zeros(n,n);
for i1=2:n
i1n=(i1-1)*n;
for i2=1:i1-1
ii=i1+(i2-1)*n;
for i3=1:m
C(ii)=C(ii)+s2(i3).*(x(i1,i3)-x(i2,i3)).^2; % the covariance function
end
C(i1n+i2)=C(ii);
end
end
dist = sqrt(C);
C = ma2.*(1+sqrt(3).*dist).*exp(-sqrt(3).*dist);
C(C<eps)=0;
end
end
end
function C = gpcf_matern32_trvar(gpcf, x)
%GP_MATERN32_TRVAR Evaluate training variance vector
%
% Description
% C = GP_MATERN32_TRVAR(GPCF, TX) takes in covariance function
% of a Gaussian process GPCF and matrix TX that contains
% training inputs. Returns variance vector C. Every element i
% of C contains variance of input i in TX. This is a mandatory
% subfunction used for example in prediction and energy computations.
%
%
% See also
% GPCF_MATERN32_COV, GP_COV, GP_TRCOV
[n, m] =size(x);
C = ones(n,1).*gpcf.magnSigma2;
C(C<eps)=0;
end
function reccf = gpcf_matern32_recappend(reccf, ri, gpcf)
%RECAPPEND Record append
%
% Description
% RECCF = GPCF_MATERN32_RECAPPEND(RECCF, RI, GPCF) takes a
% covariance function record structure RECCF, record index RI
% and covariance function structure GPCF with the current MCMC
% samples of the parameters. Returns RECCF which contains
% all the old samples and the current samples from GPCF.
% This subfunction is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC and GP_MC -> RECAPPEND
if nargin == 2
% Initialize the record
reccf.type = 'gpcf_matern32';
% Initialize parameters
reccf.lengthScale= [];
reccf.magnSigma2 = [];
% Set the function handles
reccf.fh.pak = @gpcf_matern32_pak;
reccf.fh.unpak = @gpcf_matern32_unpak;
reccf.fh.e = @gpcf_matern32_lp;
reccf.fh.lpg = @gpcf_matern32_lpg;
reccf.fh.cfg = @gpcf_matern32_cfg;
reccf.fh.cov = @gpcf_matern32_cov;
reccf.fh.trcov = @gpcf_matern32_trcov;
reccf.fh.trvar = @gpcf_matern32_trvar;
reccf.fh.recappend = @gpcf_matern32_recappend;
reccf.p=[];
reccf.p.lengthScale=[];
reccf.p.magnSigma2=[];
if isfield(ri.p,'lengthScale') && ~isempty(ri.p.lengthScale)
reccf.p.lengthScale = ri.p.lengthScale;
end
if ~isempty(ri.p.magnSigma2)
reccf.p.magnSigma2 = ri.p.magnSigma2;
end
if isfield(ri, 'selectedVariables')
reccf.selectedVariables = ri.selectedVariables;
end
else
% Append to the record
gpp = gpcf.p;
if ~isfield(gpcf,'metric')
% record lengthScale
reccf.lengthScale(ri,:)=gpcf.lengthScale;
if isfield(gpp,'lengthScale') && ~isempty(gpp.lengthScale)
reccf.p.lengthScale = gpp.lengthScale.fh.recappend(reccf.p.lengthScale, ri, gpcf.p.lengthScale);
end
end
% record magnSigma2
reccf.magnSigma2(ri,:)=gpcf.magnSigma2;
if isfield(gpp,'magnSigma2') && ~isempty(gpp.magnSigma2)
reccf.p.magnSigma2 = gpp.magnSigma2.fh.recappend(reccf.p.magnSigma2, ri, gpcf.p.magnSigma2);
end
end
end
| github | lcnhappe/happe-master | lik_logit.m | .m | happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/lik_logit.m | 14,344 | utf_8 | c16705d8ddef02a29ecefd128787faef |
function lik = lik_logit(varargin)
%LIK_LOGIT Create a Logit likelihood structure
%
% Description
% LIK = LIK_LOGIT creates Logit likelihood for classification
% problem with class labels {-1,1}.
%
% The likelihood is defined as follows:
% __ n
% p(y|f) = || i=1 1/(1 + exp(-y_i*f_i) )
% where f is the latent value vector.
%
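%    A minimal usage sketch (hedged; xtr and ytr are hypothetical training
%    data with ytr in {-1,1}, and gpcf_sexp is used only as an example
%    covariance function):
%      lik = lik_logit();
%      gp  = gp_set('lik', lik, 'cf', gpcf_sexp, 'latent_method', 'EP');
%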
% See also
% GP_SET, LIK_*
%
% Copyright (c) 2008-2010 Jarno Vanhatalo
% Copyright (c) 2010 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'LIK_LOGIT';
ip.addOptional('lik', [], @isstruct);
ip.parse(varargin{:});
lik=ip.Results.lik;
if isempty(lik)
init=true;
lik.type = 'Logit';
else
if ~isfield(lik,'type') || ~isequal(lik.type,'Logit')
error('First argument does not seem to be a valid likelihood function structure')
end
init=false;
end
if init
% Set the function handles to the subfunctions
lik.fh.pak = @lik_logit_pak;
lik.fh.unpak = @lik_logit_unpak;
lik.fh.ll = @lik_logit_ll;
lik.fh.llg = @lik_logit_llg;
lik.fh.llg2 = @lik_logit_llg2;
lik.fh.llg3 = @lik_logit_llg3;
lik.fh.tiltedMoments = @lik_logit_tiltedMoments;
lik.fh.predy = @lik_logit_predy;
lik.fh.invlink = @lik_logit_invlink;
lik.fh.recappend = @lik_logit_recappend;
end
end
function [w,s] = lik_logit_pak(lik)
%LIK_LOGIT_PAK Combine likelihood parameters into one vector.
%
% Description
% W = LIK_LOGIT_PAK(LIK) takes a likelihood structure LIK and
% returns an empty verctor W. If Logit likelihood had
% parameters this would combine them into a single row vector
% W (see e.g. lik_negbin). This is a mandatory subfunction used
% for example in energy and gradient computations.
%
% See also
% LIK_NEGBIN_UNPAK, GP_PAK
w = []; s = {};
end
function [lik, w] = lik_logit_unpak(lik, w)
%LIK_LOGIT_UNPAK Extract likelihood parameters from the vector.
%
% Description
%    [LIK, W] = LIK_LOGIT_UNPAK(LIK, W) doesn't do anything.
%
%    If the Logit likelihood had parameters, this would extract
%    them from the vector W to the LIK structure. This is a
% mandatory subfunction used for example in energy and gradient
% computations.
%
% See also
% LIK_LOGIT_PAK, GP_UNPAK
lik=lik;
w=w;
end
function ll = lik_logit_ll(lik, y, f, z)
%LIK_LOGIT_LL Log likelihood
%
% Description
% E = LIK_LOGIT_LL(LIK, Y, F) takes a likelihood structure
% LIK, class labels Y, and latent values F. Returns the log
% likelihood, log p(y|f,z). This subfunction is also used in
% information criteria (DIC, WAIC) computations.
%
% See also
% LIK_LOGIT_LLG, LIK_LOGIT_LLG3, LIK_LOGIT_LLG2, GPLA_E
if ~isempty(find(abs(y)~=1))
error('lik_logit: The class labels have to be {-1,1}')
end
ll = sum(-log(1+exp(-y.*f)));
end
function llg = lik_logit_llg(lik, y, f, param, z)
%LIK_LOGIT_LLG Gradient of the log likelihood
%
% Description
% G = LIK_LOGIT_LLG(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, class labels Y, and latent values F. Returns
% the gradient of the log likelihood with respect to PARAM. At the
% moment PARAM can be 'param' or 'latent'. This subfunction is
% needed when using Laplace approximation or MCMC for inference
% with non-Gaussian likelihoods.
%
% See also
% LIK_LOGIT_LL, LIK_LOGIT_LLG2, LIK_LOGIT_LLG3, GPLA_E
if ~isempty(find(abs(y)~=1))
error('lik_logit: The class labels have to be {-1,1}')
end
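  % For y in {-1,1}, d/df log(1/(1+exp(-y*f))) = y/(1+exp(y*f)), which
  % can be rewritten as (y+1)/2 - 1/(1+exp(-f)), i.e. t - PI below.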
t = (y+1)/2;
PI = 1./(1+exp(-f));
llg = t - PI;
%llg = (y+1)/2 - 1./(1+exp(-f));
end
function llg2 = lik_logit_llg2(lik, y, f, param, z)
%LIK_LOGIT_LLG2 Second gradients of the log likelihood
%
% Description
% LLG2 = LIK_LOGIT_LLG2(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, class labels Y, and latent values F. Returns
% the Hessian of the log likelihood with respect to PARAM. At
% the moment PARAM can be only 'latent'. LLG2 is a vector with
% diagonal elements of the Hessian matrix (off diagonals are
% zero). This subfunction is needed when using Laplace approximation
% or EP for inference with non-Gaussian likelihoods.
%
% See also
% LIK_LOGIT_LL, LIK_LOGIT_LLG, LIK_LOGIT_LLG3, GPLA_E
PI = 1./(1+exp(-f));
llg2 = -PI.*(1-PI);
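  % Note that -PI.*(1-PI) does not depend on y: for labels in {-1,1}
  % the curvature of the logit log likelihood is the same for both classes.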
end
function llg3 = lik_logit_llg3(lik, y, f, param, z)
%LIK_LOGIT_LLG3 Third gradients of the log likelihood
%
% Description
% LLG3 = LIK_LOGIT_LLG3(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, class labels Y, and latent values F and
% returns the third gradients of the log likelihood with
% respect to PARAM. At the moment PARAM can be only 'latent'.
% LLG3 is a vector with third gradients. This subfunction is
% needed when using Laplace approximation for inference with
% non-Gaussian likelihoods.
%
% See also
% LIK_LOGIT_LL, LIK_LOGIT_LLG, LIK_LOGIT_LLG2, GPLA_E, GPLA_G
if ~isempty(find(abs(y)~=1))
error('lik_logit: The class labels have to be {-1,1}')
end
t = (y+1)/2;
PI = 1./(1+exp(-f));
llg3 = -PI.*(1-PI).*(1-2*PI);
end
function [logM_0, m_1, sigm2hati1] = lik_logit_tiltedMoments(lik, y, i1, sigm2_i, myy_i, z)
%LIK_LOGIT_TILTEDMOMENTS Returns the marginal moments for EP algorithm
%
% Description
% [M_0, M_1, M2] = LIK_LOGIT_TILTEDMOMENTS(LIK, Y, I, S2, MYY)
% takes a likelihood structure LIK, class labels Y, index I
% and cavity variance S2 and mean MYY. Returns the zeroth
% moment M_0, mean M_1 and variance M_2 of the posterior
% marginal (see Rasmussen and Williams (2006): Gaussian
% processes for Machine Learning, page 55). This subfunction
% is needed when using EP for inference with non-Gaussian
% likelihoods.
%
% See also
% GPEP_E
% don't check this here, because this function is called so often by EP
% if ~isempty(find(abs(y)~=1))
% error('lik_logit: The class labels have to be {-1,1}')
% end
yy = y(i1);
logM_0=zeros(size(yy));
m_1=zeros(size(yy));
sigm2hati1=zeros(size(yy));
for i=1:length(i1)
% get a function handle of an unnormalized tilted distribution
% (likelihood * cavity = Logit * Gaussian)
% and useful integration limits
[tf,minf,maxf]=init_logit_norm(yy(i),myy_i(i),sigm2_i(i));
if isnan(minf) || isnan(maxf)
logM_0(i)=NaN; m_1(i)=NaN; sigm2hati1(i)=NaN;
continue
end
% Integrate with an adaptive Gauss-Kronrod quadrature
    % (Rasmussen and Nickisch use in GPML an interpolation between
% a cumulative Gaussian scale mixture and linear tail
% approximation, which could be faster, but quadrature also
% takes only a fraction of the time EP uses overall, so no
% need to change...)
RTOL = 1.e-6;
ATOL = 1.e-10;
[m_0, m_1(i), m_2] = quad_moments(tf, minf, maxf, RTOL, ATOL);
sigm2hati1(i) = m_2 - m_1(i).^2;
% If the second central moment is less than cavity variance
% integrate more precisely. Theoretically should be
% sigm2hati1 < sigm2_i.
if sigm2hati1(i) >= sigm2_i(i)
ATOL = ATOL.^2;
RTOL = RTOL.^2;
[m_0, m_1(i), m_2] = quad_moments(tf, minf, maxf, RTOL, ATOL);
sigm2hati1(i) = m_2 - m_1(i).^2;
if sigm2hati1(i) >= sigm2_i(i)
%warning('lik_logit_tilted_moments: sigm2hati1 >= sigm2_i');
sigm2hati1(i)=sigm2_i(i)-eps;
end
end
logM_0(i) = log(m_0);
end
end
function [lpy, Ey, Vary] = lik_logit_predy(lik, Ef, Varf, yt, zt)
%LIK_LOGIT_PREDY Returns the predictive mean, variance and density of y
%
% Description
% LPY = LIK_LOGIT_PREDY(LIK, EF, VARF, YT)
% Returns logarithm of the predictive density of YT, that is
% p(yt | y) = \int p(yt | f) p(f|y) df.
% This requires also the class labels YT. This subfunction
% is needed when computing posterior predictive distributions
% for future observations.
%
% [LPY, EY, VARY] = LIK_LOGIT_PREDY(LIK, EF, VARF) takes a
% likelihood structure LIK, posterior mean EF and posterior
% Variance VARF of the latent variable and returns also the
% posterior predictive mean EY and variance VARY of the
% observations related to the latent variables. This subfunction
% is needed when computing posterior predictive distributions for
% future observations.
%
% See also
% GPLA_PRED, GPEP_PRED, GPMC_PRED
if nargout > 1
py1 = zeros(length(Ef),1);
for i1=1:length(Ef)
myy_i = Ef(i1);
sigm_i = sqrt(Varf(i1));
minf=myy_i-6*sigm_i;
maxf=myy_i+6*sigm_i;
F = @(f)1./(1+exp(-f)).*norm_pdf(f,myy_i,sigm_i);
py1(i1) = quadgk(F,minf,maxf);
end
Ey = 2*py1-1;
Vary = 1-(2*py1-1).^2;
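    % Since y takes values in {-1,1}, E[y] = p(y=1)-p(y=-1) = 2*py1-1,
    % and because y.^2 = 1, Var[y] = 1 - E[y].^2.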
end
if ~isempty(find(abs(yt)~=1))
error('lik_logit: The class labels have to be {-1,1}')
end
% Quadrature integration
lpy = zeros(length(yt),1);
for i1 = 1:length(yt)
% get a function handle of the likelihood times posterior
    % (likelihood * posterior = Logit * Gaussian)
% and useful integration limits
[pdf,minf,maxf]=init_logit_norm(...
yt(i1),Ef(i1),Varf(i1));
% integrate over the f to get posterior predictive distribution
lpy(i1) = log(quadgk(pdf, minf, maxf));
end
end
function [df,minf,maxf] = init_logit_norm(yy,myy_i,sigm2_i)
%INIT_LOGIT_NORM
%
% Description
% Return function handle to a function evaluating Logit *
% Gaussian which is used for evaluating (likelihood * cavity)
% or (likelihood * posterior) Return also useful limits for
% integration. This is private function for lik_logit. This
% subfunction is needed by subfunctions tiltedMoments, siteDeriv
% and predy.
%
% See also
% LIK_LOGIT_TILTEDMOMENTS, LIK_LOGIT_PREDY
% avoid repetitive evaluation of constant part
ldconst = -log(sigm2_i)/2 -log(2*pi)/2;
% Create function handle for the function to be integrated
df = @logit_norm;
  % use log to avoid underflow, and derivatives for faster search
ld = @log_logit_norm;
ldg = @log_logit_norm_g;
ldg2 = @log_logit_norm_g2;
% Set the limits for integration
% Logit likelihood is log-concave so the logit_norm
% function is unimodal, which makes things easier
% approximate guess for the location of the mode
if sign(myy_i)==sign(yy)
% the log likelihood is flat on this side
modef = myy_i;
else
% the log likelihood is approximately yy*f on this side
modef=sign(myy_i)*max(abs(myy_i)-sigm2_i,0);
end
% find the mode of the integrand using Newton iterations
  % a few iterations are enough, since the first guess is in the right direction
niter=2; % number of Newton iterations
mindelta=1e-6; % tolerance in stopping Newton iterations
for ni=1:niter
g=ldg(modef);
h=ldg2(modef);
delta=-g/h;
modef=modef+delta;
if abs(delta)<mindelta
break
end
end
% integrand limits based on Gaussian approximation at mode
modes=sqrt(-1/h);
minf=modef-8*modes;
maxf=modef+8*modes;
modeld=ld(modef);
if isinf(modeld) || isnan(modeld)
minf=NaN;maxf=NaN;
return
end
iter=0;
% check that density at end points is low enough
lddiff=20; % min difference in log-density between mode and end-points
minld=ld(minf);
step=1;
while minld>(modeld-lddiff)
minf=minf-step*modes;
minld=ld(minf);
iter=iter+1;
step=step*2;
if iter>100
error(['lik_logit -> init_logit_norm: ' ...
             'integration interval minimum not found ' ...
'even after looking hard!'])
end
end
maxld=ld(maxf);
step=1;
while maxld>(modeld-lddiff)
maxf=maxf+step*modes;
maxld=ld(maxf);
iter=iter+1;
step=step*2;
if iter>100
error(['lik_logit -> init_logit_norm: ' ...
'integration interval maximum not found ' ...
'even after looking hard!'])
end
end
function integrand = logit_norm(f)
% Logit * Gaussian
integrand = exp(ldconst ...
-log(1+exp(-yy.*f)) ...
-0.5*(f-myy_i).^2./sigm2_i);
end
function log_int = log_logit_norm(f)
% log(Logit * Gaussian)
% log_logit_norm is used to avoid underflow when searching
% integration interval
log_int = ldconst ...
-log(1+exp(-yy.*f)) ...
-0.5*(f-myy_i).^2./sigm2_i;
end
function g = log_logit_norm_g(f)
% d/df log(Logit * Gaussian)
% derivative of log_logit_norm
g = yy./(exp(f*yy)+1)...
+ (myy_i - f)./sigm2_i;
end
function g2 = log_logit_norm_g2(f)
% d^2/df^2 log(Logit * Gaussian)
  % second derivative of log_logit_norm
a=exp(f*yy);
g2 = -a*(yy./(a+1)).^2 ...
-1/sigm2_i;
end
end
function p = lik_logit_invlink(lik, f, z)
%LIK_LOGIT_INVLINK Returns values of inverse link function
%
% Description
% P = LIK_LOGIT_INVLINK(LIK, F) takes a likelihood structure LIK and
% latent values F and returns the values of inverse link function P.
% This subfunction is needed when using function gp_predprctmu.
%
% See also
% LIK_LOGIT_LL, LIK_LOGIT_PREDY
p = logitinv(f);
end
function reclik = lik_logit_recappend(reclik, ri, lik)
%RECAPPEND Append the parameters to the record
%
% Description
% RECLIK = GPCF_LOGIT_RECAPPEND(RECLIK, RI, LIK) takes a
% likelihood record structure RECLIK, record index RI and
% likelihood structure LIK with the current MCMC samples of
% the parameters. Returns RECLIK which contains all the old
% samples and the current samples from LIK. This subfunction
% is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC
if nargin == 2
reclik.type = 'Logit';
% Set the function handles
reclik.fh.pak = @lik_logit_pak;
reclik.fh.unpak = @lik_logit_unpak;
reclik.fh.ll = @lik_logit_ll;
reclik.fh.llg = @lik_logit_llg;
reclik.fh.llg2 = @lik_logit_llg2;
reclik.fh.llg3 = @lik_logit_llg3;
reclik.fh.tiltedMoments = @lik_logit_tiltedMoments;
reclik.fh.predy = @lik_logit_predy;
reclik.fh.invlink = @lik_logit_invlink;
reclik.fh.recappend = @lik_logit_recappend;
end
end
| github | lcnhappe/happe-master | lik_loggaussian.m | .m | happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/lik_loggaussian.m | 25,546 | utf_8 | 0e6db76a65c1b277dbce5a945a7538f4 |
function lik = lik_loggaussian(varargin)
%LIK_LOGGAUSSIAN Create a right censored log-Gaussian likelihood structure
%
% Description
%    LIK = LIK_LOGGAUSSIAN('PARAM1',VALUE1,'PARAM2',VALUE2,...)
% creates a likelihood structure for right censored log-Gaussian
% survival model in which the named parameters have the
% specified values. Any unspecified parameters are set to
% default values.
%
%    LIK = LIK_LOGGAUSSIAN(LIK,'PARAM1',VALUE1,'PARAM2',VALUE2,...)
%    modifies a likelihood structure with the named parameters
% altered with the specified values.
%
% Parameters for log-Gaussian likelihood [default]
% sigma2 - variance [1]
% sigma2_prior - prior for sigma2 [prior_logunif]
%
% Note! If the prior is 'prior_fixed' then the parameter in
% question is considered fixed and it is not handled in
% optimization, grid integration, MCMC etc.
%
% The likelihood is defined as follows:
% __ n
% p(y|f, z) = || i=1 [ (2*pi*s^2)^(-(1-z_i)/2)*y_i^-(1-z_i)
% *exp(-1/(2*s^2)*(1-z_i)*(log(y_i) - f_i)^2)
% *(1-norm_cdf((log(y_i)-f_i)/s))^z_i ]
%
%
%      where s is the standard deviation of the log-Gaussian distribution.
%      z is a vector of censoring indicators, with z = 0 for an uncensored
%      event and z = 1 for a right-censored event.
%
% When using the log-Gaussian likelihood you need to give the vector z
%    as an extra parameter to each function that also requires y.
% For example, you should call gpla_e as follows: gpla_e(w, gp,
% x, y, 'z', z)
%
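%    A minimal usage sketch (hedged; xtr, ytr and ztr are hypothetical
%    survival data, with ztr(i)=1 marking a right-censored case):
%      lik = lik_loggaussian('sigma2', 0.5);
%      gp  = gp_set('lik', lik, 'cf', gpcf_sexp, 'latent_method', 'Laplace');
%      gp  = gp_optim(gp, xtr, ytr, 'z', ztr);
%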
% See also
% GP_SET, LIK_*, PRIOR_*
%
% Copyright (c) 2012 Ville Tolvanen
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'LIK_LOGGAUSSIAN';
ip.addOptional('lik', [], @isstruct);
ip.addParamValue('sigma2',1, @(x) isscalar(x) && x>0);
ip.addParamValue('sigma2_prior',prior_logunif(), @(x) isstruct(x) || isempty(x));
ip.parse(varargin{:});
lik=ip.Results.lik;
if isempty(lik)
init=true;
lik.type = 'Log-Gaussian';
else
if ~isfield(lik,'type') || ~isequal(lik.type,'Log-Gaussian')
error('First argument does not seem to be a valid likelihood function structure')
end
init=false;
end
% Initialize parameters
if init || ~ismember('sigma2',ip.UsingDefaults)
lik.sigma2 = ip.Results.sigma2;
end
% Initialize prior structure
if init
lik.p=[];
end
if init || ~ismember('sigma2_prior',ip.UsingDefaults)
lik.p.sigma2=ip.Results.sigma2_prior;
end
if init
% Set the function handles to the subfunctions
lik.fh.pak = @lik_loggaussian_pak;
lik.fh.unpak = @lik_loggaussian_unpak;
lik.fh.lp = @lik_loggaussian_lp;
lik.fh.lpg = @lik_loggaussian_lpg;
lik.fh.ll = @lik_loggaussian_ll;
lik.fh.llg = @lik_loggaussian_llg;
lik.fh.llg2 = @lik_loggaussian_llg2;
lik.fh.llg3 = @lik_loggaussian_llg3;
lik.fh.tiltedMoments = @lik_loggaussian_tiltedMoments;
lik.fh.siteDeriv = @lik_loggaussian_siteDeriv;
lik.fh.invlink = @lik_loggaussian_invlink;
lik.fh.predy = @lik_loggaussian_predy;
lik.fh.recappend = @lik_loggaussian_recappend;
lik.fh.predcdf = @lik_loggaussian_predcdf;
end
end
function [w,s] = lik_loggaussian_pak(lik)
%LIK_LOGGAUSSIAN_PAK Combine likelihood parameters into one vector.
%
% Description
% W = LIK_LOGGAUSSIAN_PAK(LIK) takes a likelihood structure LIK and
% combines the parameters into a single row vector W. This is a
% mandatory subfunction used for example in energy and gradient
% computations.
%
% w = log(lik.sigma2)
%
% See also
% LIK_LOGGAUSSIAN_UNPAK, GP_PAK
w=[];s={};
if ~isempty(lik.p.sigma2)
w = log(lik.sigma2);
s = [s; 'log(loggaussian.sigma2)'];
[wh sh] = lik.p.sigma2.fh.pak(lik.p.sigma2);
w = [w wh];
s = [s; sh];
end
end
function [lik, w] = lik_loggaussian_unpak(lik, w)
%LIK_LOGGAUSSIAN_UNPAK Extract likelihood parameters from the vector.
%
% Description
% [LIK, W] = LIK_LOGGAUSSIAN_UNPAK(W, LIK) takes a likelihood
% structure LIK and extracts the parameters from the vector W
% to the LIK structure. This is a mandatory subfunction used
% for example in energy and gradient computations.
%
% Assignment is inverse of
% w = log(lik.sigma2)
%
% See also
% LIK_LOGGAUSSIAN_PAK, GP_UNPAK
if ~isempty(lik.p.sigma2)
lik.sigma2 = exp(w(1));
w = w(2:end);
[p, w] = lik.p.sigma2.fh.unpak(lik.p.sigma2, w);
lik.p.sigma2 = p;
end
end
function lp = lik_loggaussian_lp(lik, varargin)
%LIK_LOGGAUSSIAN_LP log(prior) of the likelihood parameters
%
% Description
% LP = LIK_LOGGAUSSIAN_LP(LIK) takes a likelihood structure LIK and
% returns log(p(th)), where th collects the parameters. This subfunction
% is needed when there are likelihood parameters.
%
% See also
% LIK_LOGGAUSSIAN_LLG, LIK_LOGGAUSSIAN_LLG3, LIK_LOGGAUSSIAN_LLG2, GPLA_E
% If prior for sigma2 parameter, add its contribution
lp=0;
if ~isempty(lik.p.sigma2)
lp = lik.p.sigma2.fh.lp(lik.sigma2, lik.p.sigma2) +log(lik.sigma2);
end
end
function lpg = lik_loggaussian_lpg(lik)
%LIK_LOGGAUSSIAN_LPG d log(prior)/dth of the likelihood
% parameters th
%
% Description
% E = LIK_LOGGAUSSIAN_LPG(LIK) takes a likelihood structure LIK and
% returns d log(p(th))/dth, where th collects the parameters. This
% subfunction is needed when there are likelihood parameters.
%
% See also
% LIK_LOGGAUSSIAN_LLG, LIK_LOGGAUSSIAN_LLG3, LIK_LOGGAUSSIAN_LLG2, GPLA_G
lpg=[];
if ~isempty(lik.p.sigma2)
% Evaluate the gprior with respect to sigma2
ggs = lik.p.sigma2.fh.lpg(lik.sigma2, lik.p.sigma2);
lpg = ggs(1).*lik.sigma2 + 1;
if length(ggs) > 1
lpg = [lpg ggs(2:end)];
end
end
end
function ll = lik_loggaussian_ll(lik, y, f, z)
%LIK_LOGGAUSSIAN_LL Log likelihood
%
% Description
% LL = LIK_LOGGAUSSIAN_LL(LIK, Y, F, Z) takes a likelihood
% structure LIK, survival times Y, censoring indicators Z, and
% latent values F. Returns the log likelihood, log p(y|f,z).
% This subfunction is needed when using Laplace approximation
% or MCMC for inference with non-Gaussian likelihoods. This
% subfunction is also used in information criteria (DIC, WAIC)
% computations.
%
% See also
% LIK_LOGGAUSSIAN_LLG, LIK_LOGGAUSSIAN_LLG3, LIK_LOGGAUSSIAN_LLG2, GPLA_E
if isempty(z)
error(['lik_loggaussian -> lik_loggaussian_ll: missing z! '...
'loggaussian likelihood needs the censoring '...
'indicators as an extra input z. See, for '...
'example, lik_loggaussian and gpla_e. ']);
end
s2 = lik.sigma2;
ll = sum(-(1-z)./2*log(2*pi*s2) - (1-z).*log(y) - (1-z)./(2*s2).*(log(y)-f).^2 ...
+ z.*log(1-norm_cdf((log(y)-f)./sqrt(s2))));
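  % The first two terms are the log of the log-Gaussian density for an
  % uncensored observation (z=0); the last term is the log survival
  % probability log(1-Phi((log(y)-f)/s)) for a right-censored one (z=1).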
end
function llg = lik_loggaussian_llg(lik, y, f, param, z)
%LIK_LOGGAUSSIAN_LLG Gradient of the log likelihood
%
% Description
% LLG = LIK_LOGGAUSSIAN_LLG(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, survival times Y, censoring indicators Z and
% latent values F. Returns the gradient of the log likelihood
% with respect to PARAM. At the moment PARAM can be 'param' or
% 'latent'. This subfunction is needed when using Laplace
% approximation or MCMC for inference with non-Gaussian likelihoods.
%
% See also
% LIK_LOGGAUSSIAN_LL, LIK_LOGGAUSSIAN_LLG2, LIK_LOGGAUSSIAN_LLG3, GPLA_E
if isempty(z)
error(['lik_loggaussian -> lik_loggaussian_llg: missing z! '...
'loggaussian likelihood needs the censoring '...
'indicators as an extra input z. See, for '...
'example, lik_loggaussian and gpla_e. ']);
end
s2 = lik.sigma2;
r = log(y)-f;
switch param
case 'param'
llg = sum(-(1-z)./(2.*s2) + (1-z).*r.^2./(2.*s2^2) + z./(1-norm_cdf(r/sqrt(s2))) ...
.* (r./(sqrt(2.*pi).*2.*s2.^(3/2)).*exp(-1/(2.*s2).*r.^2)));
% correction for the log transformation
llg = llg.*lik.sigma2;
case 'latent'
llg = (1-z)./s2.*r + z./(1-norm_cdf(r/sqrt(s2))).*(1/sqrt(2*pi*s2) .* exp(-1/(2.*s2).*r.^2));
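      % For a censored case (z=1) this is the Gaussian pdf divided by the
      % survival probability 1-Phi(r/sqrt(s2)), a hazard-type term; for an
      % uncensored case (z=0) it is the usual Gaussian residual term r/s2.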
end
end
function llg2 = lik_loggaussian_llg2(lik, y, f, param, z)
%LIK_LOGGAUSSIAN_LLG2 Second gradients of the log likelihood
%
% Description
% LLG2 = LIK_LOGGAUSSIAN_LLG2(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, survival times Y, censoring indicators Z, and
% latent values F. Returns the hessian of the log likelihood
% with respect to PARAM. At the moment PARAM can be only
% 'latent'. LLG2 is a vector with diagonal elements of the
% Hessian matrix (off diagonals are zero). This subfunction
% is needed when using Laplace approximation or EP for
% inference with non-Gaussian likelihoods.
%
% See also
% LIK_LOGGAUSSIAN_LL, LIK_LOGGAUSSIAN_LLG, LIK_LOGGAUSSIAN_LLG3, GPLA_E
if isempty(z)
error(['lik_loggaussian -> lik_loggaussian_llg2: missing z! '...
'loggaussian likelihood needs the censoring '...
'indicators as an extra input z. See, for '...
'example, lik_loggaussian and gpla_e. ']);
end
s2 = lik.sigma2;
r = log(y)-f;
switch param
case 'param'
case 'latent'
llg2 = (z-1)./s2 + z.*(-exp(-r.^2/s2)./(2*pi*s2.*(1-norm_cdf(r/sqrt(s2))).^2) ...
+ r./(sqrt(2*pi).*s2^(3/2).*(1-norm_cdf(r/sqrt(s2)))).*exp(-r.^2./(2*s2)));
case 'latent+param'
llg2 = -(1-z)./s2^2.*(log(y)-f) + z.*(-r./(4*pi*s2^2.*(1-norm_cdf(r/sqrt(s2))).^2) ...
.* exp(-r.^2./s2) + (-1 + r.^2/s2)./(1-norm_cdf(r/sqrt(s2))).*1./(sqrt(2*pi)*2*s2^(3/2)).*exp(-r.^2./(2*s2)));
% correction due to the log transformation
llg2 = llg2.*s2;
end
end
function llg3 = lik_loggaussian_llg3(lik, y, f, param, z)
%LIK_LOGGAUSSIAN_LLG3 Third gradients of the log likelihood
%
% Description
% LLG3 = LIK_LOGGAUSSIAN_LLG3(LIK, Y, F, PARAM) takes a likelihood
% structure LIK, survival times Y, censoring indicators Z and
% latent values F and returns the third gradients of the log
% likelihood with respect to PARAM. At the moment PARAM can be
% only 'latent'. LLG3 is a vector with third gradients. This
% subfunction is needed when using Laplace approximation for
% inference with non-Gaussian likelihoods.
%
% See also
% LIK_LOGGAUSSIAN_LL, LIK_LOGGAUSSIAN_LLG, LIK_LOGGAUSSIAN_LLG2, GPLA_E, GPLA_G
if isempty(z)
error(['lik_loggaussian -> lik_loggaussian_llg3: missing z! '...
'loggaussian likelihood needs the censoring '...
'indicators as an extra input z. See, for '...
'example, lik_loggaussian and gpla_e. ']);
end
s2 = lik.sigma2;
r = log(y) - f;
switch param
case 'param'
case 'latent'
llg3 = 2.*z./(1-norm_cdf(r/sqrt(s2))).^3.*1./(2*pi*s2)^(3/2).*exp(-3/(2*s2)*r.^2) ...
- z./(1-norm_cdf(r/sqrt(s2))).^2.*r./(pi*s2^2).*exp(-r.^2./s2) ...
- z./(1-norm_cdf(r/sqrt(s2))).^2.*r./(2*pi*s2^2).*exp(-r.^2/s2) ...
- z./(1-norm_cdf(r/sqrt(s2))).^1.*1./(s2^(3/2)*sqrt(2*pi)).*exp(-r.^2/(2*s2)) ...
+ z./(1-norm_cdf(r/sqrt(s2))).^1.*r.^2./(sqrt(2*pi*s2)*s2^2).*exp(-r.^2/(2*s2));
case 'latent2+param'
llg3 = (1-z)./s2^2 + z.*(1./(1-norm_cdf(r/sqrt(s2))).^3.*r./(sqrt(8*pi^3).*s2.^(5/2)).*exp(-3/(2.*s2).*r.^2) ...
+ 1./(1-norm_cdf(r./sqrt(s2))).^2.*1./(4.*pi.*s2^2).*exp(-r.^2./s2) ...
- 1./(1-norm_cdf(r./sqrt(s2))).^2.*r.^2./(2*pi*s2^3).*exp(-r.^2./s2) ...
+ 1./(1-norm_cdf(r./sqrt(s2))).^2.*1./(4*pi*s2^2).*exp(-r.^2/s2) ...
- 1./(1-norm_cdf(r./sqrt(s2))).^1.*r./(sqrt(2*pi)*2*s2^(5/2)).*exp(-r.^2/(2*s2)) ...
- 1./(1-norm_cdf(r./sqrt(s2))).^2.*r.^2./(4*pi*s2^3).*exp(-r.^2/s2) ...
- 1./(1-norm_cdf(r./sqrt(s2))).^1.*r./(sqrt(2*pi)*s2^(5/2)).*exp(-r.^2/(2*s2)) ...
+ 1./(1-norm_cdf(r./sqrt(s2))).^1.*r.^3./(sqrt(2*pi)*2*s2^(7/2)).*exp(-r.^2/(2*s2)));
% correction due to the log transformation
llg3 = llg3.*lik.sigma2;
end
end
function [logM_0, m_1, sigm2hati1] = lik_loggaussian_tiltedMoments(lik, y, i1, sigm2_i, myy_i, z)
%LIK_LOGGAUSSIAN_TILTEDMOMENTS Returns the marginal moments for EP algorithm
%
% Description
% [M_0, M_1, M2] = LIK_LOGGAUSSIAN_TILTEDMOMENTS(LIK, Y, I, S2,
% MYY, Z) takes a likelihood structure LIK, survival times
% Y, censoring indicators Z, index I and cavity variance S2 and
% mean MYY. Returns the zeroth moment M_0, mean M_1 and
% variance M_2 of the posterior marginal (see Rasmussen and
% Williams (2006): Gaussian processes for Machine Learning,
% page 55). This subfunction is needed when using EP for
% inference with non-Gaussian likelihoods.
%
% See also
% GPEP_E
if isempty(z)
error(['lik_loggaussian -> lik_loggaussian_tiltedMoments: missing z!'...
'loggaussian likelihood needs the censoring '...
'indicators as an extra input z. See, for '...
'example, lik_loggaussian and gpep_e. ']);
end
yy = y(i1);
yc = 1-z(i1);
s2 = lik.sigma2;
logM_0=zeros(size(yy));
m_1=zeros(size(yy));
sigm2hati1=zeros(size(yy));
for i=1:length(i1)
% get a function handle of an unnormalized tilted distribution
    % (likelihood * cavity = log-Gaussian * Gaussian)
% and useful integration limits
[tf,minf,maxf]=init_loggaussian_norm(yy(i),myy_i(i),sigm2_i(i),yc(i),s2);
% Integrate with quadrature
RTOL = 1.e-6;
ATOL = 1.e-10;
[m_0, m_1(i), m_2] = quad_moments(tf, minf, maxf, RTOL, ATOL);
sigm2hati1(i) = m_2 - m_1(i).^2;
% If the second central moment is less than cavity variance
% integrate more precisely. Theoretically for log-concave
% likelihood should be sigm2hati1 < sigm2_i.
if sigm2hati1(i) >= sigm2_i(i)
ATOL = ATOL.^2;
RTOL = RTOL.^2;
[m_0, m_1(i), m_2] = quad_moments(tf, minf, maxf, RTOL, ATOL);
sigm2hati1(i) = m_2 - m_1(i).^2;
if sigm2hati1(i) >= sigm2_i(i)
error('lik_loggaussian_tilted_moments: sigm2hati1 >= sigm2_i');
end
end
logM_0(i) = log(m_0);
end
end
function [g_i] = lik_loggaussian_siteDeriv(lik, y, i1, sigm2_i, myy_i, z)
%LIK_LOGGAUSSIAN_SITEDERIV Evaluate the expectation of the gradient
% of the log likelihood term with respect
% to the likelihood parameters for EP
%
% Description [M_0, M_1, M2] =
% LIK_LOGGAUSSIAN_SITEDERIV(LIK, Y, I, S2, MYY, Z) takes a
%    likelihood structure LIK, survival times Y, censoring
%    indicators Z, index I and cavity variance S2 and mean MYY.
% Returns E_f [d log p(y_i|f_i) /d a], where a is the
% likelihood parameter and the expectation is over the
% marginal posterior. This term is needed when evaluating the
% gradients of the marginal likelihood estimate Z_EP with
% respect to the likelihood parameters (see Seeger (2008):
% Expectation propagation for exponential families). This
% subfunction is needed when using EP for inference with
% non-Gaussian likelihoods and there are likelihood parameters.
%
% See also
% GPEP_G
if isempty(z)
error(['lik_loggaussian -> lik_loggaussian_siteDeriv: missing z!'...
'loggaussian likelihood needs the censoring '...
'indicators as an extra input z. See, for '...
'example, lik_loggaussian and gpla_e. ']);
end
yy = y(i1);
yc = 1-z(i1);
s2 = lik.sigma2;
% get a function handle of an unnormalized tilted distribution
% (likelihood * cavity = Log-Gaussian * Gaussian)
% and useful integration limits
[tf,minf,maxf]=init_loggaussian_norm(yy,myy_i,sigm2_i,yc,s2);
% additionally get function handle for the derivative
td = @deriv;
% Integrate with quadgk
[m_0, fhncnt] = quadgk(tf, minf, maxf);
[g_i, fhncnt] = quadgk(@(f) td(f).*tf(f)./m_0, minf, maxf);
g_i = g_i.*s2;
function g = deriv(f)
r=log(yy)-f;
g = -yc./(2.*s2) + yc.*r.^2./(2.*s2^2) + (1-yc)./(1-norm_cdf(r/sqrt(s2))) ...
.* (r./(sqrt(2.*pi).*2.*s2.^(3/2)).*exp(-1/(2.*s2).*r.^2));
end
end
function [lpy, Ey, Vary] = lik_loggaussian_predy(lik, Ef, Varf, yt, zt)
%LIK_LOGGAUSSIAN_PREDY Returns the predictive mean, variance and density of y
%
% Description
% LPY = LIK_LOGGAUSSIAN_PREDY(LIK, EF, VARF YT, ZT)
%    Returns logarithm of the predictive density LPY of YT, that is
% p(yt | zt) = \int p(yt | f, zt) p(f|y) df.
% This requires also the survival times YT, censoring indicators ZT.
% This subfunction is needed when computing posterior predictive
% distributions for future observations.
%
% [LPY, EY, VARY] = LIK_LOGGAUSSIAN_PREDY(LIK, EF, VARF) takes a
% likelihood structure LIK, posterior mean EF and posterior
% Variance VARF of the latent variable and returns the
% posterior predictive mean EY and variance VARY of the
% observations related to the latent variables. This subfunction
% is needed when computing posterior predictive distributions for
% future observations.
%
%
% See also
% GPLA_PRED, GPEP_PRED, GPMC_PRED
if isempty(zt)
error(['lik_loggaussian -> lik_loggaussian_predy: missing zt!'...
'loggaussian likelihood needs the censoring '...
'indicators as an extra input zt. See, for '...
'example, lik_loggaussian and gpla_e. ']);
end
yc = 1-zt;
s2 = lik.sigma2;
Ey=[];
Vary=[];
% Evaluate the posterior predictive densities of the given observations
lpy = zeros(length(yt),1);
for i1=1:length(yt)
if abs(Ef(i1))>700
lpy(i1) = NaN;
else
% get a function handle of the likelihood times posterior
      % (likelihood * posterior = log-Gaussian * Gaussian)
% and useful integration limits
[pdf,minf,maxf]=init_loggaussian_norm(...
yt(i1),Ef(i1),Varf(i1),yc(i1),s2);
% integrate over the f to get posterior predictive distribution
lpy(i1) = log(quadgk(pdf, minf, maxf));
end
end
end
function [df,minf,maxf] = init_loggaussian_norm(yy,myy_i,sigm2_i,yc,s2)
%INIT_LOGGAUSSIAN_NORM
%
% Description
% Return function handle to a function evaluating
% loggaussian * Gaussian which is used for evaluating
% (likelihood * cavity) or (likelihood * posterior) Return
% also useful limits for integration. This is private function
% for lik_loggaussian. This subfunction is needed by subfunctions
% tiltedMoments, siteDeriv and predy.
%
% See also
% LIK_LOGGAUSSIAN_TILTEDMOMENTS, LIK_LOGGAUSSIAN_SITEDERIV,
% LIK_LOGGAUSSIAN_PREDY
% avoid repetitive evaluation of constant part
ldconst = -yc./2.*log(2*pi*s2) -yc.*log(yy) ...
- log(sigm2_i)/2 - log(2*pi)/2;
% Create function handle for the function to be integrated
df = @loggaussian_norm;
% use log to avoid underflow, and derivates for faster search
ld = @log_loggaussian_norm;
ldg = @log_loggaussian_norm_g;
ldg2 = @log_loggaussian_norm_g2;
% Set the limits for integration
if yc==0
    % for a fully censored case (yc==0) the likelihood mode is not defined
% use the mode of the Gaussian (cavity or posterior) as a first guess
modef = myy_i;
else
    % use the precision-weighted mean of a Gaussian approximation
    % of the log-Gaussian likelihood and the Gaussian (cavity/posterior)
mu=log(yy);
%s2=1./(yc+1./sigm2_i);
% s2=s2;
modef = (myy_i/sigm2_i + mu/s2)/(1/sigm2_i + 1/s2);
end
% find the mode of the integrand using Newton iterations
% few iterations is enough, since the first guess in the right direction
niter=4; % number of Newton iterations
mindelta=1e-6; % tolerance in stopping Newton iterations
for ni=1:niter
g=ldg(modef);
h=ldg2(modef);
delta=-g/h;
modef=modef+delta;
if abs(delta)<mindelta
break
end
end
% integrand limits based on Gaussian approximation at mode
modes=sqrt(-1/h);
minf=modef-8*modes;
maxf=modef+8*modes;
modeld=ld(modef);
iter=0;
% check that density at end points is low enough
lddiff=20; % min difference in log-density between mode and end-points
minld=ld(minf);
step=1;
while minld>(modeld-lddiff)
minf=minf-step*modes;
minld=ld(minf);
iter=iter+1;
step=step*2;
if iter>100
error(['lik_loggaussian -> init_loggaussian_norm: ' ...
             'integration interval minimum not found ' ...
'even after looking hard!'])
end
end
maxld=ld(maxf);
step=1;
while maxld>(modeld-lddiff)
maxf=maxf+step*modes;
maxld=ld(maxf);
iter=iter+1;
step=step*2;
if iter>100
error(['lik_loggaussian -> init_loggaussian_norm: ' ...
             'integration interval maximum not found ' ...
'even after looking hard!'])
end
end
function integrand = loggaussian_norm(f)
% loggaussian * Gaussian
integrand = exp(ldconst ...
- yc./(2*s2).*(log(yy)-f).^2 + (1-yc).*log(1-norm_cdf((log(yy)-f)/sqrt(s2))) ...
-0.5*(f-myy_i).^2./sigm2_i);
end
function log_int = log_loggaussian_norm(f)
% log(loggaussian * Gaussian)
% log_loggaussian_norm is used to avoid underflow when searching
% integration interval
log_int = ldconst ...
-yc./(2*s2).*(log(yy)-f).^2 + (1-yc).*log(1-norm_cdf((log(yy)-f)/sqrt(s2))) ...
-0.5*(f-myy_i).^2./sigm2_i;
end
function g = log_loggaussian_norm_g(f)
% d/df log(loggaussian * Gaussian)
% derivative of log_loggaussian_norm
g = yc./s2.*(log(yy)-f) + (1-yc)./(1-norm_cdf((log(yy)-f)/sqrt(s2))).*1/sqrt(2*pi*s2)*exp(-(log(yy)-f).^2./(2*s2)) ...
+ (myy_i - f)./sigm2_i;
end
function g2 = log_loggaussian_norm_g2(f)
% d^2/df^2 log(loggaussian * Gaussian)
% second derivate of log_loggaussian_norm
g2 = -yc./s2 + (1-yc).*(-exp(-(log(yy)-f).^2/s2)./(2*pi*s2.*(1-norm_cdf((log(yy)-f)/sqrt(s2))).^2) ...
+ (log(yy)-f)./(sqrt(2*pi).*s2^(3/2).*(1-norm_cdf((log(yy)-f)/sqrt(s2)))).*exp(-(log(yy)-f).^2./(2*s2))) ...
-1/sigm2_i;
end
end
function cdf = lik_loggaussian_predcdf(lik, Ef, Varf, yt)
%LIK_LOGGAUSSIAN_PREDCDF Returns the predictive cdf evaluated at yt
%
% Description
% CDF = LIK_LOGGAUSSIAN_PREDCDF(LIK, EF, VARF, YT)
% Returns the predictive cdf evaluated at YT given likelihood
% structure LIK, posterior mean EF and posterior Variance VARF
% of the latent variable. This subfunction is needed when using
% functions gp_predcdf or gp_kfcv_cdf.
%
% See also
% GP_PREDCDF
s2 = lik.sigma2;
% Evaluate the posterior predictive densities of the given observations
cdf = zeros(length(yt),1);
for i1=1:length(yt)
% Get a function handle of the likelihood times posterior
% (likelihood * posterior = log-Gaussian * Gaussian)
% and useful integration limits.
% yc=0 when evaluating predictive cdf
[pdf,minf,maxf]=init_loggaussian_norm(...
yt(i1),Ef(i1),Varf(i1),0,s2);
% integrate over the f to get posterior predictive distribution
cdf(i1) = 1-quadgk(pdf, minf, maxf);
end
end
function p = lik_loggaussian_invlink(lik, f)
%LIK_LOGGAUSSIAN Returns values of inverse link function
%
% Description
% P = LIK_LOGGAUSSIAN_INVLINK(LIK, F) takes a likelihood structure LIK and
% latent values F and returns the values of inverse link function P.
% This subfunction is needed when using function gp_predprctmu.
%
% See also
% LIK_LOGGAUSSIAN_LL, LIK_LOGGAUSSIAN_PREDY
p = exp(f);
end
function reclik = lik_loggaussian_recappend(reclik, ri, lik)
%RECAPPEND Append the parameters to the record
%
% Description
% RECLIK = GPCF_LOGGAUSSIAN_RECAPPEND(RECLIK, RI, LIK) takes a
% likelihood record structure RECLIK, record index RI and
% likelihood structure LIK with the current MCMC samples of
% the parameters. Returns RECLIK which contains all the old
% samples and the current samples from LIK. This subfunction
% is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC
if nargin == 2
% Initialize the record
reclik.type = 'Log-Gaussian';
% Initialize parameter
reclik.sigma2 = [];
% Set the function handles
reclik.fh.pak = @lik_loggaussian_pak;
reclik.fh.unpak = @lik_loggaussian_unpak;
reclik.fh.lp = @lik_loggaussian_lp;
reclik.fh.lpg = @lik_loggaussian_lpg;
reclik.fh.ll = @lik_loggaussian_ll;
reclik.fh.llg = @lik_loggaussian_llg;
reclik.fh.llg2 = @lik_loggaussian_llg2;
reclik.fh.llg3 = @lik_loggaussian_llg3;
reclik.fh.tiltedMoments = @lik_loggaussian_tiltedMoments;
reclik.fh.invlink = @lik_loggaussian_invlink;
reclik.fh.predy = @lik_loggaussian_predy;
reclik.fh.predcdf = @lik_loggaussian_predcdf;
reclik.fh.recappend = @lik_loggaussian_recappend;
reclik.p=[];
reclik.p.sigma2=[];
if ~isempty(ri.p.sigma2)
reclik.p.sigma2 = ri.p.sigma2;
end
else
% Append to the record
reclik.sigma2(ri,:)=lik.sigma2;
if ~isempty(lik.p)
reclik.p.sigma2 = lik.p.sigma2.fh.recappend(reclik.p.sigma2, ri, lik.p.sigma2);
end
end
end
| github | lcnhappe/happe-master | gpmf_constant.m | .m | happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpmf_constant.m | 8,340 | utf_8 | d28568c446e6b59df03d7a7809de643f |
function gpmf = gpmf_constant(varargin)
%GPMF_CONSTANT Create a constant mean function
%
% Description
%    GPMF = GPMF_CONSTANT('PARAM1',VALUE1,'PARAM2',VALUE2,...)
% creates constant mean function structure in which the named
% parameters have the specified values. Any unspecified
% parameters are set to default values.
%
%    GPMF = GPMF_CONSTANT(GPMF,'PARAM1',VALUE1,'PARAM2',VALUE2,...)
%    modifies a mean function structure with the named parameters
% altered with the specified values.
%
% Parameters for constant mean function
% constant - constant value for the constant
% base function (default 1)
% prior_mean - prior mean (scalar or vector) for base
% functions' weight prior (default 0)
% prior_cov - prior covariances (scalar or vector)
% for base functions' prior corresponding
% each selected input dimension. In
% multiple dimension case prior_cov is a
% struct containing scalars or vectors.
% The covariances must all be either
% scalars (diagonal cov.matrix) or
% vectors (for non-diagonal cov.matrix)
% (default 100)
%
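%    A minimal usage sketch (hedged; the values are hypothetical and the
%    mean function is passed to GP_SET through its 'meanf' option):
%      gpmf = gpmf_constant('prior_mean', 0, 'prior_cov', 100);
%      gp   = gp_set('cf', gpcf_sexp, 'meanf', {gpmf});
%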
% See also
% GP_SET, GPMF_LINEAR, GPMF_SQUARED
%
% Copyright (c) 2010 Tuomas Nikoskinen
% Copyright (c) 2011 Jarno Vanhatalo
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'GPMF_CONSTANT';
ip.addOptional('gpmf', [], @isstruct);
ip.addParamValue('constant',1, @(x) isvector(x) && all(x>0));
ip.addParamValue('prior_mean',0, @(x) isvector(x));
ip.addParamValue('prior_cov',100, @(x) isvector(x));
ip.addParamValue('mean_prior', [], @isstruct);
ip.addParamValue('cov_prior', [], @isstruct);
ip.parse(varargin{:});
gpmf=ip.Results.gpmf;
if isempty(gpmf)
% Initialize a mean function
init=true;
gpmf.type = 'gpmf_constant';
else
% Modify a mean function
    if ~isfield(gpmf,'type') || ~isequal(gpmf.type,'gpmf_constant')
error('First argument does not seem to be a constant mean function')
end
init=false;
end
% Initialize parameters
  if init || ~ismember('constant',ip.UsingDefaults)
gpmf.constant = ip.Results.constant;
end
if init || ~ismember('prior_mean',ip.UsingDefaults)
gpmf.b=ip.Results.prior_mean(:)';
end
  if init || ~ismember('prior_cov',ip.UsingDefaults)
gpmf.B=ip.Results.prior_cov(:)';
end
if init || ~ismember('mean_prior',ip.UsingDefaults)
gpmf.p.b=ip.Results.mean_prior;
end
if init || ~ismember('cov_prior',ip.UsingDefaults)
gpmf.p.B=ip.Results.cov_prior;
end
if init
% Set the function handles to the nested functions
gpmf.fh.geth = @gpmf_geth;
gpmf.fh.pak = @gpmf_pak;
gpmf.fh.unpak = @gpmf_unpak;
gpmf.fh.lp = @gpmf_lp;
gpmf.fh.lpg = @gpmf_lpg;
gpmf.fh.recappend = @gpmf_recappend;
end
end
function h = gpmf_geth(gpmf, x)
%GPMF_GETH Calculate the base function values for a given input.
%
% Description
% H = GPMF_GETH(GPMF,X) takes in a mean function structure
% GPMF and inputs X. The function returns a row vector of
% length(X) containing the constant value which is by default
% 1.
constant=gpmf.constant;
h = repmat(constant,1,length(x(:,1)));
end
function [w, s] = gpmf_pak(gpmf, w)
%GPMF_PAK Combine GP mean function parameters into one vector
%
% Description
%    W = GPMF_PAK(GPMF) takes a mean function structure GPMF and
%    combines the mean function parameters and their
%    hyperparameters into a single row vector W.
%
%    w = [ gpmf.b
%          (hyperparameters of gpmf.b)
%          log(gpmf.B)
%          (hyperparameters of gpmf.B)]'
%
%  See also
%    GPMF_UNPAK
w = []; s = {};
if ~isempty(gpmf.p.b)
w = gpmf.b;
if numel(gpmf.b)>1
s = [s; sprintf('gpmf_constant.b x %d',numel(gpmf.b))];
else
s = [s; 'gpmf_constant.b'];
end
    % Hyperparameters of the prior of b
[wh sh] = gpmf.p.b.fh.pak(gpmf.p.b);
w = [w wh];
s = [s; sh];
end
if ~isempty(gpmf.p.B)
w = [w log(gpmf.B)];
if numel(gpmf.B)>1
s = [s; sprintf('log(gpmf_constant.B x %d)',numel(gpmf.B))];
else
s = [s; 'log(gpmf_constant.B)'];
end
    % Hyperparameters of the prior of B
[wh sh] = gpmf.p.B.fh.pak(gpmf.p.B);
w = [w wh];
s = [s; sh];
end
end
function [gpmf, w] = gpmf_unpak(gpmf, w)
%GPMF_UNPAK  Sets the mean function parameters
%            into the structure
%
%  Description
%    [GPMF, W] = GPMF_UNPAK(GPMF, W) takes a mean function
%    structure GPMF and a hyper-parameter vector W, and
%    returns a mean function structure identical to the
%    input, except that the hyper-parameters have been
%    set to the values in W. Deletes the values set to GPMF from
%    W and returns the modified W.
%
%    Assignment is inverse of
%    w = [ gpmf.b
%          (hyperparameters of gpmf.b)
%          log(gpmf.B)
%          (hyperparameters of gpmf.B)]'
%
%  See also
%    GPMF_PAK
gpp=gpmf.p;
if ~isempty(gpp.b)
i2=length(gpmf.b);
i1=1;
gpmf.b = w(i1:i2);
w = w(i2+1:end);
    % Hyperparameters of the prior of b
[p, w] = gpmf.p.b.fh.unpak(gpmf.p.b, w);
gpmf.p.b = p;
end
if ~isempty(gpp.B)
i2=length(gpmf.B);
i1=1;
gpmf.B = exp(w(i1:i2));
w = w(i2+1:end);
    % Hyperparameters of the prior of B
[p, w] = gpmf.p.B.fh.unpak(gpmf.p.B, w);
gpmf.p.B = p;
end
end
function lp = gpmf_lp(gpmf)
%GPMF_LP  Evaluate the log prior of mean function parameters
%
%  Description
%    Evaluate the prior contribution to the error. The parameters that
%    are sampled are transformed, e.g., W = log(w) where w is all
%    the "real" samples. On the other hand errors are evaluated in
%    the W-space so we need to take into account also the Jacobian of
%    the transformation, e.g., W -> w = exp(W). See Gelman et al., 2004,
%    Bayesian Data Analysis, second edition, p. 24.
lp = 0;
gpp=gpmf.p;
if ~isempty(gpmf.p.b)
lp = lp + gpp.b.fh.lp(gpmf.b, ...
gpp.b);
end
if ~isempty(gpp.B)
lp = lp + gpp.B.fh.lp(gpmf.B, ...
gpp.B) +sum(log(gpmf.B));
end
end
function [lpg_b, lpg_B] = gpmf_lpg(gpmf)
%GPMF_LPG  Evaluate gradient of the log prior with respect
%          to the parameters.
%
%  Description
%    [LPG_b, LPG_B] = GPMF_LPG(GPMF) takes a mean function
%    structure GPMF and returns the gradients d log (p(th))/dth of
%    the log prior with respect to the weight prior mean b and the
%    prior covariance B.
%
%  See also
%    GPMF_PAK, GPMF_UNPAK, GPMF_LP, GP_G
lpg_b=[]; lpg_B=[];
gpp=gpmf.p;
if ~isempty(gpmf.p.b)
lll = length(gpmf.b);
lpgs = gpp.b.fh.lpg(gpmf.b, gpp.b);
lpg_b = [lpgs(1:lll) lpgs(lll+1:end)]; %.*gpmf.b+1
end
if ~isempty(gpmf.p.B)
lll = length(gpmf.B);
lpgs = gpp.B.fh.lpg(gpmf.B, gpp.B);
lpg_B = [lpgs(1:lll).*gpmf.B+1 lpgs(lll+1:end)];
end
end
function recmf = gpmf_recappend(recmf, ri, gpmf)
%RECAPPEND Record append
%
% Description
%
% See also
% GP_MC and GP_MC -> RECAPPEND
% Initialize record
if nargin == 2
recmf.type = 'gpmf_constant';
% Initialize parameters
recmf.b= [];
recmf.B = [];
% Set the function handles
recmf.fh.geth = @gpmf_geth;
recmf.fh.pak = @gpmf_pak;
recmf.fh.unpak = @gpmf_unpak;
recmf.fh.lp = @gpmf_lp;
recmf.fh.lpg = @gpmf_lpg;
recmf.fh.recappend = @gpmf_recappend;
recmf.p=[];
recmf.p.b=[];
recmf.p.B=[];
if isfield(ri.p,'b') && ~isempty(ri.p.b)
recmf.p.b = ri.p.b;
end
if ~isempty(ri.p.B)
recmf.p.B = ri.p.B;
end
return
end
gpp = gpmf.p;
% record magnSigma2
if ~isempty(gpmf.b)
recmf.b(ri,:)=gpmf.b;
if ~isempty(recmf.p.b)
recmf.p.b = gpp.b.fh.recappend(recmf.p.b, ri, gpmf.p.b);
end
elseif ri==1
recmf.b=[];
end
if ~isempty(gpmf.B)
recmf.B(ri,:)=gpmf.B;
if ~isempty(recmf.p.B)
recmf.p.B = gpp.B.fh.recappend(recmf.p.B, ri, gpmf.p.B);
end
elseif ri==1
recmf.B=[];
end
end
| github | lcnhappe/happe-master | gpla_loopred.m | .m | happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpla_loopred.m | 10,429 | UNKNOWN | 430225391c518e3110e0a4535af20ba2 |
function [Eft, Varft, lpyt, Eyt, Varyt] = gpla_loopred(gp, x, y, varargin)
%GPLA_LOOPRED Leave-one-out predictions with Laplace approximation
%
% Description
% [EFT, VARFT, LPYT, EYT, VARYT] = GPLA_LOOPRED(GP, X, Y, OPTIONS)
% takes a Gaussian process structure GP together with a matrix X
% of training inputs and vector Y of training targets, and
% evaluates the leave-one-out predictive distribution at inputs
% X and returns means EFT and variances VARFT of latent
%    variables, the logarithm of the predictive densities LPYT, and
% the predictive means EYT and variances VARYT of observations
% at input locations X.
%
% OPTIONS is optional parameter-value pair
% z - optional observed quantity in triplet (x_i,y_i,z_i)
% Some likelihoods may use this. For example, in case of
% Poisson likelihood we have z_i=E_i, that is, expected value
% for ith case.
%
% Laplace leave-one-out is approximated in linear response style
% by expressing the solutions for LOO problem in terms of
% solution for the full problem. The computationally cheap
% solution can be obtained by making the assumption that the
% difference between these two solution is small such that their
% difference may be treated as an Taylor expansion truncated at
% first order (Winther et al 2012, in progress).
%
% See also
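%  A minimal call sketch (hedged; GP is assumed to use the Laplace
%  approximation and X, Y, Z are the training data described above):
%    [Eft, Varft, lpyt] = gpla_loopred(gp, x, y, 'z', z);
%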
% GP_LOOPRED, GP_PRED
% Copyright (c) 2011-2012 Aki Vehtari, Ville Tolvanen
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
ip=inputParser;
ip.FunctionName = 'GPLA_LOOPRED';
ip.addRequired('gp', @(x) isstruct(x));
ip.addRequired('x', @(x) ~isempty(x) && isreal(x) && all(isfinite(x(:))))
ip.addRequired('y', @(x) ~isempty(x) && isreal(x) && all(isfinite(x(:))))
ip.addParamValue('z', [], @(x) isreal(x) && all(isfinite(x(:))))
ip.addParamValue('method', 'lrs', @(x) ismember(x, {'lrs' 'cavity' 'inla'}))
ip.parse(gp, x, y, varargin{:});
z=ip.Results.z;
method = ip.Results.method;
[tn,nin] = size(x);
switch method
case 'lrs'
% Manfred Opper and Ole Winther (2000). Gaussian Processes for
% Classification: Mean-Field Algorithms. In Neural
% Computation, 12(11):2655-2684.
%
% Ole Winther et al (2012). Work in progress.
% latent posterior
[f, sigm2ii] = gpla_pred(gp, x, y, 'z', z, 'tstind', []);
deriv = gp.lik.fh.llg(gp.lik, y, f, 'latent', z);
La = 1./-gp.lik.fh.llg2(gp.lik, y, f, 'latent', z);
% really large values don't contribute, but make variance
% computation unstable. 2e15 approx 1/(2*eps)
La = min(La,2e15);
switch gp.type
case 'FULL'
% FULL GP (and compact support GP)
K = gp_trcov(gp,x);
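% LOO (cavity) variance: one can show that
% 1./diag(inv(K+diag(La))) - La removes the i'th site
% contribution from the approximate posterior marginal.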
Varft=1./diag(inv(K+diag(La)))-La;
case 'FIC'
% FIC
% Use inverse lemma for FIC low rank covariance matrix approximation
% Code adapted from gp_pred
u = gp.X_u;
m = size(u,1);
% Turn the inducing vector in the right direction
if size(u,2) ~= size(x,2)
u=u';
end
[Kv_ff, Cv_ff] = gp_trvar(gp, x); % 1 x f vector
K_fu = gp_cov(gp, x, u); % f x u
K_uu = gp_trcov(gp, u); % u x u, noiseless covariance K_uu
Luu = chol(K_uu,'lower');
B=Luu\(K_fu');
Qv_ff=sum(B.^2)';
% Add also La to the vector of diagonal elements
Lav = Cv_ff-Qv_ff + La; % 1 x f, Vector of diagonal elements
% iLaKfu = diag(inv(Lav))*K_fu = inv(La)*K_fu
iLaKfu = zeros(size(K_fu)); % f x u,
n=size(x,1);
for i=1:n
iLaKfu(i,:) = K_fu(i,:)./Lav(i); % f x u
end
A = K_uu+K_fu'*iLaKfu;
A = (A+A')./2;
L = iLaKfu/chol(A);
%Varft=1./diag(inv(K+diag(La)))-La;
Varft=1./(1./Lav - sum(L.^2,2))-La;
case {'PIC' 'PIC_BLOCK'}
% PIC
% Use inverse lemma for PIC low rank covariance matrix approximation
% Code adapted from gp_pred (here Lab is the same as La in gp_pred)
u = gp.X_u;
ind = gp.tr_index;
if size(u,2) ~= size(x,2)
% Turn the inducing vector in the right direction
u=u';
end
% Calculate some help matrices
[Kv_ff, Cv_ff] = gp_trvar(gp, x); % 1 x f vector
K_fu = gp_cov(gp, x, u); % f x u
K_uu = gp_trcov(gp, u); % u x u, noiseless covariance K_uu
Luu = chol(K_uu)';
% Evaluate the Lambda (La) for specific model
% Q_ff = K_fu*inv(K_uu)*K_fu'
% Here we need only the diag(Q_ff), which is evaluated below
B=Luu\K_fu';
iLaKfu = zeros(size(K_fu)); % f x u
for i=1:length(ind)
Qbl_ff = B(:,ind{i})'*B(:,ind{i});
[Kbl_ff, Cbl_ff] = gp_trcov(gp, x(ind{i},:));
% Add also La to the diagonal
Lab{i} = Cbl_ff - Qbl_ff + diag(La(ind{i}));
iLaKfu(ind{i},:) = Lab{i}\K_fu(ind{i},:);
end
A = K_uu+K_fu'*iLaKfu;
A = (A+A')./2; % Ensure symmetry
L = iLaKfu/chol(A);
% From this on evaluate the prediction
% See Snelson and Ghahramani (2007) for details
n=size(y,1);
iCv=zeros(n,1);
for i=1:length(ind)
iCv(ind{i},:) = diag(inv(Lab{i}));
end
%Varft=1./diag(inv(K+diag(La)))-La;
Varft=1./(iCv - sum(L.^2,2))-La;
case 'CS+FIC'
% CS+FIC
% Use inverse lemma for CS+FIC
% Code adapted from gp_pred (here Las is the same as La in gp_pred)
u = gp.X_u;
if size(u,2) ~= size(x,2)
% Turn the inducing vector in the right direction
u=u';
end
n = size(x,1);
m = size(u,1);
ncf = length(gp.cf);
% Indexes to all non-compact support and compact support covariances.
cf1 = [];
cf2 = [];
% Loop through all covariance functions
for i1 = 1:ncf
if ~isfield(gp.cf{i1},'cs')
% Non-CS covariances
cf1 = [cf1 i1];
else
% CS-covariances
cf2 = [cf2 i1];
end
end
% First evaluate needed covariance matrices
% v defines that parameter is a vector
[Kv_ff, Cv_ff] = gp_trvar(gp, x, cf1); % f x 1 vector
K_fu = gp_cov(gp, x, u, cf1); % f x u
K_uu = gp_trcov(gp, u, cf1); % u x u, noiseless covariance K_uu
K_uu = (K_uu+K_uu')./2; % ensure the symmetry of K_uu
Luu = chol(K_uu)';
% Evaluate the Lambda (La)
% Q_ff = K_fu*inv(K_uu)*K_fu'
B=Luu\(K_fu'); % u x f
Qv_ff=sum(B.^2)';
% Add also La to the vector of diagonal elements
Lav = Cv_ff-Qv_ff + La; % f x 1, Vector of diagonal elements
K_cs = gp_trcov(gp,x,cf2);
Las = sparse(1:n,1:n,Lav,n,n) + K_cs;
iLaKfu = Las\K_fu;
A = K_uu+K_fu'*iLaKfu;
A = (A+A')./2; % Ensure symmetry
L = iLaKfu/chol(A);
%Varft=1./diag(inv(K+diag(La)))-La;
Varft=1./(diag(inv(Las)) - sum(L.^2,2))-La;
otherwise
error('Unknown type of Gaussian process')
end
% check if LOO latent variances are negative
ii=find(Varft<0);
if ~isempty(ii)
warning('gpla_loopred: some LOO latent variances are negative');
Varft(ii) = gp.jitterSigma2;
end
Eft=f-Varft.*deriv;
if nargout==3
lpyt = gp.lik.fh.predy(gp.lik, Eft, Varft, y, z);
elseif nargout>3
[lpyt,Eyt,Varyt] = gp.lik.fh.predy(gp.lik, Eft, Varft, y, z);
end
case 'cavity'
% using EP equations
% latent posterior
[f, sigm2ii] = gpla_pred(gp, x, y, 'z', z, 'tstind', []);
% "site parameters"
W = -gp.lik.fh.llg2(gp.lik, y, f, 'latent', z);
deriv = gp.lik.fh.llg(gp.lik, y, f, 'latent', z);
sigm2_t = 1./W;
mu_t = f + sigm2_t.*deriv;
% "cavity parameters"
sigma2_i = 1./(1./sigm2ii-1./sigm2_t);
myy_i = sigma2_i.*(f./sigm2ii-mu_t./sigm2_t);
% check if cavity variances are negative
ii=find(sigma2_i<0);
if ~isempty(ii)
warning('gpla_loopred: some cavity variances are negative');
sigma2_i(ii) = sigm2ii(ii);
myy_i(ii) = f(ii);
end
% leave-one-out predictions
Eft=myy_i;
Varft=sigma2_i;
if nargout==3
lpyt = gp.lik.fh.predy(gp.lik, Eft, Varft, y, z);
elseif nargout>3
[lpyt,Eyt,Varyt] = gp.lik.fh.predy(gp.lik, Eft, Varft, y, z);
end
case 'inla'
% Leonhard Held and Birgit Schrödle and Håvard Rue (2010)
% Posterior and Cross-validatory Predictive Checks: A
% Comparison of MCMC and INLA. In (eds) Thomas Kneib and
% Gerhard Tutz, Statistical Modelling and Regression
% Structures, pp. 91-110. Springer.
% latent posterior
[f, sigm2ii, lp] = gpla_pred(gp, x, y, 'z', z, 'tstind', []);
Eft = zeros(tn,1);
Varft = zeros(tn,1);
lpyt = zeros(tn,1);
minf = f-6.*sqrt(sigm2ii);
maxf = f+6.*sqrt(sigm2ii);
for i=1:tn
if isempty(z)
z1 = [];
else
z1 = z(i);
end
[m0, m1, m2] = quad_moments(@(x) norm_pdf(x, f(i), sqrt(sigm2ii(i)))./llvec(gp.lik,y(i),x,z1), minf(i), maxf(i));
Eft(i) = m1;
Varft(i) = m2-Eft(i)^2;
lpyt(i) = -log(m0);
end
if nargout>3
[tmp,Eyt,Varyt] = gp.lik.fh.predy(gp.lik, Eft, Varft, y, z);
end
if sum(abs(lpyt)./abs(lp) > 5) > 0.1*tn
warning('Very bad predictive densities, gpla_loopred might not be reliable, check results!');
end
end
end
function expll = llvec(gplik, y, f, z)
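% Evaluate exp(log p(y|f)) elementwise for a vector of latent values f;
% in the 'inla' method above, dividing the posterior marginal by this
% term yields the (unnormalized) leave-one-out posterior.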
for i=1:size(f,2)
expll(i) = exp(gplik.fh.ll(gplik, y, f(i), z));
end
end
|
github
|
lcnhappe/happe-master
|
lgpdens.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/lgpdens.m
| 18,497 |
windows_1250
|
c32ced344bd076d227b9aded6cc11400
|
function [p,pq,xx] = lgpdens(x,varargin)
%LGPDENS Logistic-Gaussian Process density estimate for 1D and 2D data
%
% Description
% LGPDENS(X,OPTIONS) Compute and plot LGP density estimate. X is
% 1D or 2D point data. For 1D data plot the mean and 95% region.
% For 2D data plot the density contours.
%
% [P,PQ,XT] = LGPDENS(X,OPTIONS) Compute LGP density estimate
% and return mean density P, 2.5% and 97.5% percentiles PQ, and
% grid locations.
%
% [P,PQ,XT] = LGPDENS(X,XT,OPTIONS) Compute LGP density estimate
% in the given grid locations XT.
%
% OPTIONS is optional parameter-value pair
% gridn - optional number of grid points used in each axis direction
% default is 400 for 1D, 20 for 2D.
% range - tells the estimation range, default is
% [min(min(x),mean(x)-3*std(x)), max(max(x),mean(x)+3*std(x))]
% for 1D [XMIN XMAX]
% for 2D [X1MIN X1MAX X2MIN X2MAX]
% gpcf - optional function handle of a GPstuff covariance function
% (default is @gpcf_sexp)
% latent_method - optional 'Laplace' (default) or 'MCMC'
% int_method - optional 'mode' (default), 'CCD' or 'grid'
% if latent_method is 'MCMC' then int_method is 'MCMC'
% display - defines if messages are displayed.
% 'off' (default) displays no output
% 'on' gives some output
% 'iter' displays output at each iteration
% speedup - defines if speed-up is used.
% 'off' (default) no speed-up is used
% 'on' With SEXP or EXP covariance function in 2D case
% uses Kronecker product structure and approximates the
% full posterior with a low-rank approximation. Otherwise
% with SEXP, EXP, MATERN32 and MATERN52 covariance
% functions in 1D and 2D cases uses FFT/FFT2 matrix-vector
% multiplication speed-up in the Newton's algorithm.
% cond_dens - defines if conditional density estimate is computed.
% 'off' (default) no conditional density
% 'on' computes for 2D the conditional median density
% estimate p(x2|x1) when the matrix [x1 x2] is given as
% input.
% basis_function - defines if basis functions are used.
% 'on' (default) uses linear and quadratic basis
% functions
% 'off' no basis functions
% Copyright (c) 2011-2012 Jaakko Riihimäki and Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
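% A minimal usage sketch (hypothetical; the data vector is synthetic and
% for illustration only):
%
% x = randn(200,1);
% [p, pq, xt] = lgpdens(x, 'gridn', 200);
% plot(xt, p, 'b-', xt, pq, 'r--')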
ip=inputParser;
ip.FunctionName = 'LGPDENS';
ip.addRequired('x', @(x) isnumeric(x) && (size(x,2)==1 || size(x,2)==2));
ip.addOptional('xt',NaN, @(x) isnumeric(x) && (size(x,2)==1 || size(x,2)==2));
ip.addParamValue('gridn',[], @(x) isnumeric(x));
ip.addParamValue('range',[], @(x) isempty(x)||isreal(x)&&(length(x)==2||length(x)==4));
ip.addParamValue('gpcf',@gpcf_sexp,@(x) ischar(x) || isa(x,'function_handle'));
ip.addParamValue('latent_method','Laplace', @(x) ismember(x,{'EP' 'Laplace' 'MCMC'}))
%ip.addParamValue('latent_method','Laplace', @(x) ismember(x,{'EP' 'Laplace'}))
ip.addParamValue('int_method','mode', @(x) ismember(x,{'mode' 'CCD', 'grid'}))
ip.addParamValue('normalize',false, @islogical);
ip.addParamValue('display', 'off', @(x) islogical(x) || ...
ismember(x,{'on' 'off' 'iter'}))
ip.addParamValue('speedup',[], @(x) ismember(x,{'on' 'off'}));
ip.addParamValue('cond_dens',[], @(x) ismember(x,{'on' 'off'}));
ip.addParamValue('basis_function',[], @(x) ismember(x,{'on' 'off'}));
ip.parse(x,varargin{:});
x=ip.Results.x;
xt=ip.Results.xt;
gridn=ip.Results.gridn;
xrange=ip.Results.range;
gpcf=ip.Results.gpcf;
latent_method=ip.Results.latent_method;
int_method=ip.Results.int_method;
normalize=ip.Results.normalize;
display=ip.Results.display;
speedup=ip.Results.speedup;
cond_dens=ip.Results.cond_dens;
basis_function=ip.Results.basis_function;
[n,m]=size(x);
switch m
case 1 % 1D
if ~isempty(cond_dens) && strcmpi(cond_dens,'on')
error('LGPDENS: the input x must be 2D if cond_dens option is ''on''.')
end
% Parameters for a grid
if isempty(gridn)
% number of points
gridn=400;
end
xmin=min(x);xmax=max(x);
if ~isempty(xrange)
% extend given range to include min(x) and max(x)
xmin=min(xmin,xrange(1));
xmax=max(xmax,xrange(2));
elseif ~isnan(xt)
% use xt to define range and
% extend it to include min(x) and max(x)
xmin=min(xmin,min(xt));
xmax=max(xmax,max(xt));
else
xmin=min(xmin,mean(x)-3*std(x));
xmax=max(xmax,mean(x)+3*std(x));
end
% Discretize the data
if isnan(xt)
xx=linspace(xmin,xmax,gridn)';
else
xx=xt;
gridn=numel(xt);
end
xd=xx(2)-xx(1);
yy=hist(x,xx)';
% normalise so that the same prior is suitable for different scales
xxn=(xx-mean(xx))./std(xx);
%[Ef,Covf]=gpsmooth(xxn,yy,[xxn; xtn],gpcf,latent_method,int_method);
[Ef,Covf]=gpsmooth(xxn,yy,xxn,gpcf,latent_method,int_method,display,speedup,gridn,cond_dens,basis_function);
if strcmpi(latent_method,'MCMC')
PJR=zeros(size(Ef,1),size(Covf,3));
for i1=1:size(Covf,3)
qr=bsxfun(@plus,randn(1000,size(Ef,1))*chol(Covf(:,:,i1),'upper'),Ef(:,i1)');
qjr=exp(qr)';
pjr=bsxfun(@rdivide,qjr,sum(qjr));
pjr=pjr./xd;
PJR(:,i1)=mean(pjr,2);
end
pjr=PJR;
else
qr=bsxfun(@plus,randn(1000,size(Ef,1))*chol(Covf,'upper'),Ef');
qjr=exp(qr)';
pjr=bsxfun(@rdivide,qjr,sum(qjr(1:gridn,:)));
pjr=pjr./xd;
end
pp=mean(pjr')';
ppq=prctile(pjr',[2.5 97.5])';
if nargout<1
% no output, do the plot thing
newplot
hp=patch([xx; xx(end:-1:1)],[ppq(:,1); ppq(end:-1:1,2)],[.8 .8 .8]);
set(hp,'edgecolor',[.8 .8 .8])
xlim([xmin xmax])
line(xx,pp,'linewidth',2);
else
p=pp;
pq=ppq;
end
case 2 % 2D
if ~isempty(cond_dens) && strcmpi(cond_dens,'on') && ~isempty(speedup) && strcmp(speedup, 'on')
warning('No speed-up option available with the cond_dens option. Using full covariance instead.')
speedup='off';
end
% Find unique points
[xu,I,J]=unique(x,'rows');
% and count number of repeated x's
counts=crosstab(J);
nu=length(xu);
% Parameters for a grid
if isempty(gridn)
% number of points in each direction
gridn=20;
end
if numel(gridn)==1
gridn(2)=gridn(1);
end
x1min=min(x(:,1));x1max=max(x(:,1));
x2min=min(x(:,2));x2max=max(x(:,2));
if ~isempty(xrange)
% extend given range to include min(x) and max(x)
x1min=min(x1min,xrange(1));
x1max=max(x1max,xrange(2));
x2min=min(x2min,xrange(3));
x2max=max(x2max,xrange(4));
elseif ~isnan(xt)
% use xt to define range and
% extend it to include min(x) and max(x)
x1min=min(x1min,min(xt(:,1)));
x1max=max(x1max,max(xt(:,1)));
x2min=min(x2min,min(xt(:,2)));
x2max=max(x2max,max(xt(:,2)));
else
x1min=min(x1min,mean(x(:,1))-3*std(x(:,1)));
x1max=max(x1max,mean(x(:,1))+3*std(x(:,1)));
x2min=min(x2min,mean(x(:,2))-3*std(x(:,2)));
x2max=max(x2max,mean(x(:,2))+3*std(x(:,2)));
end
% Discretize the data
if isnan(xt)
% Form regular grid to discretize the data
zz1=linspace(x1min,x1max,gridn(1))';
zz2=linspace(x2min,x2max,gridn(2))';
[z1,z2]=meshgrid(zz1,zz2);
z=[z1(:),z2(:)];
nz=length(z);
xx=z;
if ~isempty(cond_dens) && strcmpi(cond_dens,'on')
% use ntx2 times more grid points for predictions
if gridn(2)>10
ntx2=3;
else
ntx2=10;
end
zzt1=linspace(x1min,x1max,gridn(1))';
zzt2=linspace(x2min,x2max,gridn(2)*ntx2)';
[zt1,zt2]=meshgrid(zzt1,zzt2);
zt=[zt1(:),zt2(:)];
%nzt=length(zt);
xt=zt;
end
else
xx=xt;
gridn=[length(unique(xx(:,1))) length(unique(xx(:,2)))];
end
yy=zeros(nz,1);
zi=interp2(z1,z2,reshape(1:nz,gridn(2),gridn(1)),xu(:,1),xu(:,2),'nearest');
for i1=1:nu
yy(zi(i1),1)=yy(zi(i1),1)+counts(i1);
end
%ye=ones(nz,1)./nz.*n;
unx1=unique(xx(:,1));
unx2=unique(xx(:,2));
xd=(unx1(2)-unx1(1))*(unx2(2)-unx2(1));
% normalise so that the same prior is suitable for different scales
xxn=bsxfun(@rdivide,bsxfun(@minus,xx,mean(xx,1)),std(xx,1));
if ~isempty(cond_dens) && strcmpi(cond_dens,'on')
xxtn=bsxfun(@rdivide,bsxfun(@minus,xt,mean(xx,1)),std(xx,1));
end
% [Ef,Covf]=gpsmooth(xxn,yy,[xxn; xtn],gpcf,latent_method,int_method);
if ~isempty(cond_dens) && strcmpi(cond_dens,'on')
[Ef,Covf]=gpsmooth(xxn,yy,xxtn,gpcf,latent_method,int_method,display,speedup,gridn,cond_dens,basis_function);
else
[Ef,Covf]=gpsmooth(xxn,yy,xxn,gpcf,latent_method,int_method,display,speedup,gridn,cond_dens,basis_function);
end
if strcmpi(latent_method,'MCMC')
if ~isempty(cond_dens) && strcmpi(cond_dens,'on')
unx2=(unique(xt(:,2)));
xd2=(unx2(2)-unx2(1));
PJR=zeros(size(Ef,1),size(Covf,3));
for i1=1:size(Covf,3)
qr=bsxfun(@plus,randn(1000,size(Ef,1))*chol(Covf(:,:,i1),'upper'),Ef(:,i1)');
qjr=exp(qr)';
%pjr=bsxfun(@rdivide,qjr,sum(qjr));
pjr=qjr;
pjr2=reshape(pjr,[gridn(2)*ntx2 gridn(1) size(pjr,2)]);
for j1=1:size(pjr2,3)
pjr2(:,:,j1)=bsxfun(@rdivide,pjr2(:,:,j1),sum(pjr2(:,:,j1)))./xd2;
end
pjr=reshape(pjr2,[gridn(2)*ntx2*gridn(1) size(pjr,2)]);
PJR(:,i1)=mean(pjr,2);
end
pjr=PJR;
%qp=median(pjr2,3);
%qp=bsxfun(@rdivide,qp,sum(qp,1));
else
PJR=zeros(size(Ef,1),size(Covf,3));
for i1=1:size(Covf,3)
qr=bsxfun(@plus,randn(1000,size(Ef,1))*chol(Covf(:,:,i1),'upper'),Ef(:,i1)');
qjr=exp(qr)';
pjr=bsxfun(@rdivide,qjr,sum(qjr));
pjr=pjr./xd;
PJR(:,i1)=mean(pjr,2);
end
pjr=PJR;
%pjr=mean(PJR,2);
end
else
if strcmpi(speedup,'on') && length(Covf)==2
qr1=bsxfun(@plus,bsxfun(@times,randn(1000,size(Ef,1)),sqrt(Covf{1})'),Ef');
qr2=randn(1000,size(Covf{2},1))*Covf{2};
qr=qr1+qr2;
else
qr=bsxfun(@plus,randn(1000,size(Ef,1))*chol(Covf,'upper'),Ef');
end
qjr=exp(qr)';
if ~isempty(cond_dens) && strcmpi(cond_dens,'on')
pjr=zeros(size(qjr));
unx2=unique(xt(:,2));
xd2=(unx2(2)-unx2(1));
for k1=1:size(qjr,2)
qjrtmp=reshape(qjr(:,k1),[gridn(2)*ntx2 gridn(1)]);
qjrtmp=bsxfun(@rdivide,qjrtmp,sum(qjrtmp));
qjrtmp=qjrtmp./xd2;
pjr(:,k1)=qjrtmp(:);
end
else
pjr=bsxfun(@rdivide,qjr,sum(qjr));
pjr=pjr./xd;
end
end
%if ~isempty(cond_dens) && strcmpi(cond_dens,'on')
% pp=median(pjr')';
%else
pp=mean(pjr')';
%end
ppq=prctile(pjr',[2.5 97.5])';
if nargout<1
% no output, do the plot thing
if ~isempty(cond_dens) && strcmpi(cond_dens,'on')
pjr2=reshape(pjr,[gridn(2)*ntx2 gridn(1) size(pjr,2)]);
%qp=median(pjr2,3);
qp=mean(pjr2,3);
qp=bsxfun(@rdivide,qp,sum(qp,1));
qpc=cumsum(qp,1);
PL=[.05 .1 .2 .5 .8 .9 .95];
for i1=1:gridn(1)
pc=qpc(:,i1);
for pli=1:numel(PL)
qi(pli)=find(pc>PL(pli),1);
end
ql(:,i1)=unx2(qi);
end
hold on
h1=plot(zz1,ql(4,:)','-', 'color', [0 0 255]./255,'linewidth',2);
h2=plot(zz1,ql([3 5],:)','--', 'color', [0 127 0]./255,'linewidth',1);
h3=plot(zz1,ql([2 6],:)','-.', 'color', [255 0 0]./255,'linewidth',1);
h4=plot(zz1,ql([1 7],:)',':', 'color', [0 0 0]./255,'linewidth',1);
hold off
legend([h1 h2(1) h3(1) h4(1)],'.5','.2/.8','.1/.9','.05/.95')
%plot(zz1,ql','linewidth',1)
%legend('.05','.1','.2','.5','.8','.9','.95')
xlim([x1min x1max])
ylim([x2min x2max])
else
G=zeros(size(z1));
G(:)=prctile(pjr',50);
%contour(z1,z2,G);
pp=G(:);
p1=pp./sum(pp);
pu=sort(p1,'ascend');
pc=cumsum(pu);
PL=[.05 .1 .2 .5 .8 .9 .95];
qi=[];
for pli=1:numel(PL)
qi(pli)=find(pc>PL(pli),1);
end
pl=pu(qi).*sum(pp);
contour(z1,z2,G,pl);
%hold on, plot(x(:,1),x(:,2),'kx')
%colorbar
end
else
p=pp;
pq=ppq;
end
otherwise
error('X has to be Nx1 or Nx2')
end
end
function [Ef,Covf] = gpsmooth(xx,yy,xxt,gpcf,latent_method,int_method,display,speedup,gridn,cond_dens,basis_function)
% Make inference with log Gaussian process and EP or Laplace approximation
% gp_mc and gp_ia still uses numeric display option
if strcmp(display,'off')
displ=0;
else
displ=1;
end
nin = size(xx,2);
% init gp
if strfind(func2str(gpcf),'ppcs')
% ppcs covariance functions still have the nin parameter...
gpcf1 = gpcf('nin',nin);
else
gpcf1 = gpcf();
end
% weakly informative prior
pm = prior_logunif();
pl = prior_t('s2', 10^2, 'nu', 4);
pa = prior_t('s2', 10^2, 'nu', 4);
%pm = prior_sqrtt('s2', 10^2, 'nu', 4);
%pl = prior_t('s2', 1^2, 'nu', 4);
%pa = prior_t('s2', 10^2, 'nu', 4);
% different covariance functions have different parameters
if isfield(gpcf1,'magnSigma2')
gpcf1 = gpcf(gpcf1, 'magnSigma2', .5, 'magnSigma2_prior', pm);
end
if isfield(gpcf1,'lengthScale')
gpcf1 = gpcf(gpcf1, 'lengthScale', .5, 'lengthScale_prior', pl);
end
if isfield(gpcf1,'alpha')
gpcf1 = gpcf(gpcf1, 'alpha', 20, 'alpha_prior', pa);
end
if isfield(gpcf1,'biasSigma2')
gpcf1 = gpcf(gpcf1, 'biasSigma2', 10, 'weightSigma2', 10,'biasSigma2_prior',prior_logunif(),'weightSigma2_prior',prior_logunif());
end
if ~isempty(cond_dens) && strcmp(cond_dens, 'on')
lik=lik_lgpc;
lik.gridn=gridn;
else
lik=lik_lgp;
end
% Create the GP structure
if ~isempty(basis_function) && strcmp(basis_function, 'off')
gp = gp_set('lik', lik, 'cf', {gpcf1}, 'jitterSigma2', 1e-4);
else
%gpmfco = gpmf_constant('prior_mean',0,'prior_cov',100);
gpmflin = gpmf_linear('prior_mean',0,'prior_cov',100);
gpmfsq = gpmf_squared('prior_mean',0,'prior_cov',100);
gp = gp_set('lik', lik, 'cf', {gpcf1}, 'jitterSigma2', 1e-4, 'meanf', {gpmflin,gpmfsq});
end
% First optimise hyperparameters using Laplace approximation
gp = gp_set(gp, 'latent_method', 'Laplace');
opt=optimset('TolFun',1e-2,'TolX',1e-3,'Display',display);
if ~isempty(speedup) && strcmp(speedup, 'on')
gp.latent_opt.gridn=gridn;
gp.latent_opt.pcg_tol=1e-12;
if size(xx,2)==2 && (strcmp(gp.cf{1}.type,'gpcf_sexp') || strcmp(gp.cf{1}.type,'gpcf_exp'))
% exclude eigenvalues smaller than 1e-6 or take 50%
% eigenvalues at most
gp.latent_opt.eig_tol=1e-6;
gp.latent_opt.eig_prct=0.5;
gp.latent_opt.kron=1;
opt.LargeScale='off';
if norm(xx-xxt)~=0
warning('In the low-rank approximation the grid locations xx are used instead of xxt in predictions.')
xxt=xx;
end
elseif strcmp(gp.cf{1}.type,'gpcf_sexp') || strcmp(gp.cf{1}.type,'gpcf_exp') || strcmp(gp.cf{1}.type,'gpcf_matern32') || strcmp(gp.cf{1}.type,'gpcf_matern52')
gp.latent_opt.fft=1;
end
end
if exist('fminunc')
gp=gp_optim(gp,xx,yy,'opt',opt, 'optimf', @fminunc);
else
gp=gp_optim(gp,xx,yy,'opt',opt, 'optimf', @fminlbfgs);
end
%gradcheck(gp_pak(gp), @gpla_nd_e, @gpla_nd_g, gp, xx, yy);
if strcmpi(latent_method,'MCMC')
gp = gp_set(gp, 'latent_method', 'MCMC');
%if ~isempty(cond_dens) && strcmpi(cond_dens,'on')
if size(xx,2)==2
% add more jitter for 2D cases with MCMC
gp = gp_set(gp, 'jitterSigma2', 1e-2);
%error('LGPDENS: MCMC is not implemented if cond_dens option is ''on''.')
end
% Here we use two stage sampling to get faster convergence
hmc_opt=hmc2_opt;
hmc_opt.steps=10;
hmc_opt.stepadj=0.05;
hmc_opt.nsamples=1;
latent_opt.display=0;
latent_opt.repeat = 20;
latent_opt.sample_latent_scale = 0.5;
hmc2('state', sum(100*clock))
% The first stage sampling
[r,g,opt]=gp_mc(gp, xx, yy, 'hmc_opt', hmc_opt, 'latent_opt', latent_opt, 'nsamples', 1, 'repeat', 15, 'display', displ);
%[r,g,opt]=gp_mc(gp, xx, yy, 'latent_opt', latent_opt, 'nsamples', 1, 'repeat', 15);
% re-set some of the sampling options
hmc_opt.steps=4;
hmc_opt.stepadj=0.05;
%latent_opt.repeat = 5;
hmc2('state', sum(100*clock));
% The second stage sampling
% Notice that previous record r is given as an argument
[rgp,g,opt]=gp_mc(gp, xx, yy, 'hmc_opt', hmc_opt, 'nsamples', 500,'latent_opt', latent_opt, 'record', r, 'display', displ);
% Remove burn-in
rgp=thin(rgp,102,4);
[Ef, Covf] = gpmc_jpreds(rgp, xx, yy, xxt);
else
if strcmpi(int_method,'mode')
% Just make prediction for the test points
[Ef,Covf] = gp_pred(gp, xx, yy, xxt);
else
% integrate over the hyperparameters
%[~, ~, ~, Ef, Covf] = gp_ia(opt, gp, xx, yy, xt, param);
gpia=gp_ia(gp, xx, yy, 'int_method', int_method, 'display', displ);
[Ef, Covf]=gpia_jpred(gpia, xx, yy, xxt);
end
end
end
|
github
|
lcnhappe/happe-master
|
gp_waic.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gp_waic.m
| 22,950 |
utf_8
|
224efafdd64d00db4c610107413a075d
|
function waic = gp_waic(gp, x, y, varargin)
%GP_WAIC The widely applicable information criterion (WAIC) for GP model
%
% Description
% WAIC = GP_WAIC(GP, X, Y) evaluates WAIC defined by
% Watanabe(2010) given a Gaussian process model GP, training
% inputs X and training outputs Y. Instead of Bayes loss we
% compute the Bayes utility which is just the negative of loss
% used by Watanabe.
%
% WAIC is evaluated as follows when using the variance form
%
% WAIC(n) = BUt(n) - V/n
%
% where BUt(n) is Bayesian training utility, V is functional variance
% and n is the number of training inputs.
%
% BUt = mean(log(p(yt | xt, x, y)))
% V = sum(E[log(p(y|th))^2] - E[log(p(y|th))]^2)
%
% When using the Gibbs training loss, WAIC is evaluated as follows
%
% WAIC(n) = BUt(n) - 2*(BUt(n) - GUt(n))
%
% where BUt(n) is as above and GUt is Gibbs training utility
%
% GUt(n) = E_th[mean(log(p(y|th)))].
%
% GP can be a Gaussian process structure, a record structure
% from GP_MC or an array of GPs from GP_IA.
%
% OPTIONS is optional parameter-value pair
% method - Method to evaluate waic, 'V' = Variance method, 'G' = Gibbs
% training utility method (default = 'V')
% form - Return form, 'mean' returns the mean value and 'all'
% returns the values for all data points (default = 'mean')
% z - optional observed quantity in triplet (x_i,y_i,z_i)
% Some likelihoods may use this. For example, in case of
% Poisson likelihood we have z_i=E_i, that is, expected value
% for ith case.
%
% See also
% GP_DIC, DEMO_MODELASSESMENT1, DEMO_MODELASSESMENT2
%
% References
%
% Watanabe(2010). Equations of states in singular statistical
% estimation. Neural Networks 23 (2010), 20-34
%
% Watanabe(2010). Asymptotic Equivalence of Bayes Cross Validation and
% Widely Applicable Information Criterion in Singular Learning Theory.
% Journal of Machine Learning Research 11 (2010), 3571-3594.
%
%
% Copyright (c) 2011-2013 Ville Tolvanen
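% A minimal usage sketch (hypothetical; assumes x, y are training data
% and GPstuff is on the path):
%
% gp = gp_set('lik', lik_gaussian(), 'cf', {gpcf_sexp()});
% gp = gp_optim(gp, x, y);
% waic = gp_waic(gp, x, y, 'method', 'V')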
ip=inputParser;
ip.FunctionName = 'GP_WAIC';
ip.addRequired('gp',@(x) isstruct(x) || iscell(x));
ip.addRequired('x', @(x) ~isempty(x) && isreal(x) && all(isfinite(x(:))))
ip.addRequired('y', @(x) ~isempty(x) && isreal(x) && all(isfinite(x(:))))
ip.addParamValue('method', 'V', @(x) ismember(x,{'V' 'G'}))
ip.addParamValue('form', 'mean', @(x) ismember(x,{'mean','all'}))
ip.addParamValue('z', [], @(x) isreal(x) && all(isfinite(x(:))))
ip.parse(gp, x, y, varargin{:});
method=ip.Results.method;
form=ip.Results.form;
% pass these forward
options=struct();
z = ip.Results.z;
if ~isempty(ip.Results.z)
options.zt=ip.Results.z;
options.z=ip.Results.z;
end
[tn, nin] = size(x);
% ====================================================
if isstruct(gp) % Single GP or MCMC solution
switch gp.type
case {'FULL' 'VAR' 'DTC' 'SOR'}
tstind = [];
case {'FIC' 'CS+FIC'}
tstind = 1:tn;
case 'PIC'
tstind = gp.tr_index;
end
if isfield(gp, 'etr')
% MCMC solution
[Ef, Varf, BUt] = gpmc_preds(gp,x,y, x, 'yt', y, 'tstind', tstind, options);
BUt=log(mean(exp(BUt),2));
GUt = zeros(tn,1);
Elog = zeros(tn,1);
Elog2 = zeros(tn,1);
nsamples = length(gp.edata);
if strcmp(gp.type, 'PIC')
tr_index = gp.tr_index;
gp = rmfield(gp, 'tr_index');
else
tr_index = [];
end
%Ef = zeros(tn, nsamples);
%Varf = zeros(tn, nsamples);
sigma2 = zeros(tn, nsamples);
for j = 1:nsamples
Gp = take_nth(gp,j);
if strcmp(gp.type, 'FIC') || strcmp(gp.type, 'PIC') || strcmp(gp.type, 'CS+FIC') || strcmp(gp.type, 'VAR') || strcmp(gp.type, 'DTC') || strcmp(gp.type, 'SOR')
Gp.X_u = reshape(Gp.X_u,length(Gp.X_u)/nin,nin);
end
Gp.tr_index = tr_index;
gp_array{j} = Gp;
%[Ef(:,j), Varf(:,j)] = gp_pred(Gp, x, y, x, 'yt', y, 'tstind', tstind, options);
if isfield(gp.lik.fh,'trcov')
sigma2(:,j) = repmat(Gp.lik.sigma2,1,tn);
end
end
if isequal(method,'V')
% Evaluate WAIC using the Variance method
if isfield(gp.lik.fh,'trcov')
% Gaussian likelihood
for i=1:tn
% fmin = mean(Ef(i,:) - 9*sqrt(Varf(i,:)));
% fmax = mean(Ef(i,:) + 9*sqrt(Varf(i,:)));
% Elog(i) = quadgk(@(f) mean(multi_npdf(f,Ef(i,:),(Varf(i,:))) ...
% .*bsxfun(@minus,-bsxfun(@rdivide,(repmat((y(i)-f),nsamples,1)).^2,(2.*sigma2(i,:))'), 0.5*log(2*pi*sigma2(i,:))').^2), fmin, fmax);
% Elog2(i) = quadgk(@(f) mean(multi_npdf(f,Ef(i,:),(Varf(i,:))) ...
% .*bsxfun(@minus,-bsxfun(@rdivide,(repmat((y(i)-f),nsamples,1)).^2,(2.*sigma2(i,:))'), 0.5*log(2*pi*sigma2(i,:))')), fmin, fmax);
%
m = Ef(i,:);
s2 = Varf(i,:);
m0 = 1; m1 = m; m2 = m.^2 + s2; m3 = m.*(m.^2+3*s2);
m4 = m.^4+6.*m.^2.*s2+3*s2.^2;
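% Closed-form expectations of the Gaussian log density over the
% posterior of f using the raw moments m0..m4: Elog2 holds
% E[log N(y|f,s2)] and Elog holds E[(log N(y|f,s2))^2].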
Elog2(i) = mean((-0.5.*log(2.*pi.*sigma2(i,:)) - y(i).^2./(2.*sigma2(i,:))).*m0 - 1./(2.*sigma2(i,:)) .* m2 + y(i)./sigma2(i,:) .* m1);
Elog(i) = mean((1/4 .* m4 - y(i) .* m3 + (3.*y(i).^2./2+0.5.*log(2.*pi.*sigma2(i,:)).*sigma2(i,:)) .* m2 ...
- (y(i).^3 + y(i).*log(2.*pi.*sigma2(i,:)).*sigma2(i,:)) .* m1 + (y(i).^4./4 + 0.5.*y(i).^2.*log(2.*pi.*sigma2(i,:)).*sigma2(i,:) ...
+ 0.25.*log(2.*pi.*sigma2(i,:)).^2.*sigma2(i,:).^2) .* m0) ./ sigma2(i,:).^2);
end
Elog2 = Elog2.^2;
Vn = (Elog-Elog2);
if strcmp(form, 'mean')
Vn = mean(Vn);
BUt = mean(BUt);
end
waic = BUt - Vn;
else
% non-Gaussian likelihood
for i=1:tn
if ~isempty(z)
z1 = z(i);
else
z1 = [];
end
if ~isequal(gp.lik.type, 'Coxph')
fmin = mean(Ef(i,:) - 9*sqrt(Varf(i,:)));
fmax = mean(Ef(i,:) + 9*sqrt(Varf(i,:)));
Elog(i) = quadgk(@(f) mean(multi_npdf(f,Ef(i,:),(Varf(i,:))) ...
.*llvec(gp_array, y(i), f, z1).^2), fmin, fmax);
Elog2(i) = quadgk(@(f) mean(multi_npdf(f,Ef(i,:),(Varf(i,:))) ...
.*llvec(gp_array, y(i), f, z1)), fmin, fmax);
else
ntime = size(gp.lik.xtime,1);
for i2=1:nsamples
% Use MC to integrate over latents
ns = 10000;
Sigma_tmp = diag(Varf([1:ntime ntime+i],i2));
f = mvnrnd(Ef([1:ntime ntime+i],i2), Sigma_tmp, ns);
tmp2(i2) = 1/ns * sum(llvec(gp_array{i2}, y(i,:), f', z1));
tmp(i2) = 1/ns * sum((llvec(gp_array{i2}, y(i,:), f', z1)).^2);
end
Elog2(i)=mean(tmp2);
Elog(i)=mean(tmp);
end
end
Elog2 = Elog2.^2;
Vn = (Elog-Elog2);
if strcmp(form, 'mean')
Vn = mean(Vn);
BUt = mean(BUt);
end
waic = BUt - Vn;
end
else
% Evaluate WAIC using the expected value form via Gibbs training
% loss
if isfield(gp.lik.fh,'trcov')
% Gaussian likelihood
for i=1:tn
fmin = mean(Ef(i,:) - 9*sqrt(Varf(i,:)));
fmax = mean(Ef(i,:) + 9*sqrt(Varf(i,:)));
GUt(i) = quadgk(@(f) mean(multi_npdf(f,Ef(i,:),(Varf(i,:))) ...
.*bsxfun(@minus,-bsxfun(@rdivide,(repmat((y(i)-f),nsamples,1)).^2,(2.*sigma2(i,:))'), 0.5*log(2*pi*sigma2(i,:))')), fmin, fmax);
end
if strcmp(form, 'mean')
GUt = mean(GUt);
BUt = mean(BUt);
end
waic = BUt-2*(BUt-GUt);
else
% non-Gaussian likelihood
for i=1:tn
if ~isempty(z)
z1 = z(i);
else
z1 = [];
end
fmin = mean(Ef(i,:) - 9*sqrt(Varf(i,:)));
fmax = mean(Ef(i,:) + 9*sqrt(Varf(i,:)));
GUt(i) = quadgk(@(f) mean(multi_npdf(f,Ef(i,:),(Varf(i,:))) ...
.*llvec(gp_array, y(i), f, z1)), fmin, fmax);
end
if strcmp(form, 'mean')
GUt = mean(GUt);
BUt = mean(BUt);
end
waic = BUt-2*(BUt-GUt);
end
end
else
% A single GP solution
[Ef, Varf, BUt] = gp_pred(gp, x, y, x, 'yt', y, 'tstind', tstind, options);
GUt = zeros(tn,1);
Elog = zeros(tn,1);
Elog2 = zeros(tn,1);
if isequal(method,'V')
% Estimate WAIC with variance form
if isfield(gp.lik.fh,'trcov')
% Gaussian likelihood
sigma2 = gp.lik.sigma2;
for i=1:tn
% Analytical moments for Gaussian distribution
m0 = 1; m1 = Ef(i); m2 = Ef(i)^2 + Varf(i); m3 = Ef(i)*(Ef(i)^2+3*Varf(i));
m4 = Ef(i)^4+6*Ef(i)^2*Varf(i)+3*Varf(i)^2;
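% As in the MCMC branch above: Elog2(i) = E[log N(y|f,s2)] and
% Elog(i) = E[(log N(y|f,s2))^2], evaluated in closed form from
% the Gaussian moments.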
Elog2(i) = (-0.5*log(2*pi*sigma2) - y(i).^2./(2.*sigma2))*m0 - 1./(2.*sigma2) * m2 + y(i)./sigma2 * m1;
Elog(i) = (1/4 * m4 - y(i) * m3 + (3*y(i).^2./2+0.5*log(2*pi*sigma2).*sigma2) * m2 ...
- (y(i).^3 + y(i).*log(2*pi*sigma2).*sigma2) * m1 + (y(i).^4/4 + 0.5*y(i).^2*log(2*pi*sigma2).*sigma2 ...
+ 0.25*log(2*pi*sigma2).^2.*sigma2.^2) * m0) ./ sigma2.^2;
end
Elog2 = Elog2.^2;
Vn = Elog-Elog2;
if strcmp(form,'mean')
BUt = mean(BUt);
Vn = mean(Vn);
end
waic = BUt - Vn;
else
% Non-Gaussian likelihood
for i=1:tn
if ~isempty(z)
z1 = z(i);
else
z1 = [];
end
if ~isequal(gp.lik.type, 'Coxph')
fmin = Ef(i)-9*sqrt(Varf(i));
fmax = Ef(i)+9*sqrt(Varf(i));
Elog(i) = quadgk(@(f) norm_pdf(f, Ef(i), sqrt(Varf(i))).*llvec(gp, y(i), f, z1).^2 ,...
fmin, fmax);
Elog2(i) = quadgk(@(f) norm_pdf(f, Ef(i), sqrt(Varf(i))).*llvec(gp, y(i), f, z1) ,...
fmin, fmax);
else
% Use MC to integrate over latents
ntime = size(gp.lik.xtime,1);
ns = 10000;
Sigma_tmp = Varf([1:ntime ntime+i], [1:ntime ntime+i]);
Sigma_tmp = (Sigma_tmp + Sigma_tmp') ./ 2;
f = mvnrnd(Ef([1:ntime ntime+i]), Sigma_tmp, ns);
Elog2(i) = 1/ns * sum(llvec(gp, y(i,:), f', z1));
Elog(i) = 1/ns * sum((llvec(gp, y(i,:), f', z1)).^2);
end
end
Elog2 = Elog2.^2;
Vn = Elog-Elog2;
if strcmp(form, 'mean')
Vn = mean(Vn);
BUt = mean(BUt);
end
waic = BUt - Vn;
end
else
% WAIC using the expected value form via Gibbs training loss GUt
if isfield(gp.lik.fh,'trcov')
% Gaussian likelihood
sigma2 = gp.lik.sigma2;
for i=1:tn
if Varf(i)<eps
GUt(i)=(-0.5*log(2*pi*sigma2)- (y(i) - Ef(i)).^2/(2.*sigma2));
else
% GUt(i) = quadgk(@(f) norm_pdf(f,Ef(i),sqrt(Varf(i))).*(-0.5*log(2*pi*sigma2)- (y(i) - f).^2/(2.*sigma2)), fmin, fmax);
m0 = 1; m1 = Ef(i); m2 = Ef(i)^2 + Varf(i);
GUt(i) = (-0.5*log(2*pi*sigma2) - y(i).^2./(2.*sigma2))*m0 - 1./(2.*sigma2) * m2 + y(i)./sigma2 * m1;
end
end
if strcmp(form,'mean')
GUt = mean(GUt);
BUt = mean(BUt);
end
waic = BUt-2*(BUt-GUt);
else
% Non-Gaussian likelihood
for i=1:tn
if ~isempty(z)
z1 = z(i);
else
z1 = [];
end
if ~isequal(gp.lik.type, 'Coxph')
fmin = Ef(i)-9*sqrt(Varf(i));
fmax = Ef(i)+9*sqrt(Varf(i));
GUt(i) = quadgk(@(f) norm_pdf(f, Ef(i), sqrt(Varf(i))).*llvec(gp, y(i), f, z1) ,...
fmin, fmax);
else
% If likelihood coxph use mc to integrate over latents
ntime = size(gp.lik.xtime,1);
ns = 10000;
Sigma_tmp = Varf([1:ntime ntime+i], [1:ntime ntime+i]);
Sigma_tmp = (Sigma_tmp + Sigma_tmp') ./ 2;
f = mvnrnd(Ef([1:ntime ntime+i]), Sigma_tmp, ns);
GUt(i) = 1/ns * sum(llvec(gp, y(i), f', z1));
end
end
if strcmp(form,'mean')
GUt = mean(GUt);
BUt = mean(BUt);
end
waic = BUt-2*(BUt-GUt);
end
end
end
elseif iscell(gp)
% gp_ia solution
switch gp{1}.type
case {'FULL' 'VAR' 'DTC' 'SOR'}
tstind = [];
case {'FIC' 'CS+FIC'}
tstind = 1:tn;
case 'PIC'
tstind = gp{1}.tr_index;
end
[tmp, tmp, BUt] = gp_pred(gp,x,y, x, 'yt', y, 'tstind', tstind, options);
GUt = zeros(tn,1);
Elog = zeros(tn,1);
Elog2 = zeros(tn,1);
nsamples = length(gp);
for j = 1:nsamples
Gp = gp{j};
weight(j) = Gp.ia_weight;
w(j,:) = gp_pak(Gp);
[Ef(:,j), Varf(:,j)] = gp_pred(Gp, x, y, x, 'yt', y, 'tstind', tstind, options);
if isfield(Gp.lik.fh,'trcov')
sigma2(:,j) = repmat(Gp.lik.sigma2,1,tn);
end
end
if isequal(method,'V')
% Evaluate WAIC using the variance form
if isfield(gp{1}.lik.fh,'trcov')
% Gaussian likelihood
for i=1:tn
fmin = sum(weight.*Ef(i,:) - 9*weight.*sqrt(Varf(i,:)));
fmax = sum(weight.*Ef(i,:) + 9*weight.*sqrt(Varf(i,:)));
Elog(i) = quadgk(@(f) sum(bsxfun(@times, multi_npdf(f,Ef(i,:),(Varf(i,:))),weight') ...
.*bsxfun(@minus,-bsxfun(@rdivide,(repmat((y(i)-f),nsamples,1)).^2,(2.*sigma2(i,:))'), 0.5*log(2*pi*sigma2(i,:))').^2), fmin, fmax);
Elog2(i) = quadgk(@(f) sum(bsxfun(@times, multi_npdf(f,Ef(i,:),(Varf(i,:))),weight') ...
.*bsxfun(@minus,-bsxfun(@rdivide,(repmat((y(i)-f),nsamples,1)).^2,(2.*sigma2(i,:))'), 0.5*log(2*pi*sigma2(i,:))')), fmin, fmax);
end
Elog2 = Elog2.^2;
Vn = (Elog-Elog2);
if strcmp(form, 'mean')
Vn = mean(Vn);
BUt = mean(BUt);
end
waic = BUt - Vn;
else
% non-Gaussian likelihood
for i=1:tn
if ~isempty(z)
z1 = z(i);
else
z1 = [];
end
fmin = sum(weight.*Ef(i,:) - 9*weight.*sqrt(Varf(i,:)));
fmax = sum(weight.*Ef(i,:) + 9*weight.*sqrt(Varf(i,:)));
Elog(i) = quadgk(@(f) sum(bsxfun(@times, multi_npdf(f,Ef(i,:),(Varf(i,:))),weight') ...
.*llvec(gp, y(i), f, z1).^2), fmin, fmax);
Elog2(i) = quadgk(@(f) sum(bsxfun(@times, multi_npdf(f,Ef(i,:),(Varf(i,:))),weight') ...
.*llvec(gp, y(i), f, z1)), fmin, fmax);
end
Elog2 = Elog2.^2;
Vn = (Elog-Elog2);
if strcmp(form, 'mean')
Vn = mean(Vn);
BUt = mean(BUt);
end
waic = BUt - Vn;
end
else
% Evaluate WAIC using the expected value form via Gibbs training loss
if isfield(gp{1}.lik.fh,'trcov')
% Gaussian likelihood
for i=1:tn
fmin = sum(weight.*Ef(i,:) - 9*weight.*sqrt(Varf(i,:)));
fmax = sum(weight.*Ef(i,:) + 9*weight.*sqrt(Varf(i,:)));
GUt(i) = quadgk(@(f) sum(bsxfun(@times, multi_npdf(f,Ef(i,:),(Varf(i,:))),weight') ...
.*bsxfun(@minus,-bsxfun(@rdivide,(repmat((y(i)-f),nsamples,1)).^2,(2.*sigma2(i,:))'), 0.5*log(2*pi*sigma2(i,:))')), fmin, fmax);
end
if strcmp(form, 'mean')
GUt = mean(GUt);
BUt = mean(BUt);
end
waic = BUt-2*(BUt-GUt);
else
% non-gaussian likelihood
for i=1:tn
if ~isempty(z)
z1 = z(i);
else
z1 = [];
end
fmin = sum(weight.*Ef(i,:) - 9*weight.*sqrt(Varf(i,:)));
fmax = sum(weight.*Ef(i,:) + 9*weight.*sqrt(Varf(i,:)));
GUt(i) = quadgk(@(f) sum(bsxfun(@times, multi_npdf(f,Ef(i,:),(Varf(i,:))),weight') ...
.*llvec(gp, y(i), f, z1)), fmin, fmax);
end
if strcmp(form, 'mean')
GUt = mean(GUt);
BUt = mean(BUt);
end
waic = BUt-2*(BUt-GUt);
end
end
end
end
function lls=llvec(gp, y, fs, z)
% Compute a vector of log-likelihoods for the vector argument fs, as
% used by quadgk. In case of IA or MC, return a matrix with one row
% per GP in the array and one column per element of fs.
if isstruct(gp)
% single gp
lls=zeros(1,size(fs,2));
for i1=1:size(fs,2)
lls(i1)=gp.lik.fh.ll(gp.lik,y,fs(:,i1),z);
end
% else
% % mc
% lls=zeros(length(gp), length(fs));
% for i=1:numel(fs)
% for j=1:numel(gp.edata)
% Gp = take_nth(gp, j);
% lls(j,i) = Gp.lik.fh.ll(Gp.lik, y, fs(i), z);
% end
% end
else
% ia & mc
lls=zeros(length(gp), length(fs));
for i=1:numel(fs)
for j=1:numel(gp)
lls(j,i) = gp{j}.lik.fh.ll(gp{j}.lik, y, fs(i), z);
end
end
end
end
function mpdf = multi_npdf(f, mean, sigma2)
% For every element of f, evaluate norm_pdf(f(i), mean, sqrt(sigma2)).
% If mean and sigma2 are vectors, returns a
% length(mean) x length(f) matrix.
mpdf = zeros(length(mean), length(f));
for i=1:length(f)
mpdf(:,i) = norm_pdf(f(i), mean, sqrt(sigma2));
end
end
function [m_0, m_1, m_2, m_3, m_4] = moments(fun, a, b, rtol, atol, minsubs)
% MOMENTS Calculate the moments m_0,...,m_4 of a given
% (unnormalized) probability distribution
%
% [m_0, m_1, m_2, m_3, m_4] = moments(fun, a, b, rtol, atol, minsubs)
% Inputs:
% fun = Function handle to the unnormalized probability distribution
% a,b = integration limits [a,b]
% rtol = relative tolerance for the integration (optional, default 1e-6)
% atol = absolute tolerance for the integration (optional, default 1e-10)
% minsubs = minimum number of subintervals (optional, default 10)
%
% Returns the zeroth moment and the normalized higher moments:
% m0 = int_a^b fun(x) dx
% m1 = int_a^b x*fun(x) dx / m0
% m2 = int_a^b x^2*fun(x) dx / m0
% and correspondingly m3 and m4 for x^3 and x^4.
%
% The function uses an adaptive Gauss-Kronrod quadrature. The same set of
% integration points and intervals is used for each moment. This speeds up
% the evaluation, since the function evaluations are done only once.
%
% The quadrature method is described by:
% L.F. Shampine, "Vectorized Adaptive Quadrature in Matlab",
% Journal of Computational and Applied Mathematics, 211, 2008,
% pp. 131-140.
% Copyright (c) 2010 Jarno Vanhatalo, Jouni Hartikainen
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
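% A small usage sketch (hypothetical; note that moments is a
% subfunction, so this is callable only from within this file):
%
% [m0, m1, m2] = moments(@(x) exp(-x.^2/2)./sqrt(2*pi), -6, 6);
% % m0 is ~1, m1 ~0, m2 ~1 for the standard normal density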
maxsubs = 650;
if nargin < 4
rtol = 1.e-6;
end
if nargin < 5
atol = 1.e-10;
end
if nargin < 6
minsubs = 10;
end
rtol = max(rtol,100*eps);
atol = max(atol,0);
minsubs = max(minsubs,2); % At least two subintervals are needed
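% The nodes/weights below form the 15-point Gauss-Kronrod rule; the
% embedded 7-point Gauss weights w7 give the error estimate via ew.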
% points and weights
points15 = [0.2077849550078985; 0.4058451513773972; 0.5860872354676911; ...
0.7415311855993944; 0.8648644233597691; 0.9491079123427585; ...
0.9914553711208126];
points = [-points15(end:-1:1); 0; points15];
w15 = [0.2044329400752989, 0.1903505780647854, 0.1690047266392679, ...
0.1406532597155259, 0.1047900103222502, 0.06309209262997855, ...
0.02293532201052922];
w = [w15(end:-1:1), 0.2094821410847278, w15];
w7 = [0,0.3818300505051189,0,0.2797053914892767,0,0.1294849661688697,0];
ew = w - [w7(end:-1:1), 0.4179591836734694, w7];
samples = numel(w);
% split the interval.
if b-a <= 0
c = a; a = b; b=c;
warning('The end of the integration interval was not greater than the start; the limits were swapped.')
end
apu = a + (1:(minsubs-1))./minsubs*(b-a);
apu = [a,apu,b];
subs = [apu(1:end-1);apu(2:end)];
% Initialize partial sums.
Ifx_ok = 0;
Ifx1_ok = 0;
Ifx2_ok = 0;
Ifx3_ok = 0;
Ifx4_ok = 0;
% The main loop
while true
% subintervals and their midpoints
midpoints = sum(subs)/2;
halfh = diff(subs)/2;
x = bsxfun(@plus,points*halfh,midpoints);
x = reshape(x,1,[]);
fx = fun(x);
fx1 = fx.*x;
fx2 = fx.*x.^2;
fx3 = fx.*x.^3;
fx4 = fx.*x.^4;
fx = reshape(fx,samples,[]);
fx1 = reshape(fx1,samples,[]);
fx2 = reshape(fx2,samples,[]);
fx3 = reshape(fx3,samples,[]);
fx4 = reshape(fx4,samples,[]);
% Subintegrals.
Ifxsubs = (w*fx) .* halfh;
errsubs = (ew*fx) .* halfh;
Ifxsubs1 = (w*fx1) .* halfh;
Ifxsubs2 = (w*fx2) .* halfh;
Ifxsubs3 = (w*fx3) .* halfh;
Ifxsubs4 = (w*fx4) .* halfh;
% Ifx and tol.
Ifx = sum(Ifxsubs) + Ifx_ok;
Ifx1 = sum(Ifxsubs1) + Ifx1_ok;
Ifx2 = sum(Ifxsubs2) + Ifx2_ok;
Ifx3 = sum(Ifxsubs3) + Ifx3_ok;
Ifx4 = sum(Ifxsubs4) + Ifx4_ok;
tol = max(atol,rtol*abs(Ifx));
% determine the indices ndx of Ifxsubs for which the
% errors are acceptable and remove those from subs
ndx = find(abs(errsubs) <= (2/(b-a)*halfh*tol));
subs(:,ndx) = [];
if isempty(subs)
break
end
% Update the integral.
Ifx_ok = Ifx_ok + sum(Ifxsubs(ndx));
Ifx1_ok = Ifx1_ok + sum(Ifxsubs1(ndx));
Ifx2_ok = Ifx2_ok + sum(Ifxsubs2(ndx));
Ifx3_ok = Ifx3_ok + sum(Ifxsubs3(ndx));
Ifx4_ok = Ifx4_ok + sum(Ifxsubs4(ndx));
% Quit if too many subintervals.
nsubs = 2*size(subs,2);
if nsubs > maxsubs
warning('quad_moments: Reached the limit on the maximum number of intervals in use.');
break
end
midpoints(ndx) = [];
subs = reshape([subs(1,:); midpoints; midpoints; subs(2,:)],2,[]); % Divide the remaining subintervals in half
end
% Scale moments
m_0 = Ifx;
m_1 = Ifx1./Ifx;
m_2 = Ifx2./Ifx;
m_3 = Ifx3./Ifx;
m_4 = Ifx4./Ifx;
end
|
github
|
lcnhappe/happe-master
|
gpcf_ppcs0.m
|
.m
|
happe-master/Packages/eeglab14_0_0b/plugins/fieldtrip-20160917/external/dmlt/external/gpstuff/gp/gpcf_ppcs0.m
| 37,470 |
utf_8
|
8e0026784ac7fd43fed2a933ac703b6c
|
function gpcf = gpcf_ppcs0(varargin)
%GPCF_PPCS0 Create a piecewise polynomial (q=0) covariance function
%
% Description
% GPCF = GPCF_PPCS0('nin',nin,'PARAM1',VALUE1,'PARAM2',VALUE2,...)
% creates a piecewise polynomial (q=0) covariance function
% structure in which the named parameters have the specified
% values. Any unspecified parameters are set to default values.
% The parameter 'nin' is obligatory; it gives the dimension
% of the input space.
%
% GPCF = GPCF_PPCS0(GPCF,'PARAM1',VALUE1,'PARAM2',VALUE2,...)
% modifies a covariance function structure with the named
% parameters altered to the specified values.
%
% Parameters for piecewise polynomial (q=0) covariance function [default]
% magnSigma2 - magnitude (squared) [0.1]
% lengthScale - length scale for each input. [1]
% This can be either scalar corresponding
% to an isotropic function or vector
% defining own length-scale for each input
% direction.
% l_nin - order of the polynomial [floor(nin/2) + 1]
% Has to be greater than or equal to default.
% magnSigma2_prior - prior for magnSigma2 [prior_logunif]
% lengthScale_prior - prior for lengthScale [prior_t]
% metric - metric structure used by the covariance function []
% selectedVariables - vector defining which inputs are used [all]
% selectedVariables is shorthand for using
% metric_euclidean with corresponding components
%
% Note! If the prior is 'prior_fixed' then the parameter in
% question is considered fixed and it is not handled in
% optimization, grid integration, MCMC etc.
%
% The piecewise polynomial function is the following:
%
% k_pp0(x_i, x_j) = ma2*cs^l
%
% where r = sqrt( sum( (x_i,d - x_j,d).^2./l^2_d ) )
% l = floor(l_nin/2) + 1
% cs = max(0,1-r)
% and l_nin must be greater than or equal to gpcf.nin
%
% NOTE! Use of gpcf_ppcs0 requires that you have installed
% GPstuff with SuiteSparse.
%
% See also
% GP_SET, GPCF_*, PRIOR_*, METRIC_*
% Copyright (c) 2009-2010 Jarno Vanhatalo
% Copyright (c) 2010 Aki Vehtari
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
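% A minimal usage sketch (hypothetical; requires SuiteSparse, and x is
% assumed to be an n-by-2 matrix of inputs):
%
% gpcf = gpcf_ppcs0('nin', 2, 'lengthScale', [1 2], 'magnSigma2', 0.5);
% K = gpcf.fh.trcov(gpcf, x); % sparse, compactly supported covariance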
if nargin>0 && ischar(varargin{1}) && ismember(varargin{1},{'init' 'set'})
% remove init and set
varargin(1)=[];
end
ip=inputParser;
ip.FunctionName = 'GPCF_PPCS0';
ip.addOptional('gpcf', [], @isstruct);
ip.addParamValue('nin',[], @(x) isscalar(x) && x>0 && mod(x,1)==0);
ip.addParamValue('magnSigma2',0.1, @(x) isscalar(x) && x>0);
ip.addParamValue('lengthScale',1, @(x) isvector(x) && all(x>0));
ip.addParamValue('l_nin',[], @(x) isscalar(x) && x>0 && mod(x,1)==0);
ip.addParamValue('metric',[], @isstruct);
ip.addParamValue('magnSigma2_prior', prior_logunif(), ...
@(x) isstruct(x) || isempty(x));
ip.addParamValue('lengthScale_prior',prior_t(), ...
@(x) isstruct(x) || isempty(x));
ip.addParamValue('selectedVariables',[], @(x) isempty(x) || ...
(isvector(x) && all(x>0)));
ip.parse(varargin{:});
gpcf=ip.Results.gpcf;
if isempty(gpcf)
% Check that SuiteSparse is available
if ~exist('ldlchol')
error('SuiteSparse is not installed (or it is not in the path). gpcf_ppcs0 cannot be used!')
end
init=true;
gpcf.nin=ip.Results.nin;
if isempty(gpcf.nin)
error('nin has to be given for ppcs: gpcf_ppcs0(''nin'',NIN,...)')
end
gpcf.type = 'gpcf_ppcs0';
% cf is compactly supported
gpcf.cs = 1;
else
if ~isfield(gpcf,'type') || ~isequal(gpcf.type,'gpcf_ppcs0')
error('First argument does not seem to be a valid covariance function structure')
end
init=false;
end
if init
% Set the function handles to the subfunctions
gpcf.fh.pak = @gpcf_ppcs0_pak;
gpcf.fh.unpak = @gpcf_ppcs0_unpak;
gpcf.fh.lp = @gpcf_ppcs0_lp;
gpcf.fh.lpg = @gpcf_ppcs0_lpg;
gpcf.fh.cfg = @gpcf_ppcs0_cfg;
gpcf.fh.ginput = @gpcf_ppcs0_ginput;
gpcf.fh.cov = @gpcf_ppcs0_cov;
gpcf.fh.trcov = @gpcf_ppcs0_trcov;
gpcf.fh.trvar = @gpcf_ppcs0_trvar;
gpcf.fh.recappend = @gpcf_ppcs0_recappend;
end
% Initialize parameters
if init || ~ismember('l_nin',ip.UsingDefaults)
gpcf.l=ip.Results.l_nin;
if isempty(gpcf.l)
gpcf.l = floor(gpcf.nin/2) + 1;
end
if gpcf.l < gpcf.nin
error('The l_nin has to be greater than or equal to the number of inputs!')
end
end
if init || ~ismember('lengthScale',ip.UsingDefaults)
gpcf.lengthScale = ip.Results.lengthScale;
end
if init || ~ismember('magnSigma2',ip.UsingDefaults)
gpcf.magnSigma2 = ip.Results.magnSigma2;
end
% Initialize prior structure
if init
gpcf.p=[];
end
if init || ~ismember('lengthScale_prior',ip.UsingDefaults)
gpcf.p.lengthScale=ip.Results.lengthScale_prior;
end
if init || ~ismember('magnSigma2_prior',ip.UsingDefaults)
gpcf.p.magnSigma2=ip.Results.magnSigma2_prior;
end
%Initialize metric
if ~ismember('metric',ip.UsingDefaults)
if ~isempty(ip.Results.metric)
gpcf.metric = ip.Results.metric;
gpcf = rmfield(gpcf, 'lengthScale');
gpcf.p = rmfield(gpcf.p, 'lengthScale');
elseif isfield(gpcf,'metric')
if ~isfield(gpcf,'lengthScale')
gpcf.lengthScale = gpcf.metric.lengthScale;
end
if ~isfield(gpcf.p,'lengthScale')
gpcf.p.lengthScale = gpcf.metric.p.lengthScale;
end
gpcf = rmfield(gpcf, 'metric');
end
end
% selectedVariables options implemented using metric_euclidean
if ~ismember('selectedVariables',ip.UsingDefaults)
if ~isfield(gpcf,'metric')
if ~isempty(ip.Results.selectedVariables)
gpcf.selectedVariables = ip.Results.selectedVariables;
% gpcf.metric=metric_euclidean('components',...
% num2cell(ip.Results.selectedVariables),...
% 'lengthScale',gpcf.lengthScale,...
% 'lengthScale_prior',gpcf.p.lengthScale);
% gpcf = rmfield(gpcf, 'lengthScale');
% gpcf.p = rmfield(gpcf.p, 'lengthScale');
end
elseif isfield(gpcf,'metric')
if ~isempty(ip.Results.selectedVariables)
gpcf.metric=metric_euclidean(gpcf.metric,...
'components',...
num2cell(ip.Results.selectedVariables));
if ~ismember('lengthScale',ip.UsingDefaults)
gpcf.metric.lengthScale=ip.Results.lengthScale;
gpcf = rmfield(gpcf, 'lengthScale');
end
if ~ismember('lengthScale_prior',ip.UsingDefaults)
gpcf.metric.p.lengthScale=ip.Results.lengthScale_prior;
gpcf.p = rmfield(gpcf.p, 'lengthScale');
end
else
if ~isfield(gpcf,'lengthScale')
gpcf.lengthScale = gpcf.metric.lengthScale;
end
if ~isfield(gpcf.p,'lengthScale')
gpcf.p.lengthScale = gpcf.metric.p.lengthScale;
end
gpcf = rmfield(gpcf, 'metric');
end
end
end
end
function [w,s] = gpcf_ppcs0_pak(gpcf)
%GPCF_PPCS0_PAK Combine GP covariance function parameters into
% one vector
%
% Description
% W = GPCF_PPCS0_PAK(GPCF) takes a covariance function
% structure GPCF and combines the covariance function
% parameters and their hyperparameters into a single row
% vector W. This is a mandatory subfunction used for example
% in energy and gradient computations.
%
% w = [ log(gpcf.magnSigma2)
% (hyperparameters of gpcf.magnSigma2)
% log(gpcf.lengthScale(:))
% (hyperparameters of gpcf.lengthScale)]'
%
% See also
% GPCF_PPCS0_UNPAK
w = []; s = {};
if ~isempty(gpcf.p.magnSigma2)
w = [w log(gpcf.magnSigma2)];
s = [s; 'log(ppcs0.magnSigma2)'];
% Hyperparameters of magnSigma2
[wh sh] = gpcf.p.magnSigma2.fh.pak(gpcf.p.magnSigma2);
w = [w wh];
s = [s; sh];
end
if isfield(gpcf,'metric')
[wh sh]=gpcf.metric.fh.pak(gpcf.metric);
w = [w wh];
s = [s; sh];
else
if ~isempty(gpcf.p.lengthScale)
w = [w log(gpcf.lengthScale)];
if numel(gpcf.lengthScale)>1
s = [s; sprintf('log(ppcs0.lengthScale x %d)',numel(gpcf.lengthScale))];
else
s = [s; 'log(ppcs0.lengthScale)'];
end
% Hyperparameters of lengthScale
[wh sh] = gpcf.p.lengthScale.fh.pak(gpcf.p.lengthScale);
w = [w wh];
s = [s; sh];
end
end
end
function [gpcf, w] = gpcf_ppcs0_unpak(gpcf, w)
%GPCF_PPCS0_UNPAK Sets the covariance function parameters into
% the structure
%
% Description
% [GPCF, W] = GPCF_PPCS0_UNPAK(GPCF, W) takes a covariance
% function structure GPCF and a hyper-parameter vector W,
% and returns a covariance function structure identical
% to the input, except that the covariance hyper-parameters
% have been set to the values in W. Deletes the values set to
% GPCF from W and returns the modified W. This is a mandatory
% subfunction used for example in energy and gradient computations.
%
% Assignment is inverse of
% w = [ log(gpcf.magnSigma2)
% (hyperparameters of gpcf.magnSigma2)
% log(gpcf.lengthScale(:))
% (hyperparameters of gpcf.lengthScale)]'
%
% See also
% GPCF_PPCS0_PAK
gpp=gpcf.p;
if ~isempty(gpp.magnSigma2)
gpcf.magnSigma2 = exp(w(1));
w = w(2:end);
% Hyperparameters of magnSigma2
[p, w] = gpcf.p.magnSigma2.fh.unpak(gpcf.p.magnSigma2, w);
gpcf.p.magnSigma2 = p;
end
if isfield(gpcf,'metric')
[metric, w] = gpcf.metric.fh.unpak(gpcf.metric, w);
gpcf.metric = metric;
else
if ~isempty(gpp.lengthScale)
i1=1;
i2=length(gpcf.lengthScale);
gpcf.lengthScale = exp(w(i1:i2));
w = w(i2+1:end);
% Hyperparameters of lengthScale
[p, w] = gpcf.p.lengthScale.fh.unpak(gpcf.p.lengthScale, w);
gpcf.p.lengthScale = p;
end
end
end
function lp = gpcf_ppcs0_lp(gpcf)
%GPCF_PPCS0_LP Evaluate the log prior of covariance function parameters
%
% Description
% LP = GPCF_PPCS0_LP(GPCF, X, T) takes a covariance function
% structure GPCF and returns log(p(th)), where th collects the
% parameters. This is a mandatory subfunction used for example
% in energy computations.
%
% See also
% GPCF_PPCS0_PAK, GPCF_PPCS0_UNPAK, GPCF_PPCS0_LPG, GP_E
% Evaluate the prior contribution to the error. The parameters that
% are sampled are transformed, e.g., W = log(w) where w is all
% the "real" samples. On the other hand errors are evaluated in
% the W-space so we need take into account also the Jacobian of
% transformation, e.g., W -> w = exp(W). See Gelman et.al., 2004,
% Bayesian data Analysis, second edition, p24.
lp = 0;
gpp=gpcf.p;
if ~isempty(gpcf.p.magnSigma2)
lp = lp +gpp.magnSigma2.fh.lp(gpcf.magnSigma2, ...
gpp.magnSigma2) +log(gpcf.magnSigma2);
end
if isfield(gpcf,'metric')
lp = lp +gpcf.metric.fh.lp(gpcf.metric);
elseif ~isempty(gpp.lengthScale)
lp = lp +gpp.lengthScale.fh.lp(gpcf.lengthScale, ...
gpp.lengthScale) +sum(log(gpcf.lengthScale));
end
end
function lpg = gpcf_ppcs0_lpg(gpcf)
%GPCF_PPCS0_LPG Evaluate gradient of the log prior with respect
% to the parameters.
%
% Description
% LPG = GPCF_PPCS0_LPG(GPCF) takes a covariance function
% structure GPCF and returns LPG = d log (p(th))/dth, where th
% is the vector of parameters. This is a mandatory subfunction
% used for example in energy and gradient computations.
%
% See also
% GPCF_PPCS0_PAK, GPCF_PPCS0_UNPAK, GPCF_PPCS0_LP, GP_G
lpg = [];
gpp=gpcf.p;
if ~isempty(gpcf.p.magnSigma2)
lpgs = gpp.magnSigma2.fh.lpg(gpcf.magnSigma2, gpp.magnSigma2);
lpg = [lpg lpgs(1).*gpcf.magnSigma2+1 lpgs(2:end)];
end
if isfield(gpcf,'metric')
lpg_dist = gpcf.metric.fh.lpg(gpcf.metric);
lpg=[lpg lpg_dist];
else
if ~isempty(gpcf.p.lengthScale)
lll = length(gpcf.lengthScale);
lpgs = gpp.lengthScale.fh.lpg(gpcf.lengthScale, gpp.lengthScale);
lpg = [lpg lpgs(1:lll).*gpcf.lengthScale+1 lpgs(lll+1:end)];
end
end
end
function DKff = gpcf_ppcs0_cfg(gpcf, x, x2, mask,i1)
%GPCF_PPCS0_CFG Evaluate gradient of covariance function
% with respect to the parameters
%
% Description
% DKff = GPCF_PPCS0_CFG(GPCF, X) takes a covariance function
% structure GPCF, a matrix X of input vectors and returns
% DKff, the gradients of covariance matrix Kff = k(X,X) with
% respect to th (cell array with matrix elements). This is a
% mandatory subfunction used in gradient computations.
%
% DKff = GPCF_PPCS0_CFG(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_PPCS0_CFG(GPCF, X, [], MASK) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the diagonal of gradients of covariance matrix
% Kff = k(X,X2) with respect to th (cell array with matrix
% elements). This subfunction is needed when using sparse
% approximations (e.g. FIC).
%
% DKff = GPCF_PPCS0_CFG(GPCF, X, X2, [], i) takes a covariance
% function structure GPCF, a matrix X of input vectors and
% returns DKff, the gradients of covariance matrix Kff =
% k(X,X2), or k(X,X) if X2 is empty, with respect to ith
% hyperparameter. This subfunction is needed when using memory
% save option in gp_set.
%
% See also
% GPCF_PPCS0_PAK, GPCF_PPCS0_UNPAK, GPCF_PPCS0_LP, GP_G
gpp=gpcf.p;
i2=1;
DKff = {};
gprior = [];
if nargin==5
% Use memory save option
savememory=1;
if i1==0
% Return number of hyperparameters
i=0;
if ~isempty(gpcf.p.magnSigma2)
i=i+1;
end
if ~isempty(gpcf.p.lengthScale)
i=i+length(gpcf.lengthScale);
end
DKff=i;
return;
end
else
savememory=0;
end
% Evaluate: DKff{1} = d Kff / d magnSigma2
% DKff{2} = d Kff / d lengthScale
% NOTE! Here we have already taken into account that the parameters
% are transformed through log() and thus dK/dlog(p) = p * dK/dp
% evaluate the gradient for training covariance
if nargin == 2 || (isempty(x2) && isempty(mask))
Cdm = gpcf_ppcs0_trcov(gpcf, x);
ii1=0;
if ~isempty(gpcf.p.magnSigma2)
ii1 = ii1 +1;
DKff{ii1} = Cdm;
end
l = gpcf.l;
[I,J] = find(Cdm);
if isfield(gpcf,'metric')
% Compute the sparse distance matrix and its gradient.
[n, m] =size(x);
ntriplets = (nnz(Cdm)-n)./2;
I = zeros(ntriplets,1);
J = zeros(ntriplets,1);
dist = zeros(ntriplets,1);
for jj = 1:length(gpcf.metric.components)
gdist{jj} = zeros(ntriplets,1);
end
ntriplets = 0;
for ii=1:n-1
col_ind = ii + find(Cdm(ii+1:n,ii));
d = gpcf.metric.fh.dist(gpcf.metric, x(col_ind,:), x(ii,:));
gd = gpcf.metric.fh.distg(gpcf.metric, x(col_ind,:), x(ii,:));
ntrip_prev = ntriplets;
ntriplets = ntriplets + length(d);
ind_tr = ntrip_prev+1:ntriplets;
I(ind_tr) = col_ind;
J(ind_tr) = ii;
dist(ind_tr) = d;
for jj = 1:length(gd)
gdist{jj}(ind_tr) = gd{jj};
end
end
ma2 = gpcf.magnSigma2;
cs = 1-dist;
Dd = -l.*cs.^(l-1);
Dd = ma2.*Dd;
for i=1:length(gdist)
ii1 = ii1+1;
D = Dd.*gdist{i};
D = sparse(I,J,D,n,n);
DKff{ii1} = D + D';
end
else
if isfield(gpcf, 'selectedVariables')
x = x(:,gpcf.selectedVariables);
end
[n, m] =size(x);
if ~savememory
i1=1:m;
else
if i1==1
DKff=DKff{1};
return
end
i1=i1-1;
ii1=ii1-1;
end
if ~isempty(gpcf.p.lengthScale)
% loop over all the lengthScales
if length(gpcf.lengthScale) == 1
% In the case of isotropic PPCS0
s2 = 1./gpcf.lengthScale.^2;
ma2 = gpcf.magnSigma2;
% Calculate the sparse distance (lower triangle) matrix
d2 = 0;
for i = 1:m
d2 = d2 + s2.*(x(I,i) - x(J,i)).^2;
end
d = sqrt(d2);
% Create the 'compact support' matrix, that is, (1-R)_+,
% where ()_+ truncates all non-positive inputs to zero.
cs = 1-d;
% Calculate the gradient matrix
D = -l.*cs.^(l-1);
D = -d.*ma2.*D;
D = sparse(I,J,D,n,n);
ii1 = ii1+1;
DKff{ii1} = D;
else
% In the case ARD is used
s2 = 1./gpcf.lengthScale.^2;
ma2 = gpcf.magnSigma2;
% Calculate the sparse distance (lower triangle) matrix
% and the distance matrix for each component
d2 = 0;
d_l2 = [];
for i = 1:m
d_l2(:,i) = s2(i).*(x(I,i) - x(J,i)).^2;
d2 = d2 + d_l2(:,i);
end
d = sqrt(d2);
d_l = d_l2;
% Create the 'compact support' matrix, that is, (1-R)_+,
% where ()_+ truncates all non-positive inputs to zero.
cs = 1-d;
Dd = -l.*cs.^(l-1);
Dd = -ma2.*Dd;
int = d ~= 0;
for i = i1
% Calculate the gradient matrix
D = d_l(:,i).*Dd;
% Divide by r in cases where r is non-zero
D(int) = D(int)./d(int);
D = sparse(I,J,D,n,n);
ii1 = ii1+1;
DKff{ii1} = D;
end
end
end
end
% Evaluate the gradient of non-symmetric covariance (e.g. K_fu)
elseif nargin == 3 || isempty(mask)
if size(x,2) ~= size(x2,2)
error('gpcf_ppcs -> _ghyper: The number of columns in x and x2 has to be the same. ')
end
ii1=0;
K = gpcf.fh.cov(gpcf, x, x2);
if ~isempty(gpcf.p.magnSigma2)
ii1 = ii1 +1;
DKff{ii1} = K;
end
l = gpcf.l;
if isfield(gpcf,'metric')
% If other than scaled euclidean metric
[n1,m1]=size(x);
[n2,m2]=size(x2);
ma = gpcf.magnSigma2;
% Compute the sparse distance matrix.
ntriplets = nnz(K);
I = zeros(ntriplets,1);
J = zeros(ntriplets,1);
R = zeros(ntriplets,1);
dist = zeros(ntriplets,1);
for jj = 1:length(gpcf.metric.components)
gdist{jj} = zeros(ntriplets,1);
end
ntriplets = 0;
for ii=1:n2
d = zeros(n1,1);
d = gpcf.metric.fh.dist(gpcf.metric, x, x2(ii,:));
gd = gpcf.metric.fh.distg(gpcf.metric, x, x2(ii,:));
gprior_dist = gpcf.metric.fh.lpg(gpcf.metric, x, x2(ii,:));
I0t = find(d==0);
d(d >= 1) = 0;
[I2,J2,R2] = find(d);
len = length(R);
ntrip_prev = ntriplets;
ntriplets = ntriplets + length(R2);
ind_tr = ntrip_prev+1:ntriplets;
I(ind_tr) = I2;
J(ind_tr) = ii;
dist(ind_tr) = R2;
for jj = 1:length(gd)
gdist{jj}(ind_tr) = gd{jj}(I2);
end
end
ma2 = gpcf.magnSigma2;
cs = 1-dist;
Dd = -l.*cs.^(l-1);
Dd = ma2.*Dd;
for i=1:length(gdist)
ii1 = ii1+1;
D = Dd.*gdist{i};
D = sparse(I,J,D,n1,n2);
DKff{ii1} = D;
end
else
if isfield(gpcf, 'selectedVariables')
x = x(:,gpcf.selectedVariables);
x2 = x2(:,gpcf.selectedVariables);
end
[n, m] =size(x);
if ~savememory
i1=1:m;
else
if i1==1
DKff=DKff{1};
return
end
i1=i1-1;
ii1=ii1-1;
end
if ~isempty(gpcf.p.lengthScale)
% loop over all the lengthScales
if length(gpcf.lengthScale) == 1
% In the case of isotropic PPCS0
s2 = 1./gpcf.lengthScale.^2;
ma2 = gpcf.magnSigma2;
% Calculate the sparse distance (lower triangle) matrix
dist1 = 0;
for i=1:m
dist1 = dist1 + s2.*(bsxfun(@minus,x(:,i),x2(:,i)')).^2;
end
d1 = sqrt(dist1);
cs1 = max(1-d1,0);
DK_l = -l.*cs1.^(l-1);
DK_l = -d1.*ma2.*DK_l;
ii1=ii1+1;
DKff{ii1} = DK_l;
else
          % In the case where ARD is used
s2 = 1./gpcf.lengthScale.^2;
ma2 = gpcf.magnSigma2;
% Calculate the sparse distance (lower triangle) matrix
% and the distance matrix for each component
dist1 = 0;
d_l1 = [];
for i = 1:m
dist1 = dist1 + s2(i).*bsxfun(@minus,x(:,i),x2(:,i)').^2;
d_l1{i} = s2(i).*(bsxfun(@minus,x(:,i),x2(:,i)')).^2;
end
d1 = sqrt(dist1);
cs1 = max(1-d1,0);
for i = i1
% Calculate the gradient matrix
DK_l = -l.*cs1.^(l-1);
DK_l = -ma2.*DK_l.*d_l1{i};
% Divide by r in cases where r is non-zero
DK_l(d1 ~= 0) = DK_l(d1 ~= 0)./d1(d1 ~= 0);
ii1=ii1+1;
DKff{ii1} = DK_l;
end
end
end
end
% Evaluate: DKff{1} = d mask(Kff,I) / d magnSigma2
% DKff{2...} = d mask(Kff,I) / d lengthScale
elseif nargin == 4 || nargin == 5
ii1=0;
    [n, m] = size(x);
if ~isempty(gpcf.p.magnSigma2) && (~savememory || all(i1==1))
ii1 = ii1+1;
DKff{ii1} = gpcf.fh.trvar(gpcf, x); % d mask(Kff,I) / d magnSigma2
end
if isfield(gpcf,'metric')
dist = 0;
distg = gpcf.metric.fh.distg(gpcf.metric, x, [], 1);
gprior_dist = gpcf.metric.fh.lpg(gpcf.metric);
for i=1:length(distg)
ii1 = ii1+1;
DKff{ii1} = 0;
end
else
if ~isempty(gpcf.p.lengthScale)
for i2=1:length(gpcf.lengthScale)
ii1 = ii1+1;
DKff{ii1} = 0; % d mask(Kff,I) / d lengthScale
end
end
end
end
if savememory
DKff=DKff{1};
end
end
function DKff = gpcf_ppcs0_ginput(gpcf, x, x2, i1)
%GPCF_PPCS0_GINPUT Evaluate gradient of covariance function with
% respect to x
%
% Description
% DKff = GPCF_PPCS0_GINPUT(GPCF, X) takes a covariance
% function structure GPCF, a matrix X of input vectors
% and returns DKff, the gradients of covariance matrix Kff =
% k(X,X) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
% DKff = GPCF_PPCS0_GINPUT(GPCF, X, X2) takes a covariance
% function structure GPCF, a matrix X of input vectors
% and returns DKff, the gradients of covariance matrix Kff =
% k(X,X2) with respect to X (cell array with matrix elements).
% This subfunction is needed when computing gradients with
% respect to inducing inputs in sparse approximations.
%
  %  DKff = GPCF_PPCS0_GINPUT(GPCF, X, X2, i) takes a covariance
  %  function structure GPCF, a matrix X of input vectors
  %  and returns DKff, the gradients of covariance matrix Kff =
  %  k(X,X2), or k(X,X) if X2 is empty, with respect to the ith
  %  covariate in X. This subfunction is needed when using the memory
  %  save option in gp_set.
%
% See also
% GPCF_PPCS0_PAK, GPCF_PPCS0_UNPAK, GPCF_PPCS0_LP, GP_G
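  %
  % A minimal usage sketch (illustrative; assumes GPCF was created with
  % the gpcf_ppcs0 constructor and that its fh.ginput handle points to
  % this subfunction):
  %   x = randn(10,2);
  %   DKff = gpcf.fh.ginput(gpcf, x);  % cell array of n x n gradient matrices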
  [n, m] = size(x);
ii1 = 0;
if nargin==4
% Use memory save option
if i1==0
% Return number of covariates
if isfield(gpcf,'selectedVariables')
DKff=length(gpcf.selectedVariables);
else
DKff=m;
end
return
end
else
i1=1:m;
end
if nargin == 2 || isempty(x2)
l = gpcf.l;
K = gpcf.fh.trcov(gpcf, x);
[I,J] = find(K);
if isfield(gpcf,'metric')
% Compute the sparse distance matrix and its gradient.
      ntriplets = (nnz(K)-n)./2;
I = zeros(ntriplets,1);
J = zeros(ntriplets,1);
dist = zeros(ntriplets,1);
for jj = 1:length(gpcf.metric.components)
gdist{jj} = zeros(ntriplets,1);
end
ntriplets = 0;
for ii=1:n-1
        col_ind = ii + find(K(ii+1:n,ii));
d = zeros(length(col_ind),1);
d = gpcf.metric.fh.dist(gpcf.metric, x(col_ind,:), x(ii,:));
[gd, gprior_dist] = gpcf.metric.fh.ginput(gpcf.metric, x(col_ind,:), x(ii,:));
ntrip_prev = ntriplets;
ntriplets = ntriplets + length(d);
ind_tr = ntrip_prev+1:ntriplets;
I(ind_tr) = col_ind;
J(ind_tr) = ii;
dist(ind_tr) = d;
for jj = 1:length(gd)
gdist{jj}(ind_tr) = gd{jj};
end
end
ma2 = gpcf.magnSigma2;
cs = 1-dist;
Dd = -l.*cs.^(l-1);
Dd = ma2.*Dd;
for i=1:length(gdist)
ii1 = ii1+1;
D = Dd.*gdist{i};
D = sparse(I,J,D,n,n);
DKff{ii1} = D + D';
end
else
if length(gpcf.lengthScale) == 1
% In the case of an isotropic PPCS0
s2 = repmat(1./gpcf.lengthScale.^2, 1, m);
else
s2 = 1./gpcf.lengthScale.^2;
end
ma2 = gpcf.magnSigma2;
% Calculate the sparse distance (lower triangle) matrix
% and the distance matrix for each component
d2 = 0;
for i = 1:m
d2 = d2 + s2(i).*(x(I,i) - x(J,i)).^2;
end
d = sqrt(d2);
% Create the 'compact support' matrix, that is, (1-R)_+,
% where ()_+ truncates all non-positive inputs to zero.
cs = 1-d;
Dd = -ma2.*l.*cs.^(l-1);
Dd = sparse(I,J,Dd,n,n);
d = sparse(I,J,d,n,n);
row = ones(n,1);
cols = 1:n;
for i = i1
for j = 1:n
% Calculate the gradient matrix
ind = find(d(:,j));
apu = full(Dd(:,j)).*s2(i).*(x(j,i)-x(:,i));
apu(ind) = apu(ind)./d(ind,j);
D = sparse(row*j, cols, apu, n, n);
D = D+D';
ii1 = ii1+1;
DKff{ii1} = D;
end
end
end
% Evaluate the gradient of non-symmetric covariance (e.g. K_fu)
elseif nargin == 3
if size(x,2) ~= size(x2,2)
      error('gpcf_ppcs0 -> _ginput: The number of columns in x and x2 has to be the same.')
end
K = gpcf.fh.cov(gpcf, x, x2);
n2 = size(x2,1);
ii1=0;
l = gpcf.l;
if isfield(gpcf,'metric')
% If other than scaled euclidean metric
[n1,m1]=size(x);
[n2,m2]=size(x2);
ma = gpcf.magnSigma2;
% Compute the sparse distance matrix.
ntriplets = nnz(K);
I = zeros(ntriplets,1);
J = zeros(ntriplets,1);
R = zeros(ntriplets,1);
dist = zeros(ntriplets,1);
for jj = 1:length(gpcf.metric.components)
gdist{jj} = zeros(ntriplets,1);
end
ntriplets = 0;
for ii=1:n2
d = zeros(n1,1);
d = gpcf.metric.fh.dist(gpcf.metric, x, x2(ii,:));
[gd, gprior_dist] = gpcf.metric.fh.ginput(gpcf.metric, x, x2(ii,:));
I0t = find(d==0);
d(d >= 1) = 0;
[I2,J2,R2] = find(d);
        %len = length(R);
ntrip_prev = ntriplets;
ntriplets = ntriplets + length(R2);
ind_tr = ntrip_prev+1:ntriplets;
I(ind_tr) = I2;
J(ind_tr) = ii;
dist(ind_tr) = R2;
for jj = 1:length(gd)
gdist{jj}(ind_tr) = gd{jj}(I2);
end
end
ma2 = gpcf.magnSigma2;
cs = 1-dist;
Dd = -l.*ma2.*cs.^(l-1);
for i=1:length(gdist)
ii1 = ii1+1;
D = Dd.*gdist{i};
D = sparse(I,J,D,n1,n2);
DKff{ii1} = D;
end
else
if length(gpcf.lengthScale) == 1
% In the case of an isotropic PPCS0
s2 = repmat(1./gpcf.lengthScale.^2, 1, m);
else
s2 = 1./gpcf.lengthScale.^2;
end
ma2 = gpcf.magnSigma2;
% Calculate the sparse distance (lower triangle) matrix
% and the distance matrix for each component
dist1 = 0;
for i = 1:m
dist1 = dist1 + s2(i).*bsxfun(@minus,x(:,i),x2(:,i)').^2;
end
d = sqrt(dist1);
cs = max(1-d,0);
Dd = -ma2.*l.*cs.^(l-1);
row = ones(n2,1);
cols = 1:n2;
for i = i1
for j = 1:n
% Calculate the gradient matrix
ind = find(d(j,:));
apu = Dd(j,:).*s2(i).*(x(j,i)-x2(:,i))';
apu(ind) = apu(ind)./d(j,ind);
D = sparse(row*j, cols, apu, n, n2);
ii1 = ii1+1;
DKff{ii1} = D;
end
end
end
end
end
function C = gpcf_ppcs0_cov(gpcf, x1, x2, varargin)
%GP_PPCS0_COV Evaluate covariance matrix between two input vectors
%
% Description
% C = GP_PPCS0_COV(GP, TX, X) takes in covariance function of
  %  a Gaussian process GP and two matrices TX and X that contain
% input vectors to GP. Returns covariance matrix C. Every
% element ij of C contains covariance between inputs i in TX
% and j in X. This is a mandatory subfunction used for example in
% prediction and energy computations.
%
% See also
% GPCF_PPCS0_TRCOV, GPCF_PPCS0_TRVAR, GP_COV, GP_TRCOV
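  %
  % A minimal usage sketch (illustrative; assumes the structure was created
  % with the gpcf_ppcs0 constructor and that its fh.cov handle points to
  % this subfunction):
  %   C = gpcf.fh.cov(gpcf, tx, x);  % sparse n1 x n2 covariance matrix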
if isfield(gpcf,'metric')
% If other than scaled euclidean metric
[n1,m1]=size(x1);
[n2,m2]=size(x2);
else
% If scaled euclidean metric
if isfield(gpcf, 'selectedVariables')
x1 = x1(:,gpcf.selectedVariables);
x2 = x2(:,gpcf.selectedVariables);
end
[n1,m1]=size(x1);
[n2,m2]=size(x2);
s = 1./(gpcf.lengthScale);
s2 = s.^2;
    if numel(s)==1
s2 = repmat(s2,1,m1);
end
end
ma2 = gpcf.magnSigma2;
l = gpcf.l;
% Compute the sparse distance matrix.
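  % The triplet buffers below are preallocated assuming roughly 3% of the
  % input pairs fall inside the unit support (d < 1); MATLAB grows the
  % arrays automatically if the estimate is exceeded.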
ntriplets = max(1,floor(0.03*n1*n2));
I = zeros(ntriplets,1);
J = zeros(ntriplets,1);
R = zeros(ntriplets,1);
ntriplets = 0;
I0=zeros(ntriplets,1);
J0=zeros(ntriplets,1);
nn0=0;
for ii1=1:n2
d = zeros(n1,1);
if isfield(gpcf, 'metric')
d = gpcf.metric.fh.dist(gpcf.metric, x1, x2(ii1,:));
else
for j=1:m1
d = d + s2(j).*(x1(:,j)-x2(ii1,j)).^2;
end
end
%d = sqrt(d);
I0t = find(d==0);
d(d >= 1) = 0;
[I2,J2,R2] = find(d);
R2=sqrt(R2);
%len = length(R);
ntrip_prev = ntriplets;
ntriplets = ntriplets + length(R2);
I(ntrip_prev+1:ntriplets) = I2;
J(ntrip_prev+1:ntriplets) = ii1;
R(ntrip_prev+1:ntriplets) = R2;
I0(nn0+1:nn0+length(I0t)) = I0t;
J0(nn0+1:nn0+length(I0t)) = ii1;
nn0 = nn0+length(I0t);
end
r = sparse(I(1:ntriplets),J(1:ntriplets),R(1:ntriplets));
[I,J,r] = find(r);
  cs = max(0, 1-r);
C = ma2.*cs.^l;
C = sparse(I,J,C,n1,n2) + sparse(I0,J0,ma2,n1,n2);
end
function C = gpcf_ppcs0_trcov(gpcf, x)
%GP_PPCS0_TRCOV Evaluate training covariance matrix of inputs
%
% Description
% C = GP_PPCS0_TRCOV(GP, TX) takes in covariance function of a
% Gaussian process GP and matrix TX that contains training
% input vectors. Returns covariance matrix C. Every element ij
% of C contains covariance between inputs i and j in TX.
% This is a mandatory subfunction used for example in prediction
% and energy computations.
%
% See also
% GPCF_PPCS0_COV, GPCF_PPCS0_TRVAR, GP_COV, GP_TRCOV
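  %
  % A minimal usage sketch (illustrative; assumes the structure's fh.trcov
  % handle points to this subfunction):
  %   C = gpcf.fh.trcov(gpcf, tx);  % sparse n x n training covariance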
if isfield(gpcf,'metric')
% If other than scaled euclidean metric
    [n, m] = size(x);
else
    % If a scaled euclidean metric, try the mex implementation first
    % and if that is not available...
C = trcov(gpcf,x);
% ... evaluate the covariance here.
if isnan(C)
if isfield(gpcf, 'selectedVariables')
x = x(:,gpcf.selectedVariables);
end
      [n, m] = size(x);
s = 1./(gpcf.lengthScale);
s2 = s.^2;
      if numel(s)==1
s2 = repmat(s2,1,m);
end
else
return
end
end
ma2 = gpcf.magnSigma2;
l = gpcf.l;
% Compute the sparse distance matrix.
ntriplets = max(1,floor(0.03*n*n));
I = zeros(ntriplets,1);
J = zeros(ntriplets,1);
R = zeros(ntriplets,1);
ntriplets = 0;
ntripletsz = max(1,floor(0.03.^2*n*n));
Iz = zeros(ntripletsz,1);
Jz = zeros(ntripletsz,1);
ntripletsz = 0;
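  % Iz/Jz collect index pairs (i~=j) whose inputs coincide exactly (zero
  % distance); their covariance equals magnSigma2 and they are added
  % separately below.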
for ii1=1:n-1
d = zeros(n-ii1,1);
col_ind = ii1+1:n;
if isfield(gpcf, 'metric')
d = gpcf.metric.fh.dist(gpcf.metric, x(col_ind,:), x(ii1,:));
else
for ii2=1:m
d = d+s2(ii2).*(x(col_ind,ii2)-x(ii1,ii2)).^2;
end
end
%d = sqrt(d);
% store zero distance index
[I2z,J2z] = find(d==0);
% create triplets for distances 0<d<1
d(d >= 1) = 0;
[I2,J2,R2] = find(d);
len = length(R);
ntrip_prev = ntriplets;
ntriplets = ntriplets + length(R2);
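      % Grow the triplet buffers by doubling: assigning to index 2*len
      % extends the arrays with zeros.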
if (ntriplets > len)
I(2*len) = 0;
J(2*len) = 0;
R(2*len) = 0;
end
ind_tr = ntrip_prev+1:ntriplets;
I(ind_tr) = ii1+I2;
J(ind_tr) = ii1;
R(ind_tr) = sqrt(R2);
% create triplets for distances d==0 (i~=j)
lenz = length(Iz);
ntrip_prevz = ntripletsz;
ntripletsz = ntripletsz + length(I2z);
if (ntripletsz > lenz)
Iz(2*lenz) = 0;
Jz(2*lenz) = 0;
end
ind_trz = ntrip_prevz+1:ntripletsz;
Iz(ind_trz) = ii1+I2z;
Jz(ind_trz) = ii1;
end
% create a lower triangular sparse distance matrix from the triplets for distances 0<d<1
R = sparse(I(1:ntriplets),J(1:ntriplets),R(1:ntriplets),n,n);
% create a lower triangular sparse covariance matrix from the
% triplets for distances d==0 (i~=j)
Rz = sparse(Iz(1:ntripletsz),Jz(1:ntripletsz),repmat(ma2,1,ntripletsz),n,n);
% Find the non-zero elements of R.
[I,J,rn] = find(R);
% Compute covariances for distances 0<d<1
cs = max(0,1-rn);
C = ma2.*cs.^l;
% create a lower triangular sparse covariance matrix from the triplets for distances 0<d<1
C = sparse(I,J,C,n,n);
% add the lower triangular covariance matrix for distances d==0 (i~=j)
C = C + Rz;
% form a square covariance matrix and add the covariance matrix for i==j (d==0)
C = C + C' + sparse(1:n,1:n,ma2,n,n);
end
function C = gpcf_ppcs0_trvar(gpcf, x)
%GP_PPCS0_TRVAR Evaluate training variance vector
%
% Description
% C = GP_PPCS0_TRVAR(GPCF, TX) takes in covariance function of
% a Gaussian process GPCF and matrix TX that contains training
% inputs. Returns variance vector C. Every element i of C
% contains variance of input i in TX. This is a mandatory subfunction
% used for example in prediction and energy computations.
%
% See also
% GPCF_PPCS0_COV, GP_COV, GP_TRCOV
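  %
  % A minimal usage sketch (illustrative; assumes the structure's fh.trvar
  % handle points to this subfunction):
  %   C = gpcf.fh.trvar(gpcf, tx);  % n x 1 vector, each element equals magnSigma2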
  [n, m] = size(x);
C = ones(n,1).*gpcf.magnSigma2;
C(C<eps)=0;
end
function reccf = gpcf_ppcs0_recappend(reccf, ri, gpcf)
%RECAPPEND Record append
%
% Description
% RECCF = GPCF_PPCS0_RECAPPEND(RECCF, RI, GPCF) takes a
% covariance function record structure RECCF, record index RI
% and covariance function structure GPCF with the current MCMC
% samples of the parameters. Returns RECCF which contains all
% the old samples and the current samples from GPCF. This
% subfunction is needed when using MCMC sampling (gp_mc).
%
% See also
% GP_MC and GP_MC -> RECAPPEND
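  %
  % A minimal usage sketch (illustrative; follows the calling convention of
  % gp_mc, assuming fh.recappend points to this subfunction):
  %   reccf = gpcf.fh.recappend([], gpcf);         % initialize the record
  %   reccf = gpcf.fh.recappend(reccf, ri, gpcf);  % append sample at index ri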
if nargin == 2
% Initialize the record
reccf.type = 'gpcf_ppcs0';
reccf.nin = ri.nin;
    reccf.l = floor(reccf.nin/2)+1;  % exponent of the q=0 (PPCS0) piecewise polynomial
% cf is compactly supported
reccf.cs = 1;
% Initialize parameters
reccf.lengthScale= [];
reccf.magnSigma2 = [];
% Set the function handles
reccf.fh.pak = @gpcf_ppcs0_pak;
reccf.fh.unpak = @gpcf_ppcs0_unpak;
reccf.fh.e = @gpcf_ppcs0_lp;
reccf.fh.lpg = @gpcf_ppcs0_lpg;
reccf.fh.cfg = @gpcf_ppcs0_cfg;
reccf.fh.cov = @gpcf_ppcs0_cov;
reccf.fh.trcov = @gpcf_ppcs0_trcov;
reccf.fh.trvar = @gpcf_ppcs0_trvar;
reccf.fh.recappend = @gpcf_ppcs0_recappend;
reccf.p=[];
reccf.p.lengthScale=[];
reccf.p.magnSigma2=[];
if isfield(ri.p,'lengthScale') && ~isempty(ri.p.lengthScale)
reccf.p.lengthScale = ri.p.lengthScale;
end
if ~isempty(ri.p.magnSigma2)
reccf.p.magnSigma2 = ri.p.magnSigma2;
end
if isfield(ri, 'selectedVariables')
reccf.selectedVariables = ri.selectedVariables;
end
else
% Append to the record
gpp = gpcf.p;
if ~isfield(gpcf,'metric')
% record lengthScale
reccf.lengthScale(ri,:)=gpcf.lengthScale;
if isfield(gpp,'lengthScale') && ~isempty(gpp.lengthScale)
reccf.p.lengthScale = gpp.lengthScale.fh.recappend(reccf.p.lengthScale, ri, gpcf.p.lengthScale);
end
end
% record magnSigma2
reccf.magnSigma2(ri,:)=gpcf.magnSigma2;
if isfield(gpp,'magnSigma2') && ~isempty(gpp.magnSigma2)
reccf.p.magnSigma2 = gpp.magnSigma2.fh.recappend(reccf.p.magnSigma2, ri, gpcf.p.magnSigma2);
end
end
end