platform | repo_name | name | ext | path | size | source_encoding | md5 | text |
---|---|---|---|---|---|---|---|---|
github | jacksky64/imageProcessing-master | loso.m | .m | imageProcessing-master/Matlab PRTools/prtools_com/prtools/loso.m | 2,874 | utf_8 | ed1393dabddac9c12947c855895067f7 |
%LOSO Leave_One_Set_Out crossvalidation
%
% [E,C,D] = LOSO(A,CLASSF,LABLISTNAME)
% [E,C,D] = LOSO(A,CLASSF,SET_LABELS)
% [E,C,D] = LOSO(A,CLASSF,SET_LABELS,SET_LABLIST)
%
% INPUT
% A Dataset
% CLASSF Untrained classifier
% LABLISTNAME Name of label list in case of multiple labeling
% SET_LABELS Set of labels for objects in A
% SET_LABLIST Order and selection of labels in SET_LABELS
%
% OUTPUT
% E Classification error
% C Array with numbers of erroneously classified objects
% per label (vertically) and per class (horizontally)
% D Classification matrix of classified objects
% (order may be different from A)
%
% DESCRIPTION
% In crossvalidation it may be desired that sets of corresponding objects
% (e.g. pixels from the same image) are all together in the training set or
% in the test set. This might be enabled by adding an additional labeling to
% the dataset A (see ADDLABELS) corresponding to the sets and running LOSO
% with the corresponding LABLISTNAME.
% Alternatively, the set labels may be supplied in the call. In SET_LABLIST
% a ranking of the used labels can be supplied that will be used in C.
% In case SET_LABLIST does not contain all set labels used in SET_LABELS,
% LOSO will only test the set labels given in SET_LABLIST and thereby
% perform a reduced crossvalidation.
%
% The reported error E is identical to E = sum(C)./classsizes(D)*getprior(A)';
% A confusion matrix can be visualised by CONFMAT(D).
%
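% EXAMPLE
% % A hedged sketch (not part of the original header), assuming A carries
% % an extra label list 'image_id' grouping objects (e.g. pixels) by image:
% % A = addlabels(A,image_ids,'image_id');
% % [E,C] = loso(A,ldc,'image_id'); % leave-one-image-out error of LDC
%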
% SEE ALSO
% DATASETS, MAPPINGS, TESTC, CONFMAT
% Copyright: R.P.W. Duin, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
function [e,err,d] = loso(a,classf,set_lab,set_lablist)
if nargin < 4, testfun = testc; end
[curn,curlist] = curlablist(a); % present lablist number
if nargin == 3 & size(set_lab,1) == 1
if (isstr(set_lab) & strcmp(set_lab,curlist)) | set_lab == curn
error('The desired LOO set should differ from the present lablist')
end
lablistname = set_lab;
b = changelablist(a,lablistname);
elseif nargin == 3
b = addlabels(a,set_lab);
else
b = addlablist(a,set_lablist);
b = setnlab(b,renumlab(set_lab,set_lablist));
end
b = setlablist(b); % throw out empty sets
nset = getsize(b,3);
S = [1:nset 0];
c = getsize(a,3);
s = sprintf('Testing %i sets: ',nset);
prwaitbar(nset,s);
err = zeros(nset,c);
d = [];
N = 0;
for j=1:nset
prwaitbar(nset,j,[s int2str(j)]);
T = S;
T(j) = [];
x = changelablist(seldat(b,T),curlist);
y = changelablist(seldat(b,j),curlist);
N = N+size(y,1);
if ~isempty(y)
dd = y*(x*classf);
dd = dataset(dd);
[ee,err(j,:)] = testd(dd);
d = [d;dd];
end
end
prwaitbar(0);
e = sum(err)./classsizes(d);
e = e*getprior(a)';
|
github | jacksky64/imageProcessing-master | quadrc.m | .m | imageProcessing-master/Matlab PRTools/prtools_com/prtools/quadrc.m | 3,698 | utf_8 | 598d8de6ec7f0611bdec468bf41f2a22 |
%QUADRC Quadratic Discriminant Classifier
%
% W = QUADRC(A,R,S)
%
% INPUT
% A Dataset
% R,S 0 <= R,S <= 1, regularization parameters (default: R = 0, S = 0)
%
% OUTPUT
% W Quadratic Discriminant Classifier mapping
%
% DESCRIPTION
% Computation of the quadratic classifier between the classes of the dataset
% A assuming normal densities. R and S are regularization parameters used
% for finding the covariance matrix as
%
% G = (1-R-S)*G + R*diag(diag(G)) + S*mean(diag(G))*eye(size(G,1))
%
% NOTE
% This routine differs from QDC; instead of using the densities, it is based
% on the class covariances. The multi-class problem is solved by multiple
% two-class quadratic discriminants. It is, thereby, a quadratic equivalent
% of FISHERC.
%
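% EXAMPLE
% % A hedged sketch (not part of the original header):
% % A = gendatb([50 50]);
% % W = quadrc(A,0.05,0.05); % mildly regularized quadratic discriminant
% % testc(A*W)               % apparent classification error
%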
% SEE ALSO
% MAPPINGS, DATASETS, FISHERC, NMC, NMSC, LDC, UDC, QDC
% Copyright: R.P.W. Duin, [email protected]
% Faculty of Applied Sciences, Delft University of Technology
% P.O. Box 5046, 2600 GA Delft, The Netherlands
% $Id: quadrc.m,v 1.5 2010/02/08 15:29:48 duin Exp $
function w = quadrc(a,arg2,s)
prtrace(mfilename);
if (nargin < 3)
prwarning(5,'regularisation parameter S not given, assuming 0');
s = 0;
end
if (nargin < 2)
prwarning(5,'regularisation parameter R not given, assuming 0');
arg2 = 0;
end
if (nargin < 1) | (isempty(a))
r = arg2;
w = mapping(mfilename,{r,s});
w = setname(w,'Quadr');
return
end
[m,k,c] = getsize(a);
if (~isa(arg2,'mapping')) % Second argument is not a mapping: train.
islabtype(a,'crisp');
isvaldfile(a,2,2); % at least 2 objects per class, 2 classes
a = testdatasize(a,'features');
a = setprior(a,getprior(a));
r = arg2;
if (min(classsizes(a)) < 2)
error('Classes should contain more than one vector.')
end
if (c == 2)
% 2-class case: calculate quadratic discriminant parameters.
%p = getprior(a); pa = p(1); pb = p(2);
JA = findnlab(a,1); JB = findnlab(a,2);
ma = mean(a(JA,:)); mb = mean(a(JB,:));
GA = covm(a(JA,:)); GB = covm(a(JB,:));
GA = (1-r-s) * GA + r * diag(diag(GA)) + ...
s * mean(diag(GA))*eye(size(GA,1));
GB = (1-r-s) * GB + r * diag(diag(GB)) + ...
s*mean(diag(GB))*eye(size(GB,1));
DGA = det(GA); DGB = det(GB);
GA = prinv(GA);
GB = prinv(GB);
par1 = 2*ma*GA-2*mb*GB; par2 = GB - GA;
% If either covariance matrix is nearly singular, substitute FISHERC.
% Otherwise construct the mapping.
if (DGA <= 0) | (DGB <= 0)
prwarning(1,'Covariance matrix nearly singular, regularization needed; using FISHERC instead')
w = fisherc(a);
else
%par0 = (mb*GB*mb'-ma*GA*ma') + 2*log(pa/pb) + log(DGB) -log(DGA);
par0 = (mb*GB*mb'-ma*GA*ma') + log(DGB) -log(DGA);
w = mapping(mfilename,'trained',{par0,par1',par2},getlablist(a),k,2);
w = cnormc(w,a);
w = setname(w,'Quadr');
w = setcost(w,a);
end
else
% For C > 2 classes, recursively call this function, using MCLASSC.
pars = feval(mfilename,[],r,s);
w = mclassc(a,pars);
end
else
% Second argument is a trained mapping: test. Note that we can only
% have a 2-class case here. W's output will be [D, -D], as the distance
% of a sample to a class is the negative distance to the other class.
v = arg2; pars = getdata(v);
d = +sum((a*pars{3}).*a,2) + a*pars{2} + ones(m,1)*pars{1};
w = setdat(a,[d, -d],v);
end
return
function p = logdet(cova,covb)
%
% Numerically feasible computation of log(det(cova)/det(covb)))
% (seems not to help)
k = size(cova,1);
sa = mean(diag(cova));
sb = mean(diag(covb));
deta = det(cova./sa);
detb = det(covb./sb);
p = k*log(sa) - k*log(sb) + log(deta) - log(detb);
|
github | jacksky64/imageProcessing-master | linearr.m | .m | imageProcessing-master/Matlab PRTools/prtools_com/prtools/linearr.m | 1,247 | utf_8 | e2721f6121b81980f2a0812e62679997 |
%LINEARR Linear regression
%
% Y = LINEARR(X,LAMBDA,N)
%
% INPUT
% X Dataset
% LAMBDA Regularization parameter (default: no regularization)
% N Order of polynomial (optional)
%
% OUTPUT
% Y Linear (or higher order) regression
%
% DESCRIPTION
% Perform a linear regression on dataset X, with regularization
% parameter LAMBDA. When the polynomial order N is supplied, a
% higher-order polynomial regression is fitted.
%
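% EXAMPLE
% % A hedged sketch (not part of the original header), building a
% % regression dataset by hand via the dataset targets:
% % xd = (0:0.05:1)'; td = sin(2*pi*xd) + 0.1*randn(size(xd));
% % x = dataset(xd); x = settargets(x,td);
% % w = linearr(x,1e-3,3); % third-order fit with a small ridge term
% % y = x*w;               % predicted targets
%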
% SEE ALSO
% RIDGER, TESTR, PLOTR, VANDERMONDEM
% Copyright: D.M.J. Tax, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
function y = linearr(x,lambda,p)
if nargin<3
p = 1;
end
if nargin<2
lambda = [];
end
if nargin<1 | isempty(x)
y = mapping(mfilename,{lambda,p});
y = setname(y,'Linear regression');
return
end
if ~ismapping(lambda) %training
[n,d] = size(x);
X = +vandermondem(x,p);
if isempty(lambda)
beta = prinv(X'*X)*X'*gettargets(x);
else
dimp = size(X,2);
beta = prinv(X'*X + lambda*eye(dimp))*X'*gettargets(x);
end
W.beta = beta;
W.n = p;
y = mapping(mfilename,'trained',W,1,d,1);
y = setname(y,'Linear regression');
else
% evaluation
w = getdata(lambda);
out = vandermondem(x,w.n)*w.beta;
y = setdat(x,out);
end
|
github | jacksky64/imageProcessing-master | svc_nu.m | .m | imageProcessing-master/Matlab PRTools/prtools_com/prtools/svc_nu.m | 4,293 | utf_8 | 082db3155588a642885599a02947fefe |
%SVC_NU Support Vector Classifier: NU algorithm
%
% This routine is outdated, use NUSVC instead
%
% [W,J,C] = SVC_NU(A,TYPE,PAR,NU,MC,PD)
%
% INPUT
% A Dataset
% TYPE Type of the kernel (optional; default: 'p')
% PAR Kernel parameter (optional; default: 1)
% NU Regularization parameter (0 < NU < 1): expected fraction of SV
% (optional; default: max(leave-one-out 1-NN error,0.01))
%
% MC Whether or not to mean-center the data (optional; default: 1 (do))
% PD Whether or not to check positive definiteness (optional; default: 1 (do))
%
% OUTPUT
% W Mapping: Support Vector Classifier
% J Object identifiers of support objects
% C Equivalent C regularization parameter of SVM-C algorithm
%
% DESCRIPTION
% Optimizes a support vector classifier for the dataset A by
% quadratic programming. The classifier can be of one of the types
% as defined by PROXM. Default is linear (TYPE = 'p', PAR = 1). In J
% the identifiers of the support objects in A are returned.
%
% NU belongs to the interval (0,1). NU close to 1 allows for more class
% overlap. By default NU is set to the leave-one-out 1-NN error of A,
% bounded from below by 0.01.
%
% NU is bounded from above by NU_MAX = (1 - ABS(Lp-Lm)/(Lp+Lm)), where
% Lp (Lm) is the number of positive (negative) samples. If NU > NU_MAX is
% supplied to the routine, it will be changed to NU_MAX.
%
% If NU is less than some NU_MIN, which depends on the overlap between the
% classes, the algorithm will typically take a long time to converge (if it
% converges at all). It is therefore advisable to set NU larger than the
% expected class overlap.
%
% The output is rescaled as if it were returned by SVC with regularization
% parameter C.
%
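% EXAMPLE
% % A hedged sketch (not part of the original header); kernel codes follow
% % PROXM ('r' = radial basis):
% % A = gendatb([50 50]);
% % [W,J] = svc_nu(A,'r',1,0.1); % RBF kernel, width 1, NU = 0.1
% % length(J)                    % number of support objects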
%
% SEE ALSO
% SVO_NU, SVO, SVC, MAPPINGS, DATASETS, PROXM
% Copyright: S.Verzakov, [email protected]
% Based on SVC.M by D.M.J. Tax, D. de Ridder, R.P.W. Duin
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
% $Id: svc_nu.m,v 1.6 2009/01/31 18:43:11 duin Exp $
function [W, J, C] = svc_nu(a,type,par,nu,mc,pd)
prtrace(mfilename);
warning('SVC_NU is outdated, use NUSVC instead')
if nargin < 2 | ~isa(type,'mapping')
if nargin < 6
pd = 1;
end
if nargin < 5
mc = 1;
end
if nargin < 4 | isempty(nu)
nu = [];
prwarning(3,'Regularization parameter nu set to NN error\n');
end
if nargin < 3 | isempty(par)
par = 1;
prwarning(3,'Kernel parameter par set to 1\n');
end
if nargin < 2 | isempty(type)
type = 'p';
prwarning(3,'Polynomial kernel type is used\n');
end
if nargin < 1 | isempty(a)
W = mapping(mfilename,{type,par,nu,mc,pd});
W = setname(W,'Support Vector Classifier (nu version)');
return;
end
islabtype(a,'crisp');
isvaldfile(a,1,2); % at least 1 object per class, 2 classes
a = testdatasize(a,'objects');
[m,k,c] = getsize(a);
nlab = getnlab(a);
if isempty(nu), nu = max(testk(a,1),0.01); end
% The SVC is basically a 2-class classifier. More classes are
% handled by mclassc.
if c == 2 % two-class classifier
% Compute the parameters for the optimization:
y = 3 - 2*nlab;
if mc
u = mean(a);
a = a -ones(m,1)*u;
else
u = [];
end
K = a*proxm(a,type,par);
% Perform the optimization:
[v,J,C] = svo_nu(+K,y,nu,pd);
% Store the results:
W = mapping(mfilename,'trained',{u,a(J,:),v,type,par},getlablist(a),k,2);
%W = cnormc(W,a);
W = setname(W,'Support Vector Classifier (nu version)');
W = setcost(W,a);
J = getident(a,J);
%J = a.ident(J);
else % multi-class classifier:
[W,J,C] = mclassc(a,mapping(mfilename,{type,par,nu,mc,pd}),'single');
end
else % execution
nodatafile(a);
w = +type;
m = size(a,1);
% The first parameter w{1} stores the mean of the dataset. When it
% is supplied, remove it from the dataset to improve the numerical
% precision. Then compute the kernel matrix using proxm:
if isempty(w{1})
d = a*proxm(w{2},w{4},w{5});
else
d = (a-ones(m,1)*w{1})*proxm(w{2},w{4},w{5});
end
% Data is mapped by the kernel, now we just have a linear
% classifier w*x+b:
d = [d ones(m,1)] * w{3};
d = sigm([d -d]);
W = setdat(a,d,type);
end
return;
|
github | jacksky64/imageProcessing-master | fontsize.m | .m | imageProcessing-master/Matlab PRTools/prtools_com/prtools/fontsize.m | 721 | utf_8 | 3e26a7fb31b61b0d6d71d9b18153a75c |
%FONTSIZE Set large graphic font
%
% fontsize(fsize)
%
% Set font size FSIZE for the axes, labels, titles and text children of
% the current figure.
function fontsize(fsize)
V = axis;
H = get(gcf,'Children');
c1 = [];
for h = H'
if strcmp(get(h,'type'),'axes')
set(get(h,'XLabel'), 'FontSize', fsize);
set(get(h,'YLabel'), 'FontSize', fsize);
set(get(h,'Title'), 'FontSize', fsize);
set(h, 'FontSize', fsize);
c1 = [c1; get(h, 'Children')]; % children of this axes (h), not of gca
end
end
axis(V);
for h1 = c1'
v1 = get(h1);
if (isfield(v1, 'FontSize'))
set(h1, 'FontSize', fsize);
end
c2 = get(h1, 'Children');
for h2 = c2'
v2 = get(h2);
if (isfield(v2, 'FontSize'))
set(h2, 'FontSize', fsize);
end
end
end
return
|
github | jacksky64/imageProcessing-master | featsetcc.m | .m | imageProcessing-master/Matlab PRTools/prtools_com/prtools/featsetcc.m | 288 | utf_8 | a053906fa41052ac6d97103075e07f84 |
%FEATSETCC Feature set combining classifier
% Copyright: R.P.W. Duin, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
function [dset,id] = featsetcc(dobj,combc)
error('featsetcc has been replaced by bagcc')
|
github | jacksky64/imageProcessing-master | mclassc.m | .m | imageProcessing-master/Matlab PRTools/prtools_com/prtools/mclassc.m | 3,670 | utf_8 | a230d0335c0818ab2435633ba7d33997 |
%MCLASSC Computation of multi-class classifier from 2-class discriminants
%
% W = MCLASSC(A,CLASSF,MODE)
%
% INPUT
% A Dataset
% CLASSF Untrained classifier
% MODE Type of handling multi-class problems (optional; default: 'single')
%
% OUTPUT
% W Combined classifier
%
% DESCRIPTION
% For default MODE = 'single', the untrained classifier CLASSF is called to
% compute C classifiers between each of the C classes in the dataset A and
% the remaining C-1 classes. The result is stored into the combined
% classifier W.
%
% For MODE = 'multi' the untrained classifier CLASSF is trained between all
% pairs of classes as well as between each class and all other classes.
% The resulting set of classifiers is combined by MINC. The use of soft
% labels is supported.
%
% EXAMPLES
% W = MCLASSC(GENDATM(100),QDC,'MULTI');
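% % A hedged one-vs-rest sketch (not part of the original header):
% % V = MCLASSC(GENDATM(100),LDC,'SINGLE');
% % LABELD(GENDATM(20)*V) % classify new data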
%
% SEE ALSO
% DATASETS, MAPPINGS, MINC.
% Copyright: R.P.W. Duin, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
% $Id: mclassc.m,v 1.9 2009/08/18 23:09:01 duin Exp $
function [w,varargout] = mclassc(a,classf,mode)
prtrace(mfilename);
varargout = repmat({[]},[1, max((nargout-1),0)]);
if nargin < 3, mode = 'single'; end
if nargin < 2, classf = []; end
if nargin < 1 | isempty(a)
%fixed w=mclassc(dataset,ldc)
w = mapping(mfilename,{classf,mode});
return
end
if ~isa(classf,'mapping') | ~isuntrained(classf)
error('Second parameter should be untrained mapping')
end
islabtype(a,'crisp','soft');
isvaldfile(a,1,2); % at least 1 object per class, 2 classes
[m,k,c] = getsize(a);
if c == 2
[w,varargout] = map(a,classf);
return
end
varout = {};
lablist = getlablist(a);
s = sprintf('Multi-class: %4i classifiers: ',c);
prwaitbar(c,s);
switch mode
case 'single'
w = [];
% lablist = getlablist(a);
for i=1:c
prwaitbar(c,i,[s int2str(i)]);
if islabtype(a,'crisp')
mlab = 2 - (getnlab(a) == i);
aa = setlabels(a,mlab);
aa = remclass(aa); % remove empty classes
%aa = setnlab(a,mlab);
%aa = setlablist(aa,[1 2]');
if ~isempty(a.prior)
aa = setprior(aa,[a.prior(i),1-a.prior(i)]');
end
elseif islabtype(a,'soft')
atargets = gettargets(a);
targets = [atargets(:,i) 1-atargets(:,i)]; % assumes soft labels sum to one
aa = dataset(+a,mlab,targets,'lablist',[1 2]');
end
varo = varargout;
[v,varo{:}] = map(aa,classf);
varout = [varout; varo];
w = [w,setlabels(v(:,1),lablist(i,:))];
end
case 'multi'
w = [];
nclassf = 0;
nlab = getnlab(a);
for i1=1:c
prwaitbar(c,i1,[s int2str(i1)]);
lab = lablist(i1,:);
J1 = find(nlab==i1);
if islabtype(a,'crisp')
mlab = ones(m,1);
mlab(J1) = zeros(length(J1),1);
aa = setlabels(a,mlab);
aa = remclass(aa); % remove empty classes
else
problab = gettargets(a);
mlab = [problab(:,i1) sum(problab,2)-problab(:,i1)];
aa = settargets(a,mlab,[1 2]');
end
I1 = [1:c]; I1(i1) = [];
varo = varargout;
[v,varo{:}] = map(aa,classf);
varout = [varout; varo];
w = [w,setlabels(v(:,1),lab)];
for i2 = I1
if islabtype(a,'crisp')
J2 = find(nlab==i2);
v = aa([J1;J2],:)*classf;
else
mlab2 = problab(:,[i1 i2]);
v = setlabels(aa,mlab2)*classf;
end
w = [w,setlabels(v(:,1),lab)];
nclassf = nclassf+1;
end
end
w = minc(w);
otherwise
error('Unknown mode')
end
prwaitbar(0);
w = setname(w,getname(classf));
w = setsize(w,[k,c]);
w = setcost(w,a);
if ~isempty(varout)
varargout = num2cell(varout',2)';
end
return
|
github | jacksky64/imageProcessing-master | disperror.m | .m | imageProcessing-master/Matlab PRTools/prtools_com/prtools/disperror.m | 2,483 | utf_8 | aa9422a1321ed41f48c803488f043e98 |
%DISPERROR Display error matrix with information on classifiers and datasets
%
% DISPERROR(DATA,CLASSF,ERROR,STD,FID)
%
% INPUT
% DATA Cell array of M datasets or dataset names (strings)
% CLASSF Cell array of N mappings or mapping names (strings)
% ERROR M*N matrix of (average) error estimates
% STD M*N matrix of standard deviations on ERROR (optional)
% FID File in which results are written (default: 1)
% OUTPUT
%
% DESCRIPTION
% Displays the ERROR matrix with error estimates for N
% classifiers related to M datasets. This routine is called by TESTC
% and CROSSVAL to display results.
%
% EXAMPLE
% testsets = {gendath gendatb gendatd(100,5)}
% trainsets = {gendath gendatb gendatd(100,5)}
% classifiers = {nmc fisherc qdc svc}
% testc(testsets,map(trainsets,classifiers))
%
% SEE ALSO
% MAPPINGS, DATASETS, TESTC, CROSSVAL
% $Id: disperror.m,v 1.3 2007/06/05 12:43:35 duin Exp $
function disperror (data,classf,err,stdev,fid)
if nargin < 5, fid = 1; end
% Check arguments.
if (nargin > 3) & (any(size(err) ~= size(stdev)))
error('size of matrix with standard deviations should match matrix with errors')
end
if (~iscell(classf)) | (~isstr(classf{1}) & ~ismapping(classf{1}))
error('cell array of mappings or mapping names expected')
end
if (~iscell(data)) | (~isstr(data{1}) & ~isdataset(data{1}))
error('cell array of datasets or datasets names expected')
end
[m,n] = size(err);
if (length(data) ~= m)
error('size of dataset cell array should equal number of rows in error matrix');
end
if (length(classf) ~= n)
error('size of classifier cell array should equal number of columns in error matrix');
end
% If datasets are supplied, extract their names.
for j = 1:m
if (isdataset(data{j}))
data{j} = getname(data{j},20);
end
end
% If classifiers are supplied, extract their names.
for j = 1:n
if (ismapping(classf{j}))
classf{j} = getname(classf{j});
end
end
if (n == 1)
fprintf(fid,' %s \n\n',classf{1});
else
fprintf(fid,'\n');
for i = 1:n
fprintf(fid,'\n clsf_%i : %s',i,classf{i});
end
fprintf(fid,'\n\n ');
for i = 1:n
fprintf(fid,' clsf_%i',i);
end
fprintf(fid,'\n\n');
end
for j = 1:m
fprintf(fid,' %s',data{j});
fprintf(fid,' %7.3f',err(j,:));
if (nargin > 3)
fprintf(fid,'\n ');
fprintf(fid,' %7.3f',stdev(j,:));
fprintf(fid,'\n');
end
fprintf(fid,'\n');
end
fprintf(fid,'\n');
return
|
github | jacksky64/imageProcessing-master | parzendc.m | .m | imageProcessing-master/Matlab PRTools/prtools_com/prtools/parzendc.m | 3,112 | utf_8 | 5499152fa3da34991b55b84c764f1a56 |
%PARZENDC Parzen density based classifier
%
% [W,H] = PARZENDC(A)
% W = PARZENDC(A,H)
%
% INPUT
% A Dataset
% H Smoothing parameters (optional; default: estimated from A for each class)
%
% OUTPUT
% W Trained Parzen classifier
% H Smoothing parameters, estimated from the data
%
% DESCRIPTION
% For each of the classes in the dataset A, a Parzen density is estimated
% using PARZENML. For each class, a feature normalisation on variance is
% included in the procedure. As a result, the Parzen density estimate uses
% different smoothing parameters for each class and each feature.
%
% If a set of smoothing parameters H is specified, no learning is performed,
% only the classifier W is produced. H should have the size of [C x K] if
% A has C classes and K features. If the size of H is [1 x K] or [C x 1],
% or [1 x 1], then identical values are assumed for all the classes and/or
% features.
%
% The densities for the points of a dataset B can be found by D = B*W.
% D is an [M x C] dataset, if B has M objects.
%
% EXAMPLES
% See PREX_DENSITY.
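% % A hedged sketch (not part of the original header):
% % W = parzendc(gendatb); D = gendatb([10 10])*W;
% % +D % estimated class densities for the new objects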
%
% SEE ALSO
% DATASETS, MAPPINGS, PARZENC, PARZEN_MAP, PARZENML
% Copyright: R.P.W. Duin, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
% $Id: parzendc.m,v 1.7 2008/07/03 09:11:44 duin Exp $
function [W,h] = parzendc(a,h)
prtrace(mfilename);
if nargin < 2
prwarning(5,'Smoothing parameters not specified, estimated from the data.');
h = [];
end
% No input arguments: return an untrained mapping.
if nargin == 0 | isempty(a)
W = mapping(mfilename,h);
W = setname(W,'Parzen Classifier');
return;
end
islabtype(a,'crisp','soft');
isvaldfile(a,2,2); % at least 2 objects per class, 2 classes
a = testdatasize(a);
a = testdatasize(a,'objects');
[m,k,c] = getsize(a);
nlab = getnlab(a);
if ~isempty(h) % Take user settings for smoothing parameters.
if size(h,1) == 1, h = repmat(h,c,1); end
if size(h,2) == 1, h = repmat(h,1,k); end
if any(size(h) ~= [c,k])
error('Array with smoothing parameters has a wrong size.');
end
else % Estimate smoothing parameters
% Scale A such that its mean is shifted to the origin and
% the variances of all features are scaled to 1.
ws = scalem(a,'variance');
b = a*ws;
% SCALE is basically [1/mean(A) 1/STD(A)] based on the properties of SCALEM.
scale = ws.data.rot;
if (size(scale,1) ~= 1) % formally ws.data.rot stores a rotation matrix
scale = diag(scale)'; % extract the diagonal if it does,
end % otherwise we already have it
h = zeros(c,k);
if islabtype(a,'crisp')
s = sprintf('parzendc: smoothing per class ');
prwaitbar(c,s);
for j=1:c
prwaitbar(c,j,[s int2str(j)]);
bb = seldat(b,j); % BB consists of the j-th class only.
h(j,:) = repmat(parzenml(bb),1,k)./scale;
end
prwaitbar(0);
elseif islabtype(a,'soft')
h = parzenml(a);
end
end
W = mapping('parzen_map','trained',{a,h,getprior(a)},getlablist(a),k,c);
W = setname(W,'Parzen Classifier');
W = setcost(W,a);
return;
|
github | jacksky64/imageProcessing-master | ksmoothr.m | .m | imageProcessing-master/Matlab PRTools/prtools_com/prtools/ksmoothr.m | 1,034 | utf_8 | 1bb121254911d38d3aed55d5c30bd04d |
%KSMOOTHR Kernel smoother
%
% W = KSMOOTHR(X,H)
%
% INPUT
% X Regression dataset
% H Width parameter (default H=1)
%
% OUTPUT
% W Kernel smoother mapping
%
% DESCRIPTION
% Train a kernel smoother W on data X, with width parameter H.
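%
% EXAMPLE
% % A hedged sketch (not part of the original header):
% % xd = (0:0.1:2)'; td = sin(xd) + 0.05*randn(size(xd));
% % x = dataset(xd); x = settargets(x,td);
% % w = ksmoothr(x,0.2); % Gaussian kernel smoother, width 0.2
% % y = x*w;             % smoothed predictions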
%
% SEE ALSO
% KNNR, TESTR, PLOTR
% Copyright: D.M.J. Tax, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
function y = ksmoothr(x,h)
if nargin<2
h = 1;
end
if nargin<1 | isempty(x)
y = mapping(mfilename,{h});
y = setname(y,'Kernel smoother');
return
end
if ~ismapping(h) %training: just store the training data
[n,d] = size(x);
W.x = +x;
W.y = gettargets(x);
W.h = h;
y = mapping(mfilename,'trained',W,1,d,1);
y = setname(y,'Kernel smoother');
else
% evaluation
W = getdata(h);
[n,d] = size(x);
m = size(W.x,1);
xtst = +x;
gamma = -1/(W.h*W.h); % tiny speedup
% now go through all test data:
y = zeros(n,1);
K = exp(gamma*distm(xtst,W.x));
y = (K*W.y)./sum(K,2);
y = setdat(x,y);
end
|
github | jacksky64/imageProcessing-master | isparallel.m | .m | imageProcessing-master/Matlab PRTools/prtools_com/prtools/isparallel.m | 727 | utf_8 | c236c6aaf876afb62259dc6dea58e2a5 |
%ISPARALLEL Test on parallel mapping
%
% N = ISPARALLEL(W)
% ISPARALLEL(W)
%
% INPUT
% W input mapping
%
% OUTPUT
% N logical value
%
% DESCRIPTION
% Returns true for parallel mappings. If no output is required,
% false outputs are turned into errors. This may be used for
% assertion.
%
% SEE ALSO
% ISMAPPING, ISSTACKED
% $Id: isparallel.m,v 1.2 2006/03/08 22:06:58 duin Exp $
function n = isparallel(w)
prtrace(mfilename);
if isa(w,'mapping') & strcmp(w.mapping_file,'parallel')
n = 1;
else
n = 0;
end
% generate error if input is not a parallel mapping
% AND no output is requested (assertion)
if nargout == 0 & n == 0
error([newline '---- Parallel mapping expected -----'])
end
return
|
github | jacksky64/imageProcessing-master | gencirc.m | .m | imageProcessing-master/Matlab PRTools/prtools_com/prtools/gencirc.m | 1,003 | utf_8 | b95f991f81ebe9c78ff8cf68f51694dd |
%GENCIRC Generation of a one-class circular dataset
%
% A = GENCIRC(N,S)
%
% INPUT
% N Size of dataset (optional; default: 50)
% S Standard deviation (optional; default: 0.1)
%
% OUTPUT
% A Dataset
%
% DESCRIPTION
% Generation of a uniformly distributed one-class 2D circular
% dataset with radius 1 superimposed with 1D normally distributed
% radial noise with standard deviation S. N points are generated.
% Defaults: N = 50, S = 0.1.
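%
% EXAMPLE
% % A hedged sketch (not part of the original header):
% % a = gencirc(200,0.05); scatterd(a) % noisy unit circle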
%
% SEE ALSO
% DATASETS, PRDATASETS
% Copyright: R.P.W. Duin, [email protected]
% Faculty of Applied Sciences, Delft University of Technology
% P.O. Box 5046, 2600 GA Delft, The Netherlands
% $Id: gencirc.m,v 1.2 2006/03/08 22:06:58 duin Exp $
function a = gencirc(n,s)
prtrace(mfilename);
if nargin < 1, n = 50; end
if nargin < 2, s = 0.1; end
if (length(s) > 1)
error('Standard deviation should be scalar')
end
alf = rand(n,1)*2*pi;
r = ones(n,1) + randn(n,1)*s;
a = [r.*sin(alf),r.*cos(alf)];
a = dataset(a);
return;
|
github | jacksky64/imageProcessing-master | averagec.m | .m | imageProcessing-master/Matlab PRTools/prtools_com/prtools/averagec.m | 1,493 | utf_8 | 30ebbb9c4bcf1c7042173d6357e742ef |
%AVERAGEC Combining of linear classifiers by averaging coefficients
%
% W = AVERAGEC(V)
% W = V*AVERAGEC
%
% INPUT
% V A set of affine base classifiers.
%
% OUTPUT
% W Combined classifier.
%
% DESCRIPTION
% If V = [V1,V2,V3, ... ] is a set of affine classifiers trained on the same
% classes, then W is the average combiner: it averages the coefficients of the
% base classifiers, resulting in a new affine classifier. This may also be
% used as A*[V1,V2,V3]*AVERAGEC, in which A is a dataset to be classified.
%
% The base classifiers may be combined in a stacked way (operating in the same
% feature space) by V = [V1,V2,V3, ... ] or in a parallel way (operating in
% different feature spaces) by V = [V1;V2;V3; ... ].
%
% SEE ALSO
% MAPPINGS, DATASETS, VOTEC, MAXC, MINC, MEDIANC, PRODC, AVERAGEC, STACKED,
% PARALLEL
%
% EXAMPLES
% See PREX_COMBINING.
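% % A hedged sketch (not part of the original header):
% % A = gendath; V = [A*nmc A*fisherc A*ldc];
% % testc(A*(V*averagec)) % error of the coefficient-averaged classifier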
% Copyright: R.P.W. Duin, [email protected]
% Faculty of Applied Sciences, Delft University of Technology
% P.O. Box 5046, 2600 GA Delft, The Netherlands
% $Id: averagec.m,v 1.2 2006/03/08 22:06:58 duin Exp $
function w = averagec (p1)
% The average combiner is constructed as a fixed combiner (FIXEDCC) of
% type 'average'.
type = 'average'; name = 'Average combiner';
% Possible calls: AVERAGEC, AVERAGEC(W) or AVERAGEC(A,W).
if (nargin == 0)
w = mapping('fixedcc','combiner',{[],type,name});
else
w = fixedcc(p1,[],type,name);
end
if (isa(w,'mapping'))
w = setname(w,name);
end
return
|
github | jacksky64/imageProcessing-master | perlc.m | .m | imageProcessing-master/Matlab PRTools/prtools_com/prtools/perlc.m | 3,940 | utf_8 | ab6cd9ecba1cb940276cbd81aea4b8b0 |
% PERLC - Train a linear perceptron classifier
%
% W = PERLC(A)
% W = PERLC(A,MAXITER,ETA,W_INI,TYPE)
%
% INPUT
% A Training dataset
% MAXITER Maximum number of iterations (default 100)
% ETA Learning rate (default 0.1)
% W_INI Initial weights, as affine mapping, e.g. W_INI = NMC(A)
% (default: random initialisation)
% TYPE 'batch': update by batch processing (default)
% 'seq' : update sequentially
%
% OUTPUT
% W Linear perceptron classifier mapping
%
% DESCRIPTION
% Outputs a perceptron W trained on dataset A using learning rate ETA for a
% maximum of MAXITER iterations (or until convergence).
%
% If ETA is NaN it is optimised by REGOPTC.
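%
% EXAMPLE
% % A hedged sketch (not part of the original header):
% % A = gendatd([30 30]);
% % W = perlc(A,500,0.01); % at most 500 batch iterations, ETA = 0.01
% % testc(A*W)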
%
% SEE ALSO
% DATASETS, MAPPINGS, NMC, FISHERC, BPXNC, LMNC, REGOPTC
% Copyright: D. de Ridder, R.P.W. Duin, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
% $Id: perlc.m,v 1.6 2008/07/03 09:11:44 duin Exp $
function w = perlc (a, maxiter, eta, w_ini, type)
prtrace(mfilename);
if (nargin < 5 | isempty(type))
type = 'batch';
end
if (nargin < 4 | isempty(w_ini))
prwarning(3,'No initial weights W_INI supplied, using random initialization');
w_ini = [];
end
if (nargin < 3 | isempty(eta))
prwarning(3,'Learning rate ETA not specified, assuming 0.1');
eta = 0.1;
end
if (nargin < 2 | isempty(maxiter))
prwarning(3,'Maximum number of iterations not specified, assuming 100');
maxiter = 100;
end
if (nargin < 1) | (isempty(a))
w = mapping(mfilename,{maxiter,eta,w_ini});
w = setname(w,'Linear Perceptron');
return
end
if isnan(eta) % optimize regularisation parameter
defs = {100,0.1,[],'batch'};
parmin_max = [0,0;1e-6,0.9;0,0;0,0];
w = regoptc(a,mfilename,{maxiter, eta, w_ini, type},defs,[2],parmin_max,testc([],'soft'),1);
return
end
% Unpack the dataset.
islabtype(a,'crisp');
isvaldfile(a,1,2); % at least 1 object per class, 2 classes
[m,k,c] = getsize(a);
nlab = getnlab(a);
% PERLC is basically a 2-class classifier. More classes are
% handled by mclassc.
if c == 2 % two-class classifier
ws = scalem(a,'variance');
a = a*ws;
% Add a column of 1's for the bias term.
Y = [+a ones(m,1)];
% Initialise the WEIGHTS with a small random uniform distribution,
% or with the specified affine mapping.
if isempty(w_ini)
weights = 0.02*(rand(k+1,c)-0.5);
else
isaffine(w_ini);
weights = [w_ini.data.rot;w_ini.data.offset];
end
converged = 0; iter = 0;
s = sprintf('perlc, %i iterations: ',maxiter);
prwaitbar(maxiter,s,m*k>100000);
while (~converged)
% Find the maximum output for each sample.
[maxw,ind] = max((Y*weights)');
changed = 0;
if (strcmp(type,'batch'))
% Update for all incorrectly classified samples simultaneously.
changed = 0;
for i = 1:m
if (ind(i) ~= nlab(i))
weights(:,nlab(i)) = weights(:,nlab(i)) + eta*Y(i,:)';
weights(:,ind(i)) = weights(:,ind(i)) - eta*Y(i,:)';
changed = 1;
end;
end;
iter = iter+1;
else
% update for the worst classified object only
J = find(ind' ~= nlab);
if ~isempty(J)
[dummy,imax] = min(maxw(J)); i = J(imax);
weights(:,nlab(i)) = weights(:,nlab(i)) + eta*Y(i,:)';
weights(:,ind(i)) = weights(:,ind(i)) - eta*Y(i,:)';
iter = iter+1;
changed = 1;
end;
end
% Continue until things stay the same or until MAXITER iterations.
converged = (~changed | iter >= maxiter);
prwaitbar(maxiter,iter,[s int2str(iter)]);
end
prwaitbar(0);
% Build the classifier
w = ws*affine(weights(1:k,:),weights(k+1,:),a);
w = cnormc(w,a);
w = setlabels(w,getlablist(a));
else % multi-class classifier:
w = mclassc(a,mapping(mfilename,{maxiter,eta,w_ini}));
end
return
|
github | jacksky64/imageProcessing-master | gpr.m | .m | imageProcessing-master/Matlab PRTools/prtools_com/prtools/gpr.m | 1,399 | utf_8 | ad5febfcbb81308a317595040c2500c5 |
%GPR Gaussian Process regression
%
% W = GPR(A,KERNEL,S_noise)
%
%INPUT
% A Dataset
% KERNEL Untrained mapping to compute kernel by A*(A*KERNEL)
% during training, or B*(A*KERNEL) during evaluation with
% dataset B
% S_noise Standard deviation of the noise
%
%OUTPUT
% W Mapping: Gaussian Process regression
%
%DESCRIPTION
%Fit a Gaussian Process regressor on dataset A. For a nonlinear regressor,
%define kernel mapping KERNEL. For kernel definitions, have a look at
%proxm.m.
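%
%EXAMPLE
% A hedged sketch (not part of the original header), using an RBF kernel
% via PROXM:
%   xd = (0:0.1:2)'; td = sin(xd) + 0.1*randn(size(xd));
%   x = dataset(xd); x = settargets(x,td);
%   w = gpr(x,proxm([],'r',0.5),0.1); % radial basis kernel, noise std 0.1
%   y = x*w;                          % predictions for x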
%
%SEE ALSO
% svmr, proxm, linearr, testr, plotr
% Copyright: D.M.J. Tax, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
function y = gpr(x,kernel,s_noise)
if nargin<3
s_noise = 1;
end
if nargin<2
kernel = proxm([],'p',1);
end
if nargin<1 || isempty(x)
y = mapping(mfilename,{kernel,s_noise});
y = setname(y,'Gaussian Proc. regression');
return
end
if ~ismapping(kernel) || ~istrained(kernel) %training
[n,d] = size(x);
W.X = [+x ones(n,1)];
% train:
W.K = W.X*kernel;
L = chol(+(W.X*W.K) + s_noise*s_noise*eye(n));
W.w = L\(L'\gettargets(x));
% store:
W.kernel = kernel;
y = mapping(mfilename,'trained',W,1,d,1);
y = setname(y,'Gaussian Proc. regression');
else
% evaluation
W = getdata(kernel);
out = [+x ones(size(x,1),1)]*W.K*W.w;
y = setdat(x,out);
end
|
github | jacksky64/imageProcessing-master | rejectm.m | .m | imageProcessing-master/Matlab PRTools/prtools_com/prtools/rejectm.m | 1,951 | utf_8 | fa3364ba0a378a5cb9db889c924a2b4c |
%REJECTM Rejection mapping
%
% W = REJECTM(A,FRAC)
%
% DESCRIPTION
% Train the threshold of a rejection mapping W such that a fraction FRAC
% of the training data A is rejected. Dataset A is usually the output of
% a classifier. The mapping REJECTM will add one extra reject class.
%
% W = REJECTM(A,FRAC,REJNAME)
%
% If desired, the rejected objects will be labeled REJNAME. Default is
% REJNAME = 'reject'.
%
% EXAMPLES
% A = GENDATB; % create trainingset
% W = LDC(A); % create supervised classifier
% WR = REJECTM(A*W,0.05); % reject 5% of the data
% SCATTERD(A); PLOTC(W*WR); % show
%
% SEE ALSO
% REJECT, ROC, PLOTE
% Copyright: D.M.J. Tax, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
function w = rejectm(a,thr,rejname)
if nargin<3
rejname = 'reject';
end
if nargin<2
thr = 0.05;
end
if nargin<1 | isempty(a)
w = mapping(mfilename,{thr,rejname});
w = setname(w,'rejection mapping');
return
end
if ~ismapping(thr) %training
[n,k,c] = getsize(a);
% add the new outlier class to the lablist
newll = getlablist(a);
if isa(newll,'double')
%newll = [newll; max(newll)+1];
if nargin>2 & isa(rejname,'char')
warning('Labels are numeric, but a string reject label was supplied.');
end
newll = [newll; rejname];
else
newll = char(newll,rejname);
end
% find the 'winning' class
maxa = max(+a,[],2);
% sort the posteriors for all of the classes:
sa = sort(maxa);
% find the thr-percentile and use that as a threshold:
fracn = max(ceil(thr*n),1);
thr = sa(fracn);
% Store the threshold:
W.thr = thr;
W.c = c+1;
w = mapping(mfilename,'trained',W,newll,k,c+1);
w = setname(w,'rejection mapping');
else % evaluation
W = getdata(thr);
m = size(a,1);
% just add an extra reject-class, that will have the constant
% threshold output:
newout = [a repmat(W.thr,m,1)];
w = setdat(a,newout,thr);
end
return
|
github | jacksky64/imageProcessing-master | testp.m | .m | imageProcessing-master/Matlab PRTools/prtools_com/prtools/testp.m | 2,691 | utf_8 | b23721ebac675abf1169567eb38380a6 |
%TESTP Error estimation of Parzen classifier
%
% E = TESTP(A,H,T)
% E = TESTP(A,H)
%
% INPUT
% A input dataset
% H matrix of smoothing parameters (optional, def: determined via
% parzenc)
% T test dataset (optional)
%
% OUTPUT
% E estimated error rate
%
% DESCRIPTION
% Tests dataset T on dataset A using Parzen classification and returns
% the classification error E. If T is not supplied, the leave-one-out
% error estimate of A is returned. If H is not given, it is determined
% by PARZENC.
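%
% EXAMPLE
% % A hedged sketch (not part of the original header):
% % A = gendatb([50 50]);
% % e = testp(A); % leave-one-out Parzen error, H estimated by PARZENC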
%
% SEE ALSO
% DATASETS, MAPPINGS, PARZEN_MAP, PARZENML, PARZENC.
% Copyright: R.P.W. Duin, [email protected]
% Faculty of Applied Physics, Delft University of Technology
% P.O. Box 5046, 2600 GA Delft, The Netherlands
% may be merged with parzen_map at some point; see also testk
% $Id: testp.m,v 1.4 2009/09/08 21:27:51 duin Exp $
function [e,d] = testp(a,h,t)
prtrace(mfilename);
isvaldfile(a,2,2);
a = testdatasize(a);
a = testdatasize(a,'objects');
if nargin < 2, [W,h] = parzenc(a); end
[m,k,c] = getsize(a);
nlab = getnlab(a);
lablist = getlablist(a);
p = getprior(a);
if length(h) == 1, h = h*ones(1,c); end
if length(h) ~= c, error('Wrong number of smoothing parameters'); end
% if no test dataset is specified
if nargin <= 2
% find for each sample cross-validated estimate
% of aposteriori probability.
d = classp(a,nlab,h,p);
[dmax,J] = max(d',[],1);
e = nlabcmp(lablist(J,:),lablist(nlab,:)) / m;
% if the validation dataset is given
elseif nargin == 3
lablistt = getlablist(t);
[n,kt] = size(t);
nlabt = getnlab(t);
if k ~= kt
error('Data sizes do not match');
end
d = classp(a,nlab,h,p,t);
[dmax,J] = max(d',[],1);
e = nlabcmp(lablistt(J,:),lablistt(nlabt,:)) / n;
end
return
%CLASSP estimate of Parzen density (if t is not specified,
% a leave-one-out error estimate for a is returned)
function F = classp(a,nlab,h,p,t)
[m,k] = size(a);
maxa = max(max(abs(a)));
a = a/maxa;
h = h/maxa;
if nargin < 5
mt = m;
else
[mt,kt] = size(t);
t = t/maxa;
end
c = max(nlab);
alf=sqrt(2*pi)^k; % density normalization factor
[num,n] = prmem(mt,m); % use batches to avoid excessive memory usage
F = ones(mt,c);
for i = 0:num-1
if i == num-1
nn = mt - num*n + n;
else
nn = n;
end
range = [i*n+1:i*n+nn];
if nargin <= 4
D = +distm(a,a(range,:));
D(i*n+1:m+1:i*n+nn*m) = inf*ones(1,nn); % set distances to itself at inf
else
D = +distm(a,t(range,:));
end
% use j, not i: the outer batch loop already uses i as its index
for j=1:c
I = find(nlab == j);
if length(I) > 0
F(range,j) = p(j).*sum(exp(-D(I,:)*0.5./(h(j).^2)),1)'./(length(I)*alf*h(j)^k);
end
end
end
F = F + realmin;
F = F ./ (sum(F')'*ones(1,c));
return
|
github | jacksky64/imageProcessing-master | prtver.m | .m | imageProcessing-master/Matlab PRTools/prtools_com/prtools/prtver.m | 994 | utf_8 | 1ca6fb7544215befd398757ac4f2ba7d |
%PRTVER Get PRTools version
%
%This routine is intended for internal use in PRTools only
function prtversion = prtver
persistent PRTVERSION
if ~isempty (PRTVERSION)
prtversion = PRTVERSION;
return
end
verstring = version;
if strcmp(computer,'MAC2') | verstring(1) == '5';
% name = fileparts(which('fisherc'))
% [pp,name,ext] = fileparts(name(1:end-1))
ver_struct.Name = 'Pattern Recognition Tools';
ver_struct.Version = '4.0.0';
ver_struct.Release = '';
ver_struct.Date = '';
prtversion = {ver_struct datestr(now)};
else
% [pp,name,ext] =fileparts(fileparts(which('fisherc')));
% vers = ver([name,ext]);
% if isempty(vers)
% vers = 0;
% error([newline 'This version of PRTools is not properly defined as a toolbox.' ...
% newline 'Please add it first, e.g. using the addpath command with the path from root!'])
% end
% prtversion = {ver([name,ext]) datestr(now)};
prtversion = {ver('prtools') datestr(now)};
end
PRTVERSION = prtversion;
|
github | jacksky64/imageProcessing-master | pcaklm.m | .m | imageProcessing-master/Matlab PRTools/prtools_com/prtools/pcaklm.m | 5,776 | utf_8 | 152497f52bca71043ca54be95472ebff |
%PCAKLM Principal Component Analysis/Karhunen-Loeve Mapping
% (PCA or MCA of overall/mean covariance matrix)
%
% [W,FRAC] = PCAKLM(TYPE,A,N)
% [W,N] = PCAKLM(TYPE,A,FRAC)
%
% INPUT
% TYPE Type of mapping: 'pca' or 'klm'. Default: 'pca'.
% A Dataset
% N or FRAC Number of dimensions (>= 1) or fraction of variance (< 1)
% to retain; if > 0, perform PCA; otherwise MCA.
% Default: N = inf.
%
% OUTPUT
% W Affine Karhunen-Loeve mapping
% FRAC or N Fraction of variance or number of dimensions retained.
%
% DESCRIPTION
% Performs a principal component analysis (PCA) or minor component analysis
% (MCA) on the overall or mean class covariance matrix (weighted by the
% class prior probabilities). It finds a rotation of the dataset A to an
% N-dimensional linear subspace such that at least (for PCA) or at most (for
% MCA) a fraction FRAC of the total variance is preserved.
%
% PCA is applied when N (or FRAC) >= 0; MCA when N (or FRAC) < 0. If N is
% given (abs(N) >= 1), FRAC is optimised. If FRAC is given (abs(FRAC) < 1),
% N is optimised.
%
% Objects in a new dataset B can be mapped by B*W, W*B or by A*KLM([],N)*B.
% Default (N = inf): the features are decorrelated and ordered, but no
% feature reduction is performed.
%
% ALTERNATIVE
%
% V = PCAKLM(TYPE,A,0)
%
% Returns the cumulative fraction of the explained variance. V(N) is the
% cumulative fraction of the explained variance by using N eigenvectors.
%
% This function should not be called directly, only through PCA or KLM.
% Use FISHERM for optimizing the linear class separability (LDA).
%
% SEE ALSO
% MAPPINGS, DATASETS, PCLDC, KLLDC, PCA, KLM, FISHERM
% Copyright: R.P.W. Duin, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
% $Id: pcaklm.m,v 1.15 2010/02/08 15:29:48 duin Exp $
function [w,truefrac] = pcaklm (type,a,frac)
prtrace(mfilename);
truefrac = [];
% Default: preserve all dimensions (identity mapping).
if (nargin < 3) | (isempty(frac))
frac = inf;
prwarning (3,'no dimensionality given, only decorrelating and ordering dimensions');
end
% Default: perform PCA.
if (nargin < 1) | (isempty(type))
type = 'pca';
prwarning (3,'no type given, assuming PCA');
end
if (strcmp(type,'pca'))
mapname = 'PCA';
elseif (strcmp(type,'klm'))
mapname = 'Karhunen-Loeve Mapping';
else
error('Unknown type specified');
end
%DXD Make the name a bit more informative:
if isfinite(frac)
if (frac<1)
mapname = [mapname sprintf(' ret. %4.1f%% var',100*frac)];
else
mapname = [mapname sprintf(' to %dD',frac)];
end
end
% Empty mapping: return straightaway.
if (nargin < 2) | (isempty(a))
w = mapping(type,frac);
w = setname(w,mapname);
return
end
%nodatafile(a);
if ~isdataset(a)
if isa(a,'double')
a = dataset(a,1); % make sure we have a dataset
else
error('nodatafile','Data should be given in a dataset or as doubles')
end
end
islabtype(a,'crisp','soft');
isvaldfile(a,1); % at least 1 object per class
a = setfeatdom(a,[]); % get rid of domain testing
[m,k,c] = getsize(a);
p = getprior(a);
a = setprior(a,p); % make class frequencies our prior
% If FRAC < 0, perform minor component analysis (MCA) instead of
% principal component analysis.
mca = (frac < 0); frac = abs(frac);
% Shift mean of data to origin.
b = a*scalem(a);
% If there are less samples M than features K, first perform a lossless
% projection to the (M-1) dimensional space spanned by the samples.
if (m <= k)
testdatasize(b,'objects');
u = reducm(b); b = b*u;
korg = k; [m,k] = size(b);
frac = min(frac,k);
else
testdatasize(b,'features');
u = [];
end
% Calculate overall or average class prior-weighted covariance matrix and
% find eigenvectors F.
if (strcmp(type,'pca'))
if (c==0) | ~islabtype(a,'crisp') % we have unlabeled data!
G = prcov(+b); % use all
else
bb = [];
classsiz = classsizes(b);
for j = 1:c
bb = [bb; seldat(b,j)*filtm([],'double')*p(j)/classsiz(j)];
end
[U,G] = meancov(remclass(setnlab(bb*m,1)));
end
else
%DXD For high dimensional dataset with many classes, we cannot
%store all individual cov. matrices in memory (like in the next
%line), but we have to compute them one by one:
%[U,GG] = meancov(b,1);
G = zeros(k,k);
for i = 1:c
%G = G + p(i)*GG(:,:,i);
[U,GG] = meancov(seldat(b,i),1);
G = G + p(i)*GG;
end
end
[F,V] = preig(G); % overdone if reducm has been called
% v = V(I) contains the sorted eigenvalues:
% descending for PCA, ascending for MCA.
if (mca)
[v,I] = sort(diag(V));
else
[v,I] = sort(-diag(V)); v = -v;
end
if (frac == inf) % Return all dimensions, decorrelated and ordered.
n = k; truefrac = k;
elseif (frac == 0) % Just return cumulative retained variance.
w = cumsum(v)/sum(v);
return
elseif (frac >= 1) % Return FRAC dimensions.
n = abs(frac);
if (n > k),
error('illegal dimensionality requested');
end
I = I(1:n);
sv = sum(v);
if (sv ~= 0),
truefrac = cumsum(v(1:n))/sv;
else,
truefrac = 0;
end;
elseif (frac > 0) % Return the N dimensions that retain at least (PCA)
% or at most (MCA) FRAC variance.
J = find(cumsum(v)/sum(v) > frac);
if (mca), n = J(1)-1; else, n = J(1); end;
truefrac = n; I = I(1:n);
end
% If needed, apply pre-calculated projection to (M-1) dimensional subspace.
if (~isempty(u))
rot = u.data.rot*F(:,I);
off = u.data.offset*F(:,I);
else
rot = F(:,I);
off = -mean(a*F(:,I));
end
% Construct affine mapping.
w = affine(rot,off,a);
w = setdata(w,v,'eigenvalues');
w = setname(w,mapname);
return
|
github | jacksky64/imageProcessing-master | loglc.m | .m | imageProcessing-master/Matlab PRTools/prtools_com/prtools/loglc.m | 3,458 | utf_8 | 4c1da4f17c22125827b02b458af22294 |
%LOGLC Logistic Linear Classifier
%
% W = LOGLC(A)
%
% INPUT
% A Dataset
%
% OUTPUT
% W Logistic linear classifier
%
% DESCRIPTION
% Computation of the linear classifier for the dataset A by maximizing the
% likelihood criterion using the logistic (sigmoid) function.
% This routine becomes very slow for feature sizes above 1000.
%
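% EXAMPLE
% % A hedged sketch (not part of the original header):
% % A = gendatb([50 50]);
% % W = loglc(A); % train the logistic linear classifier
% % testc(A*W)
%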
% REFERENCES
% A. Webb, Statistical Pattern Recognition, John Wiley & Sons, New York, 2002.
% J. A. Anderson, Logistic discrimination, in: P. R. Krishnaiah and L. N.
% Kanal (eds.), Handbook of Statistics 2: Classification, Pattern Recognition
% and Reduction of Dimensionality, North Holland, Amsterdam, 1982, 169--191.
%
% SEE ALSO
% MAPPINGS, DATASETS, LDC, FISHERC
% Copyright: R.P.W. Duin, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
% $Id: loglc.m,v 1.8 2010/02/08 15:31:48 duin Exp $
function W = loglc(a)
prtrace(mfilename);
% No input data, return an untrained classifier.
if (nargin == 0) | (isempty(a))
W = mapping(mfilename);
W = setname(W,'Logistic Classifier');
return
end
islabtype(a,'crisp');
isvaldfile(a,1,2); % at least 1 object per class, 2 classes
a = testdatasize(a);
fid = []; % Progress messages default destination
[m,k,c] = getsize(a);
nlab = getnlab(a);
prior = getprior(a);
a = setprior(a,prior);
if (c > 2)
% Compute C classifiers: each class against all others.
W = mclassc(a,mapping(mfilename));
else
v = scalem(a,'variance');
a = a*v;
accuracy = 0.0001; % An accuracy for the ML loop.
x = [a,ones(m,1)];
% A standard trick to set the labels to +1 for the first class
% (NLAB=1) and to -1 for the second one (NLAB=2). Then, each
% object vector is multiplied by its new label +/-1.
x(find(nlab==2),:) = -x(find(nlab==2),:);
x = +x;
alf = sum(nlab==2)/sum(nlab==1);
weights = zeros(1,k+1);
% Maximize the likelihood L to find WEIGHTS
L = -inf; Lnew = -realmax;
prwaitbar(100,'loglc: Optimizing log likelihoods',k > 100)
d0 = 10*log(10);
while (abs(Lnew - L) > accuracy)
prwaitbar(100, 100-100*(log(abs(Lnew-L))-log(accuracy))/d0);
pax = ones(m,1) ./ (1 + exp(-x*weights')); % Estimate of P(class +1|x).
pbx = 1 - pax; % Estimate of P(class -1|x).
L = Lnew; Lnew = sum(log(pax+realmin)); % Update likelihood.
p2x = sqrt(pax.*pbx);
y = x .* p2x(:,ones(1,k+1));
%size(y'*y)
weights = pbx' * x * prpinv(y'*y) + weights;
end
prwaitbar(0);
% Define LOGLC by an affine (linear) mapping based
% on the [K x 1] matrix R and the offset w0.
w0 = weights(k+1) + log(alf*prior(1)/prior(2));
R = weights(1:k)';
%DXD: for a two-class classifier we have to supply two posterior
%probabilities:
%W = v*affine(R,w0,a,getlablist(a)); % wrong
W = v*affine([R -R],[w0 -w0],a,getlablist(a));
W = setout_conv(W,1);
end
W = setname(W,'Logistic Classifier');
return
function [weights,L,Lnew,len2] = optimw(x,weights,L,Lnew,accuracy,len2,fid)
% this function is never called
[m,k] = size(x);
while (abs(Lnew - L) > accuracy)
pax = ones(m,1) ./ (1 + exp(-x*weights')); % Estimate of P(class +1|x).
pbx = 1 - pax; % Estimate of P(class -1|x).
L = Lnew; Lnew = sum(log(pax+realmin)); % Update likelihood.
p2x = sqrt(pax.*pbx);
y = x .* p2x(:,ones(1,k));
weights = pbx' * x * prpinv(y'*y) + weights;
end
|
github | jacksky64/imageProcessing-master | modeseek.m | .m | imageProcessing-master/Matlab PRTools/prtools_com/prtools/modeseek.m | 1,991 | utf_8 | 8fe1d02f08dc5537527b58387dd22cae |
%MODESEEK Clustering by mode-seeking
%
% [LAB,J] = MODESEEK(D,K)
%
% INPUT
% D Distance matrix or distance dataset (square)
% K Number of neighbours to search for local mode (default: 10)
%
% OUTPUT
% LAB Cluster assignments (1 .. number of clusters found)
% J Indices of modal samples
%
% DESCRIPTION
% A K-NN modeseeking method is used to assign each object to its nearest mode.
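%
% EXAMPLE
% % A hedged sketch (not part of the original header):
% % A = gendatb([50 50]);
% % D = sqrt(+distm(A)); % Euclidean distances (DISTM returns squares)
% % [lab,J] = modeseek(D,15) % cluster labels and indices of the modes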
%
% LITERATURE
% Cheng, Y. "Mean shift, mode Seeking, and clustering", IEEE Transactions
% on Pattern Analysis and Machine Intelligence, vol. 17, no. 8, pp. 790-799,
% 1995.
%
% SEE ALSO
% MAPPINGS, DATASETS, KMEANS, HCLUST, KCENTRES, PROXM
% Copyright: R.P.W. Duin, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
% $Id: modeseek.m,v 1.2 2006/03/08 22:06:58 duin Exp $
function [assign,J] = modeseek (d,k)
prtrace(mfilename);
if (nargin < 2)
prwarning(1,'No k supplied, assuming k = 10');
k = 10;
end
[m,n] = size(d);
if (m ~= n), error('distance matrix should be square'); end
if (k < 2), error('neighborhood size should be at least 2'); end
if (k > n), error('k too large for this dataset'); end
[d,J] = sort(+d,1); % Find neighbours.
f = 1./(d(k,:)+realmin); % Calculate densities.
J(k+1:end,:) = []; % Just retain indices of neighbours.
% Find indices of local modes in neighbourhood.
[dummy,I] = max(reshape(f(J),size(J)));
% Translate back to indices in all the data. N now contains, for each
% sample, the index of the highest-density sample in its K-neighbourhood.
N = J(I+[0:k:k*(m-1)]);
% Re-assign samples to the sample their nearest neighbour is assigned to.
% Iterate until assignments don't change anymore. Samples that then point
% to themselves are modes; all other samples point to the closest mode.
M = N(N);
while (any(M~=N))
N = M; M = N(N);
end
% Use renumlab to obtain assignments 1, 2, ... and the list of unique
% assignments (the modes).
[assign,J] = renumlab(M');
return
|
github | jacksky64/imageProcessing-master | plsm.m | .m | imageProcessing-master/Matlab PRTools/prtools_com/prtools/plsm.m | 2,563 | utf_8 | 3c22241ed5520e5da9e20863af1ae6cb |
% PLSM Partial Least Squares Feature Extraction
%
% W = PLSM
% W = PLSM([],MAXLV,METHOD)
%
% [W, INFORM] = PLSM(A,MAXLV,METHOD)
%
% INPUT
% A training dataset
% MAXLV maximal number of latent variables (will be corrected
% if > rank(A));
% MAXLV=inf means MAXLV=min(size(A)) -- theoretical
% maximum number of LV; by default = inf
% METHOD 'NIPALS' or 'SIMPLS'; by default = 'SIMPLS'
%
% OUTPUT
% W PLS feature extraction mapping
% INFORM extra algorithm output
%
% DESCRIPTION
% PRTools adaptation of the PLS_TRAIN/PLS_TRANSFORM routines. No
% preprocessing is done inside this mapping; it is the user's responsibility
% to train preprocessing on the training data and apply it to the test data.
%
% Crisp labels will be converted into soft labels which
% will be used as a target matrix.
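%
% EXAMPLE
% % A hedged sketch (not part of the original header):
% % A = gendatd([30 30],10);
% % W = plsm(A,2); % extract 2 latent variables with SIMPLS
% % B = A*W;       % data mapped onto the latent variables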
%
% SEE ALSO
% PLS_TRAIN, PLS_TRANSFORM, PLS_APPLY
% Copyright: S.Verzakov, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
% $Id: plsm.m,v 1.1 2007/08/28 11:00:39 davidt Exp $
%
function [w,inform]=plsm(par1,par2,par3)
% No dataset given: return untrained mapping.
if (nargin < 1) | (isempty(par1))
if nargin < 2
par2 = inf;
end
if nargin < 3
par3 = 'SIMPLS';
end
data = {par2,par3};
w = mapping(mfilename,'untrained',data);
w = setname(w,'Partial Least Squares Mapping (FE)');
return
end
isdataset(par1); % Assert that A is a dataset.
% training
if nargin < 2 | ~isa(par2,'mapping')
% a*w when w is untrained or
if nargin < 2
par2 = inf;
end
if nargin < 3
par3 = 'SIMPLS';
end
maxLV = par2;
method = par3;
if strcmp(par1.labtype,'crisp')
y=gettargets(setlabtype(par1,'soft'));
else
y=gettargets(par1);
end
% options
Options.maxLV = maxLV;
Options.method = method;
Options.X_centering=[];
Options.Y_centering=[];
Options.X_scaling=[];
Options.Y_scaling=[];
[B,XRes,YRes,Options]=pls_train(+par1,y,Options);
clear B
data.n=Options.maxLV;
data.R=XRes.R;
data.Options=Options;
% Save all useful data.
w = mapping(mfilename,'trained',data,[],size(XRes.R,1),size(XRes.R,2));
w = setname(w,'Partial Least Squares Mapping');
if nargout > 1
inform.XRes=XRes;
inform.YRes=YRes;
end
% execution
else
wdata = getdata(par2); % Unpack the mapping.
T = pls_prepro(+par1,wdata.Options.X_centering,wdata.Options.X_scaling)*wdata.R(:,1:wdata.n);
w = setdat(par1,T,par2);
end
return
|
github | jacksky64/imageProcessing-master | pls_apply.m | .m | imageProcessing-master/Matlab PRTools/prtools_com/prtools/pls_apply.m | 1,626 | utf_8 | 961a8eadfab5964c53573af62c6b64f2 |
%pls_apply Partial Least Squares (applying)
%
% Y = pls_apply(X,B)
% Y = pls_apply(X,B,Options)
%
% INPUT
% X [N -by- d_X] the input data matrix, N samples, d_X variables
% B [d_X -by- d_Y] regression matrix: Y_new = X_new*B
% (X_new is taken after preprocessing and Y_new before
% un-preprocessing; preprocessing and un-preprocessing can be
% done automatically (then Options carries the preprocessing
% info) or manually)
% Options structure returned by pls_train (if not supplied, no
% preprocessing is performed)
%
% OUTPUT
% Y [N -by- d_Y] the output data matrix, N samples, d_Y variables
%
% DESCRIPTION
% Applies the PLS (Partial Least Squares) regression model.
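%
% EXAMPLE
% % A hedged sketch (not part of the original header); the call mirrors
% % the usage of pls_train inside PLSM:
% % [B,XRes,YRes,Options] = pls_train(X,Y,Options0);
% % Ynew = pls_apply(Xnew,B,Options);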
%
% SEE ALSO
% pls_train
% Copyright: S.Verzakov, [email protected]
% Faculty of Applied Sciences, Delft University of Technology
% P.O. Box 5046, 2600 GA Delft, The Netherlands
% $Id: pls_apply.m,v 1.1 2007/08/28 11:00:39 davidt Exp $
function Y = pls_apply(X,B,Options)
if nargin < 3
Options = [];
end
DefaultOptions.X_centering = [];
DefaultOptions.Y_centering = [];
DefaultOptions.X_scaling = [];
DefaultOptions.Y_scaling = [];
Options = pls_updstruct(DefaultOptions, Options);
[N, d_X] = size(X);
[d_XB, d_Y, M] = size(B);
if d_X ~= d_XB
error('size(X,2) must be equal to size(B,1)');
end
X = pls_prepro(X, Options.X_centering, Options.X_scaling);
Y = zeros(N,d_Y,M);
for i=1:M
Y(:,:,i) = pls_prepro(X*B(:,:,i), Options.Y_centering, Options.Y_scaling, -1);
end
return;
|
github | jacksky64/imageProcessing-master | parallel.m | .m | imageProcessing-master/Matlab PRTools/prtools_com/prtools/parallel.m | 6,283 | utf_8 | b199f4616786be7d72eee766b4fd7620 |
%PARALLEL Combining classifiers in different feature spaces
%
% WC = PARALLEL(W1,W2,W3, ....) or WC = [W1;W2;W3; ...]
% WC = PARALLEL({W1;W2;W3; ...}) or WC = [{W1;W2;W3; ...}]
% WC = PARALLEL(WC,W1,W2, ....) or WC = [WC;W2;W3; ...]
% WC = PARALLEL(C);
% WC = PARALLEL(WC,N);
%
% INPUT
% W1,W2,... Base classifiers to be combined.
% WC Parallel combined classifier
% C Cell array of classifiers
% N Integer array
%
% OUTPUT
% WC Combined classifier.
%
% DESCRIPTION
% The base classifiers (or mappings) W1, W2, W3, ... defined in different
% feature spaces are combined in WC. This is a classifier defined for the
% total number of features and with the combined set of outputs. So, for three
% two-class classifiers defined for the classes 'c1' and 'c2', a dataset A is
% mapped by D = A*WC on the outputs 'c1','c2','c1','c2','c1','c2' which are
% the feature labels of D. Note that classification by LABELD(D) finds for
% each vector in D the feature label of the column with the maximum value.
% This is equivalent to using the maximum combiner MAXC.
%
% Other fixed combining rules like PRODC, MEANC, and VOTEC can be applied
% by D = A*WC*PRODC etc. A trained combiner like FISHERC has to be supplied
% with the appropriate training set by AC = A*WC; VC = AC*FISHERC. So the
% expression VC = A*WC*FISHERC yields a classifier and not a dataset as with
% fixed combining rules. This classifier operates in the intermediate feature
% space, the output space of the set of base classifiers. A new dataset B has
% to be mapped to this intermediate space first by BC = B*WC before it can be
% classified by D = BC*VC. As this is equivalent to D = B*WC*VC, the total
% trained combiner is WTC = WC*VC = WC*A*WC*FISHERC. To simplify this procedure
% PRTools executes the training of a combined classifier by
% WTC = A*(WC*FISHERC) as WTC = WC*A*WC*FISHERC.
%
% In order to allow for training an untrained parallel combined classifier by
% A*WC the subsets of the features of A that apply for the individual base
% classifiers of WC should be known to WC. This is facilitated by the
% call WC = PARALLEL(WC,N), in which N is an array of integers, such that
% sum(N) equals the feature size of A.
%
% SEE ALSO
% MAPPINGS, DATASETS, MAXC, MINC, MEANC, MEDIANC, PRODC, FISHERC, STACKED
%
% EXAMPLES
% See PREX_COMBINING.
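% % A hedged sketch (not part of the original header): two base
% % classifiers on disjoint feature subsets, max-rule combined:
% % A = gendatd([30 30],4);
% % W1 = A(:,1:2)*ldc; W2 = A(:,3:4)*qdc;
% % WC = [W1;W2]; % parallel combination over all 4 features
% % testc(A*WC*maxc)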
% Copyright: R.P.W. Duin, [email protected]
% Faculty of Applied Sciences, Delft University of Technology
% P.O. Box 5046, 2600 GA Delft, The Netherlands
% $Id: parallel.m,v 1.4 2009/03/18 16:17:59 duin Exp $
function out = parallel (varargin)
prtrace(mfilename);
% If there are no arguments, just return an empty map.
if (nargin == 0)
out = mapping(mfilename,'combiner');
return
end
% If there is one argument, assume it is a mapping or cell array of these.
if (nargin == 1)
v = varargin{1};
if (~iscell(v))
ismapping(v); % Assert that V is a single mapping.
out = mapping('parallel',getmapping_type(v),{v},getlabels(v));
out = set(out,'size',getsize(v));
else
% If V is a cell array of mappings, unpack it and call this function
% for each cell.
out = feval(mfilename,v{:});
end
return
end
if ( nargin == 2 & ( isparallel(varargin{1}) | iscell(varargin{1}) ) ...
& ~ismapping(varargin{2}) )
% special case, store dataset sizes
if iscell(varargin{1})
w = parallel(varargin{1});
else
w = varargin{1};
end
n = varargin{2};
if length(w.data) ~= length(n)
error('Wrong number of feature sizes')
end
w.data = [w.data {n}];
out = w;
% If there are multiple arguments and the first is not a dataset,
% combine the supplied mappings.
% elseif ((nargin > 2) | ~isa(varargin{1},'dataset'))
elseif (~(isa(varargin{1},'dataset')))
v1 = varargin{1};
if (isempty(v1) & ~ismapping(v1))
start = 3; v1 = varargin{2};
else
start = 2;
end
ismapping(v1); % Assert that V1 is a mapping.
[k,n] = size(v1); % Extract V1's characteristics.
labels = getlabels(v1);
type = getmapping_type(v1);
if (~strcmp(getmapping_file(v1),mfilename))
% If V1 is not already a parallel combiner, make it into a cell array
% of mappings.
v = {v1};
else
% V1 is already a parallel combiner: get the mapping data.
v = getdata(v1);
end
% Now concatenate all base classifiers as cells in V.
for j = start:nargin
v2 = varargin{j};
if j==nargin & ~ismapping(v2)
v = [v {v2}];
else
if (~strcmp(type,getmapping_type(v2)))
error('mappings should be of equal type')
end
v = [v {v2}];
k = k + size(v2,1);
n = n + size(v2,2);
labels = [labels; getlabels(v2)];
end
end
if length(v) == 1
out = v{1}; % just one mapping left: return it
else
out = mapping('parallel',type,v,labels,k,n); % Construct the combined mapping.
end
else
% Execution: dataset * parallel_mapping.
a = varargin{1};
if nargin > 2 & ~ismapping(varargin{end})
v = parallel(varargin(2:end-1),varargin{end});
elseif nargin > 2
v = parallel(varargin(2:end));
else
v = varargin{2};
end
ismapping(v); % Assert that V is a mapping.
out = []; n = 0;
if isuntrained(v)
if ismapping(v.data{end})
error(['Training of parallel combined untrained classifier not possible.' ...
newline 'Feature sizes should be stored in the classifier first.'])
end
s = v.data{end};
if sum(s) ~= size(a,2)
error('Feature size of dataset does not match with classifier')
end
v.data = v.data(1:end-1);
r = length(v.data);
t = sprintf('Training %i base classifiers: ',r);
prwaitbar(r,t)
for j=1:r
prwaitbar(r,j,[t getname(v{j})]);
N = [n+1:n+s(j)]; % to features indexed by N.
n = n + s(j);
w = a(:,N)*v{j};
out = [out; w];
end
prwaitbar(0)
else
for j = 1:length(v.data)
sz = size(v{j},1); % Classifier V{J} is applied
N = [n+1:n+sz]; % to features indexed by N.
b = a(:,N)*v{j};
if (size(v{j},2) == 1) % Restore 2D outputs for k->1 classifiers.
b = b(:,1);
end
n = n + sz;
out = [out b]; % Concatenate mapped data.
end
end
end
return
|
github
|
jacksky64/imageProcessing-master
|
im_fill_norm.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/im_fill_norm.m
| 1,152 |
utf_8
|
fcd880b1620e4cf27165ee515172ac83
|
%IM_FILL_NORM Fill and normalize image for display purposes
%
% B = IM_FILL_NORM(A,N,BACKGROUND)
%
%Low level routine for the DATAFILE/SHOW command to display the non-square
%images of the datafile A inside a square of N x N pixels.
%Empty parts of the square are given the value BACKGROUND (default: gray, 0.5).
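%
%A minimal sketch on a raw double image (the ramp values are illustrative):
% im = repmat(linspace(0,1,10),20,1); % 20 x 10 gray ramp
% b  = im_fill_norm(im,32,0.5);       % filled into a 32 x 32 square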
function b = im_fill_norm(a,n,background)
if nargin < 3, background = 0.5; end
if isa(a,'dataset')
isobjim(a);
if isdatafile(a) & ~isempty(getuser(a,'number_bands_differs'))
a = band2obj(a);
outsize = 1;
else
outsize = [n n getfeatsize(a,3)];
end
% outsize = [n n getfeatsize(a,3)];
b = filtim(a,mfilename,{n,background},outsize);
else
a = double(a);
[x,y,p] = size(a);
mx = max(a(:));
mn = min(a(:));
%b = ones(n,n,p);
if x == 1
b = imresize(a,[1 n]);
elseif x > y
a = imresize(a,max(round(n*[x,y]/x),[1,1]));
k = size(a,2);
s = floor((n-k)/2)+1;
b = background*ones(n,n,p);
b(:,s:s+k-1,:) = a;
else
a = imresize(a,max(round(n*[x,y]/y),[1,1]));
k = size(a,1);
s = floor((n-k)/2)+1;
b = background*ones(n,n,p);
b(s:s+k-1,:,:) = a;
end
b = (b - mn)/(mx-mn+eps);
end
|
github
|
jacksky64/imageProcessing-master
|
isfeatim.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/isfeatim.m
| 621 |
utf_8
|
9e19b7be2892fcb9bd5d884c251c94c5
|
%ISFEATIM
%
% N = ISFEATIM(A);
%
% INPUT
% A Input dataset
%
% OUTPUT
% N 1/0 if dataset A does/doesn't contain images
%
% DESCRIPTION
% True if dataset contains features that are images.
%
% SEE ALSO
% ISDATASET, ISMAPPING, ISDATAIM
% $Id: isfeatim.m,v 1.2 2006/03/08 22:06:58 duin Exp $
function n = isfeatim(a)
prtrace(mfilename);
% When the field objsize contains a vector instead of a scalar, the
% features inside the dataset are images:
n = isa(a,'dataset') & length(a.objsize) > 1;
if (nargout == 0) & (n == 0)
error([newline '---- Dataset with feature images expected -----'])
end
return;
|
github
|
jacksky64/imageProcessing-master
|
pls_prepro.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/pls_prepro.m
| 1,715 |
utf_8
|
d87b6dd6fe49928ae30c51efbc000dbc
|
%PLS_PREPRO Centering and scaling for the partial least squares routines
%
% [X,CENTERING,SCALING] = PLS_PREPRO(X,CENTERING,SCALING,FLAG)
%
% Applies (FLAG >= 0, default) or undoes (FLAG < 0) column-wise centering
% and scaling of X. CENTERING and SCALING may be scalars, vectors of length
% SIZE(X,2), or (partially) NaN; NaN entries are replaced by the column
% means and standard deviations computed from X and returned.
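%
% A minimal sketch (the X values are illustrative): autoscale, then undo:
% X = rand(5,3);
% [Xs,c,s] = pls_prepro(X,NaN,NaN); % column mean 0, std 1
% X2 = pls_prepro(Xs,c,s,-1);       % recovers X up to rounding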
function [X,centering,scaling] = pls_prepro(X,centering,scaling, flag)
% Copyright: S.Verzakov, [email protected]
% Faculty of Applied Sciences, Delft University of Technology
% P.O. Box 5046, 2600 GA Delft, The Netherlands
if nargin<4
flag = 1;
end
[N,d] = size(X);
centering = centering(:).';
scaling = scaling(:).';
if flag >= 0
if length(centering) == 1
if isnan(centering)
centering = mean(X,1);
X = X - repmat(centering, [N,1]);
else
X = X - centering;
end
elseif length(centering) == d
idx = find(isnan(centering));
centering(idx) = mean(X(:,idx),1);
X = X - repmat(centering, [N,1]);
end
if length(scaling) == 1
if isnan(scaling)
scaling = std(X,0,1);
idx0 = find(scaling == 0);
scaling(idx0) = 1;
warning(['features ' num2str(idx0(:)) ' have std = 0 and are not scaled']);
X = X ./ repmat(scaling, [N,1]);
else
X = X / scaling;
end
elseif length(scaling) == d
idx = find(isnan(scaling));
scaling(idx) = std(X(:,idx),0,1);
idx0 = find(scaling(idx) == 0);
scaling(idx(idx0)) = 1;
warning(['features ' num2str(idx(idx0(:))) ' have std = 0 and are not scaled']);
X = X ./ repmat(scaling, [N,1]);
end
else
% undo the preprocessing: first undo the scaling, then the centering,
% i.e. the reverse order of the forward transform X = (X - c)./s
if length(scaling) > 0 & all(~isnan(scaling))
if length(scaling) == 1
X = X * scaling;
elseif length(scaling) == d
X = X .* repmat(scaling, [N,1]);
end
end
if length(centering) > 0 & all(~isnan(centering))
if length(centering) == 1
X = X + centering;
elseif length(centering) == d
X = X + repmat(centering, [N,1]);
end
end
end
return
|
github
|
jacksky64/imageProcessing-master
|
clevalf.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/clevalf.m
| 4,409 |
utf_8
|
cf5e535696e36f4d01d8c9746d141c1a
|
%CLEVALF Classifier evaluation (feature size curve)
%
% E = CLEVALF(A,CLASSF,FEATSIZES,LEARNSIZE,NREPS,T,TESTFUN)
%
% INPUT
% A Training dataset.
% CLASSF The untrained classifier to be tested.
% FEATSIZES Vector of feature sizes (default: all sizes)
% LEARNSIZE Number of objects/fraction of training set size
% (see GENDAT)
% NREPS Number of repetitions (default: 1)
% T Independent test dataset (optional)
% TESTFUN Mapping,evaluation function (default classification error)
%
% OUTPUT
% E Structure with results
% See PLOTE for a description
%
% DESCRIPTION
% Generates at random for all feature sizes stored in FEATSIZES training
% sets of the given LEARNSIZE out of the dataset A. See GENDAT for the
% interpretation of LEARNSIZE. These are used for training the untrained
% classifier CLASSF. The result is tested by all unused objects of A, or,
% if given, by the test dataset T. This is repeated N times. If no testset
% is given and if LEARNSIZE is not given or empty, the training set is
% bootstrapped. If a testset is given, the default training set size is
% the entire training set. Default FEATSIZES: all feature sizes.
% The mean errors are stored in E.error; the observed standard deviations
% are stored in E.std. The default test routine is classification error estimation
% by TESTC([],'crisp').
%
% This function uses the RAND random generator and thereby reproduces only
% if its seed is saved and reset.
%
% SEE ALSO
% MAPPINGS, DATASETS, CLEVAL, CLEVALB, TESTC, PLOTE, GENDAT
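%
% EXAMPLE
% % A minimal sketch (the GENDATD call is an illustrative assumption):
% a = gendatd([100 100],10);
% e = clevalf(a,ldc,[1 2 3 5 7 10],0.5,5);
% plote(e)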
% Copyright: R.P.W. Duin, [email protected]
% Faculty of Applied Sciences, Delft University of Technology
% P.O. Box 5046, 2600 GA Delft, The Netherlands
% $Id: clevalf.m,v 1.8 2008/07/03 09:05:50 duin Exp $
function e = clevalf(a,classf,featsizes,learnsize,n,Tset,testfun)
prtrace(mfilename);
[m,k] = size(a);
if (nargin < 7) | isempty(testfun)
testfun = testc([],'crisp');
end;
if nargin < 6, Tset = []; end
if nargin < 5, n = 1; end;
if nargin < 4, learnsize = []; end
if nargin < 3 | isempty(featsizes), featsizes = [1:k]; end
if isdataset(classf) & ismapping(a) % correct for old order
dd = a; a = classf; classf = {dd};
end
if isdataset(classf) & iscell(a) & ismapping(a{1}) % correct for old order
dd = a; a = classf; classf = dd;
end
if ~iscell(classf), classf = {classf}; end
isdataset(a);
ismapping(classf{1});
if ~isempty(Tset), isdataset(Tset); T = Tset; end
[m,k,c] = getsize(a);
featsizes(find(featsizes > k)) = [];
featsizes = featsizes(:)';
if length(learnsize) > 1 & length(learnsize) ~= c
error('Learnsize should be scalar or a vector with length equal to the number of classes')
end
r = length(classf(:));
e.error = zeros(r,length(featsizes));
e.std = zeros(r,length(featsizes));
e.xvalues = featsizes;
e.n = n;
datname = getname(a);
if ~isempty(datname)
e.title = ['Feature curve for ' getname(a)];
end
e.xlabel= 'Feature size';
if n > 1
e.ylabel= ['Averaged error (' num2str(n) ' experiments)'];
else
e.ylabel = 'Error';
end
if featsizes(end)/featsizes(1) > 20
e.plot = 'semilogx';
end
e.names = [];
s1 = sprintf('clevalf: %i classifiers: ',r);
prwaitbar(r,s1);
e1 = zeros(n,length(featsizes));
seed = rand('state');
% loop over all classifiers
for q = 1:r
isuntrained(classf{q});
name = getname(classf{q});
prwaitbar(r,q,[s1 name]);
e.names = char(e.names,name);
e1 = zeros(n,length(featsizes));
rand('state',seed); % take care that classifiers use same training set
seed2 = rand('state');
s2 = sprintf('clevalf: %i repetitions: ',n);
prwaitbar(n,s2);
for i = 1:n
prwaitbar(n,i,[s2 int2str(i)]);
rand('state',seed2);
if isempty(Tset)
[b,T] = gendat(a,learnsize);
elseif ~isempty(learnsize)
b = gendat(a,learnsize);
else
b = a;
end
seed2 = rand('state');
nfeatsizes = length(featsizes);
s3 = sprintf('clevalf: %i feature sizes: ',nfeatsizes);
prwaitbar(nfeatsizes,s3);
for j=1:nfeatsizes
f = featsizes(j);
prwaitbar(nfeatsizes,j,[s3 int2str(j) ' (' int2str(f) ')']);
e1(i,j) = T(:,1:f)*(b(:,1:f)*classf{q})*testfun;
end
prwaitbar(0)
end
prwaitbar(0)
e.error(q,:) = mean(e1,1);
if n == 1
e.std(q,:) = zeros(1,size(e.std,2));
else
e.std(q,:) = std(e1)/sqrt(n);
end
end
prwaitbar(0)
e.names(1,:) = [];
return
|
github
|
jacksky64/imageProcessing-master
|
distm.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/distm.m
| 2,392 |
utf_8
|
86750607d43a524f999ca5c5de2345f8
|
%DISTM Compute square Euclidean distance matrix
%
% D = DISTM(A,B)
% D = DISTM(A);
% D = A*DISTM
%
% INPUT
% A,B Datasets or matrices; B is optional, default B = A
%
% OUTPUT
% D Square Euclidean distance dataset or matrix
%
% DESCRIPTION
% Computation of the square Euclidean distance matrix D between two
% sets A and B. If A has M objects and B has N objects, then D is
% [M x N]. If A and B are datasets, then D is a dataset as well with
% the labels defined by the labels of A and the feature labels defined
% by the labels of B.
%
% Unlabeled objects in B are neglected, unless B is entirely unlabeled.
%
% If A is not a dataset, but a matrix of doubles then D is also not a
% dataset, but a set of doubles.
%
% NOTE
% DISTM(A,B) is equivalent to A*PROXM(B,'d',2).
%
% SEE ALSO
% DATASETS, PROXM
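%
% EXAMPLE
% % A sketch of the identity used below: |a-b|^2 = |a|^2 + |b|^2 - 2a'b
% a = rand(5,3); b = rand(4,3);
% d1 = distm(a,b);
% d2 = sum(a.*a,2)*ones(1,4) + ones(5,1)*sum(b.*b,2)' - 2*a*b';
% max(abs(d1(:)-d2(:))) % ~0 up to rounding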
% Copyright: R.P.W. Duin, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
% $Id: distm.m,v 1.8 2009/11/27 08:53:00 duin Exp $
function D = distm(A,B)
prtrace(mfilename);
if nargin == 0
D = mapping(mfilename,'fixed');
return
end
if nargin < 2
B = A;
end
B = cdats(B,1); % ???? why just labeled objects in B ?????
[ma,ka] = size(A);
[mb,kb] = size(B);
if (ka ~= kb)
error('Feature sizes should be equal')
end
if isdatafile(A)
D = zeros(ma,mb);
next = 1;
while next > 0
[a,next,J] = readdatafile(A,next);
D(J,:) = +distm(a,B);
end
elseif isdatafile(B)
D = zeros(ma,mb);
next = 1;
while next > 0 % we need another version of readdatafile here, as due
[b,next,J] = readdatafile2(B,next); % to persistent variables double
D(:,J) = +distm(A,b); % looping can not be handled correctly
end
else % A and B are not datafiles
% The order of operations below is good for the accuracy.
D = ones(ma,1)*sum(B'.*B',1);
D = D + sum(A.*A,2)*ones(1,mb);
D = D - 2 .* (+A)*(+B)';
J = find(D<0); % Check for a numerical inaccuracy.
D(J) = zeros(size(J)); % D should be nonnegative.
if ((nargin < 2) & (ma == mb)) % take care of symmetric distance matrix
D = (D + D')/2;
D([1:ma+1:ma*ma]) = zeros(1,ma);
end
end
if isa(A,'dataset') % set object and feature labels
if isa(B,'dataset')
D = setdata(A,D,getlab(B));
else
D = setdata(A,D);
end
end
return
|
github
|
jacksky64/imageProcessing-master
|
svo.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/svo.m
| 5,695 |
utf_8
|
a9b5f6ada2a4fc27d55d2577eb06d58f
|
%SVO Support Vector Optimizer
%
% [V,J,C,NU] = SVO(K,NLAB,C,OPTIONS)
%
% INPUT
% K Similarity matrix
% NLAB Label list consisting of -1/+1
% C Scalar for weighting the errors (optional; default: 1)
% OPTIONS
% .PD_CHECK force positive definiteness of the kernel by adding a small constant
% to a kernel diagonal (default: 1)
% .BIAS_IN_ADMREG it may happen that bias of svc (b term) is not defined, then
% if BIAS_IN_ADMREG == 1, b will be taken from the midpoint of its admissible
% region, otherwise (BIAS_IN_ADMREG == 0) the situation will be considered
% as an optimization failure and treated accordingly (deafault: 1)
% .PF_ON_FAILURE if optimization is failed (or bias is undefined and BIAS_IN_ADMREG is 0)
% and PF_ON_FAILURE == 1, then Pseudo Fisher classifier will be computed,
% otherwise (PF_ON_FAILURE == 0) an error will be issued (default: 1)
%
% OUTPUT
% V Vector of weights for the support vectors
% J Index vector pointing to the support vectors
% C C which was actually used for optimization
% NU NU parameter of SVC_NU algorithm, which gives the same classifier
%
% DESCRIPTION
% A low level routine that optimizes the set of support vectors for a 2-class
% classification problem based on the similarity matrix K computed from the
% training set. SVO is called directly from SVC. The labels NLAB should indicate
% the two classes by +1 and -1. Optimization is done by quadratic programming.
% If available, the QLD function is used, otherwise an appropriate Matlab routine.
%
% SEE ALSO
% SVC
% Copyright: D.M.J. Tax, D. de Ridder, R.P.W. Duin, [email protected]
% Faculty of Applied Sciences, Delft University of Technology
% P.O. Box 5046, 2600 GA Delft, The Netherlands
% $Id: svo.m,v 1.6 2010/02/08 15:29:48 duin Exp $
function [v,J,C,nu] = svo(K,y,C,Options)
prtrace(mfilename);
if nargin < 4
Options = [];
end
DefOptions.pd_check = 1;
DefOptions.bias_in_admreg = 1;
DefOptions.pf_on_failure = 1;
Options = updstruct(DefOptions, Options,1);
if nargin < 3 | isempty(C)
prwarning(3,'The regularization parameter C is not specified, assuming 1.');
C = 1;
end
vmin = 1e-9; % Accuracy to determine when an object becomes a support object.
vmin1 = min(1,C)*vmin; % controls if an object is a support object
vmin2 = C*vmin; % controls if a support object is a boundary support object
% Set up the variables for the optimization.
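% In the standard dual form (a sketch of what the settings below encode):
%   minimize    0.5*v'*((y*y').*K)*v - sum(v)
%   subject to  y'*v = 0  and  0 <= v_i <= C
% hence D = (y*y').*K, the equality constraint is A*v = b with A = y' and
% b = 0, and the box constraints are lb = 0 and ub = C.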
n = size(K,1);
D = (y*y').*K;
f = -ones(1,n);
A = y';
b = 0;
lb = zeros(n,1);
ub = repmat(C,n,1);
p = rand(n,1);
D = (D+D')/2; % guarantee symmetry
% Make the kernel matrix K positive definite.
if Options.pd_check
i = -30;
while (pd_check (D + (10.0^i) * eye(n)) == 0)
i = i + 1;
end
if (i > -30),
prwarning(2,'K is not positive definite. The kernel is regularized by adding 10.0^(%d)*I',i);
end
i = i+2;
D = D + (10.0^(i)) * eye(n);
end
% Minimization procedure initialization:
% 'qp' minimizes: 0.5 x' D x + f' x
% subject to: A x = b, lb <= x <= ub
%
if (exist('qld') == 3)
v = qld (D, f, -A, b, lb, ub, p, 1);
elseif (exist('quadprog') == 2)
prwarning(1,'QLD not found, the Matlab routine QUADPROG is used instead.')
opt = optimset; opt.LargeScale='off'; opt.Display='off';
v = quadprog(D, f, [], [], A, b, lb, ub,[],opt);
else
prwarning(1,'QLD not found, the Matlab routine QP is used instead.')
verbos = 0;
negdef = 0;
normalize = 1;
v = qp(D, f, A, b, lb, ub, p, 1, verbos, negdef, normalize);
end
try
% check if the optimizer returned anything
if isempty(v)
error('Optimization did not converge.');
end
% Find all the support vectors.
J = find(v > vmin1);
Jp = J(y(J) == 1);
Jm = J(y(J) == -1);
% Sanity check: there are support objects from both classes
if isempty(J)
error('There are no support objects.');
elseif isempty(Jp)
error('There are no support objects from the positive class.');
elseif isempty(Jm)
error('There are no support objects from the negative class.');
end
% compute nu parameter
nu = sum(v(J),1)/(C*n);
% Find the SV on the boundary
I = find((v > vmin1) & (v < C-vmin2));
% Include class information into object weights
v = y.*v;
% There are boundary support objects we can use them to find a bias term
if ~isempty(I)
b = mean(y(I)-K(I,J)*v(J));
elseif Options.bias_in_admreg
% There are no boundary support objects
% We try to put the bias into the middle of admissible region
% non SV
J0 = (1:n)';
J0(J) = [];
J0p = J0(y(J0) == 1);
J0m = J0(y(J0) == -1);
% Jp and Jm are all margin errors
lb = max(y([J0p;Jm]) - K([J0p;Jm],J)*v(J));
ub = min(y([J0m;Jp]) - K([J0m;Jp],J)*v(J));
if lb > ub
error('The admissible region of the bias term is empty.');
end
prwarning(2,['The bias term is undefined. The midpoint of its admissible region is used.']);
b = (lb+ub)/2;
else
error('The bias term is undefined.');
end
v = [v(J); b];
catch
err.message = '##';
lasterror(err); % avoid problems with prwaitbar
if Options.pf_on_failure
prwarning(1,[lasterr ' Pseudo-Fisher is computed instead.']);
n = size(K,1);
%v = prpinv([K ones(n,1)])*y;
v = prpinv([K ones(n,1); ones(1,n) 0])*[y; 0];
J = [1:n]';
nu = nan;
else
rethrow(lasterror);
end
end
return;
|
github
|
jacksky64/imageProcessing-master
|
prcursor.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/prcursor.m
| 1,012 |
utf_8
|
f0ffc26e1d4b1085e240482c53ee31cb
|
%PRCURSOR Show object identifier in a scatterplot
%
% PRCURSOR(H)
%
% Enable the datacursor in a scatterplot. This can be used to
% investigate the object identifier by clicking on the object.
% Copyright: D.M.J. Tax, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
function out = prcursor(h,event_obj)
% This file has two tasks:
% 1. to set up the datacursor (by enabling it and setting the callback
% function)
% 2. to provide the callback when a user clicks on an object
if ~exist('datacursormode','file')
error('MATLAB 7.0 or newer is required (datacursormode.m is not available).');
end
if nargin ~=2
% we are doing the setup:
if nargin<1
h = gcf;
end
dh = datacursormode(h);
set(dh,'enable','on','updatefcn',@prcursor);
else
% we are doing the callback:
nr = get(event_obj,'dataindex');
ud = get(get(event_obj,'target'),'UserData');
if ~isempty(ud) & isfield(ud,'ident')
nr = ud.ident(nr);
end
out = sprintf('obj. %d',nr);
end
return
|
github
|
jacksky64/imageProcessing-master
|
clevalb.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/clevalb.m
| 5,779 |
utf_8
|
49bc4933f35f2f156671760c1c794679
|
%CLEVALB Classifier evaluation (learning curve), bootstrap version
%
% E = CLEVALB(A,CLASSF,TRAINSIZES,N)
%
% INPUT
% A Training dataset
% CLASSF Classifier to evaluate
% TRAINSIZES Vector of class sizes, used to generate subsets of A
% (default [2,3,5,7,10,15,20,30,50,70,100])
% NREPS Number of repetitions (default 1)
%
% OUTPUT
% E Error structure (see PLOTE)
%
% DESCRIPTION
% Generates at random, for all class sizes defined in TRAINSIZES, training
% sets out of the dataset A and uses these for training the untrained
% classifier CLASSF. CLASSF may also be a cell array of untrained
% classifiers; in this case the routine will be run for all of them. The
% resulting trained classifiers are tested on all objects in A. This
% procedure is then repeated N times.
%
% Training set generation is done "with replacement" and such that for each
% run the larger training sets include the smaller ones and that for all
% classifiers the same training sets are used.
%
% If CLASSF is fully deterministic, this function uses the RAND random
% generator and thereby reproduces if its seed is reset (see RAND).
% If CLASSF uses RANDN, its seed may have to be set as well.
%
% Use FID = 1 to report progress to the command window.
%
% EXAMPLES
% See PREX_CLEVAL.
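%
% % A minimal sketch (the GENDATB call is an illustrative assumption):
% e = clevalb(gendatb([100 100]),{nmc,ldc},[5 10 20 50],10);
% plote(e)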
%
% SEE ALSO
% MAPPINGS, DATASETS, CLEVAL, TESTC, PLOTE
% Copyright: R.P.W. Duin, [email protected]
% Faculty of Applied Sciences, Delft University of Technology
% P.O. Box 5046, 2600 GA Delft, The Netherlands
% $Id: clevalb.m,v 1.4 2008/07/03 09:05:50 duin Exp $
function e = clevalb(a,classf,learnsizes,nreps,fid)
prtrace(mfilename)
% use of fid is outdated
if (nargin < 4)
prwarning(2,'number of repetitions not specified, assuming NREPS = 1');
nreps = 1;
end;
if (nargin < 3)
prwarning(2,'vector of training set class sizes not specified, assuming [2,3,5,7,10,15,20,30,50,70,100]');
learnsizes = [2,3,5,7,10,15,20,30,50,70,100];
end;
% If a single mapping is given, convert it to a 1 x 1 cell array.
if (ismapping(classf)), classf = {classf}; end
% Correct for old argument order.
if (isdataset(classf)) & (ismapping(a))
tmp = a; a = classf; classf = {tmp};
end
if (isdataset(classf)) & (iscell(a)) & (ismapping(a{1}))
tmp = a; a = classf; classf = tmp;
end
if ~iscell(classf), classf = {classf}; end
% Assert that all is right.
isdataset(a); ismapping(classf{1});
% Remove requested class sizes that are larger than the size of the
% smallest class.
mc = classsizes(a); [m,k,c] = getsize(a);
toolarge = find(learnsizes >= min(mc));
if (~isempty(toolarge))
prwarning(2,['training set class sizes ' num2str(learnsizes(toolarge)) ...
' larger than the minimal class size in A; removed them']);
learnsizes(toolarge) = [];
end
learnsizes = learnsizes(:)';
% Fill the error structure.
nw = length(classf(:));
datname = getname(a);
e.n = nreps;
e.error = zeros(nw,length(learnsizes));
e.std = zeros(nw,length(learnsizes));
e.xvalues = learnsizes(:)';
e.names = [];
e.xlabel = 'Training set size';
if (nreps > 1)
e.ylabel= ['Averaged error (' num2str(nreps) ' experiments)'];
elseif (nreps == 1)
e.ylabel = 'Error';
else
error('Number of repetitions NREPS should be >= 1.');
end;
if (~isempty(datname))
e.title = ['Bootstrapped learning curve on ' datname];
end
if (learnsizes(end)/learnsizes(1) > 20)
e.plot = 'semilogx'; % If range too large, use a log-plot for X.
end
% Report progress.
s1 = sprintf('clevalb: %i classifiers: ',nw);
prwaitbar(nw,s1);
% Store the seed, to reset the random generator later for different
% classifiers.
seed = rand('state');
% Loop over all classifiers (with index WI).
for wi = 1:nw
isuntrained(classf{wi});
name = getname(classf{wi});
e.names = char(e.names,name);
prwaitbar(nw,wi,[s1 name]);
% E1 will contain the error estimates.
e1 = zeros(nreps,length(learnsizes));
% Take care that classifiers use same training set.
rand('state',seed); seed2 = seed;
% For NREPS repetitions...
s2 = sprintf('clevalb: %i repetitions: ',nreps);
prwaitbar(nreps,s2);
for i = 1:nreps
prwaitbar(nreps,i,[s2 int2str(i)]);
% Store the randomly permuted indices of samples of class CI to use in
% this training set in JR(CI,:).
JR = zeros(c,max(learnsizes));
for ci = 1:c
JC = findnlab(a,ci);
% Necessary for reproducible training sets: set the seed and store
% it after generation, so that next time we will use the previous one.
rand('state',seed2);
R = ceil(rand(1,max(learnsizes))*length(JC));
JR(ci,:) = JC(R)';
seed2 = rand('state');
end
li = 0; % Index of training set.
nlearns = length(learnsizes);
s3 = sprintf('clevalb: %i sizes: ',nlearns);
prwaitbar(nlearns,s3);
for j = 1:nlearns
nj = learnsizes(j);
prwaitbar(nlearns,j,[s3 int2str(j) ' (' int2str(nj) ')']);
li = li + 1;
% J will contain the indices for this training set.
J = [];
for ci = 1:c
J = [J;JR(ci,1:nj)'];
end;
% Train classifier CLASSF{WI} on this training set and calculate
% error.
W = a(J,:)*classf{wi};
e1(i,li) = testc(a,W);
end
prwaitbar(0);
end
prwaitbar(0);
% Calculate average error and standard deviation for this classifier
% (or set the latter to zero if there's been just 1 repetition).
e.error(wi,:) = mean(e1,1);
if (nreps == 1)
e.std(wi,:) = zeros(1,size(e.std,2));
else
e.std(wi,:) = std(e1)/sqrt(nreps);
end
end
prwaitbar(0);
% The first element is the empty string [], remove it.
e.names(1,:) = [];
return
|
github
|
jacksky64/imageProcessing-master
|
klms.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/klms.m
| 1,499 |
utf_8
|
c5280fd52bab9dc81ed54a1061e8b099
|
%KLMS Karhunen Loeve Mapping, followed by scaling
%
% [W,FRAC] = KLMS(A,N)
% [W,N] = KLMS(A,FRAC)
%
% INPUT
% A Dataset
% N or FRAC Number of dimensions (>= 1) or fraction of variance (< 1)
% to retain; if > 0, perform PCA; otherwise MCA. Default: N = inf.
%
% OUTPUT
% W Affine Karhunen-Loeve mapping
% FRAC or N Fraction of variance or number of dimensions retained.
%
% DESCRIPTION
% First a Karhunen Loeve Mapping is performed (i.e. PCA or MCA on the average
% prior-weighted class covariance matrix). The result is scaled by the mean
% class standard deviations. For N and FRAC, see KLM.
%
% Default N: select all ('pre-whiten' the average covariance matrix, i.e.
% orthogonalize and scale). The resulting mapping has a unit average
% covariance matrix.
%
% SEE ALSO
% MAPPINGS, DATASETS, KLM, PCA
% Copyright: R.P.W. Duin, [email protected]
% Faculty of Applied Physics, Delft University of Technology
% P.O. Box 5046, 2600 GA Delft, The Netherlands
% $Id: klms.m,v 1.2 2006/03/08 22:06:58 duin Exp $
function [w,truefrac] = klms(a,n)
prtrace(mfilename);
if (nargin < 2), n = []; end;
if (nargin < 1) | (isempty(a))
w = mapping('klms',n);
w = setname(w,'Scaled KL Mapping');
return
end
[w,truefrac] = klm(a,n); % Calculate KL mapping
b = a*w; % Combine KL mapping with scaling on
w = w*scalem(b,'c-variance'); % KL-mapped data
w = setname(w,'Scaled KL Mapping');
return
|
github
|
jacksky64/imageProcessing-master
|
knn_map.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/knn_map.m
| 3,533 |
utf_8
|
92e336b19065af64fd9f274ee288a896
|
%KNN_MAP Map a dataset on a K-NN classifier
%
% F = KNN_MAP(A,W)
%
% INPUT
% A Dataset
% W K-NN classifier trained by KNNC
%
% OUTPUT
% F Posterior probabilities
%
% DESCRIPTION
% Maps the dataset A by the K-NN classifier W on the [0,1] interval for
% each of the classes that W is trained on. The posterior probabilities,
% stored in F, are computed in the following ways:
% soft labeled training set: the normalised average of the soft labels
% of the K neighbors.
% crisp labeled training set, K = 1: normalisation of sigm(log(F)) with
% F(1:C) = sum(NN_Dist(1:C))./NN_Dist(1:C) - 1
% in which C is the number of classes and NN_Dist stores
% the distance to the nearest neighbor of each class.
% crisp labeled training set, K > 1: normalisation of
% (N(1:C) + 1)/(K+C), in which N stores the number of
% objects per class within the K first neighbors.
%
% This routine is called automatically to determine A*W if W is trained
% by KNNC.
%
% Warning: Class prior probabilities in the dataset A are neglected.
%
% SEE ALSO
% MAPPINGS, DATASETS, KNNC, TESTK
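%
% EXAMPLE
% % A sketch; KNN_MAP is invoked implicitly by mapping data on KNNC:
% a = gendatb([50 50]); % illustrative dataset
% w = knnc(a,3);
% p = +(a*w); % posterior estimates per class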
% Copyright: R.P.W. Duin, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
% $Id: knn_map.m,v 1.3 2007/06/19 11:44:14 duin Exp $
function F = knn_map(T,W)
prtrace(mfilename);
% Get the training data and parameters from the mapping:
data = getdata(W);
a = data{1};
knn = data{2};
[m,k,c] = getsize(a);
nlab = getnlab(a);
% If there is no test set, then the leave-one-out is done on the
% training set (see TESTK).
if isempty(T)
T = a;
loo = 1;
else
loo = 0;
end
[mt,kt] = size(T);
if (kt ~= k), error('Wrong feature size'); end
r = classsizes(a);
[num,n] = prmem(mt,m); % Check the available memory.
F = ones(mt,c);
D = ones(mt,c);
% Loop in batches.
for i = 0:num-1
if (i == num-1)
nn = mt - num*n + n;
else
nn = n;
end
range = [i*n+1:i*n+nn];
if loo,
DD = +distm(a,a(range,:));
dmax = max(DD(:));
% Set distances to itself at INF to find later the nearest
% neighbors more easily
DD(i*n+1:m+1:i*n+nn*m) = inf*ones(1,nn);
else
DD = distm(+a,+T(range,:));
dmax = max(DD(:));
end
J = find(isnan(DD));
if length(J) > 0
DD(J) = dmax*10;
end
[DD,L] = sort(DD);
switch getlabtype(a)
case 'soft'
for j=1:c
F(range,j) = sum(reshape(a.targets(L(1:knn,:),j),knn,length(range)),1)';
end
case 'crisp'
L = reshape(nlab(L),size(L)); % Find labels.
% Find label frequencies.
for j = 1:c
F(range,j) = sum(L(1:knn,:)==j,1)';
end
otherwise
error('Illegal label type')
end
% Estimate posterior probabilities
if islabtype(a,'crisp')
if (knn >= 2) % Use Bayes estimators on frequencies.
F(range,:) = (F(range,:)+1)/(knn+c);
else % Use distances.
K = max(F(range,:)');
for j = 1:c
K = min(K,r(j));
J = reshape(find(L==j),r(j),nn); % Find the distances between
J = J(K+[0:nn-1]*r(j)); % that neighbor and other objects.
D(range,j) = DD(J)'; % Number for all classes.
end
F(range,:) = sigm(log(sum(D(range,:),2)*ones(1,c)./ ...
(D(range,:)+realmin) - 1 + realmin));
end
end
% Normalize the probabilities.
F(range,:) = F(range,:) ./ (sum(F(range,:),2)*ones(1,c));
end
if (isdataset(T))
F = setdata(T,F,getlabels(W));
end;
return;
|
github
|
jacksky64/imageProcessing-master
|
im_measure.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/im_measure.m
| 4,799 |
utf_8
|
adb1ef01ba08251a32b46ad247177e35
|
%IM_MEASURE Computation by DIP_Image of feature measurements
%
% F = IM_MEASURE(A,GRAY,FEATURES)
%
% INPUT
% A Dataset with binary object images dataset (possibly multi-band)
% GRAY Gray-valued images (matched with A, optional)
% FEATURES Features to be computed
%
% OUTPUT
% F Dataset with computed features
%
% In each image of the measurement set GRAY the features given in FEATURES
% are measured. In A a segmented version of GRAY has to be supplied.
% When no GRAY is supplied, the binary images in A are used. Only
% the largest object in each image is considered.
%
% The following features may be computed:
% 'dimension','mean','stddev','gravity','size','center','max','min',
% 'maxval','minval','feret','inertia','ccbendingenergy'.
% Note that some features like 'mean' (mean image intensity) and 'stddev'
% (standard deviation of image intensity) are not useful for binary images.
% Run MEASUREHELP to get some information on these measures.
%
% Use FEATURES = 'all' for computing all features.
% Use MEASUREHELP for some description of the features.
%
% SEE ALSO
% DATASETS, DATAFILES, MEASURE, MEASUREHELP
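%
% EXAMPLE
% % A sketch (requires DIPImage; BIN and GRAY are assumed to be matched
% % datasets with segmented and gray-valued images respectively):
% f = im_measure(bin,gray,{'size','feret','inertia'});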
% Copyright: R.P.W. Duin, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
function b = im_measure(a,gray,features)
prtrace(mfilename);
isdipimage;
if nargin < 3, features = []; end
if nargin < 2, gray = []; end
%if nargin < 2 | isempty(gray), gray = a; end
if nargin < 1 | isempty(a)
b = mapping(mfilename,'fixed',{gray,features});
b = setname(b,'DIP measurements');
elseif isdataset(a)
if (nargin < 3 | isempty(features)) & ~isdataset(gray)
features = gray;
gray = a;
end
if ~isdataset(gray)
error('Binary and gray images should be both datasets')
end
fsize = getfeatsize(a);
if any(getfeatsize(gray) ~= fsize)
error('Image structures of binary and gray images should be identical')
end
if length(fsize) == 2, fsize = [fsize 1]; end
if size(a,1) ~= size(gray,1)
error('Same number of binary and gray images expected')
end
out = [];
binim = data2im(a);
grim = data2im(gray);
nim = size(a,1)*fsize(3);
s = sprintf('Measuring %i images',nim);
prwaitbar(nim,s);
for i=1:size(a,1)
for j=1:fsize(3)
prwaitbar(nim,(i-1)*fsize(3)+j);
f = feval(mfilename,binim(:,:,j,i),grim(:,:,j,i),features);
if isempty(out)
out = repmat(f(:)',[size(a,1),1,fsize(3)]);
%out = reshape(f(:)',[size(a,1),1,fsize(3)]);
else
out(i,:,j) = f;
end
end
end
prwaitbar(0);
b = setdat(a,out);
b = setfeatsize(b,[length(f),fsize(3)]);
b = setfeatlab(b,getfeaturelabels(features));
elseif isdatafile(a)
if (nargin < 3 | isempty(features)) & ~isdatafile(gray)
features = gray;
gray = a;
end
if ~isdatafile(gray)
error('Binary and gray images should be both datafiles')
end
b = dyadic(a,mfilename,gray,{features});
%b = setfeatlab(b,getfeaturelabels(features));
elseif isa(a,'double') | isa(a,'dip_image') % here we have a single image
if isempty(features), features = 'dimension'; end
gray = 1.0*dip_image(gray);
%labim = label(dip_image(im_select_blob(a),'bin'));
labim = label(dip_image(a,'bin'));
c = measure(labim,gray,'size',[],2);
labid = c.id;
sz = c.size;
[bb,mm] = max(sz);
labid = labid(mm);
if strcmp(features,'all')
features = {'dimension','mean','stddev','gravity',...
'size','center','max','min', 'maxval','minval',...
'feret','inertia', 'ccbendingenergy'};
end
b = measure(labim,gray,features,labid,2);
b = double(b);
else
error('Wrong input')
end
return
function names = getfeaturelabels(features)
names = {};
for i=1:length(features)
switch features{i}
case 'dimension'
names{end+1} = 'imagewidth';
names{end+1} = 'imageheight';
case 'mean'
names{end+1} = 'mean int';
case 'sum'
names{end+1} = 'mass';
case 'stddev'
names{end+1} = 'standard dev.';
case 'gravity'
names{end+1} = 'gravity x';
names{end+1} = 'gravity y';
case 'size'
names{end+1} = 'size';
case 'center'
names{end+1} = 'center x';
names{end+1} = 'center y';
case 'max'
names{end+1} = 'max x coord';
names{end+1} = 'max y coord';
case 'min'
names{end+1} = 'min x coord';
names{end+1} = 'min y coord';
case 'maxval'
names{end+1} = 'max int';
case 'minval'
names{end+1} = 'min int';
case 'perimeter'
names{end+1} = 'perimeter';
case 'feret'
names{end+1} = 'max diameter';
names{end+1} = 'min diameter';
names{end+1} = 'max perp. diameter';
case 'inertia'
names{end+1} = 'inertia moment 1';
names{end+1} = 'inertia moment 2';
case 'ccbendingenergy'
names{end+1} = 'bending energy perimeter';
otherwise
error('I do not know feature %s.',features{i});
end
end
return
|
github
|
jacksky64/imageProcessing-master
|
mds_stress.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/mds_stress.m
| 1,539 |
utf_8
|
9d18dc6dbb2205f7949662a11fb0146e
|
%MDS_STRESS - Sammon stress between dissimilarity matrices
%
% [E,ALPHA] = MDS_STRESS(Q,DS,D,ISRATIO)
%
% INPUT
% Q Indicator of the Sammon stress; Q = -2,-1,0,1,2
% DS Original distance matrix
% D Approximated distance matrix
% ISRATIO If 1, DS is first rescaled by the optimal factor ALPHA (default: 0)
%
% OUTPUT
% E Sammon stress
% ALPHA Scaling factor applied to DS (1 if ISRATIO = 0)
%
% DESCRIPTION
% Computes the Sammon stress between the original distance matrix Ds
% and the approximated distance matrix D, expressed as follows:
%
% E = 1/(sum_{i<j} Ds_{ij}^(q+2)) sum_{i<j} (Ds_{ij} - D_{ij})^2 * Ds_{ij}^q
%
%
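% EXAMPLE
% % A sketch: the stress of a configuration against itself is zero:
% d = distm(rand(10,2));
% e = mds_stress(0,d,d) % returns 0
%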
% Copyright: Elzbieta Pekalska, Robert P.W. Duin, [email protected], 2000-2003
% Faculty of Applied Sciences, Delft University of Technology
%
function [e,alpha] = mds_stress (q,Ds,D,isratio)
if nargin < 4
isratio = 0;
end
[m,k] = size(Ds);
if any(size(D) ~= size(Ds)),
error ('The sizes of matrices do not match.');
end
mk = m*k;
D = +D;
Ds = +Ds;
% I is the index of non-zero (> eps) values to be included
% for the computation of the stress
I = 1:mk;
nanindex = find(isnan(Ds(:)) | isnan(D(:)));
if ~isempty(nanindex),
I(nanindex) = [];
end
O = [];
if m == k & (length(intersect(find(D(:) < eps), 1:m+1:(mk))) == m),
O = 1:m+1:mk;
Ds(O) = 1;
D (O) = 1;
mm = m - 1;
else
mm = k;
end
if isratio,
II = setdiff(I,O);
alpha = sum((Ds(II).^q).*D(II).^2)/sum((Ds(II).^(q+1)).*D(II));
Ds = alpha*Ds;
else
alpha = 1;
end
c = sum(Ds(I).^(q+2)) - length(O);
if q ~= 0,
e = sum(Ds(I).^q .* ((Ds(I)-D(I)).^2))/c;
else
e = sum(((Ds(I)-D(I)).^2))/c;
end
return;
|
github
|
jacksky64/imageProcessing-master
|
closemess.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/closemess.m
| 414 |
utf_8
|
74b3dc0ce1e07c2c0a6f04f464762113
|
%CLOSEMESS Close progress message
%
% CLOSEMESS(FID,N)
%
% Closes a progress message of length N on file-id FID
%
% This routine is obsolete now and just preserved to get
% old code running.
% Copyright: R.P.W. Duin, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
function closemess(fid,n)
prprogress(fid,'\n');
return
|
github
|
jacksky64/imageProcessing-master
|
gendatsin.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/gendatsin.m
| 1,008 |
utf_8
|
9b2a9a557eb3beb99a42a96d70edff8d
|
%GENDATSIN Generate sinusoidal regression data
%
% X = GENDATSIN(N,SIGMA)
%
% INPUT
% N Number of objects to generate
% SIGMA Standard deviation of the noise
%
% OUTPUT
% X Regression dataset
%
% DESCRIPTION
% Generate an artificial regression dataset [X,Y] with:
%
% y = sin(4x) + noise.
%
% where noise is Gaussian distributed with standard deviation sigma.
%
% X = GENDATSIN(100)
% generates 100 x,y pairs with data and default noise (sigma = 0.1).
%
% x = (0:0.01:1)';
% a = gendatsin(x,0);
% generates the true function along the x-axis, with zero noise.
%
% SEE ALSO
% GENDATR, GENDATSINC
% Copyright: D.M.J. Tax, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
function x = gendatsin(nrx,noise)
if nargin<2
noise = 0.1;
end
if (length(nrx)>1)
% a set of x-values was supplied: keep it and compute the targets
x = nrx;
y = sin(4*x) + noise*randn(size(x));
else
x = rand(nrx,1);
y = sin(4*x) + noise*randn(nrx,1);
end
x = gendatr(x,y);
return
|
github
|
jacksky64/imageProcessing-master
|
im_gauss.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/im_gauss.m
| 1,667 |
utf_8
|
e3bfd4898cd066716bbbcc381e6e454f
|
%IM_GAUSS Gaussian filter of images stored in a dataset/datafile (Matlab)
%
% B = IM_GAUSS(A,SX,SY)
% B = A*IM_GAUSS([],SX,SY)
%
% INPUT
% A Dataset with object images dataset (possibly multi-band)
% SX Desired horizontal standard deviation for filter, default SX = 1
% SY Desired vertical standard deviation for filter, default SY = SX
%
% OUTPUT
% B Dataset/datafile with Gaussian filtered images
%
% DESCRIPTION
% All, possibly multi-band, 2D images in A are Gaussian filtered using the
% Matlab command CONV2. In case DIPImage is available, IM_GAUSSF may be
% used instead for faster processing.
%
% SEE ALSO
% DATASETS, DATAFILES, IM_GAUSSF, FILTIM
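%
% EXAMPLE
% % A sketch on a raw double image (the noise image is illustrative):
% b = im_gauss(rand(64,64),2); % smooth with sigma = 2 in both directions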
% Copyright: R.P.W. Duin, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
function b = im_gauss(a,sx,sy)
prtrace(mfilename);
if nargin < 3, sy = []; end
if nargin < 2 | isempty(sx), sx = 1; end
if isempty(sy), sy = sx; end
if nargin < 1 | isempty(a)
b = mapping(mfilename,'fixed',{sx,sy});
b = setname(b,'Gaussian filter');
elseif isa(a,'dataset') % allows datafiles too
isobjim(a);
b = filtim(a,mfilename,{sx,sy});
elseif isa(a,'double') % here we have a single image
if sx == 0
fx = 1;
else
rx = round(3*sx);
fx = exp((-[-rx:1:rx].^2)/(2*sx*sx)); fx = fx/sum(fx);
end
if sy == 0
fy = 1;
else
ry = round(3*sy);
fy = exp((-[-ry:1:ry].^2)/(2*sy*sy)); fy = fy/sum(fy);
end
n = size(a,3);
b = zeros(size(a));
for j=1:n
b(:,:,j) = conv2(fy,fx,a(:,:,j),'same');
end
end
return
|
github
|
jacksky64/imageProcessing-master
|
emclust.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/emclust.m
| 7,248 |
utf_8
|
239d82760474b6ec630b238a2a51be88
|
%EMCLUST Expectation-Maximization clustering
%
% [LABELS,W_EM] = EMCLUST (A,W_CLUST,K,LABTYPE,FID)
%
% INPUT
% A Dataset, possibly labeled
% W_CLUST Cluster model mapping, untrained (default: nmc)
% K Number of clusters (default: 2)
% LABTYPE Label type: 'crisp' or 'soft' (default: label type of A)
% FID File ID to write progress to (default [], see PRPROGRESS)
%
% OUTPUT
% LABELS Integer labels for the objects in A pointing to their cluster
% W_EM EM clustering mapping
%
% DESCRIPTION
% The untrained classifier mapping W_CLUST is used to update an initially
% labeled dataset A by iterating the following two steps:
% 1. Train W : W_EM = A*W_CLUST
% 2. Relabel A : A = dataset(A,labeld(A*W_EM*classc))
% This is repeated until the labeling does not change anymore. The final
% classification matrix is returned in B. The final crisp labeling is returned
% in LABELS. W_EM may be used for assigning new objects.
%
% If K is given, a random initialisation for K clusters is made and labels
% of A are neglected.
%
% LABTYPE determines the type of labeling: 'crisp' or 'soft'. Default: label
% type of A. It is assumed W_CLUST can handle the LABTYPE requested.
% Only in case LABTYPE is 'soft' the traditional EM algorithm is followed.
% In case LABTYPE is 'crisp' EMCLUST follows a generalised k-means
% algorithm.
%
% SEE ALSO
% MAPPINGS, DATASETS, KMEANS, PRPROGRESS
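%
% EXAMPLE
% % A minimal sketch (the GENDATB call is an illustrative assumption):
% a = gendatb([100 100]);
% [lab,w_em] = emclust(a,nmc,2); % 2 clusters, NMC cluster model
% scatterd(dataset(+a,lab))      % show the found clustering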
% Copyright: R.P.W. Duin, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
% $Id: emclust.m,v 1.9 2009/02/03 21:07:26 duin Exp $
function [new_lab,w_em] = emclust (a,w_clust,n,type,fid)
prtrace(mfilename);
n_ini = 500; % Maximum size of subset to use for initialisation.
epsilon = 1e-6; % Stop when average labeling change drops below this.
% Check arguments.
if (nargin < 5), fid = []; end
if (nargin < 4)
prwarning(3,'No label type specified, using label type of dataset A.');
type = [];
end
if (nargin < 3) | isempty(n)
prwarning(3,'No number of clusters specified, using number of classes in A.');
n = [];
end
if (nargin < 2) | isempty(w_clust)
prwarning(2,'No clustering mapping specified, assuming NMC.');
w_clust = nmc;
end
isuntrained(w_clust); % Assert that clustering mapping is untrained.
% Determine number of clusters N and initialisation method.
a = testdatasize(a);
islabtype(a,'crisp','soft');
[m,k,c] = getsize(a);
rand_init = 1;
if (isempty(n))
if (c == 1) % For one class, find two clusters.
n = 2;
else
n = c;
rand_init = 0; % Use given classification as initialisation.
end
end
if (n < 1), error('Number of clusters should be at least one.'); end
if (n == 1), prwarning(4,'Clustering with 1 cluster is trivial.'); end
% Set label type, if given.
if ~isempty(type), a = setlabtype(a,type); end
a = setprior(a,[]); % make sure that priors will be deleted
% Initialise by performing KCENTRES on...
prwaitbar(2,'EM Clustering, initialization');
prwaitbar(2,1);
if (rand_init)
if (m > n_ini) % ... a random subset of A.
prwarning(2,'Initializing by performing KCENTRES on a subset of %d samples.', n_ini);
a_ini = +gendat(+a,n_ini);
else
prwarning(2,'Initializing by performing KCENTRES on the training set.');
a_ini = +a; % ... the entire set A.
end
not_found = 1;
itern = 0;
while(not_found)
% try to find an initialisation with all class sizes > 1
itern = itern + 1;
if itern > 100
error('Not possible to find desired number of components')
end
% add some noise to data to avoid problems
% 50 trials
assign = kcentres(+distm(a_ini.*(ones(size(a_ini))+0.001*randn(size(a_ini)))),n,50);
% Train initial classifier on labels generated by KCENTRES and find
% initial hard labels. Use NMC instead of W_CLUST to make sure that we
% always have enough data to estimate the parameters.
a_ini = dataset(a_ini,assign);
a_ini = setprior(a_ini,getprior(a_ini,0));
d = a*(a_ini*nmc);
if (islabtype(a,'soft'))
new_lab = +d;
not_found = 0;
else
new_lab = d*labeld;
if all(classsizes(dataset(d,new_lab)) > 1)
not_found = 0;
end
end
end
lablist_org = [];
else
lablist_org = getlablist(a);
a = setlablist(a,[1:c]');
new_lab = getlabels(a); % Use given labeling.
end
% Ready for the work.
iter = 0;
change = 1;
prwaitbar(2,2,'EM Clustering, EM loop')
prwaitbar(100,['using ' getname(w_clust)]);
if (islabtype(a,'soft'))
prprogress(fid,'emclust optim: iter, change (mse):\n');
prprogress(fid,' %i, %f \n',0,0);
a = setlabels(a,new_lab);
a = setprior(a,getprior(a,0));
laba = getlabels(a);
lab = new_lab;
while (change > epsilon) % EM loop, run until labeling is stable.
prwaitbar(100,100-100*exp(-iter/10));
w_em = a*w_clust; % 1. Train classifier, density output.
b = a*(w_em*classc); % 2. Assign probability to training samples.
a = settargets(a,b); % 3. Insert probabilities as new labels.
change = mean(mean((+b-lab).^2)); lab = b;
prprogress(fid,' %i, %f \n',iter,change);
iter = iter+1;
if iter > 500
prwarning(1,'emclust stopped after 500 iterations')
change = 0;
end
end
else % crisp labels
prprogress(fid,'emclust optim: iter, change (#obj), #clust:\n');
prprogress(fid,' %i, %i %i \n',0,0,0);
lab = ones(m,1);
while (any(lab ~= new_lab)) % EM loop, run until labeling is stable.
prwaitbar(100,100-100*exp(-iter/10));
a = setlabels(a,new_lab); % 0. Set labels and store old labels.
a = setprior(a,getprior(a,0));% Set priors to class frequencies
lab = new_lab; %
a = remclass(a,1); % demand class sizes of at least 2 objects
itern = 0;
while getsize(a,3) < n % increase number of classes if necessary
itern = itern + 1;
if itern > 100
error('Not possible to find desired number of components')
end
laba = getlablist(a);
labmax = max(laba);
N = classsizes(a);
[Nmax,cmax] = max(N); % find largest class
aa = seldat(a,cmax); % select just that one
new_lab_aa = kmeans(aa,2); % split it by kmeans
N1 = sum(new_lab_aa == 1);
N2 = sum(new_lab_aa == 2);
if (N1 > 1 & N2 > 1) % use it if both classes have more than one sample
J = findlabels(a,laba(cmax,:));
a = setlabels(a,new_lab_aa + labmax,J);
end
end
w_em = a*w_clust; % 1. Compute classifier, crisp output.
b = a*w_em; % 2. Classify training samples.
new_lab = labeld(b); % 3. Insert classification as new labels.
prprogress(fid,' %i, %i %i \n', ...
iter,length(find(lab ~= new_lab)),length(unique(new_lab)));
iter = iter+1; %DXD Added also the iter for the crisp labels
if iter > 50
prwarning(1,'emclust stopped after 50 iterations')
change = 0;
end
end
end
prwaitbar(0)
prwaitbar(0)
if ~isempty(lablist_org) % substitute original labels if desired
new_lab = lablist_org(new_lab);
wlab = getlabels(w_em);
wlab = lablist_org(wlab);
w_em = setlabels(w_em,wlab);
end
return;
|
github
|
jacksky64/imageProcessing-master
|
normal_map.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/normal_map.m
| 8,134 |
utf_8
|
77e33f0dee2b9f0e6a636ddff92e37d9
|
%NORMAL_MAP Map a dataset on normal-density classifiers or mappings
%
% F = NORMAL_MAP(A,W)
%
% INPUT
% A Dataset
% W Mapping
%
% OUTPUT
% F Density estimation for classes in A
%
% DESCRIPTION
% Maps the dataset A by the normal density based classifier or mapping W.
% For each object in A, F returns the densities for each of the classes or
% distributions stored in W. For classifiers, the densities are weighted
% by the class prior probabilities. This routine is automatically called for
% computing A*W if W is a normal density based classifier or a mapping.
%
% Use W = LOGDENS(W) (or W = W*LOGDENS) if absolute densities are not
% needed and a more accurate posterior probability is desired.
%
% SEE ALSO
% MAPPINGS, DATASETS, QDC, UDC, LDC, GAUSSM, LOGDENS
% Copyright: R.P.W. Duin, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
% $Id: normal_map.m,v 1.16 2010/02/24 20:28:13 duin Exp $
function F = normal_map(varargin)
prtrace(mfilename);
if isstruct(varargin{1}) % this is a constructor call like W = normal_map(w,nlab,k,c)
[w,nlab,k,c] = deal(varargin{:});
deg = ndims(w.cov)-1; % linear or quadratic
n = size(w.mean,1); % number of components
w.det = zeros(1,n);
if (deg == 1)
H = w.cov;
if min(size(H)) == 1 % cov matrix stored as scalar or diagonal
E = 1./H;
w.det = repmat(sum(log(real(H+1e-16))),1,n) ;
else
if (prrank(H) < size(H,1))
prwarning(2,'Singular case, pseudo-inverse of the covariance matrix is used.');
E = real(prpinv(H));
else
E = real(prinv(H));
end
w.det = repmat(sum(log(real(preig(H)+1e-16)+realmin)),1,n);
end
w.cov = E;
elseif deg == 2
w.det = zeros(1,n);
for i=1:n
H = w.cov(:,:,i);
if (prrank(H) < size(H,1))
prwarning(1,'Singular case, pseudo-inverse of the covariance matrix is used.');
E = real(prpinv(H));
else
E = real(prinv(H));
end
w.cov(:,:,i) = E;
w.det(i) = sum(log(abs(preig(H)+1e-16)+realmin));
end
else
error('Illegal value for degree')
end
F = mapping(mfilename,'trained',w,nlab,k,c);
return
end
% Now we have an execution call like F = normal_map(A,W)
[A,W] = deal(varargin{:});
if isdatafile(A)
F = dataset(A*W);
return
end
w = +W; % data field of W (fields: w.mean, w.cov, w.prior, w.nlab)
% each of these data fields has a mean, cov, prior for separate
% Gaussian components. The nlab field assigns each component
% to a class.
if ~isfield(w,'det') % if det-field is not available, we are dealing with an old def
F = normal_map_old(A,W);
return
end
[k,c] = size(W); % c is number of classes
% DEG = 1 indicates a common cov. matrix and DEG = 2 - separate cov. matrices.
deg = ndims(w.cov)-1;
U = w.mean; G = w.cov; p = w.prior;
if (abs(1-sum(p)) > 1e-6)
error('Class or component probabilities do not sum to one.')
end
lablist = getlab(W);
[m,ka] = size(A);
if (ka ~= k),
error('Feature sizes of the dataset and the mapping do not match.');
end
n = size(U,1); % Number of components.
F = zeros(m,n); % Gaussian densities for each component to be computed.
if (deg == 1)
E = G;
end
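% For each component the log of the normal density is evaluated directly:
%   log N(x;u,G) = -0.5*(x-u)'*inv(G)*(x-u) - 0.5*log(det(G)) - 0.5*k*log(2*pi)
% where w.cov already stores inv(G) and w.det stores log(det(G)), both
% precomputed in the constructor call above.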
% Loop over components.
for i=1:n
% Shift A such that the mean lies at the origin.
X = +A - ones(m,1)*U(i,:);
if (deg == 2)
E = G(:,:,i);
end
if min(size(E)) == 1 % diagonal or scalar of cov matrix
if max(size(E)) == 1, E = repmat(E,1,k); end
F(:,i) = -0.5*sum(X.*X.*repmat(E(:)',m,1),2) - (w.det(i) + k*log(2*pi))*0.5;
else
% Gaussian distribution for the i-th component. Take log of density to preserve tails
F(:,i) = -0.5*sum(X'.*(E*X'),1)' - (w.det(i) + k*log(2*pi))*0.5;
end
if (getout_conv(W) ~= 2) % take log of density to preserve tails
F(:,i) = exp(F(:,i));
end
end
if isfield(w,'nlab')
% For mixtures of Gaussians. Relates components to classes
nlab = w.nlab;
cc = max(w.nlab);
else
nlab = [1:c];
cc = c;
end
if (getout_conv(W) == 2)
Cmax = max(F(:)); % scale to gain accuracy in the tails
F = F - Cmax;
end
FF = zeros(m,cc);
% Loop over true classes. Weight the probabilities by the priors.
for j = 1:cc
J = find(nlab == j);
if (getout_conv(W) == 2) % take log of density to preserve tails
% difficult to get this right for MOGs, and moreover, probably not of
% much help. Anyway, we give it a try.
FF(:,j) = exp(F(:,J))*w.prior(J)';
% like in parzen_map, use in tails just largest component
L = find(FF(:,j) <= 1e-300);
N = find(FF(:,j) > 1e-300);
if ~isempty(L)
[FM,R] = max(F(L,J),[],2);
FF(L,j) = FM + log(w.prior(R)');
end
if ~isempty(N)
FF(N,j) = log(FF(N,j));
end
else
FF(:,j) = F(:,J) * w.prior(J)';
end
end
if (getout_conv(W) == 2) % scale to gain accuracy in the tails
Fmax = max(FF,[],2); % really needed!!!!
FF = FF - repmat(Fmax,1,cc);
FF = exp(FF);
else
FF = FF + realmin; % avoid division by 0 when computing posteriors later
end
if isdataset(A)
F = setdata(A,FF,lablist);
else
F = FF;
end
return;
%NORMAL_MAP_OLD Map a dataset on normal-density classifiers or mappings
% using an old classifier definition:
% cov inverse during execution
%
function F = normal_map_old(A,W)
prtrace(mfilename);
w = +W; % data field of W (fields: w.mean, w.cov, w.prior, w.nlab)
% each of these data fields has a mean, cov, prior for separate
% Gaussian components. The nlab field assigns each component
% to a class.
[k,c] = size(W); % c is number of classes
% DEG = 1 indicates a common cov. matrix and DEG = 2 - separate cov. matrices.
deg = ndims(w.cov)-1;
U = w.mean; G = w.cov; p = w.prior;
if (abs(1-sum(p)) > 1e-6)
error('Class or component probabilities do not sum to one.')
end
lablist = getlab(W);
[m,ka] = size(A);
if (ka ~= k),
error('Feature sizes of the dataset and the mapping do not match.');
end
n = size(U,1); % Number of components.
F = zeros(m,n); % Gaussian densities for each component to be computed.
if (deg == 1)
H = G;
if (prrank(H) < size(H,1))
prwarning(2,'Singular case, pseudo-inverse of the covariance matrix is used.');
E = real(prpinv(H));
else
E = real(prinv(H));
end
end
% Loop over components.
for i=1:n
% Shift A such that the mean lies at the origin.
X = +A - ones(m,1)*U(i,:);
if (deg == 2)
H = G(:,:,i);
if (prrank(H) < size(H,1))
prwarning(1,'Singular case, pseudo-inverse of the covariance matrix is used.');
E = real(prpinv(H));
else
E = real(prinv(H));
end
end
% Gaussian distribution for the i-th component. Take log of density to preserve tails
F(:,i) = -0.5*sum(X'.*(E*X'),1)' - (sum(log(real(preig(H)+1e-16)+realmin)) + k*log(2*pi))*0.5;
if (getout_conv(W) ~= 2) % take log of density to preserve tails
F(:,i) = exp(F(:,i));
end
end
if isfield(w,'nlab')
% For mixtures of Gaussians. Relates components to classes
nlab = w.nlab;
cc = max(w.nlab);
else
nlab = [1:c];
cc = c;
end
if (getout_conv(W) == 2)
Cmax = max(F(:)); % scale to gain accuracy in the tails
F = F - Cmax;
end
FF = zeros(m,cc);
% Loop over true classes. Weight the probabilities by the priors.
for j = 1:cc
J = find(nlab == j);
if (getout_conv(W) == 2) % take log of density to preserve tails
% difficult to get this right for MOGs, and moreover, probably not of
% much help. Anyway, we give it a try.
FF(:,j) = exp(F(:,J))*w.prior(J)';
% like in parzen_map, use in tails just largest component
L = find(FF(:,j) <= 1e-300);
N = find(FF(:,j) > 1e-300);
if ~isempty(L)
[FM,R] = max(F(L,J),[],2);
FF(L,j) = FM + log(w.prior(R)');
end
if ~isempty(N)
FF(N,j) = log(FF(N,j));
end
else
FF(:,j) = F(:,J) * w.prior(J)';
end
end
if (getout_conv(W) == 2)
FF = exp(FF);
else
FF = FF + realmin; % avoid division by 0 when computing posteriors later
end
if isdataset(A)
F = setdata(A,FF,lablist);
else
F = FF;
end
return;
|
github
|
jacksky64/imageProcessing-master
|
circles3d.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/circles3d.m
| 930 |
utf_8
|
bf2367f2ff9b48e17f9421654f4ea159
|
% CIRCLES3D Create a data set containing 2 circles in 3 dimensions.
%
% DATA = CIRCLES3D(N)
%
% Creates a data set containing N points in 3 dimensions.
%
% If N is a vector of sizes, exactly N(I) objects are generated
% for class I, I = 1,2. Default: N = [50 50].
%
% See also DATASETS, PRDATASETS
% Copyright: E. Pekalska, R.P.W. Duin, [email protected]
% Faculty of Applied Sciences, Delft University of Technology
% P.O. Box 5046, 2600 GA Delft, The Netherlands
% $Id: circles3d.m,v 1.2 2006/03/08 22:06:58 duin Exp $
function data = circles3d(N)
prtrace(mfilename);
if nargin< 1, N = [50 50]; end
N = genclass(N,ones(1,2)/2);
n2a = N(1);
n2b = N(2);
ha = 0:(2*pi/n2a):2*pi*(n2a/(n2a+1)); ha = ha';
hb = 0:(2*pi/n2b):2*pi*(n2b/(n2b+1)); hb = hb';
a = [ sin(ha) cos(ha) zeros(n2a,1) ];
b = [ sin(hb) cos(hb) ones(n2b,1) ];
data = dataset ([a;b],genlab(N));
data = setname(data,'3D Circles');
return
|
github
|
jacksky64/imageProcessing-master
|
nodatafile.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/nodatafile.m
| 442 |
utf_8
|
63e4b7986ff24d4537c11522a30c50f2
|
%NODATAFILE Error return in case of datafile
%
% NODATAFILE
%
% Error message
%
% B = NODATAFILE(A)
% B = A*NODATAFILE
%
% Error message in case A is a datafile, otherwise B = A
function a = nodatafile(a)
if (nargin == 0 & nargout == 0) | (nargin == 1 & isdatafile(a) & nargout == 0)
error('prtools:nodatafile','Command not implemented for datafiles');
elseif nargin == 0
a = mapping(mfilename,'fixed');
else
;
end
return;
|
github
|
jacksky64/imageProcessing-master
|
gendatr.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/gendatr.m
| 784 |
utf_8
|
5282de6dceaaa18c3d45f24df4b4b109
|
%GENDATR Generation of regression data
%
% A = GENDATR(X,Y)
%
% INPUT
% X data matrix
% Y target values
%
% OUTPUT
% A regression dataset
%
% DESCRIPTION
% Generate a regression dataset from the data X and the target values Y.
%
% SEE ALSO
% SCATTERR, GENDATSINC
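%
% EXAMPLE
% % A sketch: 50 noisy samples of y = x.^2 (the values are illustrative):
% x = rand(50,1);
% a = gendatr(x,x.^2 + 0.05*randn(50,1));
% scatterr(a)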
% Copyright: D.M.J. Tax, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
function a = gendatr(x,y)
if nargin<2
y = 1;
end
if nargin<1
x = 0;
end
% check the sizes
[n,dim] = size(x);
if length(y)~=n
error('Size of X and Y do not match.');
end
% store it in the dataset:
a = dataset(x);
a = setlabtype(a,'targets',y);
if ~isa(x,'dataset')
fl = {};
for i=1:dim
fl{i} = sprintf('x_%d',i);
end
a = setfeatlab(a,fl);
end
return
|
github
|
jacksky64/imageProcessing-master
|
tree_map.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/tree_map.m
| 2,570 |
utf_8
|
cfb50d5529a9f524e2f717ffeeeb2533
|
%TREE_MAP Map a dataset by binary decision tree
%
% F = TREE_MAP(A,W)
%
% INPUT
% A Dataset
% W Decision tree mapping
%
% OUTPUT
% F Posterior probabilities
%
% DESCRIPTION
% Maps the dataset A by the binary decision tree classifier W on the
% [0,1] interval for each of the classes W is trained on. The
% posterior probabilities stored in F sum row-wise to one. W should
% be trained by a classifier like treec. This routine is called
% automatically to compute A*W if W is trained by treec.
%
% SEE ALSO
% MAPPINGS, DATASETS, TREEC
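%
% EXAMPLE
% A minimal usage sketch; TREE_MAP is invoked implicitly by A*W:
%  a = gendatb;                % two-class banana set
%  w = treec(a);               % train a decision tree
%  f = a*w;                    % posterior probabilities via tree_map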
% Copyright: R.P.W. Duin, [email protected]
% Faculty of Applied Sciences, Delft University of Technology
% P.O. Box 5046, 2600 GA Delft, The Netherlands
function [F,lab,N] = tree_map(T,W)
prtrace(mfilename);
% N yields for each node and each class in T the fraction of objects
% in T that passes that node.
%[tree,classlist,type,k,c,v,num] = mapping(W);
% Unpack the classifier:
w = getdata(W);
tree = w{1};
num = w{2};
[k,c] = size(W);
% Classification of the data vectors in T starts with node
% num. In F the aposteriori probabilities for the classes
% of the final node are returned.
% N yields for each node and each class in T the fraction
% of objects in T that passes that node.
% lab returns for each data vector the column for which F is
% maximum.
% tree(n,1) - feature number to be used in node n
% tree(n,2) - threshold t to be used
% tree(n,3) - node to be processed if value <= t
% tree(n,4) - node to be processed if value > t
% tree(n,5:4+c) - aposteriori probabilities for all classes in
% node n
% If tree(n,3) == 0, stop, class in tree(n,1)
%[nlabt,lablistt,m,kt,ct,pt] = dataset_old(T);
% Setup the variables, also depending what outputs are requested by
% the user:
[m,kt,ct] = getsize(T);
if kt ~= k, error('Wrong feature size'); end
[n,d] = size(tree);
lab = zeros(m,1);
if nargout==3
b = expandd(getnlab(T),ct);
N = zeros(n,ct);
end
F = zeros(m,c);
node = num*ones(1,m);
for i = num:n
S = find(node == i);
if nargout==3
N(i,:) = sum(b(S,:));
end
if tree(i,3) > 0
SL = S(find(+T(S,tree(i,1)) <= tree(i,2)));
SR = S(find(+T(S,tree(i,1)) > tree(i,2)));
node(SL) = tree(i,3)*ones(1,length(SL));
node(SR) = tree(i,4)*ones(1,length(SR));
elseif tree(i,3) == 0
node(S) = inf * ones(1,length(S));
lab(S) = tree(i,1) * ones(1,length(S));
F(S,:) = tree(i*ones(length(S),1),5:4+c);
else
% tree(i,3) < 0 should not occur; objects in S remain unassigned
end
end
if nargout==3
N=N./(ones(n,1)*(sum(b,1)./getprior(T)));
end
F = setdat(T,F,W);
%F = dataset_old(F,getlab(T),classlist,pt,lablistt);
return
|
github
|
jacksky64/imageProcessing-master
|
nu_svro.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/nu_svro.m
| 8,512 |
utf_8
|
f5e1da68470cd4080b3b5e0b71ceb0f4
|
%NU_SVRO Support Vector Optimizer
%
% [V,J] = NU_SVRO(K,Y,C)
%
% INPUT
% K Similarity matrix
% Y      Target values
% C      Scalar for weighting the errors (optional; default: 1)
%
% OUTPUT
% V Vector of weights for the support vectors
% J Index vector pointing to the support vectors
%
% DESCRIPTION
% A low level routine that optimizes the set of support vectors for a
% regression problem based on the similarity matrix K computed from the
% training set. NU_SVRO is called directly from NU_SVR. Y contains the
% regression targets. Optimization is done by quadratic programming.
% If available, the QLD function is used, otherwise an appropriate Matlab routine.
%
% SEE ALSO
% NU_SVR, SVO, SVC
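%
% EXAMPLE
% A minimal usage sketch of this low-level routine (normally invoked
% via NU_SVR), assuming A is a regression dataset:
%  K = +(a*proxm(a,'p',1));    % linear kernel matrix of dataset a
%  y = gettargets(a);          % regression targets
%  [v,J] = nu_svro(K,y,10);    % weights and indices of support vectors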
% Revisions:
% DR1, 07-05-2003
% Sign error in calculation of offset
% Copyright: D.M.J. Tax, D. de Ridder, R.P.W. Duin, [email protected]
% Faculty of Applied Sciences, Delft University of Technology
% P.O. Box 5046, 2600 GA Delft, The Netherlands
% $Id: nu_svro.m,v 1.2 2010/02/08 15:29:48 duin Exp $
function [v,J,epsilon_or_nu] = nu_svro(K,y,C,svr_type,nu_or_epsilon,pd,abort_on_error)
prtrace(mfilename);
if (nargin < 7) | isempty(abort_on_error)
abort_on_error = 0;
end
if (nargin < 6) | isempty(pd)
pd = 1;
end
if (nargin < 5)
nu_or_epsilon = [];
end
if (nargin < 4) | isempty(svr_type)
svr_type = 'nu';
end
switch svr_type
case 'nu'
if isempty(nu_or_epsilon)
prwarning(3,'nu is not specified, assuming 0.25.');
nu_or_epsilon = 0.25;
end
nu = nu_or_epsilon;
case {'eps', 'epsilon'}
svr_type = 'epsilon';
if isempty(nu_or_epsilon)
prwarning(3,'epsilon is not specified, assuming 1e-2.');
nu_or_epsilon = 1e-2;
end
epsilon = nu_or_epsilon;
end
if (nargin < 3)
prwarning(3,'C is not specified, assuming 1.');
C = 1;
end
vmin = C*1e-9; % Accuracy to determine when an object becomes the support object.
% Set up the variables for the optimization.
n = size(K,1);
D = K;
switch svr_type
case 'nu'
f = [-y', y'];
A = [[ones(1,n), -ones(1,n)]; ones(1,2*n)];
b = [0; C*nu*n];
case 'epsilon'
f = epsilon + [-y', y'];
A = [ones(1,n), -ones(1,n)];
b = 0;
end
lb = zeros(2*n,1);
ub = repmat(C,2*n,1);
p = rand(2*n,1);
if pd
% Make the kernel matrix K positive definite.
i = -30;
while (pd_check (D + (10.0^i) * eye(n)) == 0)
i = i + 1;
end
if (i > -30),
prwarning(2,'K is not positive definite. The diagonal is regularized by 10.0^(%d)*I',i);
end
i = i+2;
D = D + (10.0^(i)) * eye(n);
end
D = [[D, -D]; [-D, D]];
% Minimization procedure:
% minimizes: 0.5 x' D x + f' x
% subject to: Ax = b
%
if (exist('qld') == 3)
v = qld (D, f, -A, b, lb, ub, p, length(b));
elseif (exist('quadprog') == 2)
prwarning(1,'QLD not found, the Matlab routine QUADPROG is used instead.')
opt = optimset; opt.LargeScale='off'; opt.Display='off';
v = quadprog(D, f, [], [], A, b, lb, ub,[],opt);
else
prwarning(1,'QLD not found, the Matlab routine QP is used instead.')
verbos = 0;
negdef = 0;
normalize = 1;
v = qp(D, f, A, b, lb, ub, p, length(b), verbos, negdef, normalize);
end
% Find all the support vectors.
if isempty(v)
ErrMsg = 'Quadratic Optimization failed.';
[v,J,epsilon_or_nu] = ErrHandler(K,y,ErrMsg,abort_on_error);
return;
end
v = v(1:n)-v((n+1):end);
av = abs(v);
J = find(av > vmin); % sv's
I = J(av(J) < (C-vmin)); % on-tube sv's
%plot(v,y-K(:,J)*v(J),'.')
switch svr_type
case 'nu'
Ip = I(v(I) > 0);
Im = I(v(I) < 0);
if isempty(Ip) | isempty(Im);
prwarning(2,'epsilon and b are not unique: values from the admissible range will be taken');
J0 = find(av <= vmin); % non-sv's
if ~isempty(J0)
y_minus_wx_0 = y(J0)-K(J0,J)*v(J);
else
prwarning(2,'There are no non-sv''s: admissible (eps,b) range is partially unbounded');
end
end
if ~isempty(Ip)
b_plus_epsilon = mean(y(Ip)-K(Ip,J)*v(J),1);
else
lp_bound = -inf;
up_bound = inf;
if ~isempty(J0)
lp_bound = max(y_minus_wx_0,[],1);
end
Ipo = J((av(J) >= C-vmin) & v(J) > 0); % positive out of tube sv's
if ~isempty(Ipo)
up_bound = min(y(Ipo)-K(Ipo,J)*v(J),[],1);
end
if isinf(up_bound)
ErrMsg = 'Impossible situation: there are no positive sv''s.';
[v,J,epsilon_or_nu] = ErrHandler(K,y,ErrMsg,abort_on_error);
return;
elseif isinf(lp_bound)
b_plus_epsilon = up_bound;
else
if lp_bound > up_bound
ErrMsg = 'Impossible situation: admissible (eps,b) region is empty.';
[v,J,epsilon_or_nu] = ErrHandler(K,y,ErrMsg,abort_on_error);
return;
end
b_plus_epsilon = 0.5*(lp_bound+up_bound);
end
end
if ~isempty(Im)
b_minus_epsilon = mean(y(Im)-K(Im,J)*v(J),1);
else
lm_bound = -inf;
um_bound = inf;
Imo = J((av(J) >= C-vmin) & v(J) < 0); % negative out of tube sv's
if ~isempty(Imo)
lm_bound = max(y(Imo)-K(Imo,J)*v(J),[],1);
end
if ~isempty(J0)
um_bound = min(y_minus_wx_0,[],1);
end
if isinf(lm_bound)
ErrMsg = 'Impossible situation: there are no negative sv''s.';
[v,J,epsilon_or_nu] = ErrHandler(K,y,ErrMsg,abort_on_error);
return;
elseif isinf(um_bound)
b_minus_epsilon = lm_bound;
else
if lm_bound > um_bound
ErrMsg = 'Impossible situation: admissible (eps,b) range is empty.';
[v,J,epsilon_or_nu] = ErrHandler(K,y,ErrMsg,abort_on_error);
return;
end
b_minus_epsilon = 0.5*(lm_bound+um_bound);
end
end
% one more paranoic check
if exist('J0') == 1 & ~isempty(J0)
ok = 1;
if isempty(Ip) & ~isempty(Im)
ok = b_minus_epsilon <= min(y_minus_wx_0,[],1);
elseif ~isempty(Ip) & isempty(Im)
ok = b_plus_epsilon >= max(y_minus_wx_0,[],1);
end
if ~ok
ErrMsg = 'Impossible situation: inconsistency in admissible (eps,b) region.';
[v,J,epsilon_or_nu] = ErrHandler(K,y,ErrMsg,abort_on_error);
end
end
epsilon = 0.5*(b_plus_epsilon-b_minus_epsilon);
b = 0.5*(b_plus_epsilon+b_minus_epsilon);
epsilon_or_nu = epsilon;
case 'epsilon'
if ~isempty(I)
b = mean(y(I)-K(I,J)*v(J)-epsilon*sign(v(I)));
else
prwarning(2,'b is not unique: value from the admissible range will be taken');
lp_bound = -inf;
up_bound = inf;
lm_bound = -inf;
um_bound = inf;
J0 = find(av <= vmin); % non-sv's
if ~isempty(J0)
y_minus_wx_0 = y(J0)-K(J0,J)*v(J);
lp_bound = max(y_minus_wx_0,[],1)-epsilon;
um_bound = min(y_minus_wx_0,[],1)+epsilon;
else
prwarning(2,'There are no non-sv''s');
end
Ipo = J((av(J) >= C-vmin) & v(J) > 0); % positive out of tube sv's
if ~isempty(Ipo)
up_bound = min(y(Ipo)-K(Ipo,J)*v(J),[],1)-epsilon;
end
Imo = J((av(J) >= C-vmin) & v(J) < 0); % negative out of tube sv's
if ~isempty(Imo)
lm_bound = max(y(Imo)-K(Imo,J)*v(J),[],1)+epsilon;
end
l_bound = max(lm_bound,lp_bound);
u_bound = min(um_bound,up_bound);
ErrMsg = '';
if isinf(up_bound)
ErrMsg = 'Impossible situation: there are no positive sv''s.';
elseif isinf(lm_bound)
ErrMsg = 'Impossible situation: there are no negative sv''s.';
elseif l_bound > u_bound
ErrMsg = 'Impossible situation: admissible b region is empty.';
end
if ~isempty(ErrMsg)
[v,J,epsilon_or_nu] = ErrHandler(K,y,ErrMsg,abort_on_error);
return;
end
b = 0.5*(l_bound+u_bound);
end
nu = sum(av(J))/(C*n);
epsilon_or_nu = nu;
end
v = [v(J); b];
return;
function [v,J, epsilon_or_nu] = ErrHandler(K,y,ErrMsg,abort_on_error)
if abort_on_error
error(ErrMsg);
else
prwarning(1,[ErrMsg ' Pseudoinverse Regression is computed instead.']);
n = size(K,1);
v = prpinv([K ones(n,1)])*y;
J = [1:n]';
epsilon_or_nu = nan;
end
return
|
github
|
jacksky64/imageProcessing-master
|
lines5d.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/lines5d.m
| 1,045 |
utf_8
|
97363967a36f35b3569e57b8804c04df
|
%LINES5D Generates three 5-dimensional lines
%
% A = LINES5D(N);
%
% Generates a data set of N points, on 3 non-crossing, non-parallel lines
% in 5 dimensions.
%
% If N is a vector of sizes, exactly N(I) objects are generated
% for class I, I = 1,2,3. Default: N = [50 50 50].
%
% See also DATASETS, PRDATASETS
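%
% EXAMPLE
% A minimal usage sketch:
%  a = lines5d([30 30 30]);    % 90 points on 3 lines in 5D
%  scatterd(a);                % scatter plot of the first two dimensions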
% Copyright: E. Pekalska, R.P.W. Duin, [email protected]
% Faculty of Applied Sciences, Delft University of Technology
% P.O. Box 5046, 2600 GA Delft, The Netherlands
% $Id: lines5d.m,v 1.2 2006/03/08 22:06:58 duin Exp $
function data = lines5d(N)
prtrace(mfilename);
if nargin< 1, N = [50 50 50]; end
N = genclass(N,ones(1,3)/3);
n1 = N(1);
n2 = N(2);
n3 = N(3);
s1 = [0 0 0 1 0];
s2 = [1 1 1 0 0];
s3 = [0 1 0 1 0];
s4 = [1 1 1 1 1];
s5 = [0 1 1 0 1];
s6 = [1 0 1 1 1];
c1 = [0:1/(n1-1):1]';
c2 = [0:1/(n2-1):1]';
c3 = [0:1/(n3-1):1]';
a = c1*s1 + (1-c1)*s2;
a = [a; c2*s3 + (1-c2)*s4];
a = [a; c3*s5 + (1-c3)*s6];
data = dataset(a,genlab(N));
data = setname(data,'5D Lines');
return
|
github
|
jacksky64/imageProcessing-master
|
classnames.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/classnames.m
| 1,827 |
utf_8
|
5c6e1e8f88cda024ae125da56f552a6b
|
%CLASSNAMES Get names of classes of dataset or classifier
%
% NAMES = CLASSNAMES(A,C)
% NAMES = CLASSNAMES(W,C)
%
% INPUT
% A Dataset
% W Trained classifier
% C Class number(s) in class label list, default: all
%
% OUTPUT
% NAMES Names of classes (strings or numbers)
%
% DESCRIPTION
% Returns the names of the classes used in the dataset A or the classes
% used by the classifier W. If for datasets no output is requested the
% names and the sizes of the classes are printed on the screen.
% If given, just the names of the classes corresponding to the indices in
% C are returned.
%
% SEE ALSO
% DATASETS, MAPPINGS, CLASSSIZES
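%
% EXAMPLE
% A minimal usage sketch:
%  a = gendatb;                % two-class dataset
%  classnames(a)               % print class names and sizes
%  names = classnames(a,1);    % name of the first class only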
% Copyright: R.P.W. Duin, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
function out = classnames(a,N)
if nargin < 2, N = []; end
if isa(a,'dataset')
lablist = getlablist(a);
if nargout < 1 & islabtype(a,'crisp','soft')
s = classsizes(a);
if iscell(lablist), lablist = char(lablist); end
if isempty(N), N = 1:size(lablist,1); end
if isstr(lablist)
for j=N
if islabtype(a,'crisp')
fprintf('\n %6i %s',s(j),lablist(j,:));
else
fprintf('\n %8.2f %s',s(j),lablist(j,:));
end
end
else
for j=N
if islabtype(a,'crisp')
fprintf('\n %3i %6i',lablist(j),s(j));
else
fprintf('\n %3i %8.2f',lablist(j),s(j));
end
end
end
fprintf('\n\n');
end
names = lablist;
elseif ismapping(a)
if isuntrained(a)
error('No classes defined for untrained classifiers or mappings')
else
names = getlabels(a);
end
else
error('Dataset or trained classifier expected')
end
if ~isempty(N)
names = names(N,:);
end
if nargout > 0 | ismapping(a) | islabtype(a,'targets')
out = names;
end
return
|
github
|
jacksky64/imageProcessing-master
|
pinvr.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/pinvr.m
| 2,833 |
utf_8
|
c33e6d35b076b6ba37cda9cc196628b9
|
%PINVR PSEUDO-INVERSE REGRESSION
%
% [W,J] = PINVR(A,TYPE,PAR,EPS_TOL,MC,PD)
%
% INPUT
% A Dataset
% TYPE Type of the kernel (optional; default: 'p')
% PAR Kernel parameter (optional; default: 1)
%
% MC Do or do not data mean-centering (optional; default: 1 (to do))
% PD Do or do not the check of the positive definiteness (optional;
% default: 1 (to do)) (not implemented)
%
% OUTPUT
% W Mapping
% J Object identifiers of support objects
%
% DESCRIPTION
% Trains a regression mapping by computing the pseudo-inverse (least
% squares) solution in the kernel space defined by PROXM with kernel
% TYPE and parameter PAR.
% SEE ALSO
% MAPPINGS, DATASETS, PROXM
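%
% EXAMPLE
% A minimal usage sketch:
%  a = gendatsinc(100);        % regression dataset
%  w = pinvr(a,'p',3);         % third degree polynomial kernel
%  scatterr(a); plotr(w);      % plot data and fitted curve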
% Copyright: S.Verzakov, [email protected]
% Based on SVC.M by D.M.J. Tax, D. de Ridder, R.P.W. Duin
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
% $Id: pinvr.m,v 1.3 2010/02/08 15:29:48 duin Exp $
function [W, J] = pinvr(a,type,par,eps_tol,mc,pd)
prtrace(mfilename);
if nargin < 2 | ~isa(type,'mapping')
if nargin < 6
pd = 1;
end
if nargin < 5 |isempty(mc)
mc = 1;
end
if nargin < 4
eps_tol = [];
end
if nargin < 3 | isempty(par)
par = 1;
prwarning(3,'Kernel parameter par set to 1\n');
end
if nargin < 2 | isempty(type)
type = 'p';
prwarning(3,'Polynomial kernel type is used\n');
end
if nargin < 1 | isempty(a)
W = mapping(mfilename,{type,par,eps_tol,mc,pd});
W = setname(W,['Pseudoinverse Regression']);
return;
end
islabtype(a,'targets');
[m,k] = getsize(a);
y = gettargets(a);
if size(y,2) == 1 % 1-dim regression
uy = mean(y);
y = y - uy;
if mc
u = mean(a);
a = a - ones(m,1)*u;
else
u = [];
end
K = a*proxm(a,type,par);
if ~isempty(eps_tol)
tol = (m+1)*norm([K ones(m,1)])*eps_tol;
v = prpinv([K ones(m,1)],tol)*y;
else
v = prpinv([K ones(m,1)])*y;
end
J = [1:m]';
% Store the results:
v(end) = v(end)+uy;
W = mapping(mfilename,'trained',{u,a(J,:),v,type,par},getlablist(a),k,1);
W = setname(W,['Pseudoinverse Regression']);
%W = setcost(W,a);
J = getident(a,J);
%J = a.ident(J);
else
error('multitarget regression is not supported');
end
else % execution
w = +type;
m = size(a,1);
% The first parameter w{1} stores the mean of the dataset. When it
% is supplied, remove it from the dataset to improve the numerical
% precision. Then compute the kernel matrix using proxm.
if isempty(w{1})
d = a*proxm(w{2},w{4},w{5});
else
d = (a-ones(m,1)*w{1})*proxm(w{2},w{4},w{5});
end
% When Data is mapped by the kernel, now we just have a linear
% classifier w*x+b:
d = [d ones(m,1)] * w{3};
W = setdat(a,d,type);
end
return;
|
github
|
jacksky64/imageProcessing-master
|
parzenc.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/parzenc.m
| 4,533 |
utf_8
|
26b1e34406d437806141f7014b7a5bc5
|
%PARZENC Optimisation of the Parzen classifier
%
% [W,H] = PARZENC(A)
% W = PARZENC(A,H,FID)
%
% INPUT
% A dataset
% H smoothing parameter (may be scalar, vector of per-class
% parameters, or matrix with parameters for each class (rows) and
% dimension (columns))
% FID File ID to write progress to (default [], see PRPROGRESS)
%
% OUTPUT
% W trained mapping
% H estimated smoothing (scalar value)
%
% DESCRIPTION
% Computation of the optimum smoothing parameter H for the Parzen
% classifier between the classes in the dataset A. The leave-one-out
% Lissack & Fu estimate is used for the classification error E. The
% final classifier is stored as a mapping in W. It may be converted
% into a classifier by W*CLASSC. PARZENC cannot be used for density
% estimation.
%
% In case smoothing H is specified, no learning is performed, just the
% discriminant W is produced for the given smoothing parameters H.
% Smoothing parameters may be scalar, vector of per-class parameters, or
% a matrix with individual smoothing for each class (rows) and feature
% directions (columns)
%
% REFERENCES
% T. Lissack and K.S. Fu, Error estimation in pattern recognition via
% L-distance between posterior density functions, IEEE Trans. Inform.
% Theory, vol. 22, pp. 34-45, 1976.
%
% SEE ALSO
% DATASETS, MAPPINGS, PARZEN_MAP, PARZENML, PARZENDC, CLASSC, PRPROGRESS
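%
% EXAMPLE
% A minimal usage sketch:
%  a = gendatb;                % two-class banana set
%  [w,h] = parzenc(a);         % optimize smoothing parameter and train
%  e = a*w*testc;              % apparent classification error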
% Copyright: R.P.W. Duin, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
% $Id: parzenc.m,v 1.6 2008/07/03 09:11:44 duin Exp $
function [W,h] = parzenc(a,h,fid)
prtrace(mfilename);
if nargin < 3, fid = []; end
if nargin < 2
h = [];
prwarning(4,'smoothing parameter not supplied, optimizing');
end
if nargin == 0 | isempty(a)
W = mapping(mfilename,h);
W = setname(W,'Parzen Classifier');
return;
end
islabtype(a,'crisp','soft');
isvaldfile(a,2,2); % at least 2 objects per class, 2 classes
a = testdatasize(a);
a = testdatasize(a,'objects');
[m,k,c] = getsize(a);
nlab = getnlab(a);
if ~isempty(h) % take user setting for smoothing parameter
if size(h,1) == 1, h = repmat(h,c,1); end
if size(h,2) == 1, h = repmat(h,1,k); end
if any(size(h) ~= [c,k])
error('Array with smoothing parameters has wrong size');
end
W = mapping('parzen_map','trained',{a,h},getlablist(a),k,c);
W = setname(W,'Parzen Classifier');
return
end
% compute all object distances
% make diagonal inf to exclude objects own contribution
D = +distm(a) + diag(inf*ones(1,m));
% find object frequencies
if islabtype(a,'crisp')
csize = classsizes(a);
of = csize(nlab);
else
csize = sum(gettargets(a),1);
end
% find object weights q
p = getprior(a);
a = setprior(a,p);
q = p(nlab)./csize(nlab);
% initialise
h = max(std(a)); % for sure a too high value
L = -inf;
Ln = 0;
z = 0.1^(1/k); % initial step size
% iterate
prprogress(fid,'parzenc: error optimization smoothing parameter: ');
prprogress(fid,' %6.4f %6.4f \n',0,0);
iter = 0;
prwaitbar(100,'parzenc: Optimizing smoothing parameter',m > 100);
while abs(Ln-L) > 0.001 & z < 1
% In L we store the best performance estimate found so far.
% Ln is the actual performance (for the actual h)
% If Ln > L we improve the bound L, and so we update it.
if Ln > L, L = Ln; end
iter = iter+1;
prwaitbar(100,100-100*exp(-iter/10));
r = -0.5/(h^2);
F = q(ones(1,m),:)'.*exp(D*r); % density contributions
FS = sum(F)*((m-1)/m); IFS = find(FS>0); % joint density distribution
if islabtype(a,'crisp');
G = sum(F .* (nlab(:,ones(1,m)) == nlab(:,ones(1,m))'));
G = G.*(of-1)./of; % true-class densities
else
% here we are for soft labels (stored in targets)
G = zeros(1,m);
for j=1:c
G = G + sum(F .* (a.targets(:,j) * a.targets(:,j)'));
% to be corrected for bias?
end
end
% performance estimate
en = max(p)*ones(1,m);
en(IFS) = (G(IFS))./FS(IFS);
Ln = exp(sum(log(en))/m);
prprogress(fid,' %6.4f %6.4f \n',h,Ln);
if Ln < L % compute next estimate
z = sqrt(z); % adjust stepsize up (recall: 0 < z < 1)
h = h / z; % if we don't improve, increase h (approach h_opt from below)
else
h = h * z; % if we improve, decrease h (approach h_opt from above)
end
end
prwaitbar(0);
W = mapping('parzen_map','trained',{a,repmat(h,c,k);},getlablist(a),k,c);
W = setname(W,'Parzen Classifier');
W = setcost(W,a);
return
|
github
|
jacksky64/imageProcessing-master
|
datasetm.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/datasetm.m
| 841 |
utf_8
|
732f812706da1496992bb0c3c92127af
|
%DATASETM Mapping conversion to dataset
%
% B = DATASETM(A)
% B = A*DATASETM
%
% INPUT
% A Datafile or double array
%
% OUTPUT
% B DATASET
%
% DESCRIPTION
% This command is almost identical to B = DATASET(A), except that it
% supports the mapping type of construct: B = A*DATASETM. This may be
% especially useful to include the dataset conversion in the processing
% definitions of a datafile.
%
% SEE ALSO
% DATASETS, DATAFILES, MAPPINGS, DATASET
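%
% EXAMPLE
% A minimal usage sketch, assuming ADF is a datafile:
%  b = adf*im_norm*datasetm;   % process the datafile, then convert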
% Copyright: R.P.W. Duin, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
function b = datasetm(a)
if nargin < 1
b = mapping(mfilename,'fixed');
elseif isdataset(a)
b = a;
elseif isdatafile(a) | isa(a,'double')
b = dataset(a);
else
error('Unexpected input')
end
|
github
|
jacksky64/imageProcessing-master
|
prversion.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/prversion.m
| 726 |
utf_8
|
31fae3be7d1e6e9b4cc4eca5bf907ad6
|
%PRVERSION PRtools version number
%
% [VERSION,STR,DATE] = PRVERSION
%
% OUTPUT
% VERSION Version number (double)
% STR Version number (string)
% DATE Version date (string)
%
% DESCRIPTION
% Returns the numerical version number of PRTools VER (e.g. VER = 3.2050)
% and as a string, e.g. STR = '3.2.5'. In DATE, the version date is returned
% as a string.
% $Id: prversion.m,v 1.2 2006/03/08 22:06:58 duin Exp $
function [version,str,date] = prversion
signature = prtver;
str = signature{1}.Version;
date = signature{1}.Date;
version = str2num(str(1)) + (str2num(str(3))*1000 + str2num(str(5))*10)/10000;
if nargout == 0
disp([newline ' PRTools version ' str newline])
clear version
end
return;
|
github
|
jacksky64/imageProcessing-master
|
im_center.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/im_center.m
| 1,438 |
utf_8
|
00ed7fe66c0c585fdbfffedd6331d429
|
%IM_CENTER Shift all binary images in dataset: center to center of gravity
%
% B = IM_CENTER(A)
% B = A*IM_CENTER
%
% The objects in the binary images are shifted such that their centers of
% gravities are in the image center.
%
% B = IM_CENTER(A,N)
%
% In all directions N rows and columns are added after shifting.
%
% SEE ALSO
% DATASETS, DATAFILES
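%
% EXAMPLE
% A minimal usage sketch, assuming A is a dataset with binary object
% images:
%  b = im_center(a,2);         % center objects, pad 2 rows/columns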
% Copyright: R.P.W. Duin, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
function b = im_center(a,n)
prtrace(mfilename);
if nargin < 2 | isempty(n), n= 0; end
if nargin < 1 | isempty(a)
b = mapping(mfilename,'fixed',{n});
b = setname(b,'Image centering');
elseif isa(a,'dataset') % allows datafiles too
isobjim(a);
b = filtim(a,mfilename,{n});
b = setfeatsize(b,getfeatsize(a));
elseif isa(a,'double') | isa(a,'dip_image') % here we have a single image
if isa(a,'dip_image'), a = double(a); end
a = im_box(a);
[sy,sx] = size(a);
mxy = im_mean(a);
mx = 2*round(mxy(1)*sx)-1-sx;
my = 2*round(mxy(2)*sy)-1-sy;
if mx < 0
a = [zeros(sy,-mx) a];
elseif mx > 0
a = [a zeros(sy,mx)];
end
sx = sx + abs(mx);
if my < 0
a = [zeros(-my,sx); a];
elseif my > 0
a = [a; zeros(my,sx)];
end
sy = sy + abs(my);
if n > 0
b = zeros(sy+2*n,sx+2*n);
b(n+1:n+sy,n+1:n+sx) = a;
else
b = a;
end
[ry,rx] = size(b);
end
return
|
github
|
jacksky64/imageProcessing-master
|
gendatlin.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/gendatlin.m
| 940 |
utf_8
|
f1930c8927d3b84b82eba5af2f1893ac
|
%GENDATLIN Generation of linear regression data
%
% A = GENDATLIN(N,B0,B1,SIGMA)
%
% INPUT
% N Number of objects to generate
% B0 Offset
% B1 Slope
% SIGMA Standard deviation of the noise
%
% OUTPUT
% A Regression dataset
%
% DESCRIPTION
% Generate regression data A, containing N (x,y)-pairs according to:
% y = B0 + B1^T*x + N(0,SIGMA)
% Data x is distributed uniformly between 0 and 1.
%
% SEE ALSO
% GENDATSIN, GENDATSINC
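%
% EXAMPLE
% A minimal usage sketch:
%  a = gendatlin(50,2,-1,0.1); % 50 samples of y = 2 - x + noise
%  scatterr(a);                % plot the regression data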
% Copyright: D.M.J. Tax, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
function a = gendatlin(n,b0,b1,sig)
if nargin<4
sig = 0.1;
end
if nargin<3
b1 = -1;
end
if nargin<2
b0 = 2;
end
if nargin<1
n = 25;
end
% check the size of beta1
b1 = b1(:);
dim = size(b1,1);
% generate the data:
x = rand(n,dim);
y = x*b1 + b0 + sig(1).*randn(n,1);
% store it in the dataset:
a = gendatr(x,y);
return
|
github
|
jacksky64/imageProcessing-master
|
image_dbr.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/image_dbr.m
| 18,808 |
utf_8
|
9b920c6c4de759707dd093ddb4db2928
|
function varargout = image_dbr(varargin)
%IMAGE_DBR M-file for image_dbr.fig
% IMAGE_DBR, by itself, creates a new IMAGE_DBR or raises the existing
% singleton*.
%
% H = IMAGE_DBR returns the handle to a new IMAGE_DBR or the handle to
% the existing singleton*.
%
% IMAGE_DBR('Property','Value',...) creates a new IMAGE_DBR using the
% given property value pairs. Unrecognized properties are passed via
% varargin to image_dbr_OpeningFcn. This calling syntax produces a
% warning when there is an existing singleton*.
%
% IMAGE_DBR('CALLBACK') and IMAGE_DBR('CALLBACK',hObject,...) call the
% local function named CALLBACK in IMAGE_DBR.M with the given input
% arguments.
%
% *See GUI Options on GUIDE's Tools menu. Choose "GUI allows only one
% instance to run (singleton)".
%
% See also: GUIDE, GUIDATA, GUIHANDLES
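%
% EXAMPLE
% A minimal usage sketch, assuming A is an image database and F1, F2
% are corresponding feature sets:
% image_dbr(a,{f1,f2},knnc([],1),meanc)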
% Edit the above text to modify the response to help image_dbr
% Last Modified by GUIDE v2.5 16-Aug-2008 23:50:43
% Begin initialization code - DO NOT EDIT
gui_Singleton = 1;
gui_State = struct('gui_Name', mfilename, ...
'gui_Singleton', gui_Singleton, ...
'gui_OpeningFcn', @image_dbr_OpeningFcn, ...
'gui_OutputFcn', @image_dbr_OutputFcn, ...
'gui_LayoutFcn', [], ...
'gui_Callback', []);
if nargin && ischar(varargin{1})
gui_State.gui_Callback = str2func(varargin{1});
end
if nargout
[varargout{1:nargout}] = gui_mainfcn(gui_State, varargin{:});
else
gui_mainfcn(gui_State, varargin{:});
end
% End initialization code - DO NOT EDIT
function image_dbr_OpeningFcn(hObject, eventdata, handles, varargin)
% Main routine, this starts after creation of the guide
% Handling of input parameters
handles.output = hObject;
h = my_handles(handles);
if length(varargin) < 4
cclasf = meanc; % default combiner
else
cclasf = varargin{4};
end
if length(varargin) < 3
clasf = knnc([],1); % default classifier
else
clasf = varargin{3};
end
featsets = varargin{2}; % arrange feature sets in cells
if ~iscell(featsets), featsets = {featsets}; end
if ~iscell(clasf)
clasf = repmat({clasf},1,length(featsets));
else
if length(clasf) ~= length(featsets)
error('Number of classifiers and number of feature sets do not match')
end
end
database = varargin{1}; % dataset or datafile, just used for image display
m = size(database,1);
labels = zeros(1,m); % user give labels: 0 (no), 1 (target, 2 (outlier)
S = randperm(m); % initial set of images to be shown
for j=1:4 % feature weight slider bars default invisible
set(h.feat_weight(j),'visible','off');
set(handles.(['text' num2str(j+2)]),'visible','off');
end
% optimize location sliders in case of 2 or 3 sliders
if length(featsets) == 1, feath = 2; end
if length(featsets) == 2, feath = [2 3]; end
if length(featsets) == 3, feath = [1 2 3]; end
if length(featsets) == 4, feath = [1 2 3 4]; end
for j=1:length(featsets) % initialize feature sets and related sliders
if size(featsets{j},1) ~= m
error('Feature set(s) have wrong size')
end
set(handles.(['text' num2str(feath(j)+2)]),'visible','on');
set(handles.(['text' num2str(feath(j)+2)]),'string',getname(featsets{j}));
% re-arrange datasets with features, use 'target' and 'outlier' as labels
featsets{j} = dataset(featsets{j});
featsets{j} = featsets{j}*scalem(featsets{j},'variance');
featsets{j} = dataset(+featsets{j});
featsets{j} = setlablist(featsets{j},char('target','outlier'));
featsets{j} = setnlab(featsets{j},labels);
set(h.feat_weight(feath(j)),'value',1); % initial weights: 1
set(h.feat_weight(feath(j)),'visible','on');
end
if length(featsets) == 1 % no feature slider bar in case of one feature set
set(h.feat_weight(feath(1)),'visible','off');
set(handles.(['text' num2str(feath(j)+2)]),'visible','off');
end
% store useful data in figure user data field
guidata(hObject,{h,database,featsets,clasf,cclasf});
set(h.classify,'userdata','reset'); % initial procedure: reset
set(h.all,'value',0); % initial ranking:
set(h.unlabeled,'value',2); % unlabeled objects
labels_im = zeros(1,10); % labels of shown images
weights = zeros(1,4); % weights of feature sets
Targets = [];
Outliers= [];
show_targ_out(database,Targets,1,h,'Targets'); % initialise target /
show_targ_out(database,Outliers,1,h,'Outliers');% outlier images
S = randperm(m);
waitstate = prwaitbar;
prwaitbar off
% main loop
while(1) % S, images to be shown
show_images(database,S,1,h); % show images on axes
% wait for the user
% ..........
uiwait % uiresume is activated by one of the
% CLASSIFY / LABEL / RESET / QUIT buttons.
% get labels and weights
Targets = get(h.targets,'userdata'); % get Targets
Outliers = get(h.outliers,'userdata');% and Outliers
if isempty(guidata(hObject)) % we need to stop (QUIT button is pressed)
guidata(hObject,{S,Targets,Outliers});%
prwaitbar(waitstate);
return % return ranking S, Targets and Outliers.
end
proch = proc(h); % classify / label / reset
if ~strcmp(proch,'reset');
for j=1:10 % labels are 1-2, convert from 0-1 check boxes
labels_im(j) = 2 - get(h.handlab(j),'value');
end
for j=1:length(featsets) % get feature weights from sliders
weights(j) = get(h.feat_weight(feath(j)),'value');
end
T = [1:m]; % potential trainingset
% Targets = get(h.targets,'userdata');
% Outliers = get(h.outliers,'userdata');
t = get(h.next,'userdata'); n = t{2};
SS = []; % training set indices in case of 'label' procedure
for j=1:10 % update labeling
SAct = S(j-1+n); % object number j in S
if labels_im(j) == 1
if isempty(find(Targets==SAct)), Targets = [Targets SAct]; end
else
if isempty(find(Outliers==SAct)), Outliers = [Outliers SAct]; end
end
SS = [SS SAct];
end
set(h.targets,'userdata',Targets); % store indices of Targets
set(h.outliers,'userdata',Outliers); % and outliers
labels = zeros(1,m); % construct labels training set
labels(Targets) = ones(1,length(Targets));
labels(Outliers) = 2*ones(1,length(Outliers));
if strcmp(proch,'label') % Label: learn from present set only
if all(labels_im == labels_im(1)) % present set is uniformly labeled
if labels_im(1) == 1 % no outliers, use remaining unlabeled dataset as outlier
labels_train = ones(size(labels))*2; % Label entire set as outlier
labels_train(SS) = ones(size(SS)); % Label present set as target
else % no targets, use remaining dataset as target
labels_train = ones(size(labels)); % Label entire set as target
labels_train(SS) = 2*ones(size(SS)); % Label present set as outlier
end
else
T = SS;
labels_train = labels_im;
end
[S,W] = train_classify(featsets,T,labels_train,clasf,cclasf,weights);
else % Classify procedure:
T = find(labels > 0); % learn from all Targets and Outliers
labels_train = labels(T);
[S,W] = train_classify(featsets,T,labels_train,clasf,cclasf,weights);
end
U = find(labels(S) == 0);
set(h.all,'userdata',{S,U});
if (get(h.all,'value') == 0)
S = S(U);
end
else % reset
S = randperm(m);
end
% S is the ranked set of indices, either for all objects, or for just
% the unlabeled objects
end
return
function h = my_handles(handles)
% construct meaningful handle names / numbers
T = [1 2 3 4 7 8 9 10 11 12]; % correct checkbox numbering
h.image = zeros(1,10);
h.handlab = zeros(1,10);
h.feat_weight = zeros(1,4);
for j=1:10
h.image(j) = handles.(['axes' num2str(j)]);
h.imnum(j) = handles.(['text' num2str(j+8)]);
h.obnum(j) = handles.(['text' num2str(j+18)]);
h.handlab(j) = handles.(['checkbox' num2str(T(j))]);
end
for j=1:4
h.feat_weight(j) = handles.(['slider' num2str(j)]);
end
h.all = handles.radiobutton4;
h.unlabeled = handles.radiobutton5;
h.classify = handles.pushbutton1;
h.targets = handles.axes11;
h.outliers = handles.axes12;
h.target_slider = handles.slider5;
h.outliert_slider = handles.slider6;
h.target_title = handles.text1;
h.outlier_title = handles.text2;
h.target_obnum = handles.text29;
h.outlier_obnum = handles.text30;
h.target_delete = handles.pushbutton2;
h.target_move = handles.pushbutton3;
h.outlier_delete = handles.pushbutton5;
h.outlier_move = handles.pushbutton4;
h.next = handles.pushbutton8;
h.previous = handles.pushbutton9;
return
function varargout = image_dbr_OutputFcn(hObject, eventdata, handles)
% takes care of the final output of the main routine
varargout = guidata(hObject);
delete(hObject);
return
function pushbutton1_Callback(hObject, eventdata, handles)
% CLASSIFY button, this reactivates the UIWAIT function in the main loop
s = guidata(hObject);
h = s{1};
set(h.classify,'userdata','classify');
uiresume
return
function show_images(database,S,n,h)
% show images on axes
Targets = get(h.targets,'userdata');
Outliers = get(h.outliers,'userdata');
im = data2im(database,S(n:n+9));
% the new images
for j=1:10
show_im(im,j,h.image(j));
set(h.handlab(j),'value',1); % set target default
set(h.imnum(j),'string',num2str(n+j-1));
set(h.obnum(j),'string',num2str(S(n+j-1)));
end
set(h.next,'userdata',{S,n});
% set visibility of next / previous buttons
if n > 1
set(h.previous,'visible','on');
else
set(h.previous,'visible','off');
end
if n < (length(S)-9)
set(h.next,'visible','on');
else
set(h.next,'visible','off');
end
% the targets, show most recent one
if ~isempty(Targets)
show_targ_out(database,Targets,length(Targets),h,'Targets');
set(h.target_obnum,'string',num2str(Targets(end)));
end
% the outliers, show most recent one
if ~isempty(Outliers)
show_targ_out(database,Outliers,length(Outliers),h,'Outliers');
set(h.outlier_obnum,'string',num2str(Outliers(end)));
end
return
function show_targ_out(database,Pointers,n,h,name)
% show targets or outliers and update properties
if strcmp(name,'Targets') % create single handle variable for targets and outliers
h_axes = h.targets;
h_slider = h.target_slider;
h_title = h.target_title;
h_delete = h.target_delete;
h_move = h.target_move;
h_obnum = h.target_obnum;
else % Outliers
h_axes = h.outliers;
h_slider = h.outliert_slider;
h_title = h.outlier_title;
h_delete = h.outlier_delete;
h_move = h.outlier_move;
h_obnum = h.outlier_obnum;
end
if isempty(Pointers) % all Targets / Outliers deleted, make all related objects invisible
set(h_title,'String',['No ' name ' defined'])
set(get(h_axes,'children'),'visible','off')
set(h_axes,'visible','off');
set(h_slider,'visible','off');
set(h_delete,'visible','off');
set(h_move,'visible','off');
set(h_obnum,'visible','off');
set(h_axes,'userdata',[]);
else
show_im(data2im(database,Pointers(n)),1,h_axes); % show image
set(h_axes,'userdata',Pointers);
num_pointers = length(Pointers); % number of images
set(h_title,'String',[num2str(num_pointers) ' ' name]); % set title
set(h_obnum,'String',num2str(Pointers(n)));
set(h_slider,'Min',0.99999/num_pointers); % min (about 1/n)
set(h_slider,'Max',1); % max
if num_pointers > 1 % step size
set(h_slider,'SliderStep',[1/(num_pointers-1) 1/(num_pointers-1)]);
set(h_slider,'value',n/num_pointers);
set(h_slider,'visible','on');
else % special case: one target/outlier image
set(h_slider,'SliderStep',[1 1]);
set(h_slider,'value',n/num_pointers);
set(h_slider,'visible','off');
end
% make image and buttons visible
set(get(h_axes,'children'),'visible','on')
set(h_delete,'visible','on');
set(h_move,'visible','on');
set(h_obnum,'visible','on');
end
return
function proch = proc(h)
% get desired procedure
proch = get(h.classify,'userdata'); % classify / label / reset
return
function show_im(im,n,h_axes)
% low level display routine
if iscell(im), y = im{n};
else, y = squeeze(im(:,:,:,n));
end
if size(y,3) == 1 % reset gray images to color
y = cat(3,y,y,y);
end
axes(h_axes); % activate the right axes
image(y); % display
% prepare for callback
set(get(gca,'children'),'ButtonDownFcn',{@resetlab,n})
axis off % make axes invisible
axis equal
return
function resetlab(hObject, eventdata,n)
% Reset the image labels (target/outlier) by clicking in the image
s = guidata(hObject);
h = s{1};
get(h.handlab(n),'value');
set(h.handlab(n),'value',1 - get(h.handlab(n),'value'));
get(h.handlab(n),'value');
return
function [S,W] = train_classify(featsets,T,labels_train,clasf,cclasf,weights);
% train, combine, classify and rank
% S will be the ranked object indices of all or unlabeled objects
d = [];
W = [];
for j=1:length(featsets)
b = featsets{j};
trainset = setnlab(b(T,:),labels_train);
%trainset = setprior(trainset,getprior(trainset,0));
trainset = setprior(trainset,0);
if ~isvaldset(trainset,2)
v = trainset*knnc([],1);
else
v = trainset*clasf{j}*classc;
end
d = [d featsets{j}*v*weights(j)];
W = [W;v];
end
d = d*cclasf;
d = +d(:,'target');
[dd,S] = sort(-d);
% W = v*affine(weights)*cclasf; % to be corrected
W = [];
return
function slider5_Callback(hObject, eventdata, handles)
% target slider
s = guidata(hObject);
h = s{1};
database = s{2};
Targets = get(h.targets,'userdata');
n = round(get(h.target_slider,'value')*length(Targets));
show_im(data2im(database,Targets(n)),1,h.targets);
set(h.targets,'userdata',Targets); % image() destroys userdata, restore it
set(h.target_title,'String',[num2str(length(Targets)) ' Targets']); % needed???
set(h.target_obnum,'String',num2str(Targets(n)));
return
function slider6_Callback(hObject, eventdata, handles)
% outlier slider
s = guidata(hObject);
h = s{1};
database = s{2};
Outliers = get(h.outliers,'userdata');
n = round(get(h.outliert_slider,'value')*length(Outliers));
show_im(data2im(database,Outliers(n)),1,h.outliers);
set(h.outliers,'userdata',Outliers); % image() destroys userdata, restore it
set(h.outlier_title,'String',[num2str(length(Outliers)) ' Outliers']); % needed???
set(h.outlier_obnum,'String',num2str(Outliers(n)));
return
function pushbutton2_Callback(hObject, eventdata, handles)
% delete target
s = guidata(hObject);
h = s{1};
database = s{2};
Targets = get(h.targets,'userdata');
n = round(get(h.target_slider,'value')*length(Targets));
Targets(n) = [];
n = max(n-1,1);
show_targ_out(database,Targets,n,h,'Targets');
return
function pushbutton3_Callback(hObject, eventdata, handles)
% move target to outlier
s = guidata(hObject);
h = s{1};
database = s{2};
Targets = get(h.targets,'userdata');
Outliers = get(h.outliers,'userdata');
n = round(get(h.target_slider,'value')*length(Targets));
Outliers = [Outliers Targets(n)];
Targets(n) = [];
n = max(n-1,1);
show_targ_out(database,Outliers,length(Outliers),h,'Outliers');
show_targ_out(database,Targets,n,h,'Targets');
return
function pushbutton4_Callback(hObject, eventdata, handles)
% move outlier to target
s = guidata(hObject);
h = s{1};
database = s{2};
Outliers = get(h.outliers,'userdata');
Targets = get(h.targets,'userdata');
n = round(get(h.outliert_slider,'value')*length(Outliers));
Targets = [Targets Outliers(n)];
Outliers(n) = [];
n = max(n-1,1);
show_targ_out(database,Targets,length(Targets),h,'Targets');
show_targ_out(database,Outliers,n,h,'Outliers');
return
function pushbutton5_Callback(hObject, eventdata, handles)
% delete outlier
s = guidata(hObject);
h = s{1};
database = s{2};
Outliers = get(h.outliers,'userdata');
n = round(get(h.outliert_slider,'value')*length(Outliers));
Outliers(n) = [];
n = max(n-1,1);
show_targ_out(database,Outliers,n,h,'Outliers');
return
function pushbutton6_Callback(hObject, eventdata, handles)
% Reset
s = guidata(hObject);
h = s{1};
database = s{2};
set(h.targets,'userdata',[]);
set(h.outliers,'userdata',[]);
show_targ_out(database,[],0,h,'Targets');
show_targ_out(database,[],0,h,'Outliers');
set(h.classify,'userdata','reset');
uiresume
return
function pushbutton7_Callback(hObject, eventdata, handles)
% Quit
guidata(hObject,[]);
uiresume
return
function radiobutton4_Callback(hObject, eventdata, handles)
% All
s = guidata(hObject);
h = s{1};
database = s{2};
if (get(h.all,'value') == 0)
set(h.unlabeled,'value',2);
else
set(h.unlabeled,'value',0);
end
t = get(h.all,'userdata');
S = t{1};
show_images(database,S,1,h)
return
function radiobutton5_Callback(hObject, eventdata, handles)
% unlabeled
s = guidata(hObject);
h = s{1};
database = s{2};
if (get(h.unlabeled,'value') == 0)
set(h.all,'value',2);
else
set(h.all,'value',0);
end
t = get(h.all,'userdata');
S = t{1};
U = t{2};
S = S(U);
show_images(database,S,1,h)
return
function pushbutton8_Callback(hObject, eventdata, handles)
% Next
s = guidata(hObject);
h = s{1};
database = s{2};
t = get(h.next,'userdata');
S = t{1};
n = t{2};
n = min(length(S)-9,n+10);
show_images(database,S,n,h);
return
function pushbutton9_Callback(hObject, eventdata, handles)
% Previous
s = guidata(hObject);
h = s{1};
database = s{2};
t = get(h.next,'userdata');
S = t{1};
n = t{2};
n = max(1,n-10);
show_images(database,S,n,h);
return
function pushbutton10_Callback(hObject, eventdata, handles)
% Label
s = guidata(hObject);
h = s{1};
set(h.classify,'userdata','label');
uiresume
return
function pushbutton11_Callback(hObject, eventdata, handles)
% All Target
s = guidata(hObject);
h = s{1};
for j=1:10
set(h.handlab(j),'value',1);
end
return
function pushbutton12_Callback(hObject, eventdata, handles)
% All Outlier
s = guidata(hObject);
h = s{1};
for j=1:10
set(h.handlab(j),'value',0);
end
return
|
github
|
jacksky64/imageProcessing-master
|
wvotec.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/wvotec.m
| 3,605 |
utf_8
|
e97b52d375f85b99e5bfbf4719945011
|
%WVOTEC Weighted combiner (Adaboost weights)
%
% W = WVOTEC(A,V) Compute weights and store
% W = WVOTEC(V,U) Construct weighted combiner using weights U
%
% INPUT
% A Labeled dataset
% V Parallel or stacked set of trained classifiers
% U Set of classifier weights
%
% OUTPUT
% W Combined classifier
%
% DESCRIPTION
% The set of trained classifiers V is combined using weighted
% majority voting. If given the weights U are used. If not
% given, the weights are computed from the classification
% results of the labeled dataset A using 0.5*log((1-E)/E)
% if E is the classifier error.
%
% SEE ALSO
% MAPPINGS, DATASETS
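%
% EXAMPLE
% A minimal usage sketch:
%  a = gendatb;                % training set
%  v = a*[nmc,ldc,qdc];        % stacked set of trained classifiers
%  w = wvotec(a,v);            % compute weights and combine
%  e = a*w*testc;              % apparent classification error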
% Copyright: R.P.W. Duin, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
function w = wvotec(a,v)
prtrace(mfilename);
if nargin < 1 | isempty(a)
%w = mapping(mfilename,'untrained');
w = mapping(mfilename,'combiner');
elseif nargin < 2
if ismapping(a)
w = mapping(mfilename,'untrained',{a}); % a is the set of base classifiers
else
error('Illegal call')
end
elseif isdataset(a) % train or classify
if isuntrained(v)
v = a*v; % train base classifiers
end
if ~strcmp(v.mapping_file,mfilename) % training (base classifiers are already trained)
if isparallel(v) % parallel combiner
n = 0;
e = zeros(1,length(v.data));
for j=1:length(v.data)
vv = v.data{j};
d = a(:,n+1:n+size(vv,1))*vv*classc;
e(j) = testmc(d);
n = n+size(vv,1);
end
elseif isstacked(v) % stacked combiner
e = zeros(1,length(v.data));
for j=1:length(v.data)
vv = v.data{j};
%e(j) = testc(a,vv,'soft');
e(j) = testc(a,vv);
end
else
error('Classifier combination expected')
end
% Find weights
L = find(e < 1-max(getprior(a))); % take classifier better than prior
alf = zeros(1,length(e));
alf(L) = log((1-e(L))./e(L))/2;
alf = alf/sum(alf);
% construct the classifier
[m,k,c] = getsize(a);
w = mapping(mfilename,'trained',{v,alf},getlabels(vv),k,c);
w = setname(w,'Weighted Voting');
else % testing
alf = v.data{2}; % get the weights
u = v.data{1}; % get the set of classifiers
m = size(a,1);
dtot = zeros(m,size(v,2));
if isparallel(u) % parallel combiner
n = 0;
for j=1:length(u.data) % weight them
vv = u.data{j};
aa = a(:,n+1:n+size(vv,1));
d = a(:,n+1:n+size(vv,1))*vv;
[dd,jj] = max(+d,[],2);
dd = zeros(size(dtot));
dd([1:m]'+(jj-1)*m) = alf(j);
dtot = dtot + dd;
n = n+size(vv,1);
end
elseif isstacked(u) % stacked combiner
for j=1:length(u.data) % weight them
vv = u.data{j};
d = a*vv;
[dd,jj] = max(+d,[],2);
dd = zeros(size(dtot));
dd([1:m]'+(jj-1)*m) = alf(j);
dtot = dtot + dd;
end
else
error('Classifier combination expected')
end
w = setdat(d,dtot);
end
else % store classifier from given weights
ismapping(a);
u = v; % the weights
v = a; % the combined classifier
n = length(v.data);
if length(u) ~= n
error('Wrong number of weights given')
end
[k,c] = getsize(v.data{1});
w = mapping(mfilename,'trained',{v,u},getlabels(v{1}),k,c);
w = setname(w,'Weighted Voting');
end
|
github
|
jacksky64/imageProcessing-master
|
im_mean.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/im_mean.m
| 1,216 |
utf_8
|
2bd973e381a00889d4869daeb84c6e27
|
%IM_MEAN Computation of the centers of gravity of images
%
% B = IM_MEAN(A)
% B = A*IM_MEAN
%
% INPUT
% A Dataset with object images dataset (possibly multi-band)
%
% OUTPUT
% B Dataset with centers-of-gravity replacing images
% (possibly multi-band). The first component is always measured
% in the horizontal direction (X).
%
% DESCRIPTION
% Computes the centers of gravity of all images stored in A.
% This center is computed relative to the top-left corner of the image.
%
% SEE ALSO
% DATASETS, DATAFILES
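%
% EXAMPLE
% A minimal usage sketch, assuming A is a dataset with object images:
%  b = im_mean(a);             % one center of gravity per image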
% Copyright: R.P.W. Duin, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
function b = im_mean(a)
prtrace(mfilename);
if nargin < 1 | isempty(a)
b = mapping(mfilename,'fixed');
b = setname(b,'Image centers');
elseif isa(a,'dataset') % allows datafiles too
isobjim(a);
b = filtim(a,mfilename);
elseif isa(a,'double') % here we have a single image
if isa(a,'dip_image'), a = double(a); end
[m,n] = size(a);
g = sum(sum(a));
JX = repmat([1:n],m,1);
JY = repmat([1:m]',1,n);
mx = sum(sum(a.*JX))/g;
my = sum(sum(a.*JY))/g;
b = [mx/n my/m];
end
return
|
github
|
jacksky64/imageProcessing-master
|
preig.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/preig.m
| 419 |
utf_8
|
3ab159964b67953da77039eaf03ca236
|
%PREIG Call to EIG() including PRWAITBAR
%
% [E,D] = PREIG(A)
%
% This calls [E,D] = EIG(A) and includes a message to PRWAITBAR
% in case of a large A
function [E,D] = preig(A)
[m,n] = size(A);
if min([m,n]) > 500
prwaitbaronce('Computing %i x %i eigenvectors ...',[m,n]);
if nargout < 2
E = eig(A);
else
[E,D] = eig(A);
end
prwaitbar(0);
else
if nargout < 2
E = eig(A);
else
[E,D] = eig(A);
end
end
|
github
|
jacksky64/imageProcessing-master
|
mlrc.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/mlrc.m
| 3,609 |
utf_8
|
abbaafe79e6764055df8ab5351d3f24d
|
% MLRC Multi-response Linear Regression Combiner
%
% W = A*(WU*MLRC)
% W = WT*MLRC(B*WT)
% D = C*W
%
% INPUT
% A Dataset used for training base classifiers as well as combiner
% B Dataset used for training combiner of trained base classifiers
% C Dataset used for testing (executing) the combiner
% WU Set of untrained base classifiers, see STACKED
% WT Set of trained base classifiers, see STACKED
%
% OUTPUT
% W Trained Muli-response Linear Regression Combiner
% D Dataset with prob. products (over base classifiers) per class
%
% DESCRIPTION
% Dataset A contains the posterior probabilities, predicted by the base
% classifiers, of each instance belonging to each class; it is used to
% train a multi-response linear regression combiner.
% If the original classification problem has K classes, it is converted
% into K separate regression problems, where the problem for class c has
% instances with responses equal to 1 when they have label c and zero
% otherwise. Put another way, this function establishes a multi-response
% linear regression model for each class and utilizes these models to
% estimate the probability that the instances belong to each class.
% Note that in the model for class c, only the probabilities of class c
% predicted by the set of base classifiers are used.
%
% REFERENCE
% 1. Ting, KM, Witten IH. Issues in stacked generalization, Journal of
% Artificial Intelligent Research, 1999, 10: 271-289.
% 2. Dzeroski S, Zenko B. Is combining classifiers with stacking better
% than selecting the best one? Machine Learning, 2004, 54(3): 255-273.
%
% SEE ALSO
% DATASETS, MAPPINGS, STACKED, CLASSC, TESTD, LABELD
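%
% EXAMPLE
% A minimal usage sketch:
%  a = gendatb;                             % training set
%  wu = [nmc*classc,ldc*classc,qdc*classc]; % untrained base classifiers
%  w = a*(wu*mlrc);                         % train bases and combiner
%  lab = a*w*labeld;                        % labels of the combination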
% Copyright: Chunxia Zhang, R.P.W. Duin, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
function W = mlrc(A)
prtrace(mfilename);
name = 'MLR combiner';
% If there are no inputs, return an untrained mapping.
% Handle untrained calls like MLRC([])
if nargin < 1 || isempty(A)
W = mapping(mfilename);
W = setname(W,name);
return
end
islabtype(A,'crisp'); % allow crisp labels only
isvaldfile(A,1,2); % at least one object per class and 2 classes
A = testdatasize(A,'features'); % test whether they fit
A = setprior(A,getprior(A)); % avoid many warnings
[m,k,c] = getsize(A); % size of training set; (m objects; k features; c classes)
L = k/c; % compute the number of classifiers
A = setfeatlab(A,repmat([1:c]',L,1)); % reset the feature labels of dataset A such that the first c features correspond to
% the first classifier, the next c features correspond to the second classifier, etc.
C = zeros(c*L,c); % register the coefficients of each model
options = optimset('TolX',1e-4);
for i = 1:c % run over all classes
Res = zeros(m,1);
Index = find(A.featlab == i); % find the indices corresponding to the ith class
B = seldat(A,[],Index); % select the data corresponding to the ith class (m x L matrix)
I = A.nlab == i;
Res(I) = 1;
[x,resnorm,residual,exitflag] = lsqnonneg(B.data,Res,[],options); % compute the nonnegative coefficients
if exitflag == 0
resnorm
end
for j = 1:L
C((j-1)*c+i,i) = x(j);
end
end
W = affine(C,[],A,getlablist(A),k,c);
W = setname(W,name);
return
|
github
|
jacksky64/imageProcessing-master
|
obj2feat.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/obj2feat.m
| 421 |
utf_8
|
a2aea409c80661f5c8384fd5c33e10b6
|
%OBJ2FEAT Transform object images to feature images in dataset
%
% B = OBJ2FEAT(A)
%
% INPUT
% A Dataset with object images, possible with multiple bands.
%
% OUTPUT
% B Dataset with features images.
%
% SEE ALSO
% DATASETS, IM2OBJ, IM2FEAT, DATA2IM, FEAT2OBJ
function b = obj2feat(a)
prtrace(mfilename);
isdataset(a);
isobjim(a);
im = data2im(a);
b = im2feat(im);
|
github
|
jacksky64/imageProcessing-master
|
minc.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/minc.m
| 1,724 |
utf_8
|
16990543f698f89a59801ae3c4ca10d9
|
%MINC Minimum combining classifier
%
% W = MINC(V)
% W = V*MINC
%
% INPUT
% V Set of classifiers
%
% OUTPUT
% W Minimum combining classifier on V
%
% DESCRIPTION
% If V = [V1,V2,V3, ... ] is a set of classifiers trained on the
% same classes and W is the minimum combiner: it selects the class
% with the minimum of the outputs of the input classifiers. This
% might also be used as A*[V1,V2,V3]*MINC in which A is a dataset to
% be classified. Consequently, if S is a dissimilarity matrix with
% class feature labels (e.g. S = A*PROXM(A,'d')) then S*MINC*LABELD
% is the nearest neighbor classifier.
%
% If it is desired to operate on posterior probabilities then the
% input classifiers should be extended like V = V*CLASSC;
%
% The base classifiers may be combined in a stacked way (operating
% in the same feature space by V = [V1,V2,V3, ... ] or in a parallel
% way (operating in different feature spaces) by V = [V1;V2;V3; ... ]
%
% SEE ALSO
% MAPPINGS, DATASETS, VOTEC, MAXC, MEANC, MEDIANC, PRODC,
% AVERAGEC, STACKED, PARALLEL
%
% EXAMPLES
% See PREX_COMBINING
% Copyright: R.P.W. Duin, [email protected]
% Faculty of Applied Sciences, Delft University of Technology
% P.O. Box 5046, 2600 GA Delft, The Netherlands
% $Id: minc.m,v 1.2 2006/03/08 22:06:58 duin Exp $
function w = minc(p1)
type = 'min'; % define the operation processed by FIXEDCC.
% define the name of the combiner.
% this is the general procedure for all possible calls of fixed combiners
% handled by FIXEDCC
name = 'Minimum combiner';
if nargin == 0
w = mapping('fixedcc','combiner',{[],type,name});
else
w = fixedcc(p1,[],type,name);
end
if isa(w,'mapping')
w = setname(w,name);
end
return
|
github
|
jacksky64/imageProcessing-master
|
knnr.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/knnr.m
| 986 |
utf_8
|
6f361a2a60209246c49a0820cef1a076
|
%KNNR Nearest neighbor regression
%
% Y = KNNR(X,K)
%
% INPUT
% X Regression dataset
% K number of neighbors (default K=3)
%
% OUTPUT
% Y k-nearest neighbor regression
%
% DESCRIPTION
% Define a k-Nearest neighbor regression on dataset X.
%
% SEE ALSO
% LINEARR, TESTR, PLOTR
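%
% EXAMPLE
% A minimal usage sketch:
%  a = gendatsinc(100);        % noisy sinc regression data
%  w = knnr(a,5);              % 5-nearest neighbor regression
%  scatterr(a); plotr(w);      % plot data and fitted function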
% Copyright: D.M.J. Tax, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
function y = knnr(x,k)
if nargin<2
k = 3;
end
if nargin<1 | isempty(x)
y = mapping(mfilename,{k});
y = setname(y,'k-nearest neighbor regression');
return
end
if ~ismapping(k) %training
[n,d] = size(x);
W.x = +x;
W.y = gettargets(x);
W.k = k;
y = mapping(mfilename,'trained',W,1,d,1);
y = setname(y,'k-nearest neighbor regression');
else
% evaluation
w = getdata(k);
[n,d] = size(x);
D = distm(+x,w.x);
[sD,I] = sort(D,2);
if n==1
out = mean(w.y(I(:,1:w.k)));
else
out = mean(w.y(I(:,1:w.k)),2);
end
y = setdat(x,out);
end
|
github
|
jacksky64/imageProcessing-master
|
kmeans.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/kmeans.m
| 3,529 |
utf_8
|
b75218508c7fee80e8ca7e0ba1ced02a
|
%KMEANS k-means clustering
%
% [LABELS,A] = KMEANS(A,K,MAXIT,INIT,FID)
%
% INPUT
% A Matrix or dataset
% K Number of clusters to be found (optional; default: 2)
% MAXIT maximum number of iterations (optional; default: 50)
% INIT Labels for initialisation, or
% 'rand' : take at random K objects as initial means, or
% 'kcentres' : use KCENTRES for initialisation (default)
% FID File ID to write progress to (default [], see PRPROGRESS)
%
% OUTPUT
% LABELS Cluster assignments, 1..K
% A Dataset with original data and labels LABELS
%
% DESCRIPTION
% K-means clustering of data vectors in A.
%
% SEE ALSO
% DATASETS, HCLUST, KCENTRES, MODESEEK, EMCLUST, PRPROGRESS
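%
% EXAMPLE
% A minimal usage sketch:
%  a = gendatb;                   % data to be clustered
%  lab = kmeans(a,3);             % cluster assignments 1..3
%  scatterd(dataset(+a,lab));     % view the clustering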
% Copyright: R.P.W. Duin, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
% $Id: kmeans.m,v 1.7 2008/07/03 09:08:43 duin Exp $
function [assign,a] = kmeans(a,kmax,maxit,init,fid)
prtrace(mfilename);
n_ini = 100; % Maximum size of subset to use for initialisation.
if (nargin < 2) | isempty(kmax)
kmax = 2;
%prwarning(3,'No K supplied, assuming K = 2.');
end
if nargin < 3 | isempty(maxit)
maxit = 50;
end
if nargin < 4 | isempty(init)
init = 'kcentres';
end
if nargin < 5
fid = [];
end
% Create dataset with all equal labels and no priors.
m = size(a,1);
a = dataset(a);
islabtype(a,'crisp');
a=set(a,'labels',ones(m,1),'lablist',[1:kmax]','prior',[]); % for speed
n_ini = max(n_ini,kmax*5); % initialisation needs sufficient samples
prwaitbar(maxit,'k-means clustering');
% Initialise by performing KCENTRES on...
if (size(init,1) == 1) & strcmp(init,'kcentres') & (m > n_ini)
%prwarning(2,'Initializing by performing KCENTRES on subset of %d samples.', n_ini);
b = +gendat(a,n_ini); % ... a random subset of A.
d = +distm(b);
assign = kcentres(d,kmax,[]);
bb = setprior(dataset(b,assign),0);
w = nmc(bb); % Initial partition W and assignments ASSIGN.
elseif (size(init,1) == 1) & strcmp(init,'kcentres')
%prwarning(2,'Initializing by performing KCENTRES on training set.');
d = +distm(a); % ... the entire set A.
assign = kcentres(d,kmax,[]);
aa = setprior(dataset(a,assign),0);
w = nmc(aa); % mapping trained on the complete dataset
elseif (size(init,1) == 1) & strcmp(init,'rand')
%prwarning(2,'Initializing by randomly selected objects');
R = randperm(m);
w = nmc(dataset(a(R(1:kmax),:),[1:kmax]')); % mapping trained on kmax random samples
elseif (size(init,1) == m)
assign = renumlab(init);
kmax = max(assign);
%prwarning(2,'Initializing by given labels, k = %i',kmax);
w = nmc(dataset(a,assign));
else
error('Wrong initialisation supplied')
end
assign = labeld(a*w);
a = dataset(a,assign);
a = setprior(a,0);
%tmp_assign = zeros(m,1); % Allocate temporary array.
% Main loop, while assignments change
it=1; % number of iterations
ndif = 1;
while (it<maxit) & (ndif > 0)
prwaitbar(maxit,it);
tmp_assign = assign; % Remember previous assignments.
a = setnlab(a,assign);
%J = find(classsizes(a) == 0);
w = a * nmc; % Re-partition the space by assigning samples to nearest mean.
assign = a * w * labeld; % Re-calculate assignments.
it = it+1; % increase the iteration counter
ndif = sum(tmp_assign ~= assign);
end
prwaitbar(0);
if it>=maxit
%prwarning(1,['No convergence reached before the maximum number of %d iterations passed. ' ...
%'The last result was returned.'], maxit);
end
return;
|
github
|
jacksky64/imageProcessing-master
|
im_norm.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/im_norm.m
| 974 |
utf_8
|
108b2fee72ae6fd49c5ae1efc8afac78
|
%IM_NORM Mapping for normalizing images: mean, variance
%
% B = IM_NORM(A)
% B = A*IM_NORM
%
% INPUT
% A Dataset or datafile
%
% OUTPUT
% B Dataset or datafile
%
% DESCRIPTION
% The objects stored as images in the dataset or datafile A are normalised
% w.r.t. their mean (0) and variance (1).
%
% SEE ALSO
% MAPPINGS, DATASETS, DATAFILES, IM2OBJ, DATA2IM
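%
% EXAMPLE
% A minimal usage sketch, assuming A is a dataset or datafile with
% object images:
%  b = a*im_norm;              % zero mean, unit variance per image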
% Copyright: R.P.W. Duin, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
function b = im_norm(a)
if nargin < 1
a = [];
end
if isempty(a)
b = mapping(mfilename,'fixed');
b = setname(b,'image normalisation');
elseif isa(a,'dataset') % allows datafiles too
isobjim(a);
b = filtim(a,mfilename);
elseif isa(a,'double') | isa(a,'dip_image') % here we have a single image
b = double(a);
u = mean(b(:));
v = var(b(:));
b = (b-u)./sqrt(v);
else
error('Datatype not supported')
end
|
github
|
jacksky64/imageProcessing-master
|
logdens.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/logdens.m
| 1,743 |
utf_8
|
f19d2f4edeb5a881769faebd6e7e4b2c
|
%LOGDENS Force density based classifiers to use log-densities
%
% V = LOGDENS(W)
% V = W*LOGDENS
%
% INPUT
% W Density based trained classifier
%
% OUTPUT
% V Log-density based trained classifier
%
% DESCRIPTION
% Density based classifiers suffer from a low numeric accuracy in the tails
% of the distributions. Especially for overtrained or high dimensional
% classifiers this may cause zero-density estimates for many test samples,
% resulting in a bad performance. This can be avoided by computing
% log-densities offered by this routine. This works for all classifiers
% based on normal distributions (e.g. LDC, QDC, MOGC) and Parzen estimates
% (PARZENC, PARZENDC). The computation of log-densities is , in order to be
% effective, combined with a normalisation, resulting in posterior
% distributions. As a consequence, the possibility to output densities is
% lost.
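%
% EXAMPLE
% A minimal sketch, assuming a labeled dataset A:
%   v = logdens(qdc(a));   % quadratic classifier forced to log-densities
%   e = a*v*testc;         % error estimate based on posteriors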
%
% SEE ALSO
% MAPPINGS, LDC, UDC, QDC, MOGC, PARZENC, PARZENDC, NORMAL_MAP, PARZEN_MAP,
% CLASSC
% Copyright: R.P.W. Duin, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
function v = logdens(w)
prtrace(mfilename);
if (nargin < 1 | isempty(w))
v = mapping(mfilename,'combiner');
return
else
mfile = getmapping_file(w);
if issequential(w)
wdata = getdata(w);
wdata{end} = feval(mfilename,wdata{end});
v = setdata(w,wdata);
elseif isuntrained(w)
v = mapping('sequential','untrained',{w,logdens});
else
if (~strcmp(mfile,'normal_map') & ~strcmp(mfile,'parzen_map'))
error('LOGDENS can only be applied to normal densities and Parzen estimators')
end
v = classc(w);
end
end
return
|
github
|
jacksky64/imageProcessing-master
|
plsr.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/plsr.m
| 2,956 |
utf_8
|
5ca665d50587aa96474a51fd0a120597
|
% PLSR Partial Least Squares Regression
%
% W = PLSR
% W = PLSR([],MAXLV,METHOD)
%
% [W, INFORM] = PLSR(A,MAXLV,METHOD)
%
% INPUT
% A training dataset
% MAXLV maximal number of latent variables (will be corrected
% if > rank(A));
% MAXLV=inf means MAXLV=min(size(A)) -- theoretical
% maximum number of LV;
% by default = inf
% METHOD 'NIPALS' or 'SIMPLS'; by default = 'SIMPLS'
%
% OUTPUT
% W PLS feature extraction mapping
% INFORM extra algorithm output
%
% DESCRIPTION
% PRTools Adaptation of PLS_TRAIN/PLS_APPLY routines. No preprocessing
% is done inside this mapping. It is the user's responsibility to train
% preprocessing on the training data and apply it to the test data.
%
% Crisp labels will be converted into soft labels which will be used as
% a target matrix.
%
% In order to do regression with a smaller number of latent variables
% than the number of LV's selected during training, do
% d = w.data;
% d.n = new_n;
% w.data = d;
%
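% EXAMPLE
% A hedged sketch, assuming a labeled or target-valued dataset A (remember
% that preprocessing is the user's responsibility):
%   w = plsr(a,5);   % train with at most 5 latent variables
%   y = a*w;         % apply the regression mapping
%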
% SEE ALSO
% PLS_TRAIN, PLS_TRANSFORM, PLS_APPLY
% Copyright: S.Verzakov, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
% $Id: plsr.m,v 1.1 2007/08/28 11:00:39 davidt Exp $
%
function [w,inform] = plsr(par1,par2,par3)
% No dataset given: return untrained mapping.
if (nargin < 1) | (isempty(par1))
if nargin < 2
par2 = inf;
end
if nargin < 3
par3 = 'SIMPLS';
end
data = {par2,par3};
w = mapping(mfilename,'untrained',data);
w = setname(w,'Partial Least Squares Regression');
return
end
isdataset(par1); % Assert that A is a dataset.
% training
if nargin < 2 | ~isa(par2,'mapping')
% a*w when w is untrained or
if nargin < 2
par2 = inf;
end
if nargin < 3
par3 = 'SIMPLS';
end
maxLV = par2;
method = par3;
if strcmp(par1.labtype,'crisp')
y=gettargets(setlabtype(par1,'soft'));
else
y=gettargets(par1);
end
% options
Options.maxLV = maxLV;
Options.method = method;
Options.X_centering=[];
Options.Y_centering=[];
Options.X_scaling=[];
Options.Y_scaling=[];
[B,XRes,YRes,Options]=pls_train(+par1,y,Options);
clear B
data.n=Options.maxLV;
data.R=XRes.R;
data.C=YRes.C;
data.Options=Options;
% Save all useful data.
w = mapping(mfilename,'trained',data,[],size(XRes.R,1),size(YRes.C,1));
w = setname(w,'Partial Least Squares Mapping');
if nargout > 1
inform.XRes=XRes;
inform.YRes=YRes;
end
% execution
else
data = getdata(par2); % Unpack the mapping.
if data.n > size(data.R,2)
ErrMsg = sprintf(['PLS: The number of LV(s) asked (%d) is greater than\n'...
'the number of LV(s) available (%d)'],data.n,size(data.R,2));
error(ErrMsg);
end
Y = pls_apply(+par1,data.R(:,1:data.n)*data.C(:,1:data.n)',data.Options);
w = setdat(par1,Y,par2);
end
return
|
github
|
jacksky64/imageProcessing-master
|
im_select_blob.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/im_select_blob.m
| 935 |
utf_8
|
e1e9f866b26bbf9e94a36e83d99d1fe3
|
%IM_SELECT_BLOB Select largest blob in binary images in dataset (DIP_Image)
%
% B = IM_SELECT_BLOB(IM)
%
% Just the largest object in the image is returned.
%
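% EXAMPLE
% A minimal sketch, assuming a binary image IM (double or dip_image):
%   b = im_select_blob(im);   % keep only the largest connected component
%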
% SEE ALSO
% DATASETS, DATAFILES, DIP_IMAGE
% Copyright: R.P.W. Duin, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
function b = im_select_blob(a)
prtrace(mfilename);
if nargin < 1 | isempty(a)
b = mapping(mfilename,'fixed');
b = setname(b,'Select largest blob');
elseif isa(a,'dataset') % allows datafiles too
isobjim(a);
b = filtim(a,mfilename);
b = setfeatsize(b,getfeatsize(a));
elseif isa(a,'double') | isa(a,'dip_image') % here we have a single image
if ~isa(a,'dip_image')
a = dip_image(a,'bin');
end;
labim = label(a);
c = measure(labim,labim,{'size','mean'});
c = double(c);
[cc,ind] = max(c(:,1));
b = a.*(labim==round(c(ind,2)));
end
return
|
github
|
jacksky64/imageProcessing-master
|
featrank.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/featrank.m
| 1,548 |
utf_8
|
1b6fbcb41238f457e3235100517ed770
|
%FEATRANK Feature ranking on individual performance for classification
%
% [I,F] = FEATRANK(A,CRIT,T)
%
% INPUT
% A input dataset
% CRIT string name of a method or untrained mapping
% T validation dataset (optional)
%
% OUTPUT
% I vector with sorted feature indices
% F vector with criteria values
%
% DESCRIPTION
% Feature ranking based on the training dataset A. CRIT determines
% the criterion used by the feature evaluation routine feateval. If
% the dataset T is given, it is used as a test set for feateval. In I
% the features are returned in order of decreasing performance. In F the
% corresponding values of feateval are given. Default: crit='NN'.
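%
% EXAMPLE
% A minimal sketch, assuming a labeled dataset A with at least five features:
%   [I,F] = featrank(a,'NN');   % rank features by 1-NN performance
%   b = a(:,I(1:5));            % keep the five best-ranked features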
%
% SEE ALSO
% MAPPINGS, DATASETS, FEATEVAL, FEATSELO, FEATSELB, FEATSELF,
% FEATSELP, FEATSELM
% Copyright: R.P.W. Duin, [email protected]
% Faculty of Applied Sciences, Delft University of Technology
% P.O. Box 5046, 2600 GA Delft, The Netherlands
% $Id: featrank.m,v 1.3 2007/04/21 23:06:46 duin Exp $
function [I,F] = featrank(a,crit,t)
prtrace(mfilename);
[m,k,c] = getsize(a);
F = zeros(1,k);
if nargin < 3
t = [];
end
if nargin < 2
crit = 'NN';
prwarning(4,'set criterion to NN');
end
isvaldfile(a,1,2); % at least 1 object per class, 2 classes
a = testdatasize(a);
iscomdset(a,t);
if isempty(t)
for j = 1:k
F(j) = feateval(a(:,j),crit);
end
else
% run the criterion on the validation set
for j = 1:k
F(j) = feateval(a(:,j),crit,t(:,j));
end
end
[F,I] = sort(-F);
F = -F;
return
|
github
|
jacksky64/imageProcessing-master
|
udc.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/udc.m
| 1,305 |
utf_8
|
5f7a31ca4be7e6246f97f82f5ef2c63d
|
%UDC Uncorrelated normal based quadratic Bayes classifier (BayesNormal_U)
%
% W = UDC(A)
% W = A*UDC
%
% INPUT
% A input dataset
%
% OUTPUT
% W output mapping
%
% DESCRIPTION
% Computation of a quadratic classifier between the classes in the
% dataset A assuming normal densities with uncorrelated features.
%
% The use of probabilistic labels is supported. The classification A*W is
% computed by normal_map.
%
% EXAMPLES
% PREX_DENSITY
%
% SEE ALSO
% MAPPINGS, DATASETS, NMC, NMSC, LDC, QDC, QUADRC, NORMAL_MAP
% Copyright: R.P.W. Duin, [email protected]
% Faculty of Applied Physics, Delft University of Technology
% P.O. Box 5046, 2600 GA Delft, The Netherlands
% $Id: udc.m,v 1.6 2007/06/05 12:45:44 duin Exp $
function W = udc(a)
prtrace(mfilename);
if nargin == 0
W = mapping(mfilename);
W = setname(W,'Bayes-Normal-U');
return
end
islabtype(a,'crisp','soft');
isvaldfile(a,2,2); % at least 2 objects per class, 2 classes
[m,k,c] = getsize(a);
[U,G] = meancov(a); %computing mean and covariance matrix
p = getprior(a);
for j = 1:c
G(:,:,j) = diag(diag(G(:,:,j)));
end
w.mean = +U;
w.cov = G;
w.prior = p;
%W = mapping('normal_map','trained',w,getlab(U),k,c);
W = normal_map(w,getlab(U),k,c);
W = setname(W,'Bayes-Normal-U');
W = setcost(W,a);
return
|
github
|
jacksky64/imageProcessing-master
|
naivebc.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/naivebc.m
| 5,420 |
utf_8
|
02e1f3a2851e88570a15d684978299f5
|
%NAIVEBC Naive Bayes classifier
%
% W = NAIVEBC(A,N)
% W = A*NAIVEBC([],N)
%
% W = NAIVEBC(A,DENSMAP)
% W = A*NAIVEBC([],DENSMAP)
%
% INPUT
% A Training dataset
% N Scalar number of bins (default: 10)
% DENSMAP Untrained mapping for density estimation
%
% OUTPUT
% W Naive Bayes classifier mapping
%
% DESCRIPTION
% The Naive Bayes Classifier estimates a density for every class and every
% feature separately. Total class densities are constructed by assuming
% independence and consequently multiplying the separate feature densities.
%
% The default version divides each axis into N bins, counts the number of
% training examples for each of the classes in each of the bins, and
% classifies the object to the class that gives maximum posterior
% probability. Missing values will be put into a separate bin.
%
% This routine assumes continuous data. It may be applied to discrete data
% in case all features have the same number of discrete values. For proper
% results the parameter N should be set to this number.
%
% If N is NaN it is optimised by REGOPTC.
%
% Alternatively, an untrained mapping DENSMAP may be supplied that will be
% used to estimate the densities per class and per feature separately.
% Examples are PARZENM and GAUSSM.
%
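% EXAMPLE
% Two hedged sketches, assuming a labeled dataset A:
%   w1 = naivebc(a,20);        % histogram version with 20 bins per feature
%   w2 = naivebc(a,parzenm);   % Parzen density per class and per feature
%   e  = a*w1*testc;           % test the first classifier
%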
% SEE ALSO
% DATASETS, MAPPINGS, PARZENM, GAUSSM, UDC, QDC, PARZENC, PARZENDC, REGOPTC
% Copyright: R.P.W. Duin, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
% $Id: naivebc.m,v 1.5 2007/06/15 09:58:30 duin Exp $
function w = naivebc(a,arg2)
if (nargin < 2)
prwarning (2,'number of bins not specified, assuming 10.');
arg2 = 10;
end
% No arguments given: return untrained mapping.
if (nargin < 1) | (isempty(a))
w = mapping(mfilename,{arg2});
w = setname(w,'Naive Bayes');
return
end
if (~ismapping(arg2)) % Second argument is not a mapping: training.
N = arg2;
if isnan(N) % optimize complexity parameter
defs = {10};
parmin_max = [2,50];
w = regoptc(a,mfilename,{N},defs,[1],parmin_max,testc([],'soft'),0);
else
islabtype(a,'crisp');
isvaldfile(a,1,2); % at least 1 object per class, 2 classes
a = testdatasize(a);
[m,k,c] = getsize(a); M = classsizes(a);
% Train the mapping. First, find the scale and offset of the data
% and normalise (this is very non-robust, but ok...)
offset_a = min(a); maxa = max(a); scale_a = maxa - offset_a;
K = find(scale_a~=0);
% if(any(scale_a==0))
% prwarning (2,'one of the features has the same value for all data; scale change to realmin');
% scale_a(scale_a==0) = realmin;
% end
a = a - repmat(offset_a,m,1);
a = a ./ repmat(scale_a,m,1);
% P will contain the probability per bin per class, P0 the probability
% per class. The highest and lowest bounds will not be used; the lowest
% bound will be used to store the missing values.
p = zeros(N+1,k,c);
% Count the number of objects for each of the classes.
for i = 1:c
Ic = findnlab(a,i); % Extract one class.
Ia = ceil(N*+(a(Ic,K))); % Find out in which bin it falls.
Ia(Ia<1) = 1; Ia(Ia>N) = N; % Sanity check.
for j=1:N
p(j,K,i) = sum(Ia==j); % Count for all bins.
end
p(N+1,K,i) = sum(~isnan(+a(Ic,K))); % The missing values.
% Bayes estimators are used, like elsewhere in PRTools.
p(:,K,i) = (p(:,K,i)+1) / (M(i)+N); % Probabilities.
p(:,K,i) = p(:,K,i) ./ repmat(scale_a(K)/N,N+1,1); % Densities.
end
% Save all useful data.
pars.p0 = getprior(a); pars.p = p; pars.N = N;
pars.offset_a = offset_a; pars.scale_a = scale_a;
pars.feats = K;
w = mapping(mfilename,'trained',pars,getlablist(a),k,c);
w = setname(w,'Naive Bayes');
w = setcost(w,a);
end
elseif isuntrained(arg2) % train given untrained mapping, e.g. parzen
[m,k,c] = getsize(a);
v = cell(c,k);
for i=1:c
b = seldat(a,i);
for j=1:k
v{i,j} = b(:,j)*arg2;
end
end
pars.dens = v;
pars.p0 = getprior(a);
w = mapping(mfilename,'trained',pars,getlablist(a),k,c);
w = setname(w,'Naive Bayes');
w = setcost(w,a);
else % Second argument is a mapping: testing.
w = arg2;
pars = getdata(w); % Unpack the mapping.
[m,k] = getsize(a);
if isfield(pars,'dens')
v = pars.dens;
[c,k] = size(v);
out = zeros(size(a,1),c);
for i=1:c
for j=1:k
out(:,i) = out(:,i) + +log(a(:,j)*v{i,j});
end
end
out = exp(out);
else
c = length(pars.p0); % Could also use size(w.labels,1)...
K = pars.feats; % relevant features
% Shift and scale the test set.
a(:,K) = a(:,K) - repmat(pars.offset_a(K),m,1);
a(:,K) = a(:,K) ./ repmat(pars.scale_a(K),m,1);
% Classify the test set. First find in which bins the objects fall.
Ia = ceil(pars.N*+(a(:,K)));
Ia(Ia<1) = 1; Ia(Ia>pars.N) = pars.N; % Sanity check.
% Find the class probability for each object for each feature
out = zeros(m,length(K),c);
for i=1:length(K)
out(:,i,:) = pars.p(Ia(:,i),K(i),:);
end
% Multiply the per-feature probs.
out = squeeze(prod(out,2));
if m == 1
out = out';
end
end
% Weight with class priors
out = out .* repmat(pars.p0,m,1);
% Done!
w = setdat(a,out,w);
end
return
|
github
|
jacksky64/imageProcessing-master
|
im_profile.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/im_profile.m
| 1,839 |
utf_8
|
4ac4ef7c3a6157021c8a985dd9cd1c34
|
%IM_PROFILE Computation of horizontal and vertical image profile
%
% P = IM_PROFILE(A,NX,NY)
% P = A*IM_PROFILE([],NX,NY)
%
% INPUT
% A Dataset with object images dataset (possibly multi-band)
% NX Number of bins for horizontal profile
% NY Number of bins for vertical profile
%
% OUTPUT
% P Dataset with profiles replacing images (possibly multi-band)
%
% DESCRIPTION
% Computes for each image in A a 1*(NX+NY) vector P storing the horizontal
% and vertical profile (i.e. normalized image projections), given by NX and
% NY points respectively. Because of the normalization, sum(P) = 1 holds.
% The computation is based on a bilinear interpolation of A to an image of
% NX*NY points. Consequently, the horizontal profile (P(1:NX)) depends
% slightly on NY and the vertical profile (P(NX+1:NX+NY)) slightly on NX.
% Defaults : NY = NX, NX = 16.
%
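% EXAMPLE
% A minimal sketch, assuming a dataset A with object images:
%   p = im_profile(a,16,16);   % 32-dimensional profile per image
%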
% SEE ALSO
% DATASETS, DATAFILES
% Copyright: R.P.W. Duin, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
function b = im_profile(a,nx,ny)
prtrace(mfilename);
if nargin < 3, ny = []; end
if nargin < 2, nx = 16; end
if nargin < 1 | isempty(a)
b = mapping(mfilename,'fixed',{nx,ny});
b = setname(b,'Image profile');
elseif isa(a,'dataset') % allows datafiles too
isobjim(a);
b = filtim(a,mfilename,{nx,ny});
elseif isa(a,'double') | isa(a,'dip_image') % here we have a single image
if isa(a,'dip_image'), a = double(a); end
if isempty(ny)
ny = nx;
end
if any([nx,ny] < 4)
b = imresize(a,10*[ny,nx],'bilinear');
sx = sum(reshape(sum(b,1),10,nx),1);
sy = sum(reshape(sum(b,2)',10,ny),1);
s = sum(sx);
b = [sx sy]/s;
else
b = imresize(a,[ny,nx],'bilinear');
s = sum(b(:));
b = [sum(b,1) sum(b,2)']/s;
end
end
return
|
github
|
jacksky64/imageProcessing-master
|
plotf.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/plotf.m
| 2,216 |
utf_8
|
e952c5f78e0c02fd34f82ba6895f5e72
|
%PLOTF Plot feature distribution, special version
%
% h = PLOTF(A,N)
%
% Produces 1-D density plots for all the features in dataset A. The
% densities are estimated using PARZENML. N is the number of
% feature density plots on a row.
%
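% EXAMPLE
% A minimal sketch:
%   a = gendatb;   % banana-shaped 2D dataset
%   plotf(a,2);    % both feature densities on one row
%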
% See also DATASETS, PARZENML
% Copyright: R.P.W. Duin, [email protected]
% Faculty of Applied Sciences, Delft University of Technology
% P.O. Box 5046, 2600 GA Delft, The Netherlands
% $Id: plotf.m,v 1.6 2009/11/13 08:54:18 davidt Exp $
function h_out = plotf(a,n)
%DXD make a standard setting for n, I'm getting crazy!
if nargin<2
n = 1;
end
prtrace(mfilename);
[m,k,c] = getsize(a);
% Define the color for each of the classes:
if c == 2
map = [0 0 1; 1 0 0];
else
map = hsv(c);
end
% Make subplots for each feature, so a grid of p x q subplots is
% defined
h = [];
if k >= n
p = ceil(k/n); q = n;
else
p = k; q = 1;
end
% Get the feature names
feats = getfeatlab(a,'string');
% If feature labels are single characters, prefix them with 'Feature '
if size(feats,2) == 1
feats = [repmat('Feature ',size(feats,1),1) feats];
end
if isempty(feats)
feats = num2str((1:k)');
end
% Make the plot for each of the features:
for j = 1:k
b = a(:,j);
s = zeros(1,c);
d = zeros(121,c);
bb = [-0.10:0.01:1.10]' * (max(b)-min(b)) + min(b);
ex = 0;
% Make a density estimate of each of the classes:
for i = 1:c
I = findnlab(a,i);
D = +distm(bb,b(I,:));
s(i) = parzenml(b(I,:));
% Compute the density function
d(:,i) = sum(exp(-D/(s(i).^2)),2)./(length(I)*s(i));
end
% Create the subplots with the correct sizes:
subplot(p,q,j)
plot(bb,zeros(size(bb)),'w.');
hold on
h = [];
% Scatter the data and plot the density functions for each of the
% classes:
for i = 1:c
I = findnlab(a,i);
hh = plot(b(I),zeros(size(b(I))),'x',bb,+d(:,i));
set(hh,'color',map(i,:));
h = [h;hh];
end
legend(h(1:2:end)',num2str(getlablist(a))); %does not work properly
title([getname(a) ': ' feats(j,:)]);
V = axis;
axis([bb(1) bb(end) V(3) V(4)]);
set(gca,'xtick',[]);
set(gca,'ytick',[]);
xlabel(feats(j,:));
hold off
end
% The last details to take care of:
if k == 1, title(''); end
if nargout > 0
h_out = h;
end
return
|
github
|
jacksky64/imageProcessing-master
|
mds_init.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/mds_init.m
| 2,981 |
utf_8
|
d3873ccbbaa3a28678a465dd0a47b32b
|
%MDS_INIT Initialization for MDS (variants of Sammon) mapping
%
% Y = MDS_INIT (D,N,INIT)
%
% INPUT
% D Square dissimilarity matrix of the size M x M
% N Desired output dimensionality (optional; default: 2)
% INIT Initialization method (optional; default: 'randnp')
%
% OUTPUT
% Y Initial configuration for the MDS method
%
% DESCRIPTION
% Finds a configuration of points Y in an N-dimensional space, used as
% a starting configuration for an MDS mapping based on the distance matrix D.
% The parameter INIT is a string standing for the initialization method:
% 'randp' - linear mapping of D on n randomly (uniform distribution) chosen vectors
% 'randnp' - linear mapping of D on n randomly (normal distribution) chosen vectors
% 'randv' - randomly (uniform distribution) chosen vectors from D
% 'maxv' - n columns of D with the largest variances
% 'kl' - Karhunen Loeve projection (linear mapping) of D (first n eigenvectors)
% 'cs' - Classical Scaling
%
% SEE ALSO
% MAPPINGS, MDS_CS, MDS
%
% Undocumented use: if W is an already trained MDS, then for adding new points Dnew
% W = mds_init(W,Ystart)
% would assign Ystart as an initial configuration for the new MDS.
%
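% EXAMPLE
% A hedged sketch, assuming a square dissimilarity matrix D:
%   y = mds_init(D,2,'cs');   % 2D start configuration via Classical Scaling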
%
% Copyright: Elzbieta Pekalska, [email protected], 2000-2003
% Faculty of Applied Sciences, Delft University of Technology
%
function [Y,w] = mds_init(D,n,initm)
w = [];
if isa(D,'mapping') % assign an initial configuration for the new MDS
ww = getdata(D);
if size(n,2) ~= size(ww{1},2),
error('The dimensionality of the MDS map and the initial configuration do not match.')
end
ww{6} = n;
Y = setdata(D,ww);
return
else
if nargin < 2, n = 2; end
if nargin < 3, initm = 'randnp'; end
if isa(D,'dataset'),
lab = getlab(D);
D=+D;
end
D = remove_nan(D);
[m,mm] = size(D);
w = [];
switch initm
case 'randp',
Y = D * rand(mm,n)/10;
case 'randnp',
Y = D * randn(mm,n)/10;
case 'randv',
Y = rand(m,n);
case 'maxv',
U = std(D);
[V,I] = sort(-U);
Y = D(:,I(1:n));
case 'kl',
options.disp = 0;
[E,L] = eigs(prcov(+D),n,'lm',options);
Y = D * E/100;
case 'cs',
if length(n) == 1 & m == mm,
w = mds_cs(D,n);
Y = D*w;
else
error('The CS initialization is not possible');
end
otherwise,
error('The possible initializations are: randp, randnp, randv, maxv, cs or kl.');
end
end
return;
function D = remove_nan(D)
% Remove all appearing NaN's by replacing them by the nearest neighbors.
%
[m,mm] = size(D);
nanindex = find(isnan(D(:)));
if ~isempty(nanindex),
for i=1:m
K = find(isnan(D(i,:)));
I = 1:mm;
[md,kk] = min(D(i,:));
if md < eps,
I(kk) = [];
end
D(i,K) = min(D(i,I));
end
% check whether D has a zero diagonal
if length(intersect(find(D(:) < eps), 1:m+1:(m*m))) >= m,
D = (D+D')/2;
end
end
|
github
|
jacksky64/imageProcessing-master
|
plotm.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/plotm.m
| 4,530 |
utf_8
|
af07489432cd67f10f4d42a6a96e2fc6
|
%PLOTM Plot mapping values, contours or surface
%
% H = PLOTM(W,S,N)
%
% INPUT
% W Trained mapping
% S Plot strings, or scalar selecting type of plot
% 1: density plot;
% 2: contour plot (default);
% 3: 3D surface plot;
% 4: 3D surface plot above 2D contour plot;
% 5; 3D mesh plot;
% 6: 3D mesh plot above 2D contour plot)
% N Contour level(s) to plot
% (default: 10 contours between minimum and maximum)
%
% OUTPUT
% H Array of graphics handles
%
% DESCRIPTION
% This routine, similar to PLOTC, plots contours (not just decision
% boundaries) of the mapping W on predefined axis, typically generated by
% SCATTERD. Plotstrings may be set in S. The vector N selects the contour.
%
% EXAMPLES
% See PREX_DENSITY
%
% SEE ALSO
% MAPPINGS, SCATTERD, PLOTC
% Copyright: R.P.W. Duin, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
% $Id: plotm.m,v 1.4 2009/09/25 13:15:17 duin Exp $
function handle = plotm(w,arg2,n,cnd)
prtrace(mfilename);
ismapping(w); % Assert that W is a mapping.
% Get the parameters, the plotstrings and the number of contours.
if (nargin < 4)
cnd = 1;
end;
[k,c] = size(w);
if (nargin < 3)
n = [];
end
plottype = 2; s = [];
if (nargin >= 2)
if (~isstr(arg2) & ~isempty(arg2))
plottype = arg2;
else
s = arg2;
end
end
if plottype == 2 & size(w,1) == 1
plottype = 1;
end
if (nargin < 2) | (isempty(s))
col = 'brmk';
s = [col' repmat('-',4,1)];
s = char(s,[col' repmat('--',4,1)]);
s = char(s,[col' repmat('-.',4,1)]);
s = char(s,[col' repmat(':',4,1)]);
s = char(s,s,s,s);
end
% When one contour should be plotted, two entries have to be given in
% the contour plot (Matlab bug/feature).
%if (~isempty(n)) & (length(n) == 1), n = [n n]; end
% Setup the mesh-grid, use the axis of the currently active figure.
% Note: this will be a 0-1 grid in case of no given scatterplot.
hold on; V = axis;
gs = gridsize; dx = (V(2)-V(1))/gs; dy = (V(4)-V(3))/gs;
if (plottype == 1)
m = (gs+1); X = (V(1):dx:V(2))';
D = double([X,zeros(m,k-1)]*w);
else
m = (gs+1)*(gs+1); [X Y] = meshgrid(V(1):dx:V(2),V(3):dy:V(4));
D = double([X(:),Y(:),zeros(m,k-2)]*w);
end;
if (~cnd)
D = sum(D,2);
end;
% HH will contain all handles to graphics created in this routine.
hh = [];
% Plot the densities in case of 1D output.
if (plottype == 1)
for j = 1:size(D,2)
if (size(s,1) > 1), ss = s(j,:); else ss = s; end
% Plot the densities and add the handles to HH.
h = plot([V(1):dx:V(2)],D(:,j),deblank(ss));
hh = [hh; h];
end
axis ([V(1) V(2) 0 1.2*max(max(D))]);
ylabel('Density')
end
% Plot the contours in case of 2D output.
if (plottype == 2) | (plottype == 4) | (plottype == 6)
% Define the contour-heights if they are not given.
if (isempty(n))
n = 10;
dmax = max(D(:)); dmin = min(D(:)); dd = (dmax-dmin)/(n+1);
n = [dmin+dd:dd:dmax-dd];
end;
if length(n) == 1, n = [n n]; end
% Plot the contours for each of the classes.
for j = 1:size(D,2)
if (size(s,1) > 1), ss = s(j,:); else, ss = s; end
Z = reshape(D(:,j),gs+1,gs+1);
% Plot the contours and add the handles to HH.
[cc, h] = contour([V(1):dx:V(2)],[V(3):dy:V(4)],Z,n,deblank(ss));
hh = [hh; h];
end
view(2);
end
% Plot the surface in case of 3D output.
if (plottype == 3) | (plottype == 4) | (plottype == 5) | (plottype == 6)
% Scale the outputs to cover the whole colour range.
%E = D - min(D(:));
%E = 255*E/max(E(:))+1;
E = D; % Scaling appears disputable (RD)
if (c>1)
Z = reshape(sum(E,2),gs+1,gs+1);
else
Z = reshape(E(:,1),gs+1,gs+1);
end
if (plottype == 4) | (plottype == 6)
Z = Z + max(max(Z));
end;
% Plot the surface, set up lighting and add the handles to HH.
h = surf([V(1):dx:V(2)],[V(3):dy:V(4)],Z);
if (plottype == 3) | (plottype == 4)
colormap jet;
shading interp;
set(h,'FaceColor','interp','EdgeColor','none','FaceLighting','phong');
else
colormap white;
shading faceted;
end
view(-37.5,20);
camlight left; % Necessary to solve camlight bug?
camlight headlight;
camlight right;
hh = [hh; h];
end
hold off; if (nargout > 0), handle = hh; end
return
|
github
|
jacksky64/imageProcessing-master
|
datunif.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/datunif.m
| 1,690 |
utf_8
|
c63527e249a4fe53eaca6011b7f02698
|
%DATUNIF Apply uniform filter on images in a dataset
%
% B = DATUNIF(A,NX,NY)
%
% INPUT
% A Dataset containing images
% NX,NY Filtersize in X- and Y-direction (default: NY = NX)
%
% OUTPUT
% B Dataset with filtered images
%
% DESCRIPTION
% All images stored as objects (rows) or as features (columns) of
% dataset A are filtered with an NX*NY uniform filter and stored in
% dataset B. Image borders are mirrored before filtering.
%
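% EXAMPLE
% A minimal sketch, assuming a dataset A with images:
%   b = datunif(a,3,3);   % smooth every image with a 3x3 uniform filter
%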
% SEE ALSO
% DATASETS, DATAIM, IM2OBJ, IM2FEAT, DATGAUSS, DATFILT
% Copyright: R.P.W. Duin, [email protected]
% Faculty of Applied Sciences, Delft University of Technology
% P.O. Box 5046, 2600 GA Delft, The Netherlands
% $Id: datunif.m,v 1.2 2006/03/08 22:06:58 duin Exp $
function a = datunif (a,nx,ny)
prtrace(mfilename);
% Check arguments.
if (nargin < 2)
error('Filter size should be specified.');
end
if (nargin < 3)
prwarning(4,'filter height (NY) not specified, assuming equal to NX');
ny = nx;
end
% Construct filter.
bordersize = floor(max(nx,ny)/2);
filter_x = ones(1,nx)/nx; filter_y = ones(1,ny)/ny;
% Convert dataset to image or image array.
im = data2im(a); [imheight,imwidth,nim] = size(im);
% Process all images...
for i = 1:nim
out = bord(im(:,:,i),NaN,bordersize); % Add mirrored border.
out = conv2(filter_y,filter_x,out,'same'); % Convolve with filter.
im(:,:,i) = resize(out,bordersize,imheight,imwidth);
% Crop back to original size.
end
% Place filtered images back in dataset.
if (isfeatim(a))
a = setdata(a,im2feat(im),getfeatlab(a));
else
a = setdata(a,im2obj(im),getfeatlab(a));
end
return
|
github
|
jacksky64/imageProcessing-master
|
regoptc.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/regoptc.m
| 5,370 |
utf_8
|
02e08726d2d767122b8a1c7c730e6aed
|
%REGOPTC Optimise regularisation and complexity parameters by crossvalidation
%
% [W,PARS] = REGOPTC(A,CLASSF,PARS,DEFS,NPAR,PAR_MIN_MAX,TESTFUN,REALINT)
%
% INPUT
% A Dataset, training set
% CLASSF Untrained classifiers (mapping)
% PARS Cell array with parameters for CLASSF
% DEFS Defaults for PARS
% NPAR Index in PARS of parameters to be optimised
% PAR_MIN_MAX Minimum and maximum values of the search interval for
% the parameters to be optimised
% TESTFUN Function computing the criterion value to be minimized
% REALINT 0/1 vector, indicating for every parameter in PARS whether
% it is real (1) or integer (0). Default: all real.
%
% OUTPUT
% W Best classifier, trained by A
% PARS Resulting parameter vector
%
% DESCRIPTION
% This routine is used inside classifiers and mappings to optimise a
% regularisation or complexity parameter. Using cross-validation the
% performance of the classifier is estimated using TESTFUN (e.g. TESTC).
% Matlab's FMINBND is used for the optimisation. Parameters are optimised
% one by one, in the order as supplied by NPAR.
%
% The final parameters PARS can also be retrieved by GETOPT_PARS. This is
% useful if W is optimised inside training a classifier that does not
% return these parameters in the output.
%
% EXAMPLE
% A = GENDATD([30 30],50);
% W = LDC(A,0,NaN);
% GETOPT_PARS
%
% SEE ALSO
% DATASETS, MAPPINGS, CROSSVAL, TESTC, GETOPT_PARS
% Copyright: R.P.W. Duin, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
function [w,varargout] = regoptc(a,classf,parms,defs,regparnum,regparmin_max,testfunc,realint)
global REGOPT_NFOLDS REGOPT_REPS REGOPT_ITERMAX REGOPT_ITER REGOPT_OPTCRIT REGOPT_PARS
if isempty(REGOPT_NFOLDS), REGOPT_NFOLDS = 5; end
if isempty(REGOPT_REPS), REGOPT_REPS = 1; end
if isempty(REGOPT_ITERMAX), REGOPT_ITERMAX = 20; end
REGOPT_OPTCRIT = inf;
REGOPT_PARS = [];
isdataset(a);
isuntrained(feval(classf,[],parms{:}));
if nargin < 8, realint = ones(1,length(parms)); end
if nargin < 7, testfunc = testc([],'crisp'); end
% if (length(parms) ~= length(defs)) | (length(parms) < max(regparnum)) | ...
% (length(parms) ~= size(regparmin_max,1)) | (length(regparnum) ~= length(realint)) | ...
% (size(regparmin_max,2) ~= 2)
% error('Some parameters have wrong size')
% end
if (length(parms) ~= length(defs)) | (length(parms) < max(regparnum)) | ...
(length(parms) ~= size(regparmin_max,1)) | (length(parms) ~= length(realint)) | ...
(size(regparmin_max,2) ~= 2)
error('Some parameters have wrong size')
end
J = [];
K = zeros(1,length(parms));
for j=1:length(parms)
if ~isempty(parms{j}) & ~ismapping(parms{j}) & ~isstruct(parms{j}) & isnan(parms{j})
J = [J j];
K(j) = 1; % parameters to be optimised
end
end
parms(J) = defs(J); % store defaults (needed in case of optimal parameters)
matwarn = warning;
warning off
prwarn = prwarning;
prwarning(0);
prwaitbar(length(regparnum),'Parameter optimization');
for j=1:length(regparnum)
prwaitbar(length(regparnum),j);
n = regparnum(j);
if K(n)
regparmin = regparmin_max(n,1);
regparmax = regparmin_max(n,2);
if regparmin > 0 & regparmax > 0 & realint(n) % if interval positive and real
setlog = 1; % better to use logarithmic scaling
regparmin = log(regparmin);
regparmax = log(regparmax);
else
setlog = 0;
end
REGOPT_ITER = 0;
%if length(regparnum) == 1
prprogress([],' par optim: %i steps, %i folds: \n', ...
REGOPT_ITERMAX,REGOPT_NFOLDS);
%else
% prprogress([],'%i-par optim: %i, %i steps, %i folds: \n', ...
% length(regparnum),n,REGOPT_ITERMAX,REGOPT_NFOLDS);
%end
prwaitbar(REGOPT_ITERMAX,'Parameter optimization');
if realint(n) == 1
regpar = fminbnd(@evalregcrit,regparmin,regparmax, ...
optimset('Display','off','maxiter',REGOPT_ITERMAX), ...
classf,a,parms,n,setlog,REGOPT_NFOLDS,REGOPT_REPS,testfunc,1);
else
regpar = nfminbnd(@evalregcrit,regparmin,regparmax,REGOPT_ITERMAX, ...
classf,a,parms,n,setlog,REGOPT_NFOLDS,REGOPT_REPS,testfunc,0);
end
prwaitbar(0)
if setlog
parms{n} = exp(regpar);
else
parms{n} = regpar;
end
end
end
prwaitbar(0);
varargout = cell(1,nargout-1);
[w,varargout{:}] = feval(classf,a,parms{:});
REGOPT_PARS = parms;
warning(matwarn);
prwarning(prwarn);
return
function regcrit = evalregcrit(regpar,classf,a,parms,regparnum, ...
setlog,nfolds,reps,testfunc,realint);
global REGOPT_ITER REGOPT_OPTCRIT REGOPT_ITERMAX
REGOPT_ITER = REGOPT_ITER+1;
prwaitbar(REGOPT_ITERMAX,REGOPT_ITER);
if setlog
parms{regparnum} = exp(regpar);
else
parms{regparnum} = regpar;
end
if realint
prprogress([],' %i %5.3f %6.2e \n',REGOPT_ITER,REGOPT_OPTCRIT,parms{regparnum});
else
prprogress([],' %i %5.3f %i \n',REGOPT_ITER,REGOPT_OPTCRIT,parms{regparnum});
end
w = feval(classf,[],parms{:});
rand('state',1); randn('state',1);
regcrit = crossval(a,w,nfolds,reps,testfunc); % use soft error as criterion (more smooth)
REGOPT_OPTCRIT = min(mean(regcrit),REGOPT_OPTCRIT);
return
|
github
|
jacksky64/imageProcessing-master
|
gendatc.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/gendatc.m
| 2,506 |
utf_8
|
6b89a3ef6b64f2046f10c2d6190fd8cc
|
%GENDATC Generation of two spherical classes with different variances
%
% A = GENDATC(N,K,U,LABTYPE)
%
% INPUT
% N Vector with class sizes (default: [50,50])
% K Dimensionality of the dataset (default: 2)
% U Mean of class 1 (default: 0)
% LABTYPE 'crisp' or 'soft' labels (default: 'crisp')
%
% OUTPUT
% A Dataset
%
% DESCRIPTION
% Generation of a K-dimensional 2-class dataset A of N objects.
% Both classes are spherically Gaussian distributed.
%
% Class 1 has the identity matrix as covariance matrix and
% mean U. If U is a scalar then [U,0,0,..] is used as class mean.
% Class 2 also has the identity matrix as covariance matrix, except
% for a variance of 9 for the first two features. Its mean is 0.
% Class priors are P(1) = P(2) = 0.5.
%
% If N is a vector of sizes, exactly N(I) objects are generated
% for class I, I = 1,2.
%
% The default means result in a class overlap of 0.16.
%
% LABTYPE defines the desired label type: 'crisp' or 'soft'. In the
% latter case true posterior probabilities are set for the labels.
%
% Defaults: N = [50,50], K = 2, U = 0, LABTYPE = 'crisp'.
%
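% EXAMPLE
% A minimal sketch:
%   a = gendatc([100 100],2);   % 2D set with 100 objects per class
%   scatterd(a);                % scatter plot of the two classes
%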
% SEE ALSO
% DATASETS, PRDATASETS
% Copyright: R.P.W. Duin, [email protected]
% Faculty of Applied Sciences, Delft University of Technology
% P.O. Box 5046, 2600 GA Delft, The Netherlands
% $Id: gendatc.m,v 1.3 2009/01/27 13:01:42 duin Exp $
function A = gendatc(N,k,ma,labtype)
prtrace(mfilename);
if nargin < 1
N = [50 50];
prwarning(4,'size of classes not specified, assuming [50 50]');
end
if nargin < 2
k=2;
prwarning(4,'dimension not specified, assuming 2');
end
if nargin < 3
ma=0;
prwarning(4,'mean not specified, assuming 0');
end
if nargin < 4
labtype = 'crisp';
prwarning(4,'label type not specified, assuming crisp');
end
p = [0.5 0.5];
N = genclass(N,p);
% When a scalar ma is given, the mean vector should be generated:
if (length(ma) == 1) & (k>1),
ma=[ma,zeros(1,k-1)];
end
GA = eye(k);
GB = eye(k); GB(1,1) = 9;
if k > 1, GB(2,2) = 9; end
mb = zeros(1,k);
U = dataset([ma;mb],[1 2]','prior',p);
A = gendatgauss(N,U,cat(3,GA,GB));
A = set(A,'name','Spherical Set');
% Take care for the different types of labels:
switch labtype
case 'crisp'
;
case 'soft'
W = nbayesc(U,cat(3,GA,GB));
targets = A*W*classc;
A = setlabtype(A,'soft',targets);
otherwise
error(['Label type ' labtype ' not supported'])
end
return
|
github
|
jacksky64/imageProcessing-master
|
gridsize.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/gridsize.m
| 1,279 |
utf_8
|
d7cf4b33766da4b039ee2ecd0d4dbf0a
|
%GRIDSIZE Set gridsize used in the plot commands
%
% O = GRIDSIZE(N)
%
% INPUT
% N New grid size (optional, default: display current gridsize)
%
% OUTPUT
% O New grid size (optional)
%
% DESCRIPTION
% The initial gridsize is 30, enabling fast plotting of PLOTC and PLOTM.
% This is, however, insufficient to obtain accurate graphs, for which a
% gridsize of at least 100 and preferably 250 is needed.
% Default: display or return the current gridsize.
%
% EXAMPLES
% See PREX_CONFMAT
%
% SEE ALSO
% PLOTC, PLOTM
% Copyright: R.P.W. Duin, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
% $Id: gridsize.m,v 1.3 2006/12/19 12:10:06 duin Exp $
function out = gridsize(n)
prtrace(mfilename);
persistent CURRENT_GRIDSIZE;
% If the global variable was not yet initialised, set it to 30 (default).
if (isempty(CURRENT_GRIDSIZE))
prwarning(4,'initialising gridsize to 30');
CURRENT_GRIDSIZE = 30;
end
if (nargin < 1)
if (nargout == 0)
disp(['Gridsize is ' num2str(CURRENT_GRIDSIZE) ]);
end
else
if isstr(n)
n= str2num(n);
end
if isempty(n)
error('Illegal gridsize')
end
CURRENT_GRIDSIZE = n;
end
if (nargout > 0), out = CURRENT_GRIDSIZE; end
return
|
github
|
jacksky64/imageProcessing-master
|
gendatsinc.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/gendatsinc.m
| 912 |
utf_8
|
28f4043efaddf36174a9db40003229ae
|
%GENDATSINC Generate Sinc data
%
% A = GENDATSINC(N,SIGMA)
%
% INPUT
% N Number of objects to generate
% SIGMA Standard deviation of the noise (default SIGMA=0.1)
%
% OUTPUT
% A Regression dataset
%
% DESCRIPTION
%
% Generate the standard 1D Sinc data containing N objects, with Gaussian
% noise with standard deviation SIGMA.
%
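% EXAMPLE
% A minimal sketch:
%   a = gendatsinc(100,0.1);   % 100 noisy sinc samples
%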
% SEE ALSO
% GENDATR, GENDATLIN, GENDATSIN
% Copyright: D.M.J. Tax, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
function a = gendatsinc(n,sig)
if nargin<2
sig = 0.1;
end
if nargin<1
n = 25;
end
% input data between -5 and +5
x = -5+10*rand(n,1);
% avoid problems with x==0: for x==0 the result is 1 anyway:
y = ones(size(x));
I = find(x); % find the x's unequal to 0
y(I) = sin(pi*x(I))./(pi*x(I));
y = y + sig*randn(n,1); % add Gaussian noise with standard deviation SIG
a = dataset(x);
a = setlabtype(a,'targets',y);
a = setfeatlab(a,'x_1');
return
|
github
|
jacksky64/imageProcessing-master
|
parzenml.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/parzenml.m
| 5,834 |
utf_8
|
c96aea24e5e46fcc58f6c854e494f330
|
%PARZENML Optimum smoothing parameter in Parzen density estimation.
%
% H = PARZENML(A)
%
% INPUT
% A Input dataset
%
% OUTPUT
% H Scalar smoothing parameter (in case of crisp labels)
% Vector with smoothing parameters (in case of soft labels)
%
% DESCRIPTION
% Maximum likelihood estimation for the smoothing parameter H in the
% Parzen denstity estimation of the data in A. A leave-one out
% maximum likelihood estimation is used.
%
% The dataset A can either be crisp or soft labeled. In case of crisp
% labeling the class information is not used and a single smoothing
% parameter is estimated. In case of soft labels a smoothing parameter
% for every class is estimated and objects are weighted in relation to
% their class weights (soft label values).
% It may be profitable to scale the data before calling this routine, e.g.
% WS = SCALEM(A,'variance'); A = A*WS.
%
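% EXAMPLE
% A minimal sketch, assuming a dataset A:
%   ws = scalem(a,'variance'); a = a*ws;   % scale first, as advised above
%   h = parzenml(a);                       % ML smoothing parameter
%   w = parzenm(a,h);                      % Parzen density estimate with H
%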
% SEE ALSO
% DATASETS, MAPPINGS, SCALEM, SELDAT, PARZENM, PARZENDC, PRPROGRESS
% Copyright: R.P.W. Duin, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
% $Id: parzenml.m,v 1.11 2010/03/25 15:39:46 duin Exp $
function h = parzenml(A,fid)
prtrace(mfilename);
if nargin < 2, fid = []; end
if isdouble(A), A = dataset(A); end
A = testdatasize(A);
A = testdatasize(A,'objects');
if islabtype(A,'crisp')
h = parzenmlc(A,fid);
elseif islabtype(A,'soft')
h = parzenmls(A,fid);
else
error('Label type should be either ''crisp'' or ''soft''')
end
return
function h = parzenmlc(A,fid) %crisp version
[m,k] = size(A);
DD= distm(+A) + diag(1e70*ones(1,m));
E = min(DD);
h1 = sqrt(max(E)); % initial estimate of h
F1 = derlc(DD,E,h1,k); % derivative
prprogress(fid,'parzenml:\n');
prprogress(fid,' %6.4f %6.3e\n',h1,F1);
if abs(F1) < 1e-70
h = h1;
prwarning(4,'jump out\n');
return;
end
a1 = (F1+m*k)*h1*h1;
h2 = sqrt(a1/(m*k)); % second guess
F2 = derlc(DD,E,h2,k); % derivative
prprogress(fid,' %6.4f %6.3e\n',h2,F2);
if (abs(F2) < 1e-70) | (abs(1e0-h1/h2) < 1e-6)
h = h2;
prwarning(4,'jump out\n');
return
end
% find zero-point of derivative to optimize h^2
% stop if improvement is small, or h does not change significantly
alf = 1;
prwaitbar(100,'parzenml: Optimizing smoothing parameter',m > 100)
iter = 0;
while abs(1e0-F2/F1) > 1e-4 & abs(1e0-h2/h1) > 1e-3 & abs(F2) > 1e-70
iter = iter+1;
h3 = (h1*h1*h2*h2)*(F2-F1)/(F2*h2*h2-F1*h1*h1);
if h3 < 0 % this should not happen
h3 = sqrt((F2+m*k)*h2*h2/(m*k));
else
h3 = sqrt(h3);
end
prwaitbar(100,100-100*exp(-iter/10));
h3 = h2 +alf*(h3-h2);
F3 = derlc(DD,E,h3,k);
prprogress(fid,' %6.4f %6.3e\n',h3,F3);
F1 = F2; F2 = F3;
h1 = h2; h2 = h3;
alf = alf*0.99; % decrease step size
end
h = h2;
prwaitbar(0);
return
function F = derlc(DD,E,h,k) % crisp version
% computation of the likelihood derivative for Parzen density
% given distances D and their object minima E (for increased accuracy)
m = size(DD,1);
warning off MATLAB:divideByZero;
Y = (DD-repmat(E,m,1))/(2*h*h); % correct for minimum distance to save accuracy
warning on MATLAB:divideByZero;
IY = find(Y<20); % take small distance only, others don't contribute
P = zeros(m,m);
P(IY) = exp(-Y(IY));
PP = sum(P,2)';
FU = repmat(realmax,1,m);
J = find(PP~=0);
FU(J) = 1./PP(J);
FF = sum(DD.*P,2);
warning off MATLAB:divideByZero;
F = (FU*FF)./(h*h) - m*k;
warning on MATLAB:divideByZero;
return
function h = parzenmls(A,fid) %soft version
SS = gettargets(setlabtype(A,'soft'));
[m,k,c] = getsize(A);
DD= distm(+A) + diag(1e70*ones(1,m));
E = min(DD);
h = zeros(c,1);
h0 = sqrt(max(E)); % initial estimate of h
s = sprintf('parzenml: runover classes');
prwaitbar(c,s,m > 100);
iter = 0;
for j=1:c
prwaitbar(c,j)
S = SS(:,j);
h1 = h0;
F1 = derls(DD,E,h1,k,S); % derivative
prprogress(fid,'parzenml: class %i : \n',j);
prprogress(fid,' %6.4f %6.3e\n',h1,F1);
if abs(F1) < 1e-70
h(j) = h1;
prwarning(4,'jump out\n');
break;
end
a1 = (F1+m*k)*h1*h1;
h2 = sqrt(a1/(m*k)); % second guess
F2 = derls(DD,E,h2,k,S); % derivative
prprogress(fid,' %6.4f %6.3e\n',h2,F2);
if (abs(F2) < 1e-70) | (abs(1e0-h1/h2) < 1e-6)
h(j) = h2;
prwarning(4,'jump out\n');
break;
end
% find zero-point of derivative to optimize h^2
% stop if improvement is small, or h does not change significantly
prwaitbar(100,'parzenml: Optimizing smoothing parameter',m > 100)
iter = 0;
alf = 1;
while abs(1e0-F2/F1) > 1e-4 & abs(1e0-h2/h1) > 1e-3 & abs(F2) > 1e-70
iter = iter+1;
prwaitbar(100,100-100*exp(-iter/10));
h3 = (h1*h1*h2*h2)*(F2-F1)/(F2*h2*h2-F1*h1*h1);
if h3 < 0 % this should not happen
h3 = sqrt((F2+m*k)*h2*h2/(m*k));
else
h3 = sqrt(h3);
end
h3 = h2 +alf*(h3-h2);
F3 = derls(DD,E,h3,k,S);
prprogress(fid,' %6.4f %6.3e\n',h3,F3);
F1 = F2; F2 = F3;
h1 = h2; h2 = h3;
alf = alf*0.99; % decrease step size
end
prwaitbar(0)
h(j) = h2;
end
prwaitbar(0)
return
function F = derls(DD,E,h,k,S) %soft version
% computation of the likelihood derivative for Parzen density
% given distances D and their object minima E (for increased accuracy)
% S are the object weights
c = size(S,2); % number of classes
m = size(DD,1);
Y = (DD-repmat(E,m,1))/(2*h*h); % correct for minimum distance to save accuracy
IY = find(Y<20); % take small distance only, others don't contribute
F = 0;
for j=1:c
P = zeros(m,m);
P(IY) = exp(-Y(IY));
PP = S(:,j)'*P';
FU = repmat(realmax,1,m);
J = find(PP~=0);
FU(J) = S(J,j)'./PP(J);
K = find(S(:,j)==0);
FU(K) = zeros(1,length(K));
FF = (DD.*P)*S(:,j);
F = F + (FU*FF)./(h*h);
end
F = F - sum(S(:))*k;
return
|
github
|
jacksky64/imageProcessing-master
|
lassor.m
|
.m
|
imageProcessing-master/Matlab PRTools/prtools_com/prtools/lassor.m
| 973 |
utf_8
|
ba861862740a37071cd2dbdd02b46801
|
%LASSOR LASSO regression
%
% W = LASSOR(X,LAMBDA)
%
% INPUT
% X Regression dataset
% LAMBDA Regularization parameter
%
% OUTPUT
% W LASSO regression mapping
%
% DESCRIPTION
% The 'Least Absolute Shrinkage and Selection Operator' regression,
% using the regularization parameter LAMBDA.
%
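% EXAMPLE
% A minimal sketch, assuming a regression dataset X (e.g. from GENDATSINC):
%   w = lassor(x,0.5);   % train with regularization parameter 0.5
%   y = x*w;             % predicted targets
%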
% SEE ALSO
% RIDGER, LINEARR, PLOTR
% Copyright: D.M.J. Tax, [email protected]
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
function y = lassor(x,lambda)
if nargin<2
lambda = 1;
end
if nargin<1 | isempty(x)
y = mapping(mfilename,{lambda});
y = setname(y,'LASSO regression');
return
end
if ~ismapping(lambda) %training
[n,d] = size(x);
y = gettargets(x);
W = arrfit(+x,(y-mean(y)),lambda);
W = [mean(y); W];
y = mapping(mfilename,'trained',W,1,d,1);
y = setname(y,'LASSO regression');
else
% evaluation
w = getdata(lambda);
[n,d] = size(x);
out = [ones(n,1) +x]*w;
y = setdat(x,out);
end
|
github
|
jacksky64/imageProcessing-master
|
wlsFilter.m
|
.m
|
imageProcessing-master/edgeAwareFilters/wlsFilter/wlsFilter.m
| 2,515 |
utf_8
|
364d9ee14f487f190131610f8461a4a5
|
%
% original src: http://www.cs.huji.ac.il/~danix/epd/wlsFilter.m
% original author: Dani Lischinski
% <[email protected]>
% http://www.cs.huji.ac.il/~danix/
%
function OUT = wlsFilter(IN, lambda, alpha, L)
%WLSFILTER Edge-preserving smoothing based on the weighted least squares(WLS)
% optimization framework, as described in Farbman, Fattal, Lischinski, and
% Szeliski, "Edge-Preserving Decompositions for Multi-Scale Tone and Detail
% Manipulation", ACM Transactions on Graphics, 27(3), August 2008.
%
% Given an input image IN, we seek a new image OUT, which, on the one hand,
% is as close as possible to IN, and, at the same time, is as smooth as
% possible everywhere, except across significant gradients in L.
%
%
% Input arguments:
% ----------------
% IN Input image (2-D, double, N-by-M matrix).
%
% lambda Balances between the data term and the smoothness
% term. Increasing lambda will produce smoother images.
% Default value is 1.0
%
% alpha Gives a degree of control over the affinities by non-
% lineary scaling the gradients. Increasing alpha will
% result in sharper preserved edges. Default value: 1.2
%
% L Source image for the affinity matrix. Same dimensions
% as the input image IN. Default: log(IN)
%
%
% Example
% -------
% RGB = imread('peppers.png');
% I = double(rgb2gray(RGB));
% I = I./max(I(:));
% res = wlsFilter(I, 0.5);
% figure, imshow(I), figure, imshow(res)
% res = wlsFilter(I, 2, 2);
% figure, imshow(res)
if(~exist('L', 'var')),
L = log(IN+eps);
end
if(~exist('alpha', 'var')),
alpha = 1.2;
end
if(~exist('lambda', 'var')),
lambda = 1;
end
smallNum = 0.0001;
[r,c] = size(IN);
k = r*c;
% Compute affinities between adjacent pixels based on gradients of L
dy = diff(L, 1, 1);
dy = -lambda./(abs(dy).^alpha + smallNum);
dy = padarray(dy, [1 0], 'post');
dy = dy(:);
dx = diff(L, 1, 2);
dx = -lambda./(abs(dx).^alpha + smallNum);
dx = padarray(dx, [0 1], 'post');
dx = dx(:);
% Construct a five-point spatially inhomogeneous Laplacian matrix
B(:,1) = dx;
B(:,2) = dy;
d = [-r,-1];
A = spdiags(B,d,k,k);
e = dx;
w = padarray(dx, r, 'pre'); w = w(1:end-r);
s = dy;
n = padarray(dy, 1, 'pre'); n = n(1:end-1);
D = 1-(e+w+s+n);
A = A + A' + spdiags(D, 0, k, k);
% Solve
OUT = A\IN(:);
OUT = reshape(OUT, r, c);
|
github
|
jacksky64/imageProcessing-master
|
bilateralFilter.m
|
.m
|
imageProcessing-master/edgeAwareFilters/bilateralFilter/bilateralFilter.m
| 6,854 |
utf_8
|
55e1c9ea9a2c9a29a09d57cc3da742c9
|
%
% original src: http://people.csail.mit.edu/jiawen/software/bilateralFilter.m
% original author: Jiawen (Kevin) Chen
% <[email protected]>
% http://people.csail.mit.edu/jiawen/
%
% output = bilateralFilter( data, edge, ...
% edgeMin, edgeMax, ...
% sigmaSpatial, sigmaRange, ...
% samplingSpatial, samplingRange )
%
% Bilateral and Cross-Bilateral Filter using the Bilateral Grid.
%
% Bilaterally filters the image 'data' using the edges in the image 'edge'.
% If 'data' == 'edge', then it is the standard bilateral filter.
% Otherwise, it is the 'cross' or 'joint' bilateral filter.
% For convenience, you can also pass in [] for 'edge' for the normal
% bilateral filter.
%
% Note that for the cross bilateral filter, data does not need to be
% defined everywhere. Undefined values can be set to 'NaN'. However, edge
% *does* need to be defined everywhere.
%
% data and edge should be greyscale, double-precision floating point
% matrices of the same size (i.e. they should be [ height x width ])
%
% data is the only required argument
%
% edgeMin and edgeMax specifies the min and max values of 'edge' (or 'data'
% for the normal bilateral filter) and is useful when the input is in a
% range that's not between 0 and 1. For instance, if you are filtering the
% L channel of an image that ranges between 0 and 100, set edgeMin to 0 and
% edgeMax to 100.
%
% edgeMin defaults to min( edge( : ) ) and edgeMax defaults to max( edge( : ) ).
% This is probably *not* what you want, since the input may not span the
% entire range.
%
% sigmaSpatial and sigmaRange specify the standard deviations of the space
% and range Gaussians, respectively.
% sigmaSpatial defaults to min( width, height ) / 16
% sigmaRange defaults to ( edgeMax - edgeMin ) / 10.
%
% samplingSpatial and samplingRange specify the amount of downsampling
% used for the approximation. Higher values use less memory but are also
% less accurate. The default and recommended values are:
%
% samplingSpatial = sigmaSpatial
% samplingRange = sigmaRange
%
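% Example (a hedged usage sketch; 'peppers.png' ships with MATLAB):
%
%   I = im2double( rgb2gray( imread( 'peppers.png' ) ) );
%   out = bilateralFilter( I, [], 0, 1, 8, 0.1 );
%   figure, imshow( I ), figure, imshow( out )
%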
function output = bilateralFilter( data, edge, edgeMin, edgeMax, sigmaSpatial, sigmaRange, ...
samplingSpatial, samplingRange )
if( ndims( data ) > 2 ),
error( 'data must be a greyscale image with size [ height, width ]' );
end
if( ~isa( data, 'double' ) ),
error( 'data must be of class "double"' );
end
if ~exist( 'edge', 'var' ),
edge = data;
elseif isempty( edge ),
edge = data;
end
if( ndims( edge ) > 2 ),
error( 'edge must be a greyscale image with size [ height, width ]' );
end
if( ~isa( edge, 'double' ) ),
error( 'edge must be of class "double"' );
end
inputHeight = size( data, 1 );
inputWidth = size( data, 2 );
if ~exist( 'edgeMin', 'var' ),
edgeMin = min( edge( : ) );
warning( 'edgeMin not set! Defaulting to: %f\n', edgeMin );
end
if ~exist( 'edgeMax', 'var' ),
edgeMax = max( edge( : ) );
warning( 'edgeMax not set! Defaulting to: %f\n', edgeMax );
end
edgeDelta = edgeMax - edgeMin;
if ~exist( 'sigmaSpatial', 'var' ),
sigmaSpatial = min( inputWidth, inputHeight ) / 16;
fprintf( 'Using default sigmaSpatial of: %f\n', sigmaSpatial );
end
if ~exist( 'sigmaRange', 'var' ),
sigmaRange = 0.1 * edgeDelta;
fprintf( 'Using default sigmaRange of: %f\n', sigmaRange );
end
if ~exist( 'samplingSpatial', 'var' ),
samplingSpatial = sigmaSpatial;
end
if ~exist( 'samplingRange', 'var' ),
samplingRange = sigmaRange;
end
if size( data ) ~= size( edge ),
error( 'data and edge must be of the same size' );
end
% parameters
derivedSigmaSpatial = sigmaSpatial / samplingSpatial;
derivedSigmaRange = sigmaRange / samplingRange;
paddingXY = floor( 2 * derivedSigmaSpatial ) + 1;
paddingZ = floor( 2 * derivedSigmaRange ) + 1;
% allocate 3D grid
downsampledWidth = floor( ( inputWidth - 1 ) / samplingSpatial ) + 1 + 2 * paddingXY;
downsampledHeight = floor( ( inputHeight - 1 ) / samplingSpatial ) + 1 + 2 * paddingXY;
downsampledDepth = floor( edgeDelta / samplingRange ) + 1 + 2 * paddingZ;
gridData = zeros( downsampledHeight, downsampledWidth, downsampledDepth );
gridWeights = zeros( downsampledHeight, downsampledWidth, downsampledDepth );
% compute downsampled indices
[ jj, ii ] = meshgrid( 0 : inputWidth - 1, 0 : inputHeight - 1 );
% ii =
% 0 0 0 0 0
% 1 1 1 1 1
% 2 2 2 2 2
% jj =
% 0 1 2 3 4
% 0 1 2 3 4
% 0 1 2 3 4
% so when iterating over ii( k ), jj( k )
% get: ( 0, 0 ), ( 1, 0 ), ( 2, 0 ), ... (down columns first)
di = round( ii / samplingSpatial ) + paddingXY + 1;
dj = round( jj / samplingSpatial ) + paddingXY + 1;
dz = round( ( edge - edgeMin ) / samplingRange ) + paddingZ + 1;
% perform scatter (there's probably a faster way than this)
% normally would do downsampledWeights( di, dj, dk ) = 1, but we have to
% perform a summation to do box downsampling
for k = 1 : numel( dz ),
dataZ = data( k ); % traverses the image column wise, same as di( k )
if ~isnan( dataZ ),
dik = di( k );
djk = dj( k );
dzk = dz( k );
gridData( dik, djk, dzk ) = gridData( dik, djk, dzk ) + dataZ;
gridWeights( dik, djk, dzk ) = gridWeights( dik, djk, dzk ) + 1;
end
end
% make gaussian kernel
kernelWidth = 2 * derivedSigmaSpatial + 1;
kernelHeight = kernelWidth;
kernelDepth = 2 * derivedSigmaRange + 1;
halfKernelWidth = floor( kernelWidth / 2 );
halfKernelHeight = floor( kernelHeight / 2 );
halfKernelDepth = floor( kernelDepth / 2 );
[gridX, gridY, gridZ] = meshgrid( 0 : kernelWidth - 1, 0 : kernelHeight - 1, 0 : kernelDepth - 1 );
gridX = gridX - halfKernelWidth;
gridY = gridY - halfKernelHeight;
gridZ = gridZ - halfKernelDepth;
gridRSquared = ( gridX .* gridX + gridY .* gridY ) / ( derivedSigmaSpatial * derivedSigmaSpatial ) + ( gridZ .* gridZ ) / ( derivedSigmaRange * derivedSigmaRange );
kernel = exp( -0.5 * gridRSquared );
% convolve
blurredGridData = convn( gridData, kernel, 'same' );
blurredGridWeights = convn( gridWeights, kernel, 'same' );
% divide
blurredGridWeights( blurredGridWeights == 0 ) = -2; % avoid divide by 0, won't read there anyway
normalizedBlurredGrid = blurredGridData ./ blurredGridWeights;
normalizedBlurredGrid( blurredGridWeights < -1 ) = 0; % put 0s where it's undefined
% for debugging
% blurredGridWeights( blurredGridWeights < -1 ) = 0; % put zeros back
% upsample
[ jj, ii ] = meshgrid( 0 : inputWidth - 1, 0 : inputHeight - 1 ); % meshgrid does x, then y, so output arguments need to be reversed
% no rounding
di = ( ii / samplingSpatial ) + paddingXY + 1;
dj = ( jj / samplingSpatial ) + paddingXY + 1;
dz = ( edge - edgeMin ) / samplingRange + paddingZ + 1;
% interpn takes rows, then cols, etc
% i.e. size(v,1), then size(v,2), ...
output = interpn( normalizedBlurredGrid, di, dj, dz );
|
github
|
jacksky64/imageProcessing-master
|
domainTransform.m
|
.m
|
imageProcessing-master/edgeAwareFilters/domainTransform/domainTransform.m
| 6,133 |
utf_8
|
edf22a2a39db28d4f8e098ce7eab590d
|
% DOMAINTRANSFORM Domain transform normalized convolution (NC) edge-preserving filter.
%
%  F = domainTransform(img, sigma_s, sigma_r, num_iterations, joint_image)
%
% Parameters:
% img Input image to be filtered.
% sigma_s Filter spatial standard deviation.
% sigma_r Filter range standard deviation.
% num_iterations Number of iterations to perform (default: 3).
% joint_image Optional image for joint filtering.
%
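%  Example (a hedged usage sketch; 'peppers.png' ships with MATLAB):
%
%    I = im2double( imread( 'peppers.png' ) );
%    F = domainTransform( I, 60, 0.4 );   % sigma_s = 60, sigma_r = 0.4
%    figure, imshow( I ), figure, imshow( F )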
%
%
% This is the reference implementation of the domain transform NC filter
% described in the paper:
%
% Domain Transform for Edge-Aware Image and Video Processing
% Eduardo S. L. Gastal and Manuel M. Oliveira
% ACM Transactions on Graphics. Volume 30 (2011), Number 4.
% Proceedings of SIGGRAPH 2011, Article 69.
%
% Please refer to the publication above if you use this software. For an
% up-to-date version go to:
%
% http://inf.ufrgs.br/~eslgastal/DomainTransform/
%
%
% THIS SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY EXPRESSED OR IMPLIED WARRANTIES
% OF ANY KIND, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
% FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
% AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
% LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
% OUT OF OR IN CONNECTION WITH THIS SOFTWARE OR THE USE OR OTHER DEALINGS IN
% THIS SOFTWARE.
%
% Version 1.0 - August 2011.
function F = domainTransform(img, sigma_s, sigma_r, num_iterations, joint_image)
I = double(img);
if ~exist('num_iterations', 'var')
num_iterations = 3;
end
if exist('joint_image', 'var') && ~isempty(joint_image)
J = double(joint_image);
if (size(I,1) ~= size(J,1)) || (size(I,2) ~= size(J,2))
error('Input and joint images must have equal width and height.');
end
else
J = I;
end
[h w num_joint_channels] = size(J);
%% Compute the domain transform (Equation 11 of our paper).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Estimate horizontal and vertical partial derivatives using finite
% differences.
dIcdx = diff(J, 1, 2);
dIcdy = diff(J, 1, 1);
dIdx = zeros(h,w);
dIdy = zeros(h,w);
% Compute the l1-norm distance of neighbor pixels.
for c = 1:num_joint_channels
dIdx(:,2:end) = dIdx(:,2:end) + abs( dIcdx(:,:,c) );
dIdy(2:end,:) = dIdy(2:end,:) + abs( dIcdy(:,:,c) );
end
% Compute the derivatives of the horizontal and vertical domain transforms.
dHdx = (1 + sigma_s/sigma_r * dIdx);
dVdy = (1 + sigma_s/sigma_r * dIdy);
% Integrate the domain transforms.
ct_H = cumsum(dHdx, 2);
ct_V = cumsum(dVdy, 1);
% The vertical pass is performed using a transposed image.
ct_V = ct_V';
%% Perform the filtering.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
N = num_iterations;
F = I;
sigma_H = sigma_s;
for i = 0:num_iterations - 1
% Compute the sigma value for this iteration (Equation 14 of our paper).
sigma_H_i = sigma_H * sqrt(3) * 2^(N - (i + 1)) / sqrt(4^N - 1);
% Compute the radius of the box filter with the desired variance.
box_radius = sqrt(3) * sigma_H_i;
F = TransformedDomainBoxFilter_Horizontal(F, ct_H, box_radius);
F = image_transpose(F);
F = TransformedDomainBoxFilter_Horizontal(F, ct_V, box_radius);
F = image_transpose(F);
end
F = cast(F, class(img));
end
%% Box filter normalized convolution in the transformed domain.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function F = TransformedDomainBoxFilter_Horizontal(I, xform_domain_position, box_radius)
[h w num_channels] = size(I);
% Compute the lower and upper limits of the box kernel in the transformed domain.
l_pos = xform_domain_position - box_radius;
u_pos = xform_domain_position + box_radius;
% Find the indices of the pixels associated with the lower and upper limits
% of the box kernel.
%
% This loop is much faster in a compiled language. If you are using a
% MATLAB version which supports the 'parallel for' construct, you can
% improve performance by replacing the following 'for' by a 'parfor'.
l_idx = zeros(size(xform_domain_position));
u_idx = zeros(size(xform_domain_position));
for row = 1:h
xform_domain_pos_row = [xform_domain_position(row,:) inf];
l_pos_row = l_pos(row,:);
u_pos_row = u_pos(row,:);
local_l_idx = zeros(1, w);
local_u_idx = zeros(1, w);
local_l_idx(1) = find(xform_domain_pos_row > l_pos_row(1), 1, 'first');
local_u_idx(1) = find(xform_domain_pos_row > u_pos_row(1), 1, 'first');
for col = 2:w
local_l_idx(col) = local_l_idx(col-1) + ...
find(xform_domain_pos_row(local_l_idx(col-1):end) > l_pos_row(col), 1, 'first') - 1;
local_u_idx(col) = local_u_idx(col-1) + ...
find(xform_domain_pos_row(local_u_idx(col-1):end) > u_pos_row(col), 1, 'first') - 1;
end
l_idx(row,:) = local_l_idx;
u_idx(row,:) = local_u_idx;
end
% Compute the box filter using a summed area table.
SAT = zeros([h w+1 num_channels]);
SAT(:,2:end,:) = cumsum(I, 2);
row_indices = repmat((1:h)', 1, w);
F = zeros(size(I));
for c = 1:num_channels
a = sub2ind(size(SAT), row_indices, l_idx, repmat(c, h, w));
b = sub2ind(size(SAT), row_indices, u_idx, repmat(c, h, w));
F(:,:,c) = (SAT(b) - SAT(a)) ./ (u_idx - l_idx);
end
end
%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function T = image_transpose(I)
[h w num_channels] = size(I);
T = zeros([w h num_channels], class(I));
for c = 1:num_channels
T(:,:,c) = I(:,:,c)';
end
end
| github | jacksky64/imageProcessing-master | RF.m | .m | imageProcessing-master/edgeAwareFilters/domainTransform/RF.m | 4,586 | utf_8 | 68802a817292d988927c2ebcd90b8e91 |
% RF Domain transform recursive edge-preserving filter.
%
% F = RF(img, sigma_s, sigma_r, num_iterations, joint_image)
%
% Parameters:
% img Input image to be filtered.
% sigma_s Filter spatial standard deviation.
% sigma_r Filter range standard deviation.
% num_iterations Number of iterations to perform (default: 3).
% joint_image Optional image for joint filtering.
%
%
%
% This is the reference implementation of the domain transform RF filter
% described in the paper:
%
% Domain Transform for Edge-Aware Image and Video Processing
% Eduardo S. L. Gastal and Manuel M. Oliveira
% ACM Transactions on Graphics. Volume 30 (2011), Number 4.
% Proceedings of SIGGRAPH 2011, Article 69.
%
% Please refer to the publication above if you use this software. For an
% up-to-date version go to:
%
% http://inf.ufrgs.br/~eslgastal/DomainTransform/
%
%
% THIS SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY EXPRESSED OR IMPLIED WARRANTIES
% OF ANY KIND, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
% FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
% AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
% LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
% OUT OF OR IN CONNECTION WITH THIS SOFTWARE OR THE USE OR OTHER DEALINGS IN
% THIS SOFTWARE.
%
% Version 1.0 - August 2011.
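%
% Example usage (a sketch; file names and parameter values are illustrative
% assumptions): plain filtering, and joint filtering with a guidance image.
%   img = im2double(imread('statue.png'));
%   F = RF(img, 60, 0.4);                % edge-preserving smoothing
%   guide = im2double(imread('guide.png'));
%   Fj = RF(img, 60, 0.4, 3, guide);     % edges taken from the guide image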
function F = RF(img, sigma_s, sigma_r, num_iterations, joint_image)
I = double(img);
if ~exist('num_iterations', 'var')
num_iterations = 3;
end
if exist('joint_image', 'var') && ~isempty(joint_image)
J = double(joint_image);
if (size(I,1) ~= size(J,1)) || (size(I,2) ~= size(J,2))
error('Input and joint images must have equal width and height.');
end
else
J = I;
end
[h w num_joint_channels] = size(J);
%% Compute the domain transform (Equation 11 of our paper).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Estimate horizontal and vertical partial derivatives using finite
% differences.
dIcdx = diff(J, 1, 2);
dIcdy = diff(J, 1, 1);
dIdx = zeros(h,w);
dIdy = zeros(h,w);
% Compute the l1-norm distance of neighbor pixels.
for c = 1:num_joint_channels
dIdx(:,2:end) = dIdx(:,2:end) + abs( dIcdx(:,:,c) );
dIdy(2:end,:) = dIdy(2:end,:) + abs( dIcdy(:,:,c) );
end
% Compute the derivatives of the horizontal and vertical domain transforms.
dHdx = (1 + sigma_s/sigma_r * dIdx);
dVdy = (1 + sigma_s/sigma_r * dIdy);
% We do not integrate the domain transforms since our recursive filter
% uses the derivatives directly.
%ct_H = cumsum(dHdx, 2);
%ct_V = cumsum(dVdy, 1);
% The vertical pass is performed using a transposed image.
dVdy = dVdy';
%% Perform the filtering.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
N = num_iterations;
F = I;
sigma_H = sigma_s;
for i = 0:num_iterations - 1
% Compute the sigma value for this iteration (Equation 14 of our paper).
sigma_H_i = sigma_H * sqrt(3) * 2^(N - (i + 1)) / sqrt(4^N - 1);
F = TransformedDomainRecursiveFilter_Horizontal(F, dHdx, sigma_H_i);
F = image_transpose(F);
F = TransformedDomainRecursiveFilter_Horizontal(F, dVdy, sigma_H_i);
F = image_transpose(F);
end
F = cast(F, class(img));
end
%% Recursive filter.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function F = TransformedDomainRecursiveFilter_Horizontal(I, D, sigma)
% Feedback coefficient (Appendix of our paper).
a = exp(-sqrt(2) / sigma);
F = I;
V = a.^D;
[h w num_channels] = size(I);
% Left -> Right filter.
for i = 2:w
for c = 1:num_channels
F(:,i,c) = F(:,i,c) + V(:,i) .* ( F(:,i - 1,c) - F(:,i,c) );
end
end
% Right -> Left filter.
for i = w-1:-1:1
for c = 1:num_channels
F(:,i,c) = F(:,i,c) + V(:,i+1) .* ( F(:,i + 1,c) - F(:,i,c) );
end
end
end
%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function T = image_transpose(I)
[h w num_channels] = size(I);
T = zeros([w h num_channels], class(I));
for c = 1:num_channels
T(:,:,c) = I(:,:,c)';
end
end
| github | jacksky64/imageProcessing-master | NC.m | .m | imageProcessing-master/edgeAwareFilters/domainTransform/NC.m | 6,120 | utf_8 | fadc0cc0bab9db203394ccc9c4d2b953 |
% NC Domain transform normalized convolution edge-preserving filter.
%
% F = NC(img, sigma_s, sigma_r, num_iterations, joint_image)
%
% Parameters:
% img Input image to be filtered.
% sigma_s Filter spatial standard deviation.
% sigma_r Filter range standard deviation.
% num_iterations Number of iterations to perform (default: 3).
% joint_image Optional image for joint filtering.
%
%
%
% This is the reference implementation of the domain transform NC filter
% described in the paper:
%
% Domain Transform for Edge-Aware Image and Video Processing
% Eduardo S. L. Gastal and Manuel M. Oliveira
% ACM Transactions on Graphics. Volume 30 (2011), Number 4.
% Proceedings of SIGGRAPH 2011, Article 69.
%
% Please refer to the publication above if you use this software. For an
% up-to-date version go to:
%
% http://inf.ufrgs.br/~eslgastal/DomainTransform/
%
%
% THIS SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY EXPRESSED OR IMPLIED WARRANTIES
% OF ANY KIND, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
% FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
% AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
% LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
% OUT OF OR IN CONNECTION WITH THIS SOFTWARE OR THE USE OR OTHER DEALINGS IN
% THIS SOFTWARE.
%
% Version 1.0 - August 2011.
function F = NC(img, sigma_s, sigma_r, num_iterations, joint_image)
I = double(img);
if ~exist('num_iterations', 'var')
num_iterations = 3;
end
if exist('joint_image', 'var') && ~isempty(joint_image)
J = double(joint_image);
if (size(I,1) ~= size(J,1)) || (size(I,2) ~= size(J,2))
error('Input and joint images must have equal width and height.');
end
else
J = I;
end
[h w num_joint_channels] = size(J);
%% Compute the domain transform (Equation 11 of our paper).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Estimate horizontal and vertical partial derivatives using finite
% differences.
dIcdx = diff(J, 1, 2);
dIcdy = diff(J, 1, 1);
dIdx = zeros(h,w);
dIdy = zeros(h,w);
% Compute the l1-norm distance of neighbor pixels.
for c = 1:num_joint_channels
dIdx(:,2:end) = dIdx(:,2:end) + abs( dIcdx(:,:,c) );
dIdy(2:end,:) = dIdy(2:end,:) + abs( dIcdy(:,:,c) );
end
% Compute the derivatives of the horizontal and vertical domain transforms.
dHdx = (1 + sigma_s/sigma_r * dIdx);
dVdy = (1 + sigma_s/sigma_r * dIdy);
% Integrate the domain transforms.
ct_H = cumsum(dHdx, 2);
ct_V = cumsum(dVdy, 1);
% The vertical pass is performed using a transposed image.
ct_V = ct_V';
%% Perform the filtering.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
N = num_iterations;
F = I;
sigma_H = sigma_s;
for i = 0:num_iterations - 1
% Compute the sigma value for this iteration (Equation 14 of our paper).
sigma_H_i = sigma_H * sqrt(3) * 2^(N - (i + 1)) / sqrt(4^N - 1);
% Compute the radius of the box filter with the desired variance.
box_radius = sqrt(3) * sigma_H_i;
F = TransformedDomainBoxFilter_Horizontal(F, ct_H, box_radius);
F = image_transpose(F);
F = TransformedDomainBoxFilter_Horizontal(F, ct_V, box_radius);
F = image_transpose(F);
end
F = cast(F, class(img));
end
%% Box filter normalized convolution in the transformed domain.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function F = TransformedDomainBoxFilter_Horizontal(I, xform_domain_position, box_radius)
[h w num_channels] = size(I);
% Compute the lower and upper limits of the box kernel in the transformed domain.
l_pos = xform_domain_position - box_radius;
u_pos = xform_domain_position + box_radius;
% Find the indices of the pixels associated with the lower and upper limits
% of the box kernel.
%
% This loop is much faster in a compiled language. If you are using a
% MATLAB version which supports the 'parallel for' construct, you can
% improve performance by replacing the following 'for' by a 'parfor'.
l_idx = zeros(size(xform_domain_position));
u_idx = zeros(size(xform_domain_position));
for row = 1:h
xform_domain_pos_row = [xform_domain_position(row,:) inf];
l_pos_row = l_pos(row,:);
u_pos_row = u_pos(row,:);
local_l_idx = zeros(1, w);
local_u_idx = zeros(1, w);
local_l_idx(1) = find(xform_domain_pos_row > l_pos_row(1), 1, 'first');
local_u_idx(1) = find(xform_domain_pos_row > u_pos_row(1), 1, 'first');
for col = 2:w
local_l_idx(col) = local_l_idx(col-1) + ...
find(xform_domain_pos_row(local_l_idx(col-1):end) > l_pos_row(col), 1, 'first') - 1;
local_u_idx(col) = local_u_idx(col-1) + ...
find(xform_domain_pos_row(local_u_idx(col-1):end) > u_pos_row(col), 1, 'first') - 1;
end
l_idx(row,:) = local_l_idx;
u_idx(row,:) = local_u_idx;
end
% Compute the box filter using a summed area table.
SAT = zeros([h w+1 num_channels]);
SAT(:,2:end,:) = cumsum(I, 2);
row_indices = repmat((1:h)', 1, w);
F = zeros(size(I));
for c = 1:num_channels
a = sub2ind(size(SAT), row_indices, l_idx, repmat(c, h, w));
b = sub2ind(size(SAT), row_indices, u_idx, repmat(c, h, w));
F(:,:,c) = (SAT(b) - SAT(a)) ./ (u_idx - l_idx);
end
end
%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function T = image_transpose(I)
[h w num_channels] = size(I);
T = zeros([w h num_channels], class(I));
for c = 1:num_channels
T(:,:,c) = I(:,:,c)';
end
end
| github | jacksky64/imageProcessing-master | IC.m | .m | imageProcessing-master/edgeAwareFilters/domainTransform/IC.m | 8,210 | utf_8 | bd552db5ff4ac282aeff763f73d9bfa4 |
% IC Domain transform interpolated convolution edge-preserving filter.
%
% F = IC(img, sigma_s, sigma_r, num_iterations, joint_image)
%
% Parameters:
% img Input image to be filtered.
% sigma_s Filter spatial standard deviation.
% sigma_r Filter range standard deviation.
% num_iterations Number of iterations to perform (default: 3).
% joint_image Optional image for joint filtering.
%
%
%
% This is the reference implementation of the domain transform IC filter
% described in the paper:
%
% Domain Transform for Edge-Aware Image and Video Processing
% Eduardo S. L. Gastal and Manuel M. Oliveira
% ACM Transactions on Graphics. Volume 30 (2011), Number 4.
% Proceedings of SIGGRAPH 2011, Article 69.
%
% Please refer to the publication above if you use this software. For an
% up-to-date version go to:
%
% http://inf.ufrgs.br/~eslgastal/DomainTransform/
%
%
% THIS SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY EXPRESSED OR IMPLIED WARRANTIES
% OF ANY KIND, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
% FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
% AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
% LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
% OUT OF OR IN CONNECTION WITH THIS SOFTWARE OR THE USE OR OTHER DEALINGS IN
% THIS SOFTWARE.
%
% Version 1.0 - August 2011.
function F = IC(img, sigma_s, sigma_r, num_iterations, joint_image)
I = double(img);
if ~exist('num_iterations', 'var')
num_iterations = 3;
end
if exist('joint_image', 'var') && ~isempty(joint_image)
J = double(joint_image);
if (size(I,1) ~= size(J,1)) || (size(I,2) ~= size(J,2))
error('Input and joint images must have equal width and height.');
end
else
J = I;
end
[h w num_joint_channels] = size(J);
%% Compute the domain transform (Equation 11 of our paper).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Estimate horizontal and vertical partial derivatives using finite
% differences.
dIcdx = diff(J, 1, 2);
dIcdy = diff(J, 1, 1);
dIdx = zeros(h,w);
dIdy = zeros(h,w);
% Compute the l1-norm distance of neighbor pixels.
for c = 1:num_joint_channels
dIdx(:,2:end) = dIdx(:,2:end) + abs( dIcdx(:,:,c) );
dIdy(2:end,:) = dIdy(2:end,:) + abs( dIcdy(:,:,c) );
end
% Compute the derivatives of the horizontal and vertical domain transforms.
dHdx = (1 + sigma_s/sigma_r * dIdx);
dVdy = (1 + sigma_s/sigma_r * dIdy);
% Integrate the domain transforms.
ct_H = cumsum(dHdx, 2);
ct_V = cumsum(dVdy, 1);
% The vertical pass is performed using a transposed image.
ct_V = ct_V';
%% Perform the filtering.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
N = num_iterations;
F = I;
sigma_H = sigma_s;
for i = 0:num_iterations - 1
% Compute the sigma value for this iteration (Equation 14 of our paper).
sigma_H_i = sigma_H * sqrt(3) * 2^(N - (i + 1)) / sqrt(4^N - 1);
% Compute the radius of the box filter with the desired variance.
box_radius = sqrt(3) * sigma_H_i;
F = TransformedDomainBoxFilter_Horizontal(F, ct_H, box_radius);
F = image_transpose(F);
F = TransformedDomainBoxFilter_Horizontal(F, ct_V, box_radius);
F = image_transpose(F);
end
F = cast(F, class(img));
end
%% Box filter interpolated convolution in the transformed domain.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function F = TransformedDomainBoxFilter_Horizontal(I, xform_domain_position, box_radius)
[h w num_channels] = size(I);
% Compute the lower and upper limits of the box kernel in the transformed domain.
l_pos = xform_domain_position - box_radius;
u_pos = xform_domain_position + box_radius;
% Find the indices of the pixels associated with the lower and upper limits
% of the box kernel.
%
% This loop is much faster in a compiled language. If you are using a
% MATLAB version which supports the 'parallel for' construct, you can
% improve performance by replacing the following 'for' by a 'parfor'.
l_idx = zeros(size(xform_domain_position));
u_idx = zeros(size(xform_domain_position));
for row = 1:h
xform_domain_pos_row = [xform_domain_position(row,:) inf];
l_pos_row = l_pos(row,:);
u_pos_row = u_pos(row,:);
local_l_idx = zeros(1, w);
local_u_idx = zeros(1, w);
local_l_idx(1) = find(xform_domain_pos_row > l_pos_row(1), 1, 'first');
local_u_idx(1) = find(xform_domain_pos_row > u_pos_row(1), 1, 'first');
for col = 2:w
local_l_idx(col) = local_l_idx(col-1) + ...
find(xform_domain_pos_row(local_l_idx(col-1):end) > l_pos_row(col), 1, 'first') - 1;
local_u_idx(col) = local_u_idx(col-1) + ...
find(xform_domain_pos_row(local_u_idx(col-1):end) > u_pos_row(col), 1, 'first') - 1;
end
l_idx(row,:) = local_l_idx;
u_idx(row,:) = local_u_idx;
end
% Compute the box filter using a summed area table. This SAT is built using
% the area under the graph (in the transformed domain) of the interpolated
% signal. We use linear interpolation and compute the area using the
% trapezoidal rule.
areas = bsxfun(@times, ...
0.5 .* (I(:,2:end,:) + I(:,1:end-1,:)), ...
xform_domain_position(:,2:end,:) - xform_domain_position(:,1:end-1,:) ...
);
SAT = zeros([h w num_channels]);
SAT(:,2:end,:) = cumsum(areas, 2);
row_indices = repmat((1:h)', 1, w);
F = zeros(size(I));
I = padarray(I, [0 1 0], 'replicate');
SAT = padarray(SAT, [0 1 0]);
xform_domain_position = padarray(xform_domain_position, [0 1 0], 'replicate');
% Pixel values outside the bounds of the image are assumed to equal the
% nearest pixel border value.
xform_domain_position(:,1) = xform_domain_position(:,1) - 1.2 * box_radius;
xform_domain_position(:,end) = xform_domain_position(:,end) + 1.2 * box_radius;
l_idx = l_idx + 1;
for c = 1:num_channels
l1_c = sub2ind(size(SAT), row_indices, l_idx, repmat(c, h, w));
u0_c = sub2ind(size(SAT), row_indices, u_idx, repmat(c, h, w));
l0_c = sub2ind(size(SAT), row_indices, l_idx - 1, repmat(c, h, w));
u1_c = sub2ind(size(SAT), row_indices, u_idx + 1, repmat(c, h, w));
l1 = sub2ind(size(SAT), row_indices, l_idx);
u0 = sub2ind(size(SAT), row_indices, u_idx);
l0 = sub2ind(size(SAT), row_indices, l_idx - 1);
u1 = sub2ind(size(SAT), row_indices, u_idx + 1);
% Full (center) areas.
C = SAT(u0_c) - SAT(l1_c);
% Left fractional areas.
alpha = (l_pos - xform_domain_position(l0)) ./ (xform_domain_position(l1) - xform_domain_position(l0));
yi = I(l0_c) + alpha .* ( I(l1_c) - I(l0_c) );
L = 0.5 .* (yi + I(l1_c)) .* (1-alpha) .* (xform_domain_position(l1) - xform_domain_position(l0));
% Right fractional areas.
alpha = (u_pos - xform_domain_position(u0)) ./ (xform_domain_position(u1) - xform_domain_position(u0));
yi = I(u0_c) + alpha .* ( I(u1_c) - I(u0_c) );
R = 0.5 .* (yi + I(u0_c)) .* (alpha) .* (xform_domain_position(u1) - xform_domain_position(u0));
F(:,:,c) = (L + C + R) / (2 * box_radius);
end
end
%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function T = image_transpose(I)
[h w num_channels] = size(I);
T = zeros([w h num_channels], class(I));
for c = 1:num_channels
T(:,:,c) = I(:,:,c)';
end
end
| github | jacksky64/imageProcessing-master | reconstruct_laplacian_pyramid.m | .m | imageProcessing-master/edgeAwareFilters/localLaplacian/reconstruct_laplacian_pyramid.m | 1,035 | utf_8 | 70a32d1857a137731c2af429fa31e3fa |
% Reconstruction of image from Laplacian pyramid
%
% Arguments:
% pyramid 'pyr', as generated by function 'laplacian_pyramid'
% subwindow indices 'subwindow', given as [r1 r2 c1 c2] (optional)
%
% [email protected], August 2007
% [email protected], March 2011 [modified to handle subwindows]
%
%
% More information:
% 'The Laplacian Pyramid as a Compact Image Code'
% Burt, P., and Adelson, E. H.,
% IEEE Transactions on Communication, COM-31:532-540 (1983).
%
function R = reconstruct_laplacian_pyramid(pyr,subwindow)
r = size(pyr{1},1);
c = size(pyr{1},2);
nlev = length(pyr);
subwindow_all = zeros(nlev,4);
if ~exist('subwindow','var')
subwindow_all(1,:) = [1 r 1 c];
else
subwindow_all(1,:) = subwindow;
end
for lev = 2:nlev
subwindow_all(lev,:) = child_window(subwindow_all(lev-1,:));
end
% start with low pass residual
R = pyr{nlev};
filter = pyramid_filter;
for lev = nlev-1 : -1 : 1
% upsample, and add to current level
R = pyr{lev} + upsample(R,filter,subwindow_all(lev,:));
end
| github | jacksky64/imageProcessing-master | laplacian_pyramid.m | .m | imageProcessing-master/edgeAwareFilters/localLaplacian/laplacian_pyramid.m | 1,204 | utf_8 | e9029b66ed513370880964ed7feb583d |
% Construction of Laplacian pyramid
%
% Arguments:
% image 'I'
% 'nlev', number of levels in the pyramid (optional)
% subwindow indices 'subwindow', given as [r1 r2 c1 c2] (optional)
%
% [email protected], August 2007
% [email protected], March 2011 [modified to handle subwindows]
%
%
% More information:
% 'The Laplacian Pyramid as a Compact Image Code'
% Burt, P., and Adelson, E. H.,
% IEEE Transactions on Communication, COM-31:532-540 (1983).
%
function pyr = laplacian_pyramid(I,nlev,subwindow)
r = size(I,1);
c = size(I,2);
if ~exist('subwindow','var')
subwindow = [1 r 1 c];
end
if ~exist('nlev','var')
nlev = numlevels([r c]); % build highest possible pyramid
end
% recursively build pyramid
pyr = cell(nlev,1);
filter = pyramid_filter;
J = I;
for l = 1:nlev - 1
% apply low pass filter, and downsample
[I,subwindow_child] = downsample(J,filter,subwindow);
% in each level, store difference between image and upsampled low pass version
pyr{l} = J - upsample(I,filter,subwindow);
J = I; % continue with low pass image
subwindow = subwindow_child;
end
pyr{nlev} = J; % the coarsest level contains the residual low pass image
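%
% Sanity check (a sketch, not part of the original code): collapsing the
% pyramid should reproduce I up to floating-point error, since each level
% stores exactly the residual that reconstruction adds back:
%   pyr = laplacian_pyramid(I);
%   R = reconstruct_laplacian_pyramid(pyr);
%   max(abs(R(:) - I(:)))   % expected to be near machine precision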
| github | jacksky64/imageProcessing-master | gaussian_pyramid.m | .m | imageProcessing-master/edgeAwareFilters/localLaplacian/gaussian_pyramid.m | 754 | utf_8 | cae3a399a2b8402078ae3fdb967093e8 |
% Construction of Gaussian pyramid
%
% Arguments:
% image 'I'
% 'nlev', number of levels in the pyramid (optional)
% subwindow indices 'subwindow', given as [r1 r2 c1 c2] (optional)
%
% [email protected], August 2007
% [email protected], March 2011 [modified to handle subwindows]
%
function pyr = gaussian_pyramid(I,nlev,subwindow)
r = size(I,1);
c = size(I,2);
if ~exist('subwindow','var')
subwindow = [1 r 1 c];
end
if ~exist('nlev','var')
nlev = numlevels([r c]); % build highest possible pyramid
end
% start by copying the image to the finest level
pyr = cell(nlev,1);
pyr{1} = I;
% recursively downsample the image
filter = pyramid_filter;
for l = 2:nlev
[I,subwindow] = downsample(I,filter,subwindow); % thread the child subwindow so decimation parity stays correct at deeper levels
pyr{l} = I;
end
| github | jacksky64/imageProcessing-master | pyramid_filter.m | .m | imageProcessing-master/edgeAwareFilters/localLaplacian/pyramid_filter.m | 404 | utf_8 | e4ad49a0eb06ac9461d5eca754362697 |
% This is a 2D separable low pass filter for constructing Gaussian and
% Laplacian pyramids, built from a 1D 5-tap low pass filter.
%
% [email protected], August 2007
% [email protected], March 2011 [imfilter faster with 2D filter]
%
function f = pyramid_filter()
f = [.05, .25, .4, .25, .05]; % original [Burt and Adelson, 1983]
%f = [.0625, .25, .375, .25, .0625]; % binom-5
f = f'*f;
end
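%
% Sanity check (a sketch): the 1D kernel sums to 1, so the separable 2D
% filter f'*f also sums to 1 and preserves the mean image intensity:
%   f = pyramid_filter; abs(sum(f(:)) - 1) < 1e-12   % true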
| github | jacksky64/imageProcessing-master | downsample.m | .m | imageProcessing-master/edgeAwareFilters/localLaplacian/downsample.m | 1,192 | utf_8 | 7181a465fce3611807bbaf1b8cc1cfc9 |
% Downsampling procedure.
%
% Arguments:
% 'I': image
% downsampling filter 'filter', should be a 2D separable filter.
% 'border_mode' should be 'circular', 'symmetric', or 'replicate'. See 'imfilter'.
% subwindow indices 'subwindow', given as [r1 r2 c1 c2] (optional)
%
% [email protected], August 2007
% [email protected], March 2011 [handle subwindows, reweighted boundaries]
%
function [R,subwindow_child] = downsample(I, filter, subwindow)
r = size(I,1);
c = size(I,2);
if ~exist('subwindow','var')
subwindow = [1 r 1 c];
end
subwindow_child = child_window(subwindow);
border_mode = 'reweighted';
%border_mode = 'symmetric';
switch border_mode
case 'reweighted'
% low pass, convolve with 2D separable filter
R = imfilter(I,filter);
% reweight, brute force weights from 1's in valid image positions
Z = imfilter(ones(size(I)),filter);
R = R./Z;
otherwise
% low pass, convolve with 2D separable filter
R = imfilter(I,filter,border_mode);
end
% decimate
reven = mod(subwindow(1),2)==0;
ceven = mod(subwindow(3),2)==0;
R = R(1+reven:2:r, 1+ceven:2:c, :);
end
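%
% Design note (a sketch, not part of the original code): the 'reweighted'
% mode divides by the filtered indicator Z, so constant signals remain
% constant at the borders where part of the kernel support is missing:
%   R = downsample(ones(8), pyramid_filter);
%   max(abs(R(:) - 1))   % near zero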
| github | jacksky64/imageProcessing-master | lapfilter_core.m | .m | imageProcessing-master/edgeAwareFilters/localLaplacian/lapfilter_core.m | 3,421 | utf_8 | 322bf34feaded4e8e02a88ea5334fb41 |
% Laplacian Filtering
% - public Matlab implementation for reproducibility
% - about 30x slower than our single-thread C++ version
%
% This script implements the core image processing algorithm
% described in Paris, Hasinoff, and Kautz, "Local Laplacian Filters:
% Edge-aware Image Processing with a Laplacian Pyramid", ACM
% Transactions on Graphics (Proc. SIGGRAPH 2011), 30(4), 2011.
%
% Processes an input image using a general pointwise remapping function
% r(I,g0) in a pyramid-based way. Its running time is O(N log N), where
% N is the number of pixels.
%
% Most of the code is bookkeeping to isolate the subpyramid contributing
% to a particular Laplacian coefficient. See below for a 14-line naive
% O(N^2) implementation that gives identical results.
%
% Arguments:
% image 'I'
% pixel-wise remapping function 'r', expects arguments r(I,g0)
%
% [email protected], March 2011
%
function R = lapfilter_core(I,r)
G = gaussian_pyramid(I); % compute input Gaussian pyramid
% build up the result, one Laplacian coefficient at a time
L = laplacian_pyramid(zeros(size(I))); % allocate space for result
tic;
for lev0 = 1:length(L)-1
hw = 3*2^lev0 - 2; % half-width of full-res footprint (conservative)
fprintf('level %d (%dx%d), footprint %dx%d ... %3d%%',lev0,size(G{lev0},1),size(G{lev0},2),min(2*hw+1,size(I,1)),min(2*hw+1,size(I,2)),0);
for y0 = 1:size(G{lev0},1)
for x0 = 1:size(G{lev0},2)
% coords in full-res image corresponding to (lev0,y0,x0)
yf = (y0-1)*2^(lev0-1) + 1;
xf = (x0-1)*2^(lev0-1) + 1;
% subwindow in full-res image needed to evaluate (lev0,y0,x0) in result
yrng = [max(1,yf-hw) min(size(I,1),yf+hw)];
xrng = [max(1,xf-hw) min(size(I,2),xf+hw)];
Isub = I(yrng(1):yrng(2),xrng(1):xrng(2),:);
% use the corresponding Gaussian pyramid coefficient to remap
% the full-res subwindow
g0 = G{lev0}(y0,x0,:);
Iremap = r(Isub,g0);
% compute Laplacian pyramid for remapped subwindow
Lremap = laplacian_pyramid(Iremap,lev0+1,[yrng xrng]);
% bookkeeping to compute index of (lev0,y0,x0) within the
% subwindow, at full-res and at current pyramid level
yfc = yf - yrng(1) + 1;
xfc = xf - xrng(1) + 1;
yfclev0 = floor((yfc-1)/2^(lev0-1)) + 1;
xfclev0 = floor((xfc-1)/2^(lev0-1)) + 1;
% set coefficient in result based on the corresponding
% coefficient in the remapped pyramid
L{lev0}(y0,x0,:) = Lremap{lev0}(yfclev0,xfclev0,:);
end
fprintf('\b\b\b\b%3d%%',floor(y0/size(G{lev0},1)*100));
end
fprintf('\n');
end
L{end} = G{end}; % residual not affected
R = reconstruct_laplacian_pyramid(L); % collapse result Laplacian pyramid
toc;
end
%% naive O(N^2) version for reference
%
% G = gaussian_pyramid(I);
% L = laplacian_pyramid(zeros(size(I)));
% for lev0 = 1:length(L)-1
% for y0 = 1:size(G{lev0},1)
% for x0 = 1:size(G{lev0},2)
% g0 = G{lev0}(y0,x0,:);
% Iremap = r(I,g0);
% Lremap = laplacian_pyramid(Iremap,lev0+1);
% L{lev0}(y0,x0,:) = Lremap{lev0}(y0,x0,:);
% end
% end
% end
% L{end} = G{end};
% R = reconstruct_laplacian_pyramid(L);
| github | jacksky64/imageProcessing-master | upsample.m | .m | imageProcessing-master/edgeAwareFilters/localLaplacian/upsample.m | 1,455 | utf_8 | 00e6ea292a13419fd7dcb31030dc82fd |
% Upsampling procedure.
%
% Arguments:
% 'I': image
% 'filter': 2D separable upsampling filter
% parent subwindow indices 'subwindow', given as [r1 r2 c1 c2]
%
% [email protected], August 2007
% [email protected], March 2011 [handle subwindows, reweighted boundaries]
%
function R = upsample(I, filter, subwindow)
% increase size to match dimensions of the parent subwindow,
% about 2x in each dimension
r = subwindow(2) - subwindow(1) + 1;
c = subwindow(4) - subwindow(3) + 1;
k = size(I,3);
reven = mod(subwindow(1),2)==0;
ceven = mod(subwindow(3),2)==0;
border_mode = 'reweighted';
%border_mode = 'symmetric';
switch border_mode
case 'reweighted'
% interpolate, convolve with 2D separable filter
R = zeros(r,c,k);
R(1+reven:2:r, 1+ceven:2:c, :) = I;
R = imfilter(R,filter);
% reweight, brute force weights from 1's in valid image positions
Z = zeros(r,c,k);
Z(1+reven:2:r, 1+ceven:2:c, :) = 1;
Z = imfilter(Z,filter);
R = R./Z;
otherwise
% increase resolution
I = padarray(I,[1 1 0],'replicate'); % pad the image with a 1-pixel border
R = zeros(r+4,c+4,k);
R(1+reven:2:end, 1+ceven:2:end, :) = 4*I;
% interpolate, convolve with 2D separable filter
R = imfilter(R,filter,border_mode);
% remove the border
R = R(3:end-2, 3:end-2, :);
end
end
| github | jacksky64/imageProcessing-master | lapfilter.m | .m | imageProcessing-master/edgeAwareFilters/localLaplacian/lapfilter.m | 4,441 | utf_8 | 63a454542b341b47dbc6c0498e09e930 |
% Laplacian Filtering
% - public Matlab implementation for reproducibility
% - about 30x slower than our single-thread C++ version
%
% This script implements edge-aware detail and tone manipulation as
% described in Paris, Hasinoff, and Kautz, "Local Laplacian Filters:
% Edge-aware Image Processing with a Laplacian Pyramid", ACM
% Transactions on Graphics (Proc. SIGGRAPH 2011), 30(4), 2011.
%
% This is a wrapper around the core algorithm (see lapfilter_core.m).
% It defines the remapping function, the color treatment, the processing
% domain (linear or log), and it implements simple postprocessing
% for our tone mapping results.
%
% Arguments:
% image 'I'
% pixel-wise remapping parameters 'sigma_r', 'alpha', 'beta'
% remapping method for color images 'colorRemapping' ['rgb' or 'lum']
% processing domain 'domain' ['lin' or 'log']
%
% [email protected], April 2011
%
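% Example usage (a sketch; the parameter values, file name, and the
% variable 'hdr' below are illustrative assumptions):
%   I  = im2double(imread('input.png'));
%   % detail enhancement in the linear domain (alpha < 1 boosts detail)
%   R1 = lapfilter(I, 0.4, 0.25, 1, 'rgb', 'lin');
%   % tone mapping of a linear HDR image in the log domain (beta < 1)
%   R2 = lapfilter(hdr, 2.5, 1, 0, 'lum', 'log');
%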
function R = lapfilter(I,sigma_r,alpha,beta,colorRemapping,domain)
% interpret the input arguments
if size(I,3)==1
I = repmat(I,[1 1 3]);
end
if strcmp(domain,'log')
sigma_r = log(sigma_r);
end
% detail remapping function
noise_level = 0.01;
function out = fd(d)
out = d.^alpha;
if alpha<1
tau = smooth_step(noise_level,2*noise_level,d*sigma_r);
out = tau.*out + (1-tau).*d;
end
end
% edge remapping function
function out = fe(a)
out = beta*a;
end
% define the overall pixel-wise remapping function r, using
% the threshold sigma_r for edge-detail separation
switch colorRemapping
case 'rgb'
% process pixels as vectors in RGB color space
r = @(i,g0)(r_color(i,g0,sigma_r,@fd,@fe));
case 'lum'
% save RGB color ratios for later, process the luminance
IY = luminance(I);
Iratio = I ./ repmat(IY+eps,[1 1 3]);
I = IY;
r = @(i,g0)(r_gray(i,g0,sigma_r,@fd,@fe));
otherwise
error('invalid color remapping');
end
% define the processing domain
switch domain
case 'lin',
to_domain = @(I) I;
from_domain = @(R) R;
case 'log',
to_domain = @(I) log(I + eps);
from_domain = @(R) exp(R) - eps;
otherwise
error('invalid domain');
end
% call the core Laplacian filtering algorithm
if alpha==1 && beta==1
R = I;
else
I = to_domain(I);
R = lapfilter_core(I,r);
R = from_domain(R);
end
% postprocessing
if strcmp(domain,'log') && beta<=1
% for tone mapping, remap middle 99% of intensities to
% fixed dynamic range using a gamma curve
DR_desired = 100;
prc_clip = 0.5;
RY = luminance(R);
Rmax_clip = prctile(RY(:),100-prc_clip);
Rmin_clip = prctile(RY(:),prc_clip);
DR_clip = Rmax_clip/Rmin_clip;
exponent = log(DR_desired)/log(DR_clip);
R = max(0,R/Rmax_clip) .^ exponent;
end
if strcmp(colorRemapping,'lum')
% if working with luminance, reintroduce color ratios
R = repmat(R,[1 1 3]) .* Iratio;
end
% clip out of bounds intensities
R = max(0,R);
if beta<=1
R = min(1,R);
end
if strcmp(domain,'log') && beta<=1
% for tone mapping, gamma correct linear intensities for display
gamma_val = 2.2;
R = R.^(1/gamma_val);
end
%% helper functions
% smooth step edge between (xmin,0) and (xmax,1)
function y = smooth_step(xmin,xmax,x)
y = (x - xmin)/(xmax - xmin);
y = max(0,min(1,y));
y = y.^2.*(y-2).^2;
end
% convert RGB to grayscale intensity
function Y = luminance(I)
switch size(I,3),
case 1, Y = I;
case 3, Y = (20*I(:,:,1) + 40*I(:,:,2) + I(:,:,3))/61;
end
end
% color remapping function
function inew = r_color(i,g0,sigma_r,fd,fe)
g0 = repmat(g0,[size(i,1) size(i,2) 1]);
dnrm = sqrt(sum((i-g0).^2,3));
unit = (i-g0)./repmat(eps + dnrm,[1 1 3]);
% detail and edge processing
rd = g0 + unit.*repmat(sigma_r*fd(dnrm/sigma_r),[1 1 3]);
re = g0 + unit.*repmat((fe(dnrm - sigma_r) + sigma_r),[1 1 3]);
% edge-detail separation based on sigma_r threshold
isedge = repmat(dnrm > sigma_r,[1 1 3]);
inew = ~isedge.*rd + isedge.*re;
end
% grayscale remapping function
function inew = r_gray(i,g0,sigma_r,fd,fe)
dnrm = abs(i-g0);
dsgn = sign(i-g0);
% detail and edge processing
rd = g0 + dsgn*sigma_r.*fd(dnrm/sigma_r);
re = g0 + dsgn.*(fe(dnrm - sigma_r) + sigma_r);
% edge-detail separation based on sigma_r threshold
isedge = dnrm > sigma_r;
inew = ~isedge.*rd + isedge.*re;
end
end
| github | jacksky64/imageProcessing-master | localExtrema.m | .m | imageProcessing-master/edgeAwareFilters/localExtrema/localExtrema.m | 1,486 | iso_8859_13 | a49498397d018caeabfa95129072da39 |
%
% [M, Sminima, Smaxima, Eminima, Emaxima] = localExtrema(I, Y, k)
%
% Local Extrema filter
%
% I: the input image data
% Y: the reference/cross/joint data, default to luminance(I)
% k: the width of the neighborhood for identification of local minima/maxima
% default to 3
%
% M: smoothed image (base)
% Sminima: local minima extrema
% Smaxima: local maxima extrema
% Eminima: extremal envelope of the minima extrema
% Emaxima: extremal envelope of the maxima extrema
%
% author: Shuen-Huei (Drake) Guan
%
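% Example usage (a sketch; the file name is an assumption, and
% getColorExact, from Levin et al.'s colorization code, must be on the path):
%   I = im2double(imread('flower.jpg'));
%   [M, Smin, Smax] = localExtrema(I);
%   figure, imshow(M);   % smoothed base layer
%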
function [M, Sminima, Smaxima, Eminima, Emaxima] = localExtrema(I, Y, k)
dim = ndims(I);
channel = size(I, 3);
if (~isa(I, 'double'))
I = double(I)/255;
end
if (~exist('Y'))
%% convert the I into luminance if necessary
if (channel == 3)
yiq = rgb2ntsc(I);
Y = yiq(:, :, 1);
else
Y = I;
end
end
if (~exist('k'))
k = 3;
end
%disp(' Identification of local minima and local maxima of I');
Sminima = double(ordfilt2(Y, k, true(k)) >= Y);
Smaxima = double(ordfilt2(Y, k*k-k+1, true(k)) <= Y);
%disp(' Interpolation of the local minima and maxima to compute minimal and maximal extremal envelopes respectively');
Icolor(:, :, 1) = Y;
for i=1:channel
Icolor(:, :, i+1) = I(:, :, i);
end
Eminima = getColorExact(Sminima, Icolor);
Emaxima = getColorExact(Smaxima, Icolor);
%disp(' Computation of the smoothed mean M as the average of the extremal envelopes');
M = (Eminima(:,:,2:(channel+1)) + Emaxima(:,:,2:(channel+1)))/2;
| github | jacksky64/imageProcessing-master | l0Minimization.m | .m | imageProcessing-master/edgeAwareFilters/l0Minimization/l0Minimization.m | 2,319 | utf_8 | 6d1efb02aba3e8da95d8d1c8f6c435ac |
% Distribution code Version 1.0 -- 09/23/2011 by Jiaya Jia Copyright 2011, The Chinese University of Hong Kong.
%
% The Code is created based on the method described in the following paper
% [1] "Image Smoothing via L0 Gradient Minimization", Li Xu, Cewu Lu, Yi Xu, Jiaya Jia, ACM Transactions on Graphics,
% (SIGGRAPH Asia 2011), 2011.
%
% The code and the algorithm are for non-commercial use only.
function S = l0Minimization(Im, lambda, kappa)
%L0Smooth - Image Smoothing via L0 Gradient Minimization
% S = l0Minimization(Im, lambda, kappa) performs L0 gradient smoothing of the input
% image Im, with smoothness weight lambda and rate kappa.
%
% Paras:
% @Im : Input UINT8 image, both grayscale and color images are acceptable.
% @lambda: Smoothing parameter controlling the degree of smooth. (See [1])
% Typically it is within the range [1e-3, 1e-1], 2e-2 by default.
% @kappa : Parameter that controls the rate. (See [1])
% Small kappa results in more iteratioins and with sharper edges.
% We select kappa in (1, 2].
% kappa = 2 is suggested for natural images.
%
% Example
% ==========
% Im = imread('pflower.jpg');
% S = l0Minimization(Im); % Default Parameters (lambda = 2e-2, kappa = 2)
% figure, imshow(Im), figure, imshow(S);
if ~exist('kappa','var')
kappa = 2.0;
end
if ~exist('lambda','var')
lambda = 2e-2;
end
S = im2double(Im);
betamax = 1e5;
fx = [1, -1];
fy = [1; -1];
[N,M,D] = size(Im);
sizeI2D = [N,M];
otfFx = psf2otf(fx,sizeI2D);
otfFy = psf2otf(fy,sizeI2D);
Normin1 = fft2(S);
Denormin2 = abs(otfFx).^2 + abs(otfFy ).^2;
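% abs(otfFx).^2 + abs(otfFy).^2 is the transfer function of the discrete
% Laplacian built from fx and fy, so 1 + beta*Denormin2 (below) is the
% Fourier-domain denominator that diagonalizes the quadratic S-subproblem.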
if D>1
Denormin2 = repmat(Denormin2,[1,1,D]);
end
beta = 2*lambda;
while beta < betamax
Denormin = 1 + beta*Denormin2;
% h-v subproblem
h = [diff(S,1,2), S(:,1,:) - S(:,end,:)];
v = [diff(S,1,1); S(1,:,:) - S(end,:,:)];
if D==1
t = (h.^2+v.^2)<lambda/beta;
else
t = sum((h.^2+v.^2),3)<lambda/beta;
t = repmat(t,[1,1,D]);
end
h(t)=0; v(t)=0;
% S subproblem
Normin2 = [h(:,end,:) - h(:, 1,:), -diff(h,1,2)];
Normin2 = Normin2 + [v(end,:,:) - v(1, :,:); -diff(v,1,1)];
FS = (Normin1 + beta*fft2(Normin2))./Denormin;
S = real(ifft2(FS));
beta = beta*kappa;
fprintf('.');
end
fprintf('\n');
end
| github | jacksky64/imageProcessing-master | kde.m | .m | imageProcessing-master/kde/kde.m | 5,629 | utf_8 | 3e2bd285297fe3ee3a2e8bc3fe7b3c00 |
function [bandwidth,density,xmesh,cdf]=kde(data,n,MIN,MAX)
% Reliable and extremely fast kernel density estimator for one-dimensional data;
% Gaussian kernel is assumed and the bandwidth is chosen automatically;
% Unlike many other implementations, this one is immune to problems
% caused by multimodal densities with widely separated modes (see example). The
% estimation does not deteriorate for multimodal densities, because we never assume
% a parametric model for the data.
% INPUTS:
% data - a vector of data from which the density estimate is constructed;
% n - the number of mesh points used in the uniform discretization of the
% interval [MIN, MAX]; n has to be a power of two; if n is not a power of two, then
% n is rounded up to the next power of two, i.e., n is set to n=2^ceil(log2(n));
% the default value of n is n=2^12;
% MIN, MAX - defines the interval [MIN,MAX] on which the density estimate is constructed;
% the default values of MIN and MAX are:
% MIN=min(data)-Range/10 and MAX=max(data)+Range/10, where Range=max(data)-min(data);
% OUTPUTS:
% bandwidth - the optimal bandwidth (Gaussian kernel assumed);
% density - column vector of length 'n' with the values of the density
% estimate at the grid points;
% xmesh - the grid over which the density estimate is computed;
% - If no output is requested, then the code automatically plots a graph of
% the density estimate.
% cdf - column vector of length 'n' with the values of the cdf
% Reference:
% Kernel density estimation via diffusion
% Z. I. Botev, J. F. Grotowski, and D. P. Kroese (2010)
% Annals of Statistics, Volume 38, Number 5, pages 2916-2957.
%
% Example:
% data=[randn(100,1);randn(100,1)*2+35 ;randn(100,1)+55];
% kde(data,2^14,min(data)-5,max(data)+5);
data=data(:); %make data a column vector
if nargin<2 % if n is not supplied switch to the default
n=2^14;
end
n=2^ceil(log2(n)); % round up n to the next power of 2;
if nargin<4 %define the default interval [MIN,MAX]
minimum=min(data); maximum=max(data);
Range=maximum-minimum;
MIN=minimum-Range/2; MAX=maximum+Range/2;
end
% set up the grid over which the density estimate is computed;
R=MAX-MIN; dx=R/(n-1); xmesh=MIN+[0:dx:R]; N=length(unique(data));
%bin the data uniformly using the grid defined above;
initial_data=histc(data,xmesh)/N; initial_data=initial_data/sum(initial_data);
a=dct1d(initial_data); % discrete cosine transform of initial data
% now compute the optimal bandwidth^2 using the referenced method
I=[1:n-1]'.^2; a2=(a(2:end)/2).^2;
% use fzero to solve the equation t=zeta*gamma^[5](t)
t_star=root(@(t)fixed_point(t,N,I,a2),N);
% smooth the discrete cosine transform of initial data using t_star
a_t=a.*exp(-[0:n-1]'.^2*pi^2*t_star/2);
% now apply the inverse discrete cosine transform
if (nargout>1)|(nargout==0)
density=idct1d(a_t)/R;
density(density<0)=eps; % remove negatives due to round-off error (guarded here so a single-output call does not fail)
end
% take the rescaling of the data into account
bandwidth=sqrt(t_star)*R;
if nargout==0
figure(1), plot(xmesh,density)
end
% for cdf estimation
if nargout>3
f=2*pi^2*sum(I.*a2.*exp(-I*pi^2*t_star));
t_cdf=(sqrt(pi)*f*N)^(-2/3);
% now get values of cdf on grid points using IDCT and cumsum function
a_cdf=a.*exp(-[0:n-1]'.^2*pi^2*t_cdf/2);
cdf=cumsum(idct1d(a_cdf))*(dx/R);
% take the rescaling into account if the bandwidth value is required
bandwidth_cdf=sqrt(t_cdf)*R;
end
end
%################################################################
function out=fixed_point(t,N,I,a2)
% this implements the function t-zeta*gamma^[l](t)
l=7;
f=2*pi^(2*l)*sum(I.^l.*a2.*exp(-I*pi^2*t));
for s=l-1:-1:2
K0=prod([1:2:2*s-1])/sqrt(2*pi); const=(1+(1/2)^(s+1/2))/3;
time=(2*const*K0/N/f)^(2/(3+2*s));
f=2*pi^(2*s)*sum(I.^s.*a2.*exp(-I*pi^2*time));
end
out=t-(2*N*sqrt(pi)*f)^(-2/5);
end
%##############################################################
function out = idct1d(data)
% computes the inverse discrete cosine transform
[nrows,ncols]=size(data);
% Compute weights
weights = nrows*exp(i*(0:nrows-1)*pi/(2*nrows)).';
% Compute x tilde using equation (5.93) in Jain
data = real(ifft(weights.*data));
% Re-order elements of each column according to equations (5.93) and
% (5.94) in Jain
out = zeros(nrows,1);
out(1:2:nrows) = data(1:nrows/2);
out(2:2:nrows) = data(nrows:-1:nrows/2+1);
% Reference:
% A. K. Jain, "Fundamentals of Digital Image
% Processing", pp. 150-153.
end
%##############################################################
function data=dct1d(data)
% computes the discrete cosine transform of the column vector data
[nrows,ncols]= size(data);
% Compute weights to multiply DFT coefficients
weight = [1;2*(exp(-i*(1:nrows-1)*pi/(2*nrows))).'];
% Re-order the elements of the columns of x
data = [ data(1:2:end,:); data(end:-2:2,:) ];
% Multiply FFT by weights:
data= real(weight.* fft(data));
end
function t=root(f,N)
% try to find smallest root whenever there is more than one
N=50*(N<=50)+1050*(N>=1050)+N*((N<1050)&(N>50));
tol=10^-12+0.01*(N-50)/1000;
flag=0;
while flag==0
try
t=fzero(f,[0,tol]);
flag=1;
catch
tol=min(tol*2,.1); % double search interval
end
if tol==.1 % if all else fails
t=fminbnd(@(x)abs(f(x)),0,.1); flag=1;
end
end
end
| github | jacksky64/imageProcessing-master | GraphCut.m | .m | imageProcessing-master/segmentation/GCmex1.9/GraphCut.m | 15,577 | utf_8 | 5b4177c7da3c1f8130580912c90db429 |
function [gch, varargout] = GraphCut(mode, varargin)
%
% Performing Graph Cut energy minimization operations on a 2D grid.
%
% Usage:
% [gch ...] = GraphCut(mode, ...);
%
%
% Inputs:
% - mode: a string specifying mode of operation. See details below.
%
% Output:
% - gch: A handle to the constructed graph. Handle this handle with care
% and don't forget to close it in the end!
%
% Possible modes:
% - 'open': Create a new graph object
% [gch] = GraphCut('open', DataCost, SmoothnessCost);
% [gch] = GraphCut('open', DataCost, SmoothnessCost, vC, hC);
% [gch] = GraphCut('open', DataCost, SmoothnessCost, SparseSmoothness);
%
% Inputs:
% - DataCost a height by width by num_labels matrix where
% Dc(r,c,l) equals the cost for assigning label l to pixel at (r,c)
% Note that the graph dimensions, and the number of labels are deduced
% form the size of the DataCost matrix.
% When using SparseSmoothness Dc is of (L)x(P) where L is the
% number of labels and P is the number of nodes/pixels in the
% graph.
% - SmoothnessCost a #labels by #labels matrix where Sc(l1, l2)
% is the cost of assigning neighboring pixels with label1 and
% label2. This cost is spatially invariant.
% - vC, hC: optional arrays defining a spatially varying smoothness cost.
% Single precision arrays of size width*height.
% The smoothness cost is computed using:
% V_pq(l1, l2) = V(l1, l2) * w_pq
% where V is the SmoothnessCost matrix
% w_pq is a spatially varying parameter:
% if p=(r,c) and q=(r+1,c) then w_pq = vCue(r,c)
% if p=(r,c) and q=(r,c+1) then w_pq = hCue(r,c)
% (therefore in practice the last row of vC and
% the last column of hC are not used).
% - SparseSmoothness: a sparse matrix defining both the graph
% structure (might be other than grid) and the spatially varying
% smoothness term. Must be real positive sparse matrix of size
% num_pixels by num_pixels, each non zero entry (i,j) defines a link
% between pixels i and j with w_pq = SparseSmoothness(i,j).
%
% - 'set': Set labels
% [gch] = GraphCut('set', gch, labels)
%
% Inputs:
% - labels: a width by height array containing a label per pixel.
% Array should be the same size of the grid with values
% [0..num_labels].
%
%
% - 'get': Get current labeling
% [gch labels] = GraphCut('get', gch)
%
% Outputs:
% - labels: a height by width array, containing a label per pixel.
% note that labels values are in range [0..num_labels-1].
%
%
% - 'energy': Get current values of energy terms
% [gch se de] = GraphCut('energy', gch)
% [gch e] = GraphCut('energy', gch)
%
% Outputs:
% - se: Smoothness energy term.
% - de: Data energy term.
% - e = se + de
%
%
% - 'expand': Perform labels expansion
% [gch labels] = GraphCut('expand', gch)
% [gch labels] = GraphCut('expand', gch, iter)
% [gch labels] = GraphCut('expand', gch, [], label)
% [gch labels] = GraphCut('expand', gch, [], label, indices)
%
% When no inputs are provided, GraphCut performs expansion steps
% until it converges.
%
% Inputs:
% - iter: a double scalar, the maximum number of expand
% iterations to perform.
% - label: scalar denoting the label for which to perform
% expand step (labels are [0..num_labels-1]).
% - indices: array of linear indices of pixels for which
% expand step is computed.
%
% Outputs:
% - labels: a width*height array of type int32, containing a
% label per pixel. note that labels values must be is range
% [0..num_labels-1].
%
%
% - 'swap': Perform alpha - beta swappings
% [gch labels] = GraphCut('swap', gch)
% [gch labels] = GraphCut('swap', gch, iter)
% [gch labels] = GraphCut('swap', gch, label1, label2)
%
% When no inputs are provided, GraphCut performs alpha - beta swaps steps
% until it converges.
%
% Inputs:
% - iter: a double scalar, the maximum number of swap
% iterations to perform.
% - label1, label2: int32 scalars denoting two labels for swap
% step.
%
% Outputs:
% - labels: a width*height array of type int32, containing a
% label per pixel. note that labels values must be is range
% [0..num_labels-1].
%
%
% - 'truncate': truncating (or not) violating expansion terms
% (see Rother etal. Digital Tapestry, CVPR2005)
% [gch truncate_flag] = GraphCut('truncate', gch, truncate_flag);
%
% When no truncate_flag is provided the function returns the current
% state of truncation
%
% Inputs:
% - truncate_flag: set truncation to this state
%
% Outputs:
% - truncate_flag: current state (after modification if
% applicable)
%
% - 'close': Close the graph and release allocated resources.
% [gch] = GraphCut('close', gch);
%
%
%
% This wrapper for Matlab was written by Shai Bagon ([email protected]).
% Department of Computer Science and Applied Mathematics
% Weizmann Institute of Science
% http://www.wisdom.weizmann.ac.il/
%
% The core cpp application was written by Olga Veksler
% (available from http://www.csd.uwo.ca/faculty/olga/code.html):
%
% [1] Efficient Approximate Energy Minimization via Graph Cuts
% Yuri Boykov, Olga Veksler, Ramin Zabih,
% IEEE transactions on PAMI, vol. 20, no. 12, p. 1222-1239, November
% 2001.
%
% [2] What Energy Functions can be Minimized via Graph Cuts?
% Vladimir Kolmogorov and Ramin Zabih.
% IEEE Transactions on Pattern Analysis and Machine Intelligence
% (PAMI), vol. 26, no. 2,
% February 2004, pp. 147-159.
%
% [3] An Experimental Comparison of Min-Cut/Max-Flow Algorithms
% for Energy Minimization in Vision.
% Yuri Boykov and Vladimir Kolmogorov.
% In IEEE Transactions on Pattern Analysis and Machine Intelligence
% (PAMI),
% vol. 26, no. 9, September 2004, pp. 1124-1137.
%
% [4] Matlab Wrapper for Graph Cut.
% Shai Bagon.
% in www.wisdom.weizmann.ac.il/~bagon, December 2006.
%
% This software can be used only for research purposes, you should cite ALL of
% the aforementioned papers in any resulting publication.
% If you wish to use this software (or the algorithms described in the
% aforementioned paper)
% for commercial purposes, you should be aware that there is a US patent:
%
% R. Zabih, Y. Boykov, O. Veksler,
% "System and method for fast approximate energy minimization via
% graph cuts ",
% United States Patent 6,744,923, June 1, 2004
%
%
% The Software is provided "as is", without warranty of any kind.
%
%
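% Example (a sketch based on the documented interface above; the data
% costs and thresholds are illustrative assumptions): binary segmentation
% of a grayscale image I with values in [0,1], using a Potts prior.
%
%   Dc  = cat(3, (I - 0.2).^2, (I - 0.8).^2);  % data costs for labels 0, 1
%   Sc  = [0 1; 1 0];                          % Potts smoothness cost
%   gch = GraphCut('open', Dc, Sc);
%   [gch, labels] = GraphCut('expand', gch);   % expand until convergence
%   gch = GraphCut('close', gch);
%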
switch lower(mode)
case {'o', 'open'}
% open a new graph cut
if nargout ~= 1
error('GraphCut:Open: wrong number of output arguments');
end
gch = OpenGraph(varargin{:});
case {'c', 'close'}
% close the GraphCut handle - free memory.
if nargin ~= 2
error('GraphCut:Close: Too many inputs');
end
gch = varargin{1};
[gch] = GraphCutMex(gch, 'c');
case {'g', 'get'}
% get current labeling
if nargout ~= 2
error('GraphCut:GetLabels: wrong number of outputs');
end
[gch labels] = GetLabels(varargin{:});
varargout{1} = labels;
case {'s', 'set'}
% set user defined labeling
if nargout ~= 1
error('GraphCut:SetLabels: Too many outputs');
end
[gch] = SetLabels(varargin{:});
case {'en', 'n', 'energy'}
% get current energy values
if nargin ~= 2
error('GraphCut:GetEnergy: too many input arguments');
end
gch = varargin{1};
[gch se de] = GraphCutMex(gch, 'n');
switch nargout
case 2
varargout{1} = se+de;
case 3
varargout{1} = se;
varargout{2} = de;
case 1
otherwise
error('GraphCut:GetEnergy: wrong number of output arguments')
end
case {'e', 'ex', 'expand'}
% use expand steps to minimize energy
if nargout > 2
error('GraphCut:Expand: too many output arguments');
end
[gch labels] = Expand(varargin{:});
if nargout == 2
varargout{1} = labels;
end
case {'sw', 'a', 'ab', 'swap'}
% use alpha beta swapping to minimize energy
if nargout > 2
error('GraphCut:Expand: too many output arguments');
end
[gch labels] = Swap(varargin{:});
if nargout == 2
varargout{1} = labels;
end
case {'truncate'}
if numel(varargin) == 2
gch = varargin{1};
[gch tf] = GraphCutMex(gch, 't', varargin{2});
elseif numel(varargin) == 1
gch = varargin{1};
[gch tf] = GraphCutMex(gch, 't');
else
error('GraphCut:Truncate: wrong number of input arguments');
end
if nargout > 2
error('GraphCut:Truncate: too many output arguments');
end
if nargout == 2
varargout{1} = tf;
end
otherwise
error('GraphCut: Unrecognized mode %s', mode);
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Aux functions
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function gch = OpenGraph(varargin)
% Usage [gch] = OpenGraph(Dc, Sc, [vC, hC]) - 2D grid
% or [gch] = OpenGraph(Dc, Sc, [Contrast]) -3D grid
% or [gch] = GraphCut(DataCost, SmoothnessCost, SparseSmoothness) - any graph
nin = numel(varargin);
if (nin~=2) && (nin ~= 3) && (nin~=4)
error('GraphCut:Open: wrong number of inputs');
end
% Data cost
Dc = varargin{1};
if ndims(Dc) == 4
%%% 3D graph
[R C Z L] = size(Dc);
if ~strcmp(class(Dc),'single')
Dc = single(Dc);
end
Dc = permute(Dc,[4 1 2 3]);
Dc = Dc(:)';
% smoothness cost
Sc = varargin{2};
if any( size(Sc) ~= [L L] )
error('GraphCut:Open: smoothness cost has incorrect size');
end
if ~strcmp(class(Sc),'single')
Sc = single(Sc);
end
Sc = Sc(:)';
if nin == 3
Contrast = varargin{3};
if any( size(Contrast) ~= [R C Z] )
error('GraphCut:Open: Contrast term is of wrong size');
end
if ~strcmp(class(Contrast),'single')
Contrast = single(Contrast);
end
Contrast = Contrast(:);
gch = GraphCut3dConstr(R, C, Z, L, Dc, Sc, Contrast);
elseif nin == 2
gch = GraphCut3dConstr(R, C, Z, L, Dc, Sc);
else
error('GraphCut:Open: wrong number of inputs for 3D graph');
end
elseif ndims(Dc) == 3
%%% 2D graph
[h w l] = size(Dc);
if ~strcmp(class(Dc),'single')
Dc = single(Dc);
end
Dc = permute(Dc,[3 2 1]);
Dc = Dc(:)';
% smoothness cost
Sc = varargin{2};
if any( size(Sc) ~= [l l] )
error('GraphCut:Open: smoothness cost has incorrect size');
end
if ~strcmp(class(Sc),'single')
Sc = single(Sc);
end
Sc = Sc(:)';
if nin==4
vC = varargin{3};
if any( size(vC) ~= [h w] )
error('GraphCut:Open: vertical cue size incorrect');
end
if ~strcmp(class(vC),'single')
vC = single(vC);
end
vC = vC';
hC = varargin{4};
if any( size(hC) ~= [h w] )
error('GraphCut:Open: horizontal cue size incorrect');
end
if ~strcmp(class(hC),'single')
hC = single(hC);
end
hC = hC';
gch = GraphCutConstr(w, h, l, Dc, Sc, vC(:), hC(:));
elseif nin == 2
gch = GraphCutConstr(w, h, l, Dc, Sc);
else
error('GraphCut:Open: wrong number of input for 2D grid');
end
elseif ndims(Dc) == 2
%%% arbitrary graph
if nin ~= 3
error('GraphCut:Open', 'incorrect number of inputs');
end
[nl np] = size(Dc);
Sc = varargin{2};
if any(size(Sc) ~= [nl nl])
error('GraphCut:Open', 'Wrong size of Dc or Sc');
end
SparseSc = varargin{3};
if any(size(SparseSc) ~= [np np])
error('GraphCut:Open', 'Wrong size of SparseSc');
end
gch = GraphCutConstrSparse(single(Dc(:)), single(Sc(:)), SparseSc);
else
%%% Unknown dimensionality...
error('GraphCut:Open: data cost has incorrect dimensionality');
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function [gch] = SetLabels(varargin)
% usage: [gch] = SetLabels(gch, labels)
if nargin ~= 2
error('GraphCut:SetLabels: wrong number of inputs');
end
gch = varargin{1};
labels = varargin{2};
if ~strcmp(class(labels), 'int32')
labels = int32(labels);
end
labels = labels';
[gch] = GraphCutMex(gch, 's', labels(:));
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function [gch labels] = GetLabels(varargin)
if nargin ~= 1
error('GraphCut:GetLabels: wrong number of inputs');
end
gch = varargin{1};
[gch labels] = GraphCutMex(gch, 'g');
labels = labels';
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function [gch labels] = Expand(varargin)
gch = varargin{1};
switch nargin
case 1
[gch labels] = GraphCutMex(gch, 'e');
case 2
[gch labels] = GraphCutMex(gch, 'e', varargin{2});
case 3
[gch labels] = GraphCutMex(gch, 'e', varargin{2}, varargin{3});
case 4
ind = varargin{4};
ind = int32(ind(:)-1)'; % convert to int32
[gch labels] = GraphCutMex(gch, 'e', varargin{2}, varargin{3}, ind);
otherwise
error('GraphCut:Expand: wrong number of inputs');
end
labels = labels';
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function [gch labels] = Swap(varargin)
gch = varargin{1};
switch nargin
case 1
[gch labels] = GraphCutMex(gch, 'a');
case 2
[gch labels] = GraphCutMex(gch, 'a', varargin{2});
case 3
[gch labels] = GraphCutMex(gch, 'a', varargin{2}, varargin{3});
otherwise
error('GraphCut:Swap: wrong number of input arguments');
end
labels = labels';
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
| github | jacksky64/imageProcessing-master | lse_bfe_3Phase.m | .m | imageProcessing-master/segmentation/levelset_segmentation_biasCorrection_v1/levelset_segmentation_biasCorrection_v1/lse_bfe_3Phase.m | 3,611 | utf_8 | c4544181814d6f42f72d47a243691641 |
function [u, b, C]= lse_bfe_3Phase(u,Img,b,Ksigma,KONE, nu,timestep,mu, epsilon,Iter)
% This code implements the level set evolution (LSE) and bias field estimation
% proposed in the following paper:
% C. Li, R. Huang, Z. Ding, C. Gatenby, D. N. Metaxas, and J. C. Gore,
% "A Level Set Method for Image Segmentation in the Presence of Intensity
% Inhomogeneities with Application to MRI", IEEE Trans. Image Processing, 2011
%
% Note:
% This code implements the three-phase formulation of the model in the above paper.
% The three-phase formulation is used to segment an image into three regions.
% The code for four-phase or other multi-phase formulation will be released later
% in the website below.
%
% All rights reserved by Chunming Li, who formulated the model, designed and
% implemented the algorithm in the above paper.
%
% E-mail: [email protected]
% URL: http://www.engr.uconn.edu/~cmli/
% Copyright (c) by Chunming Li
% Author: Chunming Li
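% Typical use (a sketch; the outer loop and the initialization of u and b
% are assumptions based on this interface, not code from the authors):
%   % u(:,:,1:2): two level set functions; b: initial bias field, e.g. ones
%   for n = 1:numIter
%       [u, b, C] = lse_bfe_3Phase(u, Img, b, Ksigma, KONE, nu, timestep, mu, epsilon, 1);
%   end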
N_class = 3; % three-phase
KB1 = conv2(b,Ksigma,'same');
KB2 = conv2(b.^2,Ksigma,'same');
H1 = Heaviside(u(:,:,1),epsilon );
H2 = Heaviside(u(:,:,2),epsilon );
M(:,:,1)=H1.*H2; % membership function 1
M(:,:,2)=H1.*(1-H2); % membership function 2
M(:,:,3)=(1-H1); % membership function 3
C = updateC(Img, KB1, KB2, M);
KONE_Img = Img.^2.*KONE;
u = updateLSF(Img,u, C, N_class, KONE_Img, KB1, KB2, mu, nu, timestep, epsilon, Iter);
b =updateB(Img, C, M, Ksigma);
%
function u = updateLSF(Img,u, C, N_class, KONE_Img, KB1, KB2, mu, nu, timestep, epsilon, Iter)
u(:,:,1)=NeumannBoundCond(u(:,:,1));
Curv(:,:,1)=curvature_central(u(:,:,1));
H1 = Heaviside(u(:,:,1),epsilon );
Delta(:,:,1) = Dirac(u(:,:,1),epsilon);
u(:,:,2)=NeumannBoundCond(u(:,:,2));
Curv(:,:,2)=curvature_central(u(:,:,2));
H2 = Heaviside(u(:,:,2),epsilon );
Delta(:,:,2) = Dirac(u(:,:,2),epsilon);
e=zeros([size(Img),N_class]);
for kk=1:N_class
e(:,:,kk) = KONE_Img - 2*Img.*C(kk).*KB1 + C(kk)^2*KB2;
end
A1 = - Delta(:,:,1).*(e(:,:,1).*H2 + e(:,:,2).*(1-H2) - e(:,:,3));
P1=mu*(4*del2(u(:,:,1))-Curv(:,:,1));
L1=nu.*Delta(:,:,1).*Curv(:,:,1);
u(:,:,1) = u(:,:,1)+timestep*(L1+P1+A1); % update u1
A2 = - Delta(:,:,2).*H1.*(e(:,:,1)-e(:,:,2));
P2=mu*(4*del2(u(:,:,2))-Curv(:,:,2));
L2=nu.*Delta(:,:,2).*Curv(:,:,2);
u(:,:,2) = u(:,:,2)+timestep*(L2+P2+A2); % update u2
%
function C=updateC(Img, Kb1, Kb2, M)
N_class = size(M,3);
for kk=1:N_class
N2 = Kb1.*Img.*M(:,:,kk);
D2 = Kb2.*M(:,:,kk);
sN2 = sum(N2(:));
sD2 = sum(D2(:));
C(kk)=sN2/(sD2+(sD2==0));
end
%%%%%%%%%%%%%%%%%
function b=updateB(Img, C, M, Ksigma)
PC1=zeros(size(Img));
PC2=PC1;
N_class=size(M,3);
for kk=1:N_class
PC1=PC1+C(kk)*M(:,:,kk);
PC2=PC2+C(kk)^2*M(:,:,kk);
end
KNm1 = conv2(PC1.*Img,Ksigma,'same');
KDn1 = conv2(PC2,Ksigma,'same');
b = KNm1./KDn1;
function h = Heaviside(x,epsilon) % function (11)
h=0.5*(1+(2/pi)*atan(x./epsilon));
function f = Dirac(x, epsilon) % function (12)
f=(epsilon/pi)./(epsilon^2.+x.^2);
function K_curvature = curvature_central(u);
[ux,uy] = gradient(u);
normDu = sqrt(ux.^2+uy.^2+1e-20);
Nx = ux./normDu;
Ny = uy./normDu;
[nxx,junk] = gradient(Nx);
[junk,nyy] = gradient(Ny);
K_curvature = nxx+nyy;
function g = NeumannBoundCond(f)
% Make a function satisfy Neumann boundary condition
[nrow,ncol] = size(f);
g = f;
g([1 nrow],[1 ncol]) = g([3 nrow-2],[3 ncol-2]);
g([1 nrow],2:end-1) = g([3 nrow-2],2:end-1);
g(2:end-1,[1 ncol]) = g(2:end-1,[3 ncol-2]);
|