plateform: stringclasses (1 value)
repo_name: stringlengths (13 to 113)
name: stringlengths (3 to 74)
ext: stringclasses (1 value)
path: stringlengths (12 to 229)
size: int64 (23 to 843k)
source_encoding: stringclasses (9 values)
md5: stringlengths (32 to 32)
text: stringlengths (23 to 843k)
github
bsxfan/meta-embeddings-master
read_hdf5.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/classes/@Key/read_hdf5.m
1,196
utf_8
4057278a996259de22fed6ee29c5d3b2
function key = read_hdf5(infilename)
% Reads a Key object from an hdf5 file.
% Inputs:
%   infilename: The name of the hdf5 file to read.
% Outputs:
%   key: A Key object created from the information in the hdf5
%     file.

assert(nargin==1)
assert(isa(infilename,'char'))

key = Key();

key.modelset = h5strings_to_cell(infilename,'/ID/row_ids');
key.segset = h5strings_to_cell(infilename,'/ID/column_ids');

oldformat = false;
info = hdf5info(infilename);
datasets = info.GroupHierarchy.Datasets;
for ii=1:length(datasets)
    if strcmp(datasets(ii).Name,'/target_mask')
        oldformat = true;
    end
end

if oldformat
    key.tar = logical(hdf5read(infilename,'/target_mask','V71Dimensions',true));
    key.non = logical(hdf5read(infilename,'/nontarget_mask','V71Dimensions',true));
else
    trialmask = hdf5read(infilename,'/trial_mask','V71Dimensions',true);
    key.tar = trialmask > 0.5;
    key.non = trialmask < -0.5;
end

assert(key.validate())

function cellstrarr = h5strings_to_cell(infilename,attribname)
tmp = hdf5read(infilename,attribname,'V71Dimensions',true);
numentries = length(tmp);
cellstrarr = cell(numentries,1);
for ii=1:numentries
    cellstrarr{ii} = tmp(ii).Data;
end
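A minimal usage sketch (the file name 'trials_key.h5' is a hypothetical placeholder; the static-style call mirrors how Ndx.filter is invoked in this snapshot's test code, and assumes the bosaris_toolkit class directories are on the MATLAB path):

% Hypothetical example: read a Key and summarize it.
key = Key.read_hdf5('trials_key.h5');
fprintf('%i models, %i segments, %i target trials\n', ...
    length(key.modelset), length(key.segset), full(sum(key.tar(:))));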
github
bsxfan/meta-embeddings-master
filter.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/classes/@Ndx/filter.m
2,788
utf_8
6d39760ecafc786f43259d1adb98a810
function outndx = filter(inndx,modlist,seglist,keep)
% Removes some of the information in an Ndx. Useful for creating a
% gender specific Ndx from a pooled gender Ndx. Depending on the
% value of 'keep', the two input lists indicate the strings to
% retain or the strings to discard.
% Inputs:
%   inndx: An Ndx object.
%   modlist: A cell array of strings which will be compared with
%     the modelset of 'inndx'.
%   seglist: A cell array of strings which will be compared with
%     the segset of 'inndx'.
%   keep: A boolean indicating whether modlist and seglist are the
%     models to keep or discard.
% Outputs:
%   outndx: A filtered version of 'inndx'.

if nargin == 0
    test_this();
    return
end

assert(nargin==4)
assert(isa(inndx,'Ndx'))
assert(inndx.validate())
assert(iscell(modlist))
assert(iscell(seglist))

if keep
    keepmods = modlist;
    keepsegs = seglist;
else
    keepmods = setdiff(inndx.modelset,modlist);
    keepsegs = setdiff(inndx.segset,seglist);
end

keepmodidx = ismember(inndx.modelset,keepmods);
keepsegidx = ismember(inndx.segset,keepsegs);

outndx = Ndx();
outndx.modelset = inndx.modelset(keepmodidx);
outndx.segset = inndx.segset(keepsegidx);
outndx.trialmask = inndx.trialmask(keepmodidx,keepsegidx);

assert(outndx.validate())

if length(inndx.modelset) > length(outndx.modelset)
    log_info('Number of models reduced from %d to %d.\n',length(inndx.modelset),length(outndx.modelset));
end
if length(inndx.segset) > length(outndx.segset)
    log_info('Number of test segments reduced from %d to %d.\n',length(inndx.segset),length(outndx.segset));
end
end

function test_this()
ndx = Ndx();
ndx.modelset = {'aaa','bbb','ccc','ddd'};
ndx.segset = {'11','22','33','44','55'};
ndx.trialmask = true(4,5);

fprintf('ndx.modelset\n');
disp(ndx.modelset)
fprintf('ndx.segset\n');
disp(ndx.segset)
fprintf('ndx.trialmask\n');
disp(ndx.trialmask)

modlist = {'bbb','ddd'}
seglist = {'11','55'}

keep = true
out = Ndx.filter(ndx,modlist,seglist,keep);
fprintf('out.modelset\n');
disp(out.modelset)
fprintf('out.segset\n');
disp(out.segset)
fprintf('out.trialmask\n');
disp(out.trialmask)

keep = false
out = Ndx.filter(ndx,modlist,seglist,keep);
fprintf('out.modelset\n');
disp(out.modelset)
fprintf('out.segset\n');
disp(out.segset)
fprintf('out.trialmask\n');
disp(out.trialmask)

modlist = {'bbb','ddd','eee'}
seglist = {'11','66','77','55'}

keep = true
out = Ndx.filter(ndx,modlist,seglist,keep);
fprintf('out.modelset\n');
disp(out.modelset)
fprintf('out.segset\n');
disp(out.segset)
fprintf('out.trialmask\n');
disp(out.trialmask)

keep = false
out = Ndx.filter(ndx,modlist,seglist,keep);
fprintf('out.modelset\n');
disp(out.modelset)
fprintf('out.segset\n');
disp(out.segset)
fprintf('out.trialmask\n');
disp(out.trialmask)
end
github
bsxfan/meta-embeddings-master
read_hdf5.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/classes/@Ndx/read_hdf5.m
838
utf_8
424ae971c22eb22cf8c27af6130b9698
function ndx = read_hdf5(infilename)
% Creates an Ndx object from the information in an hdf5 file.
% Inputs:
%   infilename: The name of the hdf5 file containing the information
%     necessary to construct an Ndx object.
% Outputs:
%   ndx: An Ndx object containing the information in the input
%     file.

assert(nargin==1)
assert(isa(infilename,'char'))

ndx = Ndx();

ndx.modelset = h5strings_to_cell(infilename,'/ID/row_ids');
ndx.segset = h5strings_to_cell(infilename,'/ID/column_ids');
ndx.trialmask = logical(hdf5read(infilename,'/trial_mask','V71Dimensions',true));

assert(ndx.validate())

function cellstrarr = h5strings_to_cell(infilename,attribname)
tmp = hdf5read(infilename,attribname,'V71Dimensions',true);
numentries = length(tmp);
cellstrarr = cell(numentries,1);
for ii=1:numentries
    cellstrarr{ii} = tmp(ii).Data;
end
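A minimal usage sketch (hypothetical file name; same static-style call pattern as above, assuming the @Ndx class directory is on the MATLAB path):

% Hypothetical example: read an Ndx and count the requested trials.
ndx = Ndx.read_hdf5('trials_ndx.h5');
fprintf('%i models x %i segments, %i trials requested\n', ...
    length(ndx.modelset), length(ndx.segset), full(sum(ndx.trialmask(:))));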
github
bsxfan/meta-embeddings-master
filter_on_right.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/classes/@Id_Map/filter_on_right.m
1,885
utf_8
deb124220c828ae065475bc93957d53f
function out_idmap = filter_on_right(in_idmap,idlist,keep)
% Removes some of the information in an idmap. Depending on the
% value of 'keep', the idlist indicates the strings to retain or
% the strings to discard.
% Inputs:
%   in_idmap: An Id_Map object to be pruned.
%   idlist: A cell array of strings which will be compared with
%     the rightids of 'in_idmap'.
%   keep: A boolean indicating whether idlist contains the ids to
%     keep or to discard.
% Outputs:
%   out_idmap: A filtered version of 'in_idmap'.

if nargin == 0
    test_this();
    return
end

assert(nargin==3)
assert(isa(in_idmap,'Id_Map'))
assert(in_idmap.validate())
assert(iscell(idlist))

if keep
    keepids = idlist;
else
    keepids = setdiff(in_idmap.rightids,idlist);
end

keep_idx = ismember(in_idmap.rightids,keepids);

out_idmap = Id_Map();
out_idmap.leftids = in_idmap.leftids(keep_idx);
out_idmap.rightids = in_idmap.rightids(keep_idx);

assert(out_idmap.validate(false))
end

function test_this()
idmap = Id_Map();
idmap.leftids = {'aaa','bbb','ccc','bbb','ddd','eee'};
idmap.rightids = {'11','22','33','44','55','22'};

fprintf('idmap.leftids\n');
disp(idmap.leftids)
fprintf('idmap.rightids\n');
disp(idmap.rightids)

idlist = {'22','44'}

keep = true
out = Id_Map.filter_on_right(idmap,idlist,keep);
fprintf('out.leftids\n');
disp(out.leftids)
fprintf('out.rightids\n');
disp(out.rightids)

keep = false
out = Id_Map.filter_on_right(idmap,idlist,keep);
fprintf('out.leftids\n');
disp(out.leftids)
fprintf('out.rightids\n');
disp(out.rightids)

idlist = {'11','33','66'}

keep = true
out = Id_Map.filter_on_right(idmap,idlist,keep);
fprintf('out.leftids\n');
disp(out.leftids)
fprintf('out.rightids\n');
disp(out.rightids)

keep = false
out = Id_Map.filter_on_right(idmap,idlist,keep);
fprintf('out.leftids\n');
disp(out.leftids)
fprintf('out.rightids\n');
disp(out.rightids)
end
github
bsxfan/meta-embeddings-master
read_hdf5.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/classes/@Id_Map/read_hdf5.m
777
utf_8
47581f23817e49ffc325aed95a088106
function idmap = read_hdf5(infilename)
% Creates an Id_Map object from the information in an hdf5 file.
% Inputs:
%   infilename: The name of the hdf5 file containing the information
%     necessary to construct an Id_Map object.
% Outputs:
%   idmap: An Id_Map object containing the information in the input
%     file.

assert(nargin==1)
assert(isa(infilename,'char'))

idmap = Id_Map();

idmap.leftids = h5strings_to_cell(infilename,'/left_ids');
idmap.rightids = h5strings_to_cell(infilename,'/right_ids');

assert(idmap.validate())

function cellstrarr = h5strings_to_cell(infilename,attribname)
tmp = hdf5read(infilename,attribname,'V71Dimensions',true);
numentries = length(tmp);
cellstrarr = cell(numentries,1);
for ii=1:numentries
    cellstrarr{ii} = tmp(ii).Data;
end
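A minimal usage sketch (hypothetical file name; assumes the @Id_Map class directory is on the MATLAB path):

% Hypothetical example: read an Id_Map and report its size.
idmap = Id_Map.read_hdf5('idmap.h5');
fprintf('%i (left,right) id pairs\n', length(idmap.leftids));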
github
bsxfan/meta-embeddings-master
filter_on_left.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/classes/@Id_Map/filter_on_left.m
1,871
utf_8
27abe0c92b6ff488892e389fee1fb5e9
function out_idmap = filter_on_left(in_idmap,idlist,keep)
% Removes some of the information in an idmap. Depending on the
% value of 'keep', the idlist indicates the strings to retain or
% the strings to discard.
% Inputs:
%   in_idmap: An Id_Map object to be pruned.
%   idlist: A cell array of strings which will be compared with
%     the leftids of 'in_idmap'.
%   keep: A boolean indicating whether idlist contains the ids to
%     keep or to discard.
% Outputs:
%   out_idmap: A filtered version of 'in_idmap'.

if nargin == 0
    test_this();
    return
end

assert(nargin==3)
assert(isa(in_idmap,'Id_Map'))
assert(in_idmap.validate())
assert(iscell(idlist))

if keep
    keepids = idlist;
else
    keepids = setdiff(in_idmap.leftids,idlist);
end

keep_idx = ismember(in_idmap.leftids,keepids);

out_idmap = Id_Map();
out_idmap.leftids = in_idmap.leftids(keep_idx);
out_idmap.rightids = in_idmap.rightids(keep_idx);

assert(out_idmap.validate(false))
end

function test_this()
idmap = Id_Map();
idmap.leftids = {'aaa','bbb','ccc','bbb','ddd'};
idmap.rightids = {'11','22','33','44','55'};

fprintf('idmap.leftids\n');
disp(idmap.leftids)
fprintf('idmap.rightids\n');
disp(idmap.rightids)

idlist = {'bbb','ddd'}

keep = true
out = Id_Map.filter_on_left(idmap,idlist,keep);
fprintf('out.leftids\n');
disp(out.leftids)
fprintf('out.rightids\n');
disp(out.rightids)

keep = false
out = Id_Map.filter_on_left(idmap,idlist,keep);
fprintf('out.leftids\n');
disp(out.leftids)
fprintf('out.rightids\n');
disp(out.rightids)

idlist = {'bbb','ddd','eee'}

keep = true
out = Id_Map.filter_on_left(idmap,idlist,keep);
fprintf('out.leftids\n');
disp(out.leftids)
fprintf('out.rightids\n');
disp(out.rightids)

keep = false
out = Id_Map.filter_on_left(idmap,idlist,keep);
fprintf('out.leftids\n');
disp(out.leftids)
fprintf('out.rightids\n');
disp(out.rightids)
end
github
bsxfan/meta-embeddings-master
L_BFGS.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/optimization/L_BFGS.m
5,021
utf_8
ccfcc8e580c4dfa191a42bfcbaf055cb
function [w,y,mem,logs] = L_BFGS(obj,w,maxiters,timeout,mem,stpsz0,callback)
% L-BFGS Quasi-Newton unconstrained optimizer.
% -- This has a small interface change from LBFGS.m --
%
% Inputs:
%   obj: optimization objective, with interface: [y,grad] = obj(w),
%        where w is the parameter vector, y is the scalar objective value
%        and grad is a function handle, so that grad(1) gives the gradient
%        (same size as w).
%   w: the initial parameter vector
%   maxiters: max number of LBFGS iterations (line search iterations do not
%             count towards this limit).
%   timeout: LBFGS will stop when timeout (in seconds) is reached.
%   mem is either: (i) A struct with previously computed LBFGS data, to
%                      allow resumption of iteration.
%                  (ii) An integer: the size of the LBFGS memory. A good
%                       default is 20.
%

%some linesearch magic numbers
maxfev = 20;      %max number of function evaluations
stpmin = 1e-15;   %same as Poblano default
stpmax = 1e15;    %same as Poblano default
ftol = 1e-5;      % as recommended by Nocedal (c1 in his book)
gtol = 0.9;       % as recommended by Nocedal (c2 in his book)
xtol = 1e-15;     %same as Poblano default
quiet = false;

%termination parameters
%stopTol = 1e-5;    %same as Poblano
relFuncTol = 1e-6;  %same as Poblano

if ~exist('stpsz0','var') || isempty(stpsz0)
    stpsz0 = 1;
end
stpsz = stpsz0;

if ~exist('timeout','var') || isempty(timeout)
    timeout = 15*60;
    fprintf('timeout defaulted to 15 minutes');
end;

if ~exist('callback','var') || isempty(callback)
    ncbLogs = 0;
else
    ncbLogs = length( callback(w) );
end;

tic;

dim = length(w);

if ~isstruct(mem)
    m = mem;
    mem = [];
    mem.m = m;
    mem.sz = 0;
    mem.rho = zeros(1,m);
    mem.S = zeros(dim,m);
    mem.Y = zeros(dim,m);
else
    m = mem.m;
end

if ~exist('y','var') || isempty(y)
    [y,grad] = obj(w);
    g = grad(1);
    fprintf('LBFGS 0: obj = %g, ||g||=%g\n',y,sqrt(g'*g));
end
initial_y = y;

logs = zeros(3+ncbLogs, maxiters);
nlogs = 0;

gmag = sqrt(g'*g);

k = 0;
while true

    if gmag < eps
        fprintf('LBFGS converged with tiny gradient\n');
        break;
    end

    % choose direction
    p = -Hprod(g,mem);
    assert(g'*p<0,'p is not downhill');

    % line search
    g0 = g;
    y0 = y;
    w0 = w;
    [w,y,grad,g,alpha,info,nfev] = minpack_cvsrch(obj,w,y,g,p,stpsz,...
                                                  ftol,gtol,xtol,...
                                                  stpmin,stpmax,maxfev,quiet);
    stpsz = 1;

    delta_total = abs(initial_y-y);
    delta = abs(y0-y);
    if delta_total>eps
        relfunc = delta/delta_total;
    else
        relfunc = delta;
    end
    gmag = sqrt(g'*g);

    if info==1 %Wolfe is happy
        sk = w-w0;
        yk = g-g0;
        dot = sk'*yk;
        assert(dot>0);
        if mem.sz==m
            mem.S(:,1:m-1) = mem.S(:,2:m);
            mem.Y(:,1:m-1) = mem.Y(:,2:m);
            mem.rho(:,1:m-1) = mem.rho(:,2:m);
        else
            mem.sz = mem.sz + 1;
        end
        sz = mem.sz;
        mem.S(:,sz) = sk;
        mem.Y(:,sz) = yk;
        mem.rho(sz) = 1/dot;
        fprintf('LBFGS %i: ||g||/n = %g, rel = %g\n',k+1,gmag/length(g),relfunc);
    else
        fprintf('LBFGS %i: NO UPDATE, info = %i, ||g||/n = %g, rel = %g\n',k+1,info,gmag/length(g),relfunc);
    end

    time = toc;
    nlogs = nlogs+1;
    if ncbLogs > 0
        logs(:,nlogs) = [time; y; nfev; callback(w)'];
        disp(logs(4:end,nlogs)');
    else
        logs(:,nlogs) = [time;y;nfev];
    end

    k = k + 1;
    if k>=maxiters
        fprintf('LBFGS stopped: maxiters exceeded\n');
        break;
    end

    if time>timeout
        fprintf('LBFGS stopped: timeout\n');
        break;
    end

    if relfunc < relFuncTol
        fprintf('\nTDN: stopped with minimal function change\n');
        break;
    end

end
logs = logs(:,1:nlogs);
end

function r = Hprod(q,mem)
    if mem.sz==0
        r = q;
        return;
    end
    sz = mem.sz;
    S = mem.S;
    Y = mem.Y;
    rho = mem.rho;
    alpha = zeros(1,sz);
    for i=sz:-1:1
        alpha(i) = rho(i)*S(:,i)'*q;
        q = q - alpha(i)*Y(:,i);
    end
    yy = sum(Y(:,sz).^2,1);
    r = q/(rho(sz)*yy);
    for i=1:sz
        beta = rho(i)*Y(:,i)'*r;
        r = r + S(:,i)*(alpha(i)-beta);
    end
end
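A minimal usage sketch on a hypothetical quadratic objective, showing the two-output interface where the second output is a function handle returning the gradient (assumes minpack_cvsrch from this snapshot is on the path):

% Hypothetical objective: f(w) = 0.5*||w - 1||^2; grad(1) returns w - 1.
obj = @(w) deal(0.5*sum((w-1).^2), @(s) s*(w-1));
w0 = zeros(10,1);
[w,y] = L_BFGS(obj, w0, 50, 60, 20);  % 50 iterations max, 60 s timeout, memory size 20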
github
bsxfan/meta-embeddings-master
create_PYCRP.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/CRP/create_PYCRP.m
10,787
utf_8
482772d72ab94176d598b555bd28ec19
function PYCRP = create_PYCRP(alpha,beta,e,n)
% alpha: alpha>=0, concentration
% beta: 0<= beta <=1, discount

if nargin==0
    %test_this2();
    test_Gibbsmatrix()
    return;
end

if nargin==4
    PYCRP = create_PYCRP(1,0);
    PYCRP.set_expected_number_tables(e,n,alpha,beta);
    return;
else
    assert(nargin==2);
end

assert(alpha>=0);
assert(beta>=0 && beta<=1);

PYCRP.logprob = @logprob;
PYCRP.logprob3 = @logprob3;
PYCRP.sample = @sample;
PYCRP.expected_number_tables = @expected_number_tables;
PYCRP.set_expected_number_tables = @set_expected_number_tables;
PYCRP.ent = @ent;
PYCRP.getParams = @getParams;
PYCRP.GibbsMatrix = @GibbsMatrix;
PYCRP.slowGibbsMatrix = @slowGibbsMatrix;

    function [concentration,discount] = getParams()
        concentration = alpha;
        discount = beta;
    end

    function e = expected_number_tables(n)
        e = ent(alpha,beta,n);
    end

    function e = ent(alpha,beta,n)
        if alpha==0 && beta==0
            e = 1;
        elseif isinf(alpha)
            e = n;
        elseif alpha>0 && beta>0
            A = gammaln(alpha + beta + n) + gammaln(alpha + 1) ...
                - log(beta) - gammaln(alpha+n) - gammaln(alpha+beta);
            B = alpha/beta;
            e = B*expm1(A-log(B));  %exp(A)-B
        elseif alpha>0 && beta==0
            e = alpha.*( psi(n+alpha) - psi(alpha) );
        elseif alpha==0 && beta>0
            A = gammaln(beta + n) - log(beta) - gammaln(n) - gammaln(beta);
            e = exp(A);
        end
    end

    function [flag,concentration,discount] = set_expected_number_tables(e,n,concentration,discount)
        if ~isempty(concentration) && ~isempty(discount)
            error('you can''t specify both parameters');
        end
        if isempty(concentration) && isempty(discount)
            error('you must specify one parameter');
        end
        if e<1 || e>n
            error('expectation must be between 1 and %i',n);
        end
        if isempty(concentration)
            assert(discount>=0 && discount<1);
            beta = discount;
            if beta==0 && e==1
                alpha = 0;
                concentration = alpha;
                flag = 1;
                return;
            elseif e==n
                alpha = inf;
                concentration = alpha;
                flag = 1;
                return;
            end
            min_e = ent(0,beta,n);
            if e < min_e
                error('e=%g is impossible at discount=%g, minimum is e=%g',e,beta,min_e);
            end
            f = @(logalpha) ent(exp(logalpha),beta,n) - e;
            [logalpha,~,flag] = fzero(f,0);
            alpha = exp(logalpha);
            concentration = alpha;
        elseif isempty(discount)
            assert(concentration>=0);
            alpha = concentration;
            if alpha==0 && e==1
                beta = 0;
                discount = beta;
                flag = 1;
                return;
            elseif e==n
                beta = 1;
                discount = beta;
                flag = 1;
                return;
            end
            min_e = ent(alpha,0,n);
            if e < min_e
                error('e=%g is impossible at concentration=%g, minimum is e=%g',e,alpha,min_e);
            end
            f = @(logitbeta) ent(alpha,sigmoid(logitbeta),n) - e;
            [logitbeta,~,flag] = fzero(f,0);
            beta = sigmoid(logitbeta);
            discount = beta;
        end
    end

    function y = sigmoid(x)
        y = 1./(1+exp(-x));
    end

    function logP = logprob(counts)  %Wikipedia
        K = length(counts);
        T = sum(counts);
        if isinf(alpha) && beta==1  %singleton tables
            if all(counts==1)
                logP = 0;
            else
                logP = -inf;
            end
            return;
        end
        if alpha==0 && beta==0  %single table
            if K==1
                logP = 0;
            else
                logP = -inf;
            end
            return;
        end
        if alpha>0 && beta>0  % 2-param Pitman-Yor generalization
            logP = gammaln(alpha) - gammaln(alpha+T) + K*log(beta) ...
                   + gammaln(alpha/beta + K) - gammaln(alpha/beta) ...
                   + sum(gammaln(counts-beta)) ...
                   - K*gammaln(1-beta);
        elseif beta==0 && alpha>0  % classical CRP
            logP = gammaln(alpha) + K*log(alpha) - gammaln(alpha+T) + sum(gammaln(counts));
        elseif beta>0 && alpha==0
            logP = (K-1)*log(beta) + gammaln(K) - gammaln(T) ...
                   - K*gammaln(1-beta) + sum(gammaln(counts-beta));
        end
    end

    % Seems wrong
    % function logP = logprob2(counts) % Goldwater
    %     K = length(counts);
    %     T = sum(counts);
    %     if beta>0 %Pitman-Yor generalization
    %         logP = gammaln(1+alpha) - gammaln(alpha+T) ...
    %                + sum(beta*(1:K-1)+alpha) ...
    %                + sum(gammaln(counts-beta)) ...
    %                - K*gammaln(1-beta);
    %     else %1 parameter CRP
    %         logP = gammaln(1+alpha) + (K-1)*log(alpha) - gammaln(alpha+T) + sum(gammaln(counts));
    %     end
    % end

    % Agrees with Wikipedia version (faster for small counts)
    function logP = logprob3(counts)
        logP = 0;
        n = 0;
        for k=1:length(counts)
            % seat first customer at new table
            if k>1
                logP = logP + log((alpha+(k-1)*beta)/(n+alpha));
            end
            n = n + 1;
            % seat the rest at this table
            for i = 2:counts(k)
                logP = logP + log((i-1-beta)/(n+alpha));
                n = n + 1;
            end
        end
    end

    % GibbsMatrix. Computes matrix of conditional log-probabilities
    % suitable for Gibbs sampling, or pseudolikelihood calculation.
    % Input:
    %   labels: n-vector, maps each of n customers to a table in 1..m
    % Output:
    %   logP: (m+1)-by-n matrix of **unnormalized** log-probabilities
    %         logP(i,j) = log P(customer j at table i | seating of all others) + const
    %         table m+1 is a new table
    function [logP,empties] = GibbsMatrix(labels)
        m = max(labels);
        n = length(labels);
        blocks = sparse(labels,1:n,true);
        counts = full(sum(blocks,2));  %original table sizes
        logP = repmat(log([counts-beta;alpha+m*beta]),1,n);  %most common values for every row
        %return;
        empties = false(1,n);  %new empty table when customer j removed
        for i=1:m
            cmin = counts(i) - 1;
            tar = blocks(i,:);
            if cmin==0  %table empty
                logP(i,tar) = log(alpha + (m-1)*beta);
                empties(tar) = true;
            else
                logP(i,tar) = log(cmin-beta);
            end
        end
        logP(m+1,empties) = -inf;
    end

    function logP = slowGibbsMatrix(labels)
        m = max(labels);
        n = length(labels);
        blocks = sparse(labels,1:n,true,m+1,n);
        counts = full(sum(blocks,2));  %original table sizes
        logP = zeros(m+1,n);
        for j=1:n
            cj = counts;
            tj = labels(j);
            cj(tj) = cj(tj) - 1;
            nz = cj>0;
            k = sum(nz);
            if k==m
                logP(nz,j) = log(cj(nz) - beta);
                logP(m+1,j) = log(alpha + m*beta);
            else  %new empty table
                logP(nz,j) = log(cj(nz) - beta);
                logP(tj,j) = log(alpha + k*beta);
                logP(m+1,j) = -inf;
            end
        end
    end

    function [labels,counts] = sample(T)
        labels = zeros(1,T);
        counts = zeros(1,T);
        labels(1) = 1;
        K = 1;  %number of classes
        counts(1) = 1;
        for i=2:T
            p = zeros(K+1,1);
            p(1:K) = counts(1:K) - beta;
            p(K+1) = alpha + K*beta;
            [~,k] = max(randgumbel(K+1,1) + log(p));
            labels(i) = k;
            if k>K
                K = K + 1;
                assert(k==K);
                counts(k) = 1;
            else
                counts(k) = counts(k) + 1;
            end
        end
        counts = counts(1:K);
        labels = labels(randperm(T));
    end

end

function test_this2()
    T = 20;
    e = 10;
    N = 1000;
    crp1 = create_PYCRP(0,[],e,T);
    [concentration,discount] = crp1.getParams()
    crp2 = create_PYCRP([],0,e,T);
    [concentration,discount] = crp2.getParams()
    K1 = zeros(1,T);
    K2 = zeros(1,T);
    for i=1:N
        [~,counts] = crp1.sample(T);
        K = length(counts);
        K1(K) = K1(K) + 1;
        [~,counts] = crp2.sample(T);
        K = length(counts);
        K2(K) = K2(K) + 1;
    end
    e1 = sum((1:T).*K1)/N
    e2 = sum((1:T).*K2)/N
    close all;
    subplot(2,1,1);bar(1:T,K1);
    subplot(2,1,2);bar(1:T,K2);
    K1 = K1/sum(K1);
    K2 = K2/sum(K2);
    %dlmwrite('K1.table',[(1:T)',K1'],' ');
    %dlmwrite('K2.table',[(1:T)',K2'],' ');
    for i=1:T
        fprintf('(%i,%6.4f) ',2*i-1,K1(i))
    end
    fprintf('\n');
    for i=1:T
        fprintf('(%i,%6.4f) ',2*i,K2(i))
    end
    fprintf('\n');
end

function test_Gibbsmatrix()
    alpha = randn^2;
    beta = rand;
    PYCRP = create_PYCRP(alpha,beta);
    labels = [1 1 1 2 1 3 4 4]
    logP = exp(PYCRP.slowGibbsMatrix(labels))
    logP = exp(PYCRP.GibbsMatrix(labels))
end

function test_this()
    alpha1 = 0.0;
    beta1 = 0.6;
    crp1 = create_PYCRP(alpha1,beta1);
    alpha2 = 0.1;
    beta2 = 0.6;
    crp2 = create_PYCRP(alpha2,beta2);
    close all;
    figure;
    hold;
    for i=1:100;
        L1 = crp1.sample(100);
        L2 = crp2.sample(100);
        C1 = full(sum(int2onehot(L1),2));
        C2 = full(sum(int2onehot(L2),2));
        x11 = crp1.logprob(C1);
        x12 = crp2.logprob3(C1);
        x22 = crp2.logprob3(C2);
        x21 = crp1.logprob(C2);
        plot(x11,x12,'.g');
        plot(x21,x22,'.b');
    end
    figure;hold;
    crp = crp1;
    for i=1:100;
        L1 = crp.sample(100);
        C1 = full(sum(int2onehot(L1),2));
        x = crp.logprob(C1);
        y = crp.logprob3(C1);
        plot(x,y);
    end
end
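A minimal usage sketch (parameter values are illustrative; sample relies on randgumbel from this snapshot being on the path):

% Illustrative: a Pitman-Yor CRP with concentration 1 and discount 0.3.
PYCRP = create_PYCRP(1,0.3);
[labels,counts] = PYCRP.sample(50);   % random partition of 50 customers
logP = PYCRP.logprob(counts)          % log-probability of that partition
PYCRP.expected_number_tables(50)      % expected number of tables for 50 customers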
github
bsxfan/meta-embeddings-master
rand_fake_Dirichlet.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/language_recognition/synth_data/rand_fake_Dirichlet.m
810
utf_8
3070cfe2d8487d8ae48c7740fdc24c2d
function R = rand_fake_Dirichlet(alpha,m,n)
% This is no longer Dirichlet. I replaced it with a faster ad-hoc
% distribution.
%
% Generates m-by-n matrix of n samples from m-category Dirichlet, with
% concentration parameter: alpha > 0.

if nargin==0
    test_this();
    return;
end

%R = reshape(randgamma(alpha,1,m*n),m,n);
R = exp(alpha*randn(m,n).^2);
R = bsxfun(@rdivide,R,sum(R,1));
end

function E = app_exp(X)
XX = X.^2/2;
XXX = XX.*X/3;
XXXX = XXX.*X/4;
E = XXXX + XXX + XX + X + 1;
end

function test_this()
close all;
m = 400;
%alpha = 1/(2*m);
alpha = 2;
n = 5000;
R = rand_fake_Dirichlet(alpha,m,n);
maxR = max(R,[],1);
hist(maxR,100);
% n = 50;
% R = randDirichlet(alpha,m,n);
% hist(R(:),100);
end
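A quick sanity check (illustrative sizes): whatever the ad-hoc distribution, every column should still lie on the probability simplex.

R = rand_fake_Dirichlet(2, 5, 3);  % 5 categories, 3 samples
disp(sum(R,1))                     % each column sums to 1
disp(all(R(:) >= 0))               % all entries are non-negative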
github
bsxfan/meta-embeddings-master
create_T_backend.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/language_recognition/T_backend/create_T_backend.m
6,482
utf_8
ee013ab7facb848049ba586dfb7b6f33
function TBE = create_T_backend(nu,dim,K)
% Create a (multivariate) T-distribution generative backend for multiclass classification.
% The classes have different means, but the scatter matrix and degrees of
% freedom are common to all classes.
%
% This object provides a method for supervised ML training (EM algorithm),
% as well as a method for scoring at runtime (class log-likelihoods).
%
% Inputs:
%   nu: scalar >0, degrees of freedom
%   dim: data dimensionality
%   K: number of classes
%
% Typical usage:
% > TBE = create_T_backend(nu,dim,K);  %nu is fixed by user and not learnt during training
% > TBE.train(TrainData,L,10);  % TrainData: dim-by-N, L: K-by-N, (sparse) one-hot labels
% > LLH = TBE.logLH(TestData)
%
% For EM algorithm, see:
%   Geoffrey J. McLachlan and Thriyambakam Krishnan, The EM Algorithm and Extensions,
%   2nd Ed. John Wiley & Sons, 2008. Section 2.6 EXAMPLE 2.6: MULTIVARIATE t-DISTRIBUTION
%   WITH KNOWN DEGREES OF FREEDOM

if nargin==0
    test_this();
    return;
end

assert(nu>0);

Mu = zeros(dim,K);
C = eye(dim);

R = []; RMu = []; muWmu = []; logdetC = 0;
prepare();

TBE.logLH = @logLH;
TBE.getParams = @getParams;
TBE.setParams = @setParams;
TBE.train = @train;
TBE.simulate = @simulate;
TBE.randParams = @randParams;
TBE.test_error_rate = @test_error_rate;
TBE.cross_entropy = @cross_entropy;

    function [Mu1,C1] = getParams()
        Mu1 = Mu;
        C1 = C;
    end

    function setParams(Mu0,C0)
        Mu = Mu0;
        C = C0;
        prepare();
    end

    function [obj,XE] = train(X,L,niters)
        [d,N] = size(X);
        assert(d==dim);
        [k,n] = size(L);
        assert(k==K && n==N);
        obj = zeros(1,niters+1);
        obj_i = EM_objective(X,L);
        obj(1) = obj_i;
        doXE = nargout>=2;
        if doXE
            XE = zeros(1,niters+1);
            XE_i = cross_entropy(X,L);
            XE(1) = XE_i;
            fprintf('%i: %g, %g\n',0,obj_i,XE_i);
        else
            fprintf('%i: %g\n',0,obj_i);
        end
        for i=1:niters
            EM_iteration(X,L);
            obj_i = EM_objective(X,L);
            obj(i+1) = obj_i;
            if doXE
                XE_i = cross_entropy(X,L);
                XE(i+1) = XE_i;
                fprintf('%i: %g, %g\n',i,obj_i,XE_i);
            else
                fprintf('%i: %g\n',i,obj_i);
            end
        end
    end

    %Class log-likelihood scores, with all irrelevant constants omitted
    function LLH = logLH(X,df)
    %inputs:
    %  X: dim-by-N, data
    %  df: [optional, default df = nu], scalar, df>0, degrees of freedom parameter
    %
    %output:
    %  LLH: K-by-N, class log-likelihoods
        if ~exist('df','var') || isempty(df)
            df = nu;
        else
            assert(df>0);
        end
        Delta = delta(X);
        LLH = (-0.5*(df+dim))*log1p(Delta/df);
    end

    function prepare()
        R = chol(C);            % R'R = C and W = inv(C) = inv(R)*inv(R')
        RMu = R.'\Mu;           % dim-by-K
        muWmu = sum(RMu.^2,1);  % 1-by-K
        logdetC = 2*sum(log(diag(R)));
    end

    function Delta = delta(X)
    %input X: dim-by-N, data
    %output Delta: K-by-N, squared Mahalanobis distances between data and means
        RX = R.'\X;  % dim-by-N
        Delta = bsxfun(@minus,sum(RX.^2,1),(2*RMu).'*RX);  %K-by-N
        Delta = bsxfun(@plus,Delta,muWmu.');
    end

    function EM_iteration(X,L)
        Delta = sum(L.*delta(X),1);  %1-by-N
        u = (nu+dim)./(nu+Delta);    %1-by-N posterior expectations of hidden precision scaling factors
        Lu = bsxfun(@times,L,u);     %K-by-N
        normLu = bsxfun(@rdivide,Lu,sum(Lu,2));
        newMu = X*normLu.';          %dim-by-K
        diff = X - newMu*L;
        newC = (bsxfun(@times,diff,u)*diff.')/sum(u);
        setParams(newMu,newC);
    end

    function obj = EM_objective(X,L)
    % X: dim-by-N, data
    % L: K-by-N, one-hot labels
    % obj: scalar
        LLH = logLH(X);
        N = size(X,2);
        obj = L(:).'*LLH(:) - (N/2)*logdetC;
    end

    function randParams(ncov,muscale)
        assert(ncov>=dim);
        D = randn(dim,ncov);
        C = D*D.';
        setParams(zeros(dim,K),C);
        Mu = muscale*simulate(K);
        setParams(Mu,C);
    end

    function [X,L] = simulate(N,df,L)
        if ~exist('L','var') || isempty(L)
            L = sparse(randi(K,1,N),1:N,1,K,N);
        end
        if ~exist('df','var') || isempty(df)
            df = ceil(nu);
        end
        u = sum(randn(df,N).^2,1)/df;  % chi^2 with df degrees of freedom, scaled so that <u>=1
        X = Mu*L + bsxfun(@rdivide,R.'*randn(dim,N),sqrt(u));
    end

    %assuming flat prior for now
    function e = test_error_rate(X,L)
        N = size(X,2);
        LLH = TBE.logLH(X);
        [~,labels] = max(LLH,[],1);
        Lhat = sparse(labels,1:N,1,K,N);
        e = 1-(L(:).'*Lhat(:))/N;
    end

    %assuming flat prior for now
    function e = cross_entropy(X,L,df)
        if ~exist('df','var') || isempty(df)
            df = nu;
        else
            assert(df>0);
        end
        LLH = TBE.logLH(X,df);
        P = exp(bsxfun(@minus,LLH,max(LLH,[],1)));
        P = bsxfun(@rdivide,P,sum(P,1));
        e = -mean(log(full(sum(L.*P,1))),2)/log(K);
    end

end

function test_this()
    close all;
    dim = 100;  % data dimensionality
    K = 10;     % number of classes
    nu = 3;     % degrees of freedom (t-distribution parameter)
    N = K*1000;

    %create test and train data
    TBE0 = create_T_backend(nu,dim,K);
    TBE0.randParams(dim,5/sqrt(dim));
    [X,L] = TBE0.simulate(N);
    [Xtest,Ltest] = TBE0.simulate(N);

    TBE = create_T_backend(nu,dim,K);
    [obj,XE] = TBE.train(X,L,20);
    subplot(1,2,1);plot(obj);title('error-rate');
    subplot(1,2,2);plot(XE);title('cross-entropy');

    train_error_rate = TBE.test_error_rate(X,L),
    test_error_rate = TBE.test_error_rate(Xtest,Ltest),
    train_XE = TBE.cross_entropy(X,L),
    test_XE = TBE.cross_entropy(Xtest,Ltest),

    df = [0.1:0.1:10];
    XE = zeros(2,length(df));
    for i=1:length(df)
        XE(1,i) = TBE.cross_entropy(X,L,df(i));
        XE(2,i) = TBE.cross_entropy(Xtest,Ltest,df(i));
    end
    figure;plot(df,XE(1,:),df,XE(2,:));
    grid;xlabel('df');ylabel('XE');
    legend('train','test');
end
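A runnable mini-example of the "Typical usage" pattern from the header (sizes are illustrative; the data here is random, so the scores are meaningless):

dim = 5; K = 3; N = 300; nu = 3;
TBE = create_T_backend(nu,dim,K);
X = randn(dim,N);                    % fake training data
L = sparse(randi(K,1,N),1:N,1,K,N);  % random one-hot labels
TBE.train(X,L,5);                    % 5 EM iterations
LLH = TBE.logLH(randn(dim,10));      % K-by-10 class log-likelihoods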
github
bsxfan/meta-embeddings-master
train_TLDIvector.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/language_recognition/TLDIvector/train_TLDIvector.m
4,374
utf_8
6029b83917586ea3e2977c8fad616093
function [W,Mu,TT] = train_TLDIvector(stats_or_ivectors,N,T,TT,nu,labels,niters,W,Mu)
% Inputs:
%   stats_or_ivectors: can be either F, or ivectors
%     F: dm-by-n first-order stats (m: UBM size; d: feature dim; n: no segments)
%     ivectors: k-by-n, classical i-vector point-estimates
%   N: m-by-n zero order stats
%   T: dm-by-k factor loading matrix
%   TT: [optional] k^2-by-m, vectorized precomputed T_i'T_i, i=1:m
%   nu: scalar, nu>0, degrees of freedom
%   labels: 1-by-n, label vector, with label values in 1:L, where L is
%           number of languages.
%   niters: number of VBEM iterations to do
%
%   W: k-by-k within class precision [optional, for initialization]
%   Mu: k-by-L language means [optional, for initialization]
%
%
% Outputs:
%   W: k-by-k within-class precision estimate
%   Mu: k-by-L class mean estimates

if nargin==0
    test_this();
    return;
end

[A,B,k,n] = getPosteriorNatParams(stats_or_ivectors,N,T,TT);
L = max(labels);

if ~exist('Mu','var') || isempty(Mu)
    W = eye(k);
    Mu = zeros(k,L);
else
    assert(all(size(W)==k));
    [k2,L2] = size(Mu);
    assert(k2==k && L==L2);
end

for iter=1:niters
    WMu = W*Mu;
    C = zeros(size(W));
    Pmeans = zeros(k,n);
    for ell=1:L
        tt = find(labels==ell);
        % E-step
        for t=tt
            Pt = W + reshape(B(:,t),k,k);     %posterior precision
            Pmean = Pt\(WMu(:,ell)+A(:,t));   %posterior mean
            Pmeans(:,t) = Pmean;
            C = C + inv(Pt);
        end
        %M-step
        D = Pmeans(:,tt);
        Mu(:,ell) = mean(D,2);
        D = bsxfun(@minus,D,Mu(:,ell));
        C = C + D*D.';
    end
    C = C/n;
    W = inv(C),
    Mu = Mu,
end

end

function test_this
close all;

%dimensions
d = 10;    %feature dimension
m = 10;    %no components
k = 3;     %ivector dimension
n = 1000;  %number of segments
L = 2;     %number of languages

mindur = 2;
maxdur = 100;
niters = 3;

T = randn(d*m,k);
W = randn(k,k*2); W = W*W.'/k;
UBM.logweights = randn(m,1)/5;
UBM.Means = 5*randn(d,m);
Mu = randn(k,L);

[F,N,labels] = make_data(UBM,Mu,W,T,m,d,n,mindur,maxdur);
dur = sum(N,1);
L1 = labels==1;
L2 = labels==2;

TT = precomputeTT(T,d,k,m);
ivectors = stats2ivectors(F,N,T,TT);

LR1 = [1,-1]* score_LDIvector(F,N,T,TT,W,Mu);
%LR2 = [1,-1]* score_LDIvector(ivectors,N,T,TT,W,Mu);
subplot(4,1,1);plot(dur(L1),LR1(L1),'.r',dur(L2),LR1(L2),'.g');

[W2,Mu2] = train_LDIvector(F,N,T,[],labels,niters);
[W3,Mu3] = train_LDIvector(ivectors,N,T,[],labels,niters);
[W4,Mu4,map] = train_standaloneLGBE(ivectors,labels);

LR2 = [1,-1]* score_LDIvector(F,N,T,[],W2,Mu2);
LR3 = [1,-1]* score_CPF(ivectors,N,T,TT,W3,Mu3);
LR4 = [1,-1]*map(ivectors);

subplot(4,1,2);plot(dur(L1),LR2(L1),'.r',dur(L2),LR2(L2),'.g');
subplot(4,1,3);plot(dur(L1),LR3(L1),'.r',dur(L2),LR3(L2),'.g');
subplot(4,1,4);plot(dur(L1),LR4(L1),'.r',dur(L2),LR4(L2),'.g');
end

function [F,N,labels,relConf] = make_data(UBM,Mu,W,T,m,d,n,mindur,maxdur)
[k,L] = size(Mu);
labels = randi(L,1,n);
Labels = sparse(labels,1:n,1,L,n);  %L-by-n one-hot class labels
x = Mu*Labels+chol(W)\randn(k,n);   %i-vectors
Tx = T*x;
dur = randi(1+maxdur-mindur,1,n) + mindur - 1;
dm = d*m;
F = zeros(dm,n);
N = zeros(m,n);
logweights = UBM.logweights;
prior = exp(logweights-max(logweights));
prior = prior/sum(prior);
priorConfusion = exp(-prior.'*log(prior))-1;
relConf = zeros(1,n);
for i=1:n
    D = dur(i);
    states = randcatgumbel(UBM.logweights,D);
    States = sparse(states,1:D,1,m,D);
    X = (reshape(Tx(:,i),d,m)+UBM.Means)*States + randn(d,D);
    Q = bsxfun(@minus,UBM.Means.'*X,0.5*sum(X.^2,1));
    Q = bsxfun(@plus,Q,UBM.logweights-0.5*sum(UBM.Means.^2,1).');
    Q = exp(bsxfun(@minus,Q,max(Q,[],1)));
    Q = bsxfun(@rdivide,Q,sum(Q,1));
    CE = -(States(:).'*log(Q(:)))/D;  %cross entropy
    relConf(i) = (exp(CE)-1)/priorConfusion;
    Ni = sum(Q,2);
    Fi = X*Q.';
    N(:,i) = Ni;
    F(:,i) = Fi(:);
end
end
github
bsxfan/meta-embeddings-master
create_diagonalized_C.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/language_recognition/TLDIvector/create_diagonalized_C.m
4,986
utf_8
c1a642f817fa817097bf89b070af4c04
function C = create_diagonalized_C(B,R,RM,Ra,W,M,a)
% Creates object to represent: C = inv(lambda W + B),
%
% Inputs:
%   B: positive definite matrix (i-vector dimension)
%   R: chol(W), so that R'R=W (i-vector dimension)
%   RM: R*M, where M has language means in columns
%   Ra: (R')\a, vector (i-vector dimension)
%   W,M,a: optional for verification with slow version

if nargin==0
    test_this();
    return;
end

dim = length(Ra);

K = (R.'\B)/R;
[V,D] = eig(K);  %K = V*D*V'
e = diag(D);     %eigenvalues

mWm = sum(RM.^2,1);  %vector of m'Wm, for every language
VRM = V.'*RM;
VRa = V.'*Ra;
VRaVRa = VRa.^2;
VRMVRM = VRM.^2;
VRMVRa = bsxfun(@times,VRa,VRM);

C.traceCW = @traceCW;
C.logdetCW = @logdetCW;
C.quad = @quad;
C.slowQuad = @slowQuad;
C.slow_traceCW = @slow_traceCW;
C.slow_logdetCW = @slow_logdetCW;
C.lambda_by_root = @lambda_by_root;
C.lambda_by_min = @lambda_by_min;
C.lambda_by_fixed_point = @lambda_by_fixed_point;
%C.slow_xCWCx = @slow_xCWCx;
%C.xCWCx = @xCWCx;
%C.xCx = @xCx;
%C.slow_xCx = @slow_xCx;

    function log_lambda = lambda_by_root(nu,log_lambda,ell)
        f = @(log_lambda) log_lambda - log((nu+dim)/(nu+energy(log_lambda,ell)));
        log_lambda = fzero(f,log_lambda);
    end

    function log_lambda = lambda_by_fixed_point(nu,log_lambda,ell,niters)
        f = @(log_lambda) log((nu+dim)/(nu+energy(log_lambda,ell)));
        for i=1:niters
            log_lambda = f(log_lambda);
        end
    end

    function log_lambda = lambda_by_min(nu,log_lambda)
        f = @(log_lambda) (log_lambda - log((nu+dim)/(nu+energy(log_lambda))))^2;
        log_lambda = fminsearch(f,log_lambda);
    end

    function y = energy(log_lambda,ell)
        lambda = exp(log_lambda);
        y = quad(lambda,ell) + traceCW(lambda);
        %fprintf('%i: energy = %g\n%',ell,y);
    end

    function y = quad(lambda,ell)
        s = lambda + e;
        ss = s.^2;
        mWmu = sum(bsxfun(@rdivide, lambda*VRMVRM(:,ell) + VRMVRa(:,ell), s),1);
        muWmu = sum(bsxfun(@rdivide,bsxfun(@plus,...
                    lambda^2*VRMVRM(:,ell) + ...
                    (2*lambda)*VRMVRa(:,ell), ...
                    VRaVRa), ss),1);
        y = mWm(ell) + muWmu - 2*mWmu;
    end

    function y = slowQuad(lambda)
        P = lambda*W + B;
        cholP = chol(P);
        Mu = cholP\(cholP'\bsxfun(@plus,lambda*W*M,a));
        delta = R*(Mu-M);
        y = sum(delta.^2,1);
        %y = sum(Mu.*(W*M),1);
    end

    % function y = xCx(lambda,x)
    %     z = V'*((R.')\x);
    %     s = lambda + e;
    %     y = sum(z.^2./s,1);
    % end
    %
    % function y = xCWCx(lambda,x)
    %     z = V'*((R.')\x);
    %     s = lambda + e;
    %     y = sum(z.^2./s.^2,1);
    % end
    %
    % function y = slow_xCx(lambda,x)
    %     P = lambda*W+B;
    %     y = x'*(P\x);
    % end
    %
    % function y = slow_xCWCx(lambda,x)
    %     P = lambda*W+B;
    %     z = P\x;
    %     y = z.'*W*z;
    % end

    function [y,back] = traceCW(lambda)
        s = lambda + e;
        r = 1./s;
        y = sum(r,1);
        back = @back_this;
        function dlambda = back_this(dy)
            dr = dy;
            ds = (-dr)*r./s;
            dlambda = sum(ds,1);
        end
    end

    function y = slow_traceCW(lambda)
        P = lambda*W + B;
        cholP = chol(P);
        X = cholP.'\R.';
        y = X(:).'*X(:);
    end

    function [y,back] = logdetCW(lambda)
        s = log(lambda) + log1p(e/lambda);
        y = -sum(s,1);
        back = @(dy) (-dy)*sum(1./(lambda+e));
    end

    function y = slow_logdetCW(lambda)
        P = lambda*W + B;
        cholP = chol(P);
        y = 2*( sum(log(diag(R))) - sum(log(diag(cholP))) );
    end

end

function test_this()
    dim = 400;
    L = 1;
    RR = randn(dim,dim); W = RR*RR';
    RR = randn(dim,dim); B = RR*RR';
    a = randn(dim,1);
    M = randn(dim,L);
    R = chol(W);
    C = create_diagonalized_C(B,R,R*M,(R')\a,W,M,a);
    lambda = rand/rand;
    %x = randn(dim,1);

    %[C.xCx(lambda,x),C.slow_xCx(lambda,x)]

    %tic;C.quad(lambda);toc
    %tic;C.slowQuad(lambda);toc

    %C.quad(lambda)
    %C.slowQuad(lambda)

    %[C.traceCW(lambda),C.slow_traceCW(lambda)]
    %[C.logdetCW(lambda),C.slow_logdetCW(lambda)]
    %[C.xCWCx(lambda,x),C.slow_xCWCx(lambda,x)]

    C.lambda_by_root(1,1)
    C.lambda_by_root(1,10)
    C.lambda_by_root(1,0.1)

    C.lambda_by_min(1,1)
    C.lambda_by_min(1,10)
    C.lambda_by_min(1,0.1)

    a = a*0;
    B = B*0;
    C = create_diagonalized_C(B,R,R*M,(R')\a,W,M,a);
    C.lambda_by_root(0.1,0.01)
    C.lambda_by_root(1,10)
    C.lambda_by_root(10,0.1)
    C.lambda_by_min(1,10)
end
github
bsxfan/meta-embeddings-master
create_augmenting_backend.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/language_recognition/augmentation/create_augmenting_backend.m
3,599
utf_8
852abc76e3f7dfa8796f7015880bc788
function ABE = create_augmenting_backend(nu,dim,T,K,L)
% Inputs:
%   nu: scalar nu>0, t-distribution degrees of freedom
%   dim: ivector dimension
%   T: i-vector extractor T-matrix
%   K: UBM size
%   L: number of languages

if nargin==0
    test_this();
    return;
end

assert(dim==size(T,2));

TBE = create_T_backend(nu,dim,L);
augment = [];

ABE.train = @train;
ABE.logLH = @logLH;
ABE.test_error_rate = @test_error_rate;
ABE.cross_entropy = @cross_entropy;

    function [obj,AX] = train(X,Z,Labels,niters,ntiters)
    % X: ivectors
    % Z: zero-order stats
    % Labels: sparse one-hot label matrix
        if ~exist('niters','var') || isempty(niters)
            niters = 1;
        end
        if ~exist('ntiters','var') || isempty(ntiters)
            ntiters = 10;
        end
        assert(size(Labels,1)==L);
        assert(size(Labels,2)==size(X,2));
        assert(size(Labels,2)==size(Z,2));
        assert(size(Z,1)==K);
        assert(size(X,1)==dim);
        AX = X;
        obj = [];
        for i=1:niters
            obj_i = TBE.train(AX,Labels,ntiters);  % starts with parameters from prev. iteration
            obj = [obj(:);obj_i(:)];
            [Mu,C] = TBE.getParams();
            augment = augment_i_vectors(T,K,Mu,C);
            if i<niters || nargout>=2
                AX = augment(X,Z);
            end
        end
    end

    function [LLH,X] = logLH(X,Z)
        if exist('Z','var') && ~isempty(Z)
            X = augment(X,Z);
        end
        LLH = TBE.logLH(X);
    end

    %assuming flat prior for now
    function e = test_error_rate(X,Z,Labels)
        N = size(X,2);
        LLH = logLH(X,Z);
        [~,labels] = max(LLH,[],1);
        Lhat = sparse(labels,1:N,1,L,N);
        e = 1-(Labels(:).'*Lhat(:))/N;
    end

    %assuming flat prior for now
    function e = cross_entropy(X,Z,Labels)
        LLH = logLH(X,Z);
        P = exp(bsxfun(@minus,LLH,max(LLH,[],1)));
        P = bsxfun(@rdivide,P,sum(P,1));
        e = -mean(log(full(sum(Labels.*P,1))),2)/log(L);
    end

end

function test_this()
    big = true;
    if big
        L = 10;          %languages
        K = 1024;        %UBM size
        nu = 2;          %df
        dim = 400;       %ivector dim
        fdim = 40;       % feature dim
        minDur = 3*100;  %3 sec
        maxDur = 30*100; %30 sec
        M = randn(dim,L);
        T = randn(K*fdim,dim);
        RR = randn(dim,2*dim); W = RR*RR';
        Ntrain = 100;
        Ntest = 100;
    else
        L = 3;           %languages
        K = 10;          %UBM size
        nu = 2;          %df
        dim = 40;        %ivector dim
        fdim = 5;        % feature dim
        minDur = 3*100;  %3 sec
        maxDur = 30*100; %30 sec
        M = randn(dim,L);
        T = randn(K*fdim,dim);
        RR = randn(dim,2*dim); W = RR*RR'/100;
        Ntrain = 100;
        Ntest = 100;
    end

    fprintf('generating data\n');
    [F,trainZ,trainLabels] = rand_ivector(M,nu,W,2,K,T,minDur,maxDur,Ntrain);
    [trainX,TT] = stats2ivectors(F,trainZ,T);
    [F,testZ,testLabels] = rand_ivector(M,nu,W,2,K,T,minDur,maxDur,Ntest);
    testX = stats2ivectors(F,testZ,T,TT);
    F = [];

    fprintf('training\n');
    ABE = create_augmenting_backend(nu,dim,T,K,L);
    ABE.train(trainX,trainZ,trainLabels,2);

    train_error_rate = ABE.test_error_rate(trainX,trainZ,trainLabels),
    test_error_rate = ABE.test_error_rate(testX,testZ,testLabels),
    train_XE = ABE.cross_entropy(trainX,trainZ,trainLabels),
    test_XE = ABE.cross_entropy(testX,testZ,testLabels),
end
github
bsxfan/meta-embeddings-master
testBackprop_rs.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/test/testBackprop_rs.m
2,198
utf_8
9d88acaf06002d97bf8eb2fdc07bf7b8
function total = testBackprop_rs(block,X,delta,mask)
%same as testFBblock, but with real step

if ~iscell(X)
    X = {X};
end

dX = cellrndn(X);
if exist('mask','var')
    assert(length(mask)==length(X));
    dX = cellmask(dX,mask);
end

cX1 = cellstep(X,dX,delta);
cX2 = cellstep(X,dX,-delta);

DX = cell(size(X));
[Y,back] = block(X{:});
DY = randn(size(Y));
[DX{:}] = back(DY);  %DX = J'*DY

dot1 = celldot(DX,dX);  %dX' * J' * DY

cY1 = block(cX1{:});
cY2 = block(cX2{:});
[Y2,dY2] = recover(cY1,cY2,delta);  %dY2 = J*dX

dot2 = DY(:).'*dY2(:);  %DY' * J* DX

Y_diff = max(abs(Y(:)-Y2(:))),
jacobian_diff = abs(dot1-dot2),

total = Y_diff + jacobian_diff;
if total < 1e-6
    fprintf('\ntotal error=%g\n',total);
else
    fprintf(2,'\ntotal error=%g\n',total);
end

end

function R = cellrndn(X)
    if ~iscell(X)
        R = randn(size(X));
    else
        R = cell(size(X));
        for i=1:numel(X)
            R{i} = cellrndn(X{i});
        end
    end
end

function C = cellstep(X,dX,delta)
    assert(all(size(X)==size(dX)));
    if ~iscell(X)
        C = X + delta*dX;
    else
        C = cell(size(X));
        for i=1:numel(X)
            C{i} = cellstep(X{i},dX{i},delta);
        end
    end
end

function [R,D] = recover(cX1,cX2,delta)
    assert(all(size(cX1)==size(cX2)));
    if ~iscell(cX1)
        R = (cX1+cX2)/2;
        D = (cX1-cX2)/(2*delta);
    else
        R = cell(size(cX1));
        D = cell(size(cX1));
        for i=1:numel(cX1)
            [R{i},D{i}] = recover(cX1{i},cX2{i},delta);
        end
    end
end

function X = cellmask(X,mask)
    if ~iscell(X)
        assert(length(mask)==1);
        X = X*mask;
    else
        for i=1:numel(X)
            X{i} = cellmask(X{i},mask{i});
        end
    end
end

function dot = celldot(X,Y)
    assert(all(size(X)==size(Y)));
    if ~iscell(X)
        dot = X(:).' * Y(:);
    else
        dot = 0;
        for i=1:numel(X)
            dot = dot + celldot(X{i},Y{i});
        end
    end
end
github
bsxfan/meta-embeddings-master
testBackprop_multi.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/test/testBackprop_multi.m
2,361
utf_8
ced4a5b7e925d5c00a2d991fb34d012c
function total = testBackprop_multi(block,nout,X,mask)
% same as testBackprop, but handles multiple outputs

if ~iscell(X)
    X = {X};
end

dX = cellrndn(X);
if exist('mask','var')
    assert(length(mask)==length(X));
    dX = cellmask(dX,mask);
end

cX = cellcomplex(X,dX);

DX = cell(size(X));
Y = cell(1,nout);
[Y{:},back] = block(X{:});
DY = cell(size(Y));
for i=1:numel(DY)
    DY{i} = randn(size(Y{i}));
end
[DX{:}] = back(DY{:});  %DX = J'*DY

dot1 = celldot(DX,dX);  %dX' * J' * DY

cY = cell(1,nout);
Y2 = cell(1,nout);
dY2 = cell(1,nout);
[cY{:}] = block(cX{:});
for i=1:numel(cY)
    [Y2{i},dY2{i}] = recover(cY{i});  %dY2 = J*dX
end

dot2 = celldot(DY,dY2);  %DY' * J* DX

Y_diff = 0;
for i=1:nout
    Y_diff = Y_diff + max(abs(Y{i}(:)-Y2{i}(:)));
end
Y_diff,
jacobian_diff = abs(dot1-dot2),

total = Y_diff + jacobian_diff;
if total < 1e-6
    fprintf('\ntotal error=%g\n',total);
else
    fprintf(2,'\ntotal error=%g\n',total);
end

end

function R = cellrndn(X)
    if ~iscell(X)
        R = randn(size(X));
    else
        R = cell(size(X));
        for i=1:numel(X)
            R{i} = cellrndn(X{i});
        end
    end
end

function X = cellmask(X,mask)
    if ~iscell(X)
        assert(length(mask)==1);
        X = X*mask;
    else
        for i=1:numel(X)
            X{i} = cellmask(X{i},mask{i});
        end
    end
end

function C = cellcomplex(X,dX)
    assert(all(size(X)==size(dX)));
    if ~iscell(X)
        C = complex(X,1e-20*dX);
    else
        C = cell(size(X));
        for i=1:numel(X)
            C{i} = cellcomplex(X{i},dX{i});
        end
    end
end

function [R,D] = recover(cX)
    if ~iscell(cX)
        R = real(cX);
        D = 1e20*imag(cX);
    else
        R = cell(size(cX));
        D = cell(size(cX));
        for i=1:numel(cX)
            [R{i},D{i}] = recover(cX{i});
        end
    end
end

function dot = celldot(X,Y)
    assert(all(size(X)==size(Y)));
    if ~iscell(X)
        dot = X(:).' * Y(:);
    else
        dot = 0;
        for i=1:numel(X)
            dot = dot + celldot(X{i},Y{i});
        end
    end
end
github
bsxfan/meta-embeddings-master
testBackprop.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/test/testBackprop.m
2,015
utf_8
fc73fa404f5441c097eb63f249106078
function total = testBackprop(block,X,mask)

if ~iscell(X)
    X = {X};
end

dX = cellrndn(X);
if exist('mask','var')
    assert(length(mask)==length(X));
    dX = cellmask(dX,mask);
end

cX = cellcomplex(X,dX);

DX = cell(size(X));
[Y,back] = block(X{:});
DY = randn(size(Y));
[DX{:}] = back(DY);  %DX = J'*DY

dot1 = celldot(DX,dX);  %dX' * J' * DY

cY = block(cX{:});
[Y2,dY2] = recover(cY);  %dY2 = J*dX

dot2 = DY(:).'*dY2(:);  %DY' * J* DX

Y_diff = max(abs(Y(:)-Y2(:))),
jacobian_diff = abs(dot1-dot2),

total = Y_diff + jacobian_diff;
if total < 1e-6
    fprintf('\ntotal error=%g\n',total);
else
    fprintf(2,'\ntotal error=%g\n',total);
end

end

function R = cellrndn(X)
    if ~iscell(X)
        R = randn(size(X));
    else
        R = cell(size(X));
        for i=1:numel(X)
            R{i} = cellrndn(X{i});
        end
    end
end

function X = cellmask(X,mask)
    if ~iscell(X)
        assert(length(mask)==1);
        X = X*mask;
    else
        for i=1:numel(X)
            X{i} = cellmask(X{i},mask{i});
        end
    end
end

function C = cellcomplex(X,dX)
    assert(all(size(X)==size(dX)));
    if ~iscell(X)
        C = complex(X,1e-20*dX);
    else
        C = cell(size(X));
        for i=1:numel(X)
            C{i} = cellcomplex(X{i},dX{i});
        end
    end
end

function [R,D] = recover(cX)
    if ~iscell(cX)
        R = real(cX);
        D = 1e20*imag(cX);
    else
        R = cell(size(cX));
        D = cell(size(cX));
        for i=1:numel(cX)
            [R{i},D{i}] = recover(cX{i});
        end
    end
end

function dot = celldot(X,Y)
    assert(all(size(X)==size(Y)));
    if ~iscell(X)
        dot = X(:).' * Y(:);
    else
        dot = 0;
        for i=1:numel(X)
            dot = dot + celldot(X{i},Y{i});
        end
    end
end
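A minimal usage sketch with a hypothetical elementwise-square block; deal packs the two outputs the harness expects, and the returned handle implements DX = J'*DY:

% Hypothetical block: Y = X.^2, with back(DY) = 2*X.*DY (i.e. J'*DY).
block = @(X) deal(X.^2, @(DY) 2*X.*DY);
testBackprop(block, randn(4,3));  % prints a total error near machine precision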
github
bsxfan/meta-embeddings-master
create_truncGMM.m
.m
meta-embeddings-master/code/Niko/matlab/stochastic_clustering/create_truncGMM.m
11,422
utf_8
9bf418136c7994e16d5fb77486fd9bac
function model = create_truncGMM(W,F,alpha,m)
% This is a truncated version of a DP mixture model, with a specified maximum number of
% components. The observations are related to the hidden cluster variables
% like in an SPLDA model. The hidden variable for cluster i is z_i in R^d.
% The observations, x_j are in R^D, where D >= d. The prior for the z_i is
% IID: N(z_i | 0, I). The observations that belong to cluster i are
% conditionally IID: N(x_j | F z_i, W^{-1} ). The SPLDA model parameters
% are:
%   F: D-by-d, factor loading matrix
%   W: D-by-D, within cluster precision (inverse covariance)
%
% The other parameters are for the symmetric Dirichlet prior on mixture
% weights, which has parameters alpha and m, where m is the maximum number
% of mixture components: weights ~ Dir(alpha,m).
% More generally, alpha may be an m-by-1 vector, for a non-symmetric Dirichlet
% weight prior.

if nargin==0
    test_this();
    return;
end

cholW = chol(W);
dim = size(W,1);
alpha = alpha(:);

E = F'*W*F;        %meta-embedding precision (before diagonalization)
[V,Lambda] = eig(E);  %E = V*Lambda*V'
P = V.'*(F.'*W);   % projection to extract 1st-order meta-embedding stats
Lambda = diag(Lambda);
% The diagonal Lambda is the meta-embedding precision after
% diagonalization.
% We now have the likelihood, or meta-embedding:
%   P(x | z) \propto exp[ z'Px - 1/2 z' Lambda z ], where z is the
%   hidden variable after diagonalization.
%
% The (normal) posterior for z, given n observations {x_j} has natural
% parameters:
%   sum_j P x_j  and  I + n Lambda

FV = F*V;  %projects from diagonalized Z to cluster means

A = [];
logThresh = log(1e-10);

model.sampleData = @sampleData;
model.sampleWeights = @sampleWeights;
model.sampleZ = @sampleZ;
model.sampleLabels = @sampleLabels;
model.setData = @setData;
model.label_log_posterior = @label_log_posterior;
model.Means_given_labels = @Means_given_labels;
model.fullGibbs_iteration = @fullGibbs_iteration;
model.collapsedGibbs_iteration = @collapsedGibbs_iteration;
model.mfvb_iteration = @mfvb_iteration;

    function setData(X)
        A = P*X;
    end

    % function Means = Z2Means(Z)
    %     Means = FV*Z;
    % end

    %hlabels: m-by-n
    %Mu: D-by-m, posterior means for m cluster centroids
    %counts: 1-by-m, cluster occupancy counts (soft if hlabels is soft)
    %Q: posterior covariance for cluster i is: F*V*inv(diag(Q(:,i)))*V'*F.'
    function [Mu,Q,counts] = Means_given_labels(hlabels)
        if ~islogical(hlabels)
            [m,n] = size(hlabels);
            [~,L] = max(log(hlabels)-log(-log(rand(m,n))),[],1);
            hlabels = sparse(L,1:n,true,m,n);
        end
        counts = full(sum(hlabels,2));  %m-by-1
        Q = 1 + Lambda*counts.';        % d-by-m
        Zhat = (A*hlabels.') ./ Q;      %d-by-m
        Mu = FV*Zhat;
    end

    % hlabels (one hot columns), m-by-n
    % A = P*X, d-by-n
    function Z = sampleZ(hlabels,counts)
        %counts = full(sum(hlabels,2));  %m-by-1
        Q = 1 + Lambda*counts.';        % d-by-m
        Zhat = (A*hlabels.') ./ Q;      %d-by-m
        d = size(A,1);
        Z = Zhat + randn(d,m) ./ sqrt(Q);
    end

    function [weights,counts] = sampleWeights(hlabels)
        counts = sum(hlabels,2);
        weights = randDirichlet(alpha+counts,m,1);
    end

    % Z: d-by-m
    % A: d-by-n
    % weights: m-by-1
    function hlabels = sampleLabels(Z,weights)
        n = size(A,2);
        Gumbel = -log(-log(rand(m,n)));
        %ZLambdaZ = sum(Z.*bsxfun(@times,Lambda,Z),1);  % m-by-1
        ZLambdaZ = Lambda.'*Z.^2;  % m-by-1
        Score = bsxfun(@plus,log(weights)-ZLambdaZ.'/2,Z.'*A);  %m-by-n
        [~,L] = max(Gumbel+Score,[],1);
        hlabels = sparse(L,1:n,true,m,n);
    end

    function hlabels = fullGibbs_iteration(hlabels)
        [weights,counts] = sampleWeights(hlabels);
        Z = sampleZ(hlabels,counts);
        hlabels = sampleLabels(Z,weights);
    end

    % hlabels (one hot columns), m-by-n
    % A = P*X, d-by-n
    function [Zhat,Q] = mfvb_Z(respbilties,counts)
        %counts = sum(respbilties,2);  %m-by-1
        Q = 1 + Lambda*counts.';      % d-by-m
        Zhat = (A*respbilties.') ./ Q;  %d-by-m
    end

    function [post_alpha,counts] = mfvb_Weights(respbilties)
        counts = sum(respbilties,2);
        post_alpha = alpha+counts;
    end

    % Z: d-by-m
    % A: d-by-n
    % weights: m-by-1
    function respbilties = mfvb_Labels(Zhat,Q,post_alpha)
        ZLambdaZ = Lambda.'*Zhat.^2 + sum(bsxfun(@rdivide,Lambda,Q),1);  % expected value
        log_weights = psi(post_alpha) - psi(sum(post_alpha));            % expected value
        R = bsxfun(@plus,log_weights-ZLambdaZ.'/2,Zhat.'*A);  %m-by-n
        mx = max(R,[],1);
        R = bsxfun(@minus,R,mx);
        R(R<logThresh) = -inf;
        R = exp(R);
        R = bsxfun(@rdivide,R,sum(R,1));
        respbilties = R;
    end

    function respbilties = mfvb_iteration(respbilties)
        [post_alpha,counts] = mfvb_Weights(respbilties);
        [Zhat,Q] = mfvb_Z(respbilties,counts);
        respbilties = mfvb_Labels(Zhat,Q,post_alpha);
    end

    function logPrior = label_log_prior(counts)
    % Compute P(labels) by marginalizing over hidden weights. The output is
    % in the form of un-normalized log probability. We use the
    % candidate's formula:
    %   P(labels) = P(labels|weights) P(weights) / P(weights | labels)
    % where we conveniently set weights = 1/m. We ignore P(weights),
    % because we do not compute the normalizer. Since weights are uniform,
    % we can also ignore P(labels|weights). We need to compute
    % P(weights | labels) = Dir(weights | alpha + counts), with the
    % normalizer, which is a function of the counts (and the labels).
        logPrior = - logDirichlet(ones(m,1)/m,counts+alpha);  % - log P(weights | labels)
    end

    function [llh,counts] = label_log_likelihood(hlabels)
        AL = A*hlabels.';         % d-by-m
        counts = sum(hlabels,2);  %m-by-1
        Q = Lambda*counts.';      % d-by-m
        logdetQ = sum(log1p(Q),1);
        Q = 1 + Q;     %centroid posterior precisions
        Mu = AL ./ Q;  %centroid posterior means
        llh = (Mu(:).'*AL(:) - sum(logdetQ,2))/2;
    end

    function [logP,counts] = label_log_posterior(hlabels)
        if ~islogical(hlabels)
            [m,n] = size(hlabels);
            [~,L] = max(log(hlabels)-log(-log(rand(m,n))),[],1);
            hlabels = sparse(L,1:n,true,m,n);
        end
        [logP,counts] = label_log_likelihood(hlabels);
        logP = logP + label_log_prior(counts);
    end

    function hlabels = collapsedGibbs_iteration(hlabels)
        n = size(A,2);
        for j=1:n
            hlabels(:,j) = false;
            AL0 = A*hlabels.';         % d-by-m
            counts0 = sum(hlabels,2);  %m-by-1
            nB0 = Lambda*counts0.';    % d-by-m
            counts1 = counts0+1;
            AL1 = bsxfun(@plus,AL0,A(:,j));
            nB1 = bsxfun(@plus,nB0,Lambda);
            logdetQ0 = sum(log1p(nB0),1);
            logdetQ1 = sum(log1p(nB1),1);
            K0 = sum(AL0.^2./(1+nB0),1);
            K1 = sum(AL1.^2./(1+nB1),1);
            llh = (K1 - K0 - logdetQ1 + logdetQ0)/2;
            logPrior0 = gammaln(counts0+alpha);
            logPrior1 = gammaln(counts1+alpha);
            logPost = llh + (logPrior1 - logPrior0).';
            [~,c] = max(logPost - log(-log(rand(1,m))),[],2);
            hlabels(c,j) = true;
        end
    end

    function hlabels = collapsedGibbs_slow(hlabels)
        n = size(A,2);
        for j = 1:n
            logP = zeros(m,1);
            hlabels(:,j) = false;
            for i=1:m
                hlabels(i,j) = true;
                logP(i) = label_log_posterior(hlabels);
                hlabels(i,j) = false;
            end
            [~,c] = max(logP-log(-log(rand(m,1))),[],1);
            hlabels(c,j) = true;
        end
    end

    function [X,Means,Z,weights,hlabels] = sampleData(n)
        weights = randDirichlet(alpha,m,1);
        Z = randn(dim,m);
        Means = F*Z;
        [~,labels] = max(bsxfun(@minus,log(weights(:)),log(-log(rand(m,n)))),[],1);
        hlabels = sparse(labels,1:n,true,m,n);
        X = cholW\randn(dim,n) + Means*hlabels;
    end

end

function P = sampleP(dim,tame)
    R = rand(dim,dim-1)/tame;
    P = eye(dim) + R*R.';
end

function test_this()
    dim = 2;
    tame = 10;
    sep = 500;    %increase to move clusters further apart in simulated data
    small = false;
    alpha0 = 10;  %increase to get more clusters
    if small
        n = 8;
        m = 20;
    else
        n = 1000;
        m = 100;
    end
    alpha = alpha0/m;
    W = sep*sampleP(dim,tame);
    B = sampleP(dim,tame);
    F = inv(chol(B));
    model = create_truncGMM(W,F,alpha,m);
    [X,Means,Z,weights,truelabels] = model.sampleData(n);
    counts = full(sum(truelabels,2).');
    nz = counts>0;
    nzcounts = counts(nz)

    close all;

    cg_labels = sparse(randi(m,1,n),1:n,true,m,n);  %random label init
    %cg_labels = sparse(ones(1,n),1:n,true,m,n);    %single cluster init
    bg_labels = cg_labels;
    mf_labels = cg_labels;

    model.setData(X);

    niters = 1000;
    cg_delta = zeros(1,niters);
    bg_delta = zeros(1,niters);
    mf_delta = zeros(1,niters);
    mft = 0;
    bgt = 0;
    cgt = 0;
    cg_wct = zeros(1,niters);
    bg_wct = zeros(1,niters);
    mf_wct = zeros(1,niters);
    oracle = model.label_log_posterior(truelabels);
    for i=1:niters
        tic;
        bg_labels = model.fullGibbs_iteration(bg_labels);
        bgt = bgt + toc;
        bg_wct(i) = bgt;

        tic;
        cg_labels = model.collapsedGibbs_iteration(cg_labels);
        cgt = cgt + toc;
        cg_wct(i) = cgt;

        tic;
        mf_labels = model.mfvb_iteration(mf_labels);
        mft = mft + toc;
        mf_wct(i) = mft;

        cg_delta(i) = model.label_log_posterior(cg_labels) - oracle;
        bg_delta(i) = model.label_log_posterior(bg_labels) - oracle;
        mf_delta(i) = model.label_log_posterior(mf_labels) - oracle;

        [bgMu,~,bg_counts] = model.Means_given_labels(bg_labels);
        [cgMu,~,cg_counts] = model.Means_given_labels(cg_labels);
        [mfMu,~,mf_counts] = model.Means_given_labels(mf_labels);
        bg_nz = bg_counts>0;
        cg_nz = cg_counts>0;
        mf_nz = mf_counts>0;

        subplot(2,2,1);plot(X(1,:),X(2,:),'.b',Means(1,nz),Means(2,nz),'*r',bgMu(1,bg_nz),bgMu(2,bg_nz),'*g');title('full Gibbs');
        subplot(2,2,2);plot(X(1,:),X(2,:),'.b',Means(1,nz),Means(2,nz),'*r',cgMu(1,cg_nz),cgMu(2,cg_nz),'*g');title('collapsed Gibbs');
        subplot(2,2,3);plot(X(1,:),X(2,:),'.b',Means(1,nz),Means(2,nz),'*r',mfMu(1,mf_nz),mfMu(2,mf_nz),'*g');title('mean field VB');
        subplot(2,2,4);semilogx(cg_wct(1:i),cg_delta(1:i),...
                                bg_wct(1:i),bg_delta(1:i),...
                                mf_wct(1:i),mf_delta(1:i));
        xlabel('wall clock');ylabel('log P(sampled labels) - log P(true labels)')
        legend('clpsd Gibbs','full Gibbs','mfVB','Location','SouthEast');
        pause(0.1);
    end
end
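A minimal usage sketch of the sampler loop (tiny illustrative sizes; assumes the randDirichlet and logDirichlet helpers from this snapshot are on the path):

% Illustrative 2-D model with at most m=10 components.
dim = 2; m = 10; alpha = 0.5;
W = eye(dim); F = eye(dim);
model = create_truncGMM(W,F,alpha,m);
[X,~,~,~,truelabels] = model.sampleData(200);
model.setData(X);
hlabels = sparse(randi(m,1,200),1:200,true,m,200);  % random init
for iter = 1:20
    hlabels = model.collapsedGibbs_iteration(hlabels);
end
model.label_log_posterior(hlabels)  % compare with model.label_log_posterior(truelabels)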
github
bsxfan/meta-embeddings-master
randg.m
.m
meta-embeddings-master/code/Niko/matlab/stochastic_clustering/randg.m
2,448
utf_8
bf1edfa92d41e108e3e1e2ef55461799
function G = randg(alpha,m,n)
% Generates an m-by-n matrix of random gamma variates, having scale = 1
% and shape alpha.
% Inputs:
%   alpha: scalar, vector or matrix
%   m,n: [optional] size of output matrix. If not given, the size is the
%        same as that of alpha. If given, then alpha should be m-by-n, or an
%        m-vector, or an n-row.
% Output:
%   G: m-by-n matrix of gamma variates
%
%
% Uses the method of:
%   Marsaglia & Tsang, "A Simple Method for Generating Gamma Variables",
%   2000, ACM Transactions on Mathematical Software. 26(3).
%
% See also:
%   https://en.wikipedia.org/wiki/Gamma_distribution#Generating_gamma-
%   distributed_random_variables
%
% The speed is roughly independent of alpha.

if nargin==0
    test_this();
    return;
end

if exist('m','var')
    assert(exist('n','var')>0,'illegal argument combination');
    [mm,nn] = size(alpha);
    if isscalar(alpha)
        alpha = alpha*ones(m,n);
    elseif nn==1 && mm==m && n>1
        alpha = repmat(alpha,1,n);
    elseif mm==1 && nn==n && m>1
        alpha = repmat(alpha,m,1);
    else
        assert(m==mm && n==nn,'illegal argument combination');
    end
else
    [m,n] = size(alpha);
end

N = m*n;
alpha = reshape(alpha,1,N);
G = zeros(1,N);
req = 1:N;

small = alpha < 1;
if any(small)
    ns = sum(small);
    sa = alpha(small);
    G(small) = randg(1+sa,1,ns).*rand(1,ns).^(1./sa);
    req(small) = [];
end

nreq = length(req);
d = alpha(req) - 1/3;
c = 1./sqrt(9*d);
while nreq>0
    x = randn(1,nreq);
    v = (1+c.*x).^3;
    u = rand(1,nreq);
    ok = ( v>0 ) & ( log(u) < x.^2/2 + d.*(1 - v + log(v)) );
    G(req(ok)) = d(ok).*v(ok);
    d(ok) = [];
    c(ok) = [];
    req(ok) = [];
    nreq = length(req);
    % This version works nicely, but can be made faster (fewer logs),
    % at the cost of slightly more complex code---see the paper.
end
G = reshape(G,m,n);
end

function test_this()
alpha = [0.1,1,10];
G = randg(repmat(alpha,10000,1));
mu = mean(G,1);
v = mean(bsxfun(@minus,G,alpha).^2,1);
[alpha;mu;v]

alpha = pi;
G = randg(alpha,10000,1);
mu = mean(G,1);
v = mean(bsxfun(@minus,G,alpha).^2,1);
[alpha;mu;v]
end
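One common use of unit-scale gamma variates, sketched here for illustration: Dirichlet samples are normalized gammas. This is the standard construction (the snapshot's randDirichlet helper, referenced elsewhere in this section, is presumably built the same way, but that is an assumption):

% Draw one sample from Dirichlet(a_1,...,a_K) by normalizing gamma variates.
a = [2; 0.5; 1];  % illustrative concentration vector
g = randg(a);     % K independent Gamma(a_k, 1) variates
w = g/sum(g);     % w lies on the simplex, w ~ Dir(a)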
github
bsxfan/meta-embeddings-master
asEig_svd.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/asEig_svd.m
2,468
utf_8
9b7e0b63ebc89c1d50847ba58f54e7d1
function CA = asEig_svd(A)

if nargin==0
    test_this();
    return;
end

if isreal(A)
    [V,D] = eig(A);  %V*D*V' = A
    D = diag(D);
    r = true;
else
    [U,S,V] = svd(A);  % U*S*V' = A
    S = diag(S);
    r = false;
end

dim = size(A,1);

CA.logdet = @logdet;
CA.solve = @solve;

    function [y,back] = logdet()
        if r
            y = sum(log(D));
        else
            y = sum(log(S));
        end
        back = @(dy) solve(dy*speye(dim));
    end

    function [Y,back] = solve(RHS)
        if r
            Y = V*bsxfun(@ldivide,D,V.'*RHS);
        else
            Y = V*bsxfun(@ldivide,S,U.'*RHS);
        end
        back = @(dY) back_solve(dY,Y);
    end

    function Y = solveT(RHS)  %A'\RHS, for LU case
        Y = U*bsxfun(@ldivide,S,V.'*RHS);
    end

    function [dRHS,dA] = back_solve(dY,Y)
        if r
            dRHS = solve(dY);
            if nargout >= 2
                dA = (-dRHS)*Y.';
            end
        else
            dRHS = solveT(dY);
            if nargout >= 2
                dA = (-dRHS)*Y.';
            end
        end
    end

end

function [y,back] = logdettestfun(A)
    CA = asEig_svd(A*A.');
    [y,back1] = CA.logdet();
    sym = @(DY) DY + DY.';
    back = @(dy) sym(back1(dy))*A;
end

function [Y,back] = solvetestfun(RHS,A)
    CA = asEig_svd(A*A.');
    [Y,back1] = CA.solve(RHS);
    back = @(dY) back_solvetestfun(dY);
    function [dRHS,dA] = back_solvetestfun(dY)
        [dRHS,dAA] = back1(dY);
        dA = (dAA+dAA.')*A;
    end
end

function test_this()
    fprintf('Test function values:\n');
    dim = 5;
    RHS = rand(dim,1);
    A = randn(dim); A = A*A';
    CA = asEig_svd(A);
    [log(det(A)),CA.logdet()]
    [A\RHS,CA.solve(RHS)]

    A = complex(randn(dim),zeros(dim));
    CA = asEig_svd(A);
    [log(abs(det(A))),CA.logdet()]
    [A\RHS,CA.solve(RHS)]

    A = randn(dim,2*dim); A = A*A';

    fprintf('\n\n\nTest logdet backprop (complex step) :\n');
    testBackprop(@logdettestfun,A);

    fprintf('\n\n\nTest logdet backprop (real step) :\n');
    testBackprop_rs(@logdettestfun,A,1e-4);

    fprintf('\n\n\nTest solve backprop (complex step) :\n');
    testBackprop(@solvetestfun,{RHS,A},{1,1});

    fprintf('\n\n\nTest solve backprop (real step) :\n');
    testBackprop_rs(@solvetestfun,{RHS,A},1e-4,{1,1});
end
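A minimal usage sketch (illustrative matrix; logdet and solve also return backpropagation handles, which are unused here):

A = randn(4); A = A*A';    % symmetric positive definite example
CA = asEig_svd(A);
ld = CA.logdet();          % matches log(det(A))
x = CA.solve(ones(4,1));   % matches A\ones(4,1)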
github
bsxfan/meta-embeddings-master
SGME_MXE.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/SGME_MXE.m
2,114
utf_8
829ff4b78c816bad28ac1dd5db3afbb8
function [y,back] = SGME_MXE(A,B,D,As,Bs,labels,logPrior)
% Multiclass cross-entropy for diagonalized SGMEs: each of the n
% recordings (A,B) is scored against m speaker meta-embeddings (As,Bs),
% plus a "none of the above" class carried by logPrior(m+1).

if nargin==0
  test_this();
  return;
end

dA = zeros(size(A));
dB = zeros(size(B));
dD = zeros(size(D));
dAs = zeros(size(As));
dBs = zeros(size(Bs));

[LEc,back1] = SGME_logexpectation(A,B,D);
[LEs,back2] = SGME_logexpectation(As,Bs,D);
dLEc = zeros(size(LEc));
dLEs = zeros(size(LEs));

m = length(LEs);   % #speakers
n = length(LEc);   % #recordings
scal = 1/(n*log(m+1));

logPost = zeros(m+1,1);
logPost(m+1) = logPrior(m+1);
y = 0;
for j=1:n
  AA = bsxfun(@plus,As,A(:,j));
  BB = bsxfun(@plus,Bs,B(:,j));
  [LEboth,back3] = SGME_logexpectation(AA,BB,D);
  logPost(1:m) = logPrior(1:m) + LEboth.' - LEs.' - LEc(j);
  [yj,back4] = sumlogsoftmax(logPost,labels(j));
  y = y - yj;

  dlogPost = back4(-1);
  dLEs = dLEs - dlogPost(1:m).';
  dLEc(j) = dLEc(j) - sum(dlogPost(1:m));
  dLEboth = dlogPost(1:m).';
  [dAA,dBB,dDj] = back3(dLEboth);
  dD = dD + dDj;
  dAs = dAs + dAA;
  dBs = dBs + dBB;
  dA(:,j) = sum(dAA,2);
  dB(:,j) = sum(dBB,2);
end
y = y*scal;

back = @(dy) back_this(dy,dA,dB,dD,dAs,dBs);

  function [dA,dB,dD,dAs,dBs] = back_this(dy,dA,dB,dD,dAs,dBs)
    %[LEc,back1] = SGME_logexpectation(A,B,D);
    %[LEs,back2] = SGME_logexpectation(As,Bs,D).';
    [dA1,dB1,dD1] = back1(dLEc);
    [dAs2,dBs2,dD2] = back2(dLEs);
    dA = (dy*scal) * (dA + dA1);
    dB = (dy*scal) * (dB + dB1);
    dD = (dy*scal) * (dD + dD1 + dD2);
    dAs = (dy*scal) * (dAs + dAs2);
    dBs = (dy*scal) * (dBs + dBs2);
  end

end

function test_this()

m = 3;
n = 5;
dim = 2;
A = randn(dim,n);
As = randn(dim,m);
B = rand(1,n);
Bs = rand(1,m);
D = rand(dim,1);
logPrior = randn(m+1,1);
labels = randi(m,1,n);

f = @(A,B,D,As,Bs) SGME_MXE(A,B,D,As,Bs,labels,logPrior);
testBackprop(f,{A,B,D,As,Bs});

end
github
bsxfan/meta-embeddings-master
SGME_train.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/SGME_train.m
2,349
utf_8
875c864d98e47717be58a0d88a2550ab
function model = SGME_train(R,labels,nu,zdim,niters,test)
% Trains an SGME extractor (P,H,d) by maximizing the label
% pseudo-likelihood (SGME_logPsL) with L-BFGS.

if nargin==0
  test_this();
  return;
end

[rdim,n] = size(R);
m = max(labels);
blocks = sparse(labels,1:n,true,m+1,n);
num = find(blocks(:));

%Can we choose maximum likelihood prior parameters, given labels?
%For now: prior expected number of speakers = m
prior = create_PYCRP([],0,m,n);
logPrior = prior.GibbsMatrix(labels);

delta = rdim - zdim;
assert(delta>0);

%initialize
P0 = randn(zdim,rdim);
H0 = randn(delta,rdim);
sqrtd0 = rand(zdim,1);

szP = numel(P0);
szH = numel(H0);

w0 = pack(P0,H0,sqrtd0);

if exist('test','var') && test
  testBackprop(@objective,w0);
  return;
end

mem = 20;
stpsz0 = 1e-3;
timeout = 5*60;
w = L_BFGS(@objective,w0,niters,timeout,mem,stpsz0);

[P,H,sqrtd] = unpack(w);
d = sqrtd.^2;

model.logexpectation = @(A,b) SGME_logexpectation(A,b,d);
model.extract = @(R) SGME_extract(P,H,nu,R);
model.objective = @(P,H,d) objective(pack(P,H,d));
model.d = d;

  function w = pack(P,H,d)
    w = [P(:);H(:);d(:)];
  end

  function [P,H,d] = unpack(w)
    at = 1:szP;
    P = reshape(w(at),zdim,rdim);
    at = szP + (1:szH);
    H = reshape(w(at),delta,rdim);
    at = szP + szH + (1:zdim);
    d = w(at);
  end

  function [y,back] = objective(w)
    [P,H,sqrtd] = unpack(w);
    [A,b,back1] = SGME_extract(P,H,nu,R);
    d = sqrtd.^2;
    [PsL,back2] = SGME_logPsL(A,b,d,blocks,labels,num,logPrior);
    y = -PsL;
    back = @back_this;

    function [dw] = back_this(dy)
      %dPsL = -dy;
      [dA,db,dd] = back2(-dy);
      dsqrtd = 2*sqrtd.*dd;
      [dP,dH] = back1(dA,db);
      dw = pack(dP,dH,dsqrtd);
    end
  end

end

function test_this()

zdim = 2;
rdim = 4;
n = 5;
m = 3;
prior = create_PYCRP([],0,m,n);
labels = prior.sample(n);

nu = pi;
R = randn(rdim,n);

test = true;
niters = [];
SGME_train(R,labels,nu,zdim,niters,test);

end
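A minimal training sketch, mirroring the embedded test above (sizes are illustrative; create_PYCRP, SGME_extract and SGME_logexpectation come from this same repository):

zdim = 2; rdim = 20; n = 1000; m = 100;
prior = create_PYCRP([],0,m,n);    % CRP with expected m speakers among n
labels = prior.sample(n);
R = randn(rdim,n);                 % stand-in for real i-vectors
model = SGME_train(R,labels,pi,zdim,500);   % nu = pi, as in test_this
[A,b] = model.extract(R);          % meta-embedding natural parameters
LE = model.logexpectation(A,b);    % per-recording log-expectations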
github
bsxfan/meta-embeddings-master
scaled_GME_precision.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/scaled_GME_precision.m
2,566
utf_8
59c037444c1e57e933d5346bc36263b6
function [SGMEP,meand] = scaled_GME_precision(B)

if nargin==0
  test_this();
  return;
end

dim = size(B,1);
[V,D] = eig(B);   % B = VDV'
d = diag(D);
meand = mean(d);
%D = sparse(D);
%I = speye(dim);

SGMEP.logdet = @logdet;
SGMEP.solve = @solve;

  function [y,back] = logdet(beta)
    betad = bsxfun(@times,beta,d);
    y = sum(log1p(betad),1);
    back = @(dy) dy*sum(d./(1+betad),1);
  end

  function [Y,back] = solve(RHS,beta)
    betad = beta*d;
    Y = V*bsxfun(@ldivide,betad+1,V.'*RHS);
    back = @(dY) back_solve(dY,Y,beta);
  end

  function [dRHS,dbeta] = back_solve(dY,Y,beta)
    dRHS = solve(dY,beta);
    if nargout >= 2
      %dA = (-dRHS)*Y.';
      %dbeta = trace(dA*B.');
      dbeta = -trace(Y.'*B.'*dRHS);
    end
  end

end

function [y,back] = logdettestfun(SGMEP,gamma)
  beta = gamma^2;
  [y,back1] = SGMEP.logdet(beta);
  back = @(dy) 2*gamma*back1(dy);
end

function [Y,back] = solvetestfun(SGMEP,RHS,gamma)
  beta = gamma^2;
  [Y,back1] = SGMEP.solve(RHS,beta);
  back = @(dY) back_solvetestfun(dY);
  function [dRHS,dgamma] = back_solvetestfun(dY)
    [dRHS,dbeta] = back1(dY);
    dgamma = 2*gamma*dbeta;
  end
end

function test_this()

close all;

fprintf('Test function values:\n');
dim = 5;
RHS = rand(dim,1);
%R = randn(dim,floor(1.1*dim));B = R*R.';B = B/trace(B);
R = randn(dim,dim);B = R*R.';B = B/trace(B);
I = eye(dim);
[SGMEP,meand] = scaled_GME_precision(B);

beta = rand/rand;
[log(det(I+beta*B)),SGMEP.logdet(beta)]
[(I+beta*B)\RHS,SGMEP.solve(RHS,beta)]

doplot = false;
if doplot
  beta = 0.01:0.01:200;
  y = zeros(size(beta));
  for i=1:length(beta)
    y(i) = SGMEP.logdet(beta(i));
  end
  1/meand
  plot(log(1/meand+beta),y);
end

gamma = rand/rand;

fprintf('\n\n\nTest logdet backprop (complex step) :\n');
testBackprop(@(gamma) logdettestfun(SGMEP,gamma),gamma);

fprintf('\n\n\nTest logdet backprop (real step) :\n');
testBackprop_rs(@(gamma) logdettestfun(SGMEP,gamma),gamma,1e-4);

fprintf('\n\n\nTest solve backprop (complex step) :\n');
testBackprop(@(RHS,gamma) solvetestfun(SGMEP,RHS,gamma),{RHS,gamma},{1,1});

fprintf('\n\n\nTest solve backprop (real step) :\n');
testBackprop_rs(@(RHS,gamma) solvetestfun(SGMEP,RHS,gamma),{RHS,gamma},1e-4,{1,1});

end
github
bsxfan/meta-embeddings-master
dsolve.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/dsolve.m
980
utf_8
8734dea4d3f28af88579fef7b106d892
function [Y,back] = dsolve(RHS,A)
% SOLVE: Y= A\RHS, with backpropagation into both arguments
%
% This is mostly for debugging purposes. It can be done more efficiently
% by caching a matrix factorization to re-use for derivative (and also for
% the determinant if needed).

if nargin==0
  test_this();
  return;
end

Y = A\RHS;
back = @back_this;

  function [dRHS,dA] = back_this(dY)
    dRHS = A.'\dY;   % A\dY = dsolve(dY,A) can be re-used for symmetric A
    if nargout>=2
      dA = -dRHS*Y.';
    end
  end

end

% function [Y,back] = IbetaB(beta,B)
%   dim = size(B,1);
%   Y = speye(dim)+beta*B;
%   back = @(dY) trace(dY*B.');
% end

function test_this()

m = 5;
n = 2;
A = randn(m,m);
RHS = randn(m,n);
testBackprop(@dsolve,{RHS,A});
testBackprop_rs(@dsolve,{RHS,A},1e-4);

% beta = rand/rand;
% testBackprop(@(beta) IbetaB(beta,A),{beta});

end
github
bsxfan/meta-embeddings-master
tme.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/tme.m
421
utf_8
337d862122de581dbec9c54a23086f6d
function y = tme(z,mu,W,nu)
% Log of the multivariate t kernel (up to an additive normalizing
% constant), with location mu, precision-like matrix W and nu degrees of
% freedom, evaluated at each column of z.

if nargin==0
  test_this();
  return;
end

Delta = bsxfun(@minus,z,mu);
q = sum(Delta.*(W*Delta),1);
dim = length(mu);
y = -(nu+dim)/2 * log1p(q/nu);

end

function test_this()

dim = 1;
W = 1;
z = -10:0.01:10;
mu1 = -5;
mu2 = 5;
nu = 1;
y1 = tme(z,mu2,W,nu);
y2 = tme(z,mu1,W,nu);
plot(z,y1,z,y2,z,y1+y2);

end
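For reference, the quantity returned above is the log of the multivariate t kernel; up to the omitted normalizing constant the density in dimension $d$ is

f(z) \propto \Big(1 + \frac{(z-\mu)^\top W (z-\mu)}{\nu}\Big)^{-(\nu+d)/2},
\qquad
\log f(z) = -\frac{\nu+d}{2}\,\log\!\Big(1 + \frac{q}{\nu}\Big) + \text{const},
\quad q = (z-\mu)^\top W (z-\mu).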
github
bsxfan/meta-embeddings-master
SGME_logexpectation_full.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/SGME_logexpectation_full.m
2,084
utf_8
77d3c0866095ed94d4c7340697b2a714
function [y,back] = SGME_logexpectation_full(A,b,B)
% log expected values (w.r.t. standard normal) of diagonalized SGMEs
% Inputs:
%   A: dim-by-n, natural parameters (precision *mean) for n SGMEs
%   b: 1-by-n, precision scale factors for these SGMEs
%   B: dim-by-dim, common precision (full) matrix factor
%
% Note:
%   A(:,j) , b(j)*B forms the meta-embedding for case j
%
% Outputs:
%   y: 1-by-n, log expectations
%   back: backpropagation handle, [dA,db,dB] = back(dy)

if nargin==0
  test_this();
  return;
end

[V,L] = eig(B);   %V*L*V' = B
L = diag(L);

bL = bsxfun(@times,b,L);
logdets = sum(log1p(bL),1);
bL1 = 1 + bL;
S = V*bsxfun(@ldivide,bL1,V.'*A);
Q = sum(A.*S,1);
y = (Q-logdets)/2;

back = @back_this;

  function [dA,db,dB] = back_this(dy)
    hdy = dy/2;
    dA = bsxfun(@times,hdy,S);
    dS = bsxfun(@times,hdy,A);
    dlogdets = -hdy;

    dA2 = V*bsxfun(@ldivide,bL1,V.'*dS);
    dA = dA + dA2;

    dBlogdet = V*bsxfun(@times,sum(bsxfun(@rdivide,b.*dlogdets,bL1),2),V.');
    dBsolve = bsxfun(@times,-b,dA2) * S.';
    dB = dBlogdet + (dBsolve+dBsolve.')/2;

    db = L.'*bsxfun(@ldivide,bL1,dlogdets) - sum(S.*(B*dA2),1);
  end

end

function test_this()

m = 3;
n = 5;

A = randn(m,n);
b = rand(1,n);
B = randn(m,m+1); B = B*B.';

fprintf('test function values:\n');
err = max(abs(SGME_logexpectation_full(A,b,B)-SGME_logexpectation_slow(A,b,B))),

fprintf('test derivatives:\n');
[y,back] = SGME_logexpectation_full(A,b,B);
dy = randn(size(y));
[dAf,dbf,dBf] = back(dy);
[~,back] = SGME_logexpectation_slow(A,b,B);
[dAs,dbs,dBs] = back(dy);
err_dA = max(abs(dAs(:)-dAf(:))),
err_db = max(abs(dbs(:)-dbf(:))),
err_dB = max(abs(dBs(:)-dBf(:))),

%neither complex, nor real step differentiation seem to work through eig()
%testBackprop(@SGME_logexpectation_full,{A,b,B},{1,0,1});

end
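The closed form used above is the standard Gaussian integral: for a meta-embedding with natural parameters $(a,\,bB)$, the expectation under $z \sim \mathcal{N}(0,I)$ is

\log \mathbb{E}\!\left[\exp\!\big(a^\top z - \tfrac{b}{2}\, z^\top B z\big)\right]
= \tfrac{1}{2}\left( a^\top (I + bB)^{-1} a \;-\; \log\det(I + bB) \right),

which is exactly y = (Q - logdets)/2 in the code, with the eigendecomposition of B used to apply (I+bB)^{-1} cheaply for all n cases at once.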
github
bsxfan/meta-embeddings-master
labels2blocks.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/labels2blocks.m
1,061
utf_8
fea6d1fe91a39552e2a103155fe96e8f
function [subsets,counts] = labels2blocks(labels)
% Inputs:
%   labels: n-vector with elements in 1..m, maps each of n customers to a
%           table number. There are m tables. Empty tables not allowed.
%
% Ouputs:
%   subsets: n-by-m logical, with one-hot rows
%   counts: m-vector, maps table number to customer count

if nargin==0
  test_this();
  return;
end

m = max(labels);     %m tables
n = length(labels);  %n customers

assert(min(labels)==1,'illegal argument ''labels'': tables must be consecutively numbered from 1');
assert(m <= n,'illegal argument ''labels'': there are more tables than customers');

subsets = bsxfun(@eq,1:m,labels(:));
%subsets = sparse(1:n,labels,true,n,m,n);
counts = sum(subsets,1);

assert(sum(counts)==n,'illegal argument ''labels'': table counts must add up to length(labels)');
assert(all(counts),'illegal argument ''labels'': empty tables not allowed');

end

function test_this()

labels = [1,1,2,3,3,3,4]
[subsets,counts] = labels2blocks(labels)

end
github
bsxfan/meta-embeddings-master
create_BXE_calculator.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/create_BXE_calculator.m
2,055
utf_8
494fcd9ff939f75d131309b403080ae5
function calc = create_BXE_calculator(log_expectations,prior,poi)

calc.BXE = @BXE;
calc.get_tar_non = @get_tar_non;

n = length(poi);
spoi = sparse(poi);
tar = bsxfun(@eq,spoi,spoi.');

ntar = 0;
nnon = 0;
for k=1:n-1
  jj = k+1:n;
  tari = full(tar(k,jj));
  ntari = sum(tari);
  ntar = ntar + ntari;
  nnon = nnon + length(jj) - ntari;
end

if isempty(prior)
  prior = ntar/(ntar+nnon);
end
plo = log(prior) - log1p(-prior);

  function y = BXE(A,B)
    LEc = log_expectations(A,B);
    yt = 0;
    yn = 0;
    for i=1:n-1
      jj = i+1:n;
      AA = bsxfun(@plus,A(:,i),A(:,jj));
      BB = bsxfun(@plus,B(:,i),B(:,jj));
      tari = full(tar(i,jj));
      LE2 = log_expectations(AA,BB);
      llr = LE2 - LEc(i) - LEc(jj);
      log_post = plo + llr;
      yt = yt + sum(softplus(-log_post(tari)));
      yn = yn + sum(softplus(log_post(~tari)));
    end
    y = prior*yt/ntar + (1-prior)*yn/nnon;
  end

  function [tars,nons] = get_tar_non(A,B)
    LEc = log_expectations(A,B);
    tars = zeros(1,ntar);
    nons = zeros(1,nnon);
    tcount = 0;
    ncount = 0;
    for i=1:n-1
      jj = i+1:n;
      AA = bsxfun(@plus,A(:,i),A(:,jj));
      BB = bsxfun(@plus,B(:,i),B(:,jj));
      tari = full(tar(i,jj));
      LE2 = log_expectations(AA,BB);
      llr = LE2 - LEc(i) - LEc(jj);

      llr_tar = llr(tari);
      count = length(llr_tar);
      tars(tcount+(1:count)) = llr_tar;
      tcount = tcount + count;

      llr_non = llr(~tari);
      count = length(llr_non);
      nons(ncount+(1:count)) = llr_non;
      ncount = ncount + count;
    end
  end

end

function y = softplus(x)
% y = log(1+exp(x));
y = x;
f = find(x<30);
y(f) = log1p(exp(x(f)));
end
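A minimal wiring sketch (all values illustrative; SGME_logexpectation is defined elsewhere in this repository):

n = 20;
labels = randi(3,1,n);                 % speaker label per recording
d = rand(2,1);                         % common diagonal precision
A = randn(2,n); B = rand(1,n);         % SGME natural parameters
log_expectations = @(A,B) SGME_logexpectation(A,B,d);
calc = create_BXE_calculator(log_expectations,[],labels);
bxe = calc.BXE(A,B)                    % pairwise binary cross-entropy
[tars,nons] = calc.get_tar_non(A,B);   % pairwise target/non-target LLRs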
github
bsxfan/meta-embeddings-master
PLDA_mixture_responsibilities.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/PLDA_mixture_responsibilities.m
1,346
utf_8
78dfbb4de92f575f08845cbc7e0010fb
function P = PLDA_mixture_responsibilities(w,F,W,R)

if nargin==0
  P = test_this();
  return
end

K = length(w);
if iscell(F)
  [D,d] = size(F{1});
else
  [D,d] = size(F);
end
N = size(R,2);
P = zeros(K,N);
Id = eye(d);
for k=1:K
  if iscell(F)
    Fk = F{k};
  else
    Fk = F;
  end
  Wk = W{k};
  Bk = Fk.'*Wk*Fk;
  Gk = Wk - Wk*Fk*((Id+Bk)\Fk.'*Wk);
  RGR = sum(R.*(Gk*R),1);
  logdetW = 2*sum(log(diag(chol(Wk))));
  logdetIB = 2*sum(log(diag(chol(Id+Bk))));
  P(k,:) = log(w(k)) + (logdetW - logdetIB - RGR)/2;
end
P = exp(bsxfun(@minus,P,max(P,[],1)));
P = bsxfun(@rdivide,P,sum(P,1));

end

function P = test_this()

close all;

d = 100;
D = 400;

N = 1000;
K = 5;

w = ones(1,K)/K;
W = cell(1,K);
W{1} = eye(D);
for k=2:K
  W{k} = 2*W{k-1};
end

%F = randn(D,d);
F = cell(1,K);
for k=1:K
  F{k} = randn(D,d);
end

Z = randn(d,N*K);
R = randn(D,N*K);
jj = 1:N;
for k=1:K
  R(:,jj) = F{k}*Z(:,jj) + chol(W{k})\randn(D,N);
  jj = jj + N;
end

P = PLDA_mixture_responsibilities(w,F,W,R);
plot(P');

end
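The per-component score computed above is the log of the zero-mean Gaussian marginal $\mathcal{N}(r \mid 0,\; F_k F_k^\top + W_k^{-1})$, evaluated via the Woodbury identity and the matrix determinant lemma (with $B = F^\top W F$):

(F F^\top + W^{-1})^{-1} = W - W F\,(I + F^\top W F)^{-1} F^\top W \;=\; G,
\qquad
\log\det G = \log\det W - \log\det(I + B).

The responsibilities are then the softmax over $k$ of $\log w_k + \tfrac12(\log\det W_k - \log\det(I+B_k) - r^\top G_k r)$, the shared $-\tfrac{D}{2}\log 2\pi$ term cancelling in the normalization.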
github
bsxfan/meta-embeddings-master
create_partition_posterior_calculator.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/create_partition_posterior_calculator.m
4,076
utf_8
32fda68f00bdccc246e56e3db2e0babe
function calc = create_partition_posterior_calculator(log_expectations,prior,poi)
% Inputs:
%   log_expectations: function handle, maps matrices of additive natural
%                     parameters to log-expectations
%   prior: Exchangeable prior over partitions, for example CRP. It needs to
%          implement prior.logprob(counts), where counts are the number of
%          customers per table (partition block sizes).
%   poi: partition of interest, given as an n-vector of table assignments,
%        where there are n customers. The tables are numbered 1 to m.

if nargin==0
  test_this();
  return;
end

n = length(poi);   %number of customers

%Generate flags for all possible (non-empty) subsets
ns = 2^n-1;   %number of non-empty customer subsets
subsets = logical(mod(fix(bsxfun(@rdivide,0:ns,2.^(0:n-1)')),2));
subsets = subsets(:,2:end);   % dump empty subset
%subsets = sparse(subsets);

  %maps partition to flags indicating subsets (blocks)
  % also returns table counts
  function [flags,counts] = labels2weights(labels)
    [blocks,counts] = labels2blocks(labels);
    %blocks = sparse(blocks);
    [tf,loc] = ismember(blocks',subsets','rows');   %seems faster with full matrices
    assert(all(tf));
    flags = false(ns,1);
    flags(loc) = true;
  end

[poi_weights,counts] = labels2weights(poi);
log_prior_poi = prior.logprob(counts);

%precompute weights and prior for every partition
Bn = Bell(n);
PI = create_partition_iterator(n);
Weights = false(ns,Bn);
log_prior = zeros(1,Bn);
for j=1:Bn
  labels = PI.next();
  [Weights(:,j),counts] = labels2weights(labels);
  log_prior(j) = prior.logprob(counts);
end
Weights = sparse(Weights);
subsets = sparse(subsets);
poi_weights = sparse(poi_weights);

calc.logPost = @logPost;
calc.logPostPoi = @logPostPoi;

  function y = logPostPoi(A,B)
    % Inputs:
    %   A,B: n-column matrices of natural parameters for n meta-embeddings
    % Output:
    %   y: log P(poi | A,B, prior)
    assert(size(B,2)==n && size(A,2)==n);

    %compute subset likelihoods
    log_ex = log_expectations(A*subsets,B*subsets);

    %compute posterior
    num = log_prior_poi + log_ex*poi_weights;
    dens = log_prior + log_ex*Weights;
    maxden = max(dens);
    den = maxden+log(sum(exp(dens-maxden)));
    y = num - den;
  end

  function f = logPost(A,B)
    % Inputs:
    %   A,B: n-column matrices of natural parameters for n meta-embeddings
    % Output:
    %   y: log P(poi | A,B, prior)
    assert(size(B,2)==n && size(A,2)==n);

    %compute subset likelihoods
    log_ex = log_expectations(A*subsets,B*subsets);
    llh = log_ex*Weights;
    den = log_prior + llh;
    maxden = max(den);
    den = maxden+log(sum(exp(den-maxden)));

    function y = logpost_this(poi)
      [poi_weights,counts] = labels2weights(poi);
      log_prior_poi = prior.logprob(counts);
      num = log_prior_poi + log_ex*poi_weights;
      y = num - den;
    end

    f = @logpost_this;
  end

end

function test_this()

Mu = [-1 0 -1.1; 0 -3 0];
C = [3 1 3; 1 1 1];
A = Mu./C;
B = zeros(4,3);
B(1,:) = 1./C(1,:);
B(4,:) = 1./C(2,:);

scale = 3;
B = B * scale;
C = C / scale;

close all;
figure;hold;
plotGaussian(Mu(:,1),diag(C(:,1)),'blue','b');
plotGaussian(Mu(:,2),diag(C(:,2)),'red','r');
plotGaussian(Mu(:,3),diag(C(:,3)),'green','g');
axis('square');
axis('equal');

poi = [1 1 2];
%prior = create_PYCRP(0,[],2,3);
%prior = create_PYCRP([],0,2,3);
prior = create_flat_partition_prior(length(poi));
% NOTE: the repository version omitted the 'prior =' assignment above and
% the log-expectation handle below; plain_GME_log_expectations (referenced
% elsewhere in this repository) is assumed here to match the (A,B) GME
% parameters constructed above.
calc = create_partition_posterior_calculator(@plain_GME_log_expectations,prior,poi);
f = calc.logPost(A,B);
exp([f([1 1 2]), f([1 1 1]), f([1 2 3]), f([1 2 2]), f([1 2 1])])

end
github
bsxfan/meta-embeddings-master
SGME_logexpectation_slow.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/SGME_logexpectation_slow.m
1,686
utf_8
798c0acef9f8a32f4f544ceed9ce0373
function [y,back] = SGME_logexpectation_slow(A,b,B)
% log expected values (w.r.t. standard normal) of diagonalized SGMEs
% Inputs:
%   A: dim-by-n, natural parameters (precision *mean) for n SGMEs
%   b: 1-by-n, precision scale factors for these SGMEs
%   B: dim-by-dim, common precision (full) matrix factor
%
% Note:
%   A(:,j) , b(j)*B forms the meta-embedding for case j
%
% Outputs:
%   y: 1-by-n, log expectations
%   back: backpropagation handle, [dA,db,dB] = back(dy)

if nargin==0
  test_this();
  return;
end

[dim,n] = size(A);
I = speye(dim);
y = zeros(1,n);
S = zeros(dim,n);
for i=1:n
  a = A(:,i);
  bBI = I+b(i)*B;
  s = bBI\a;
  S(:,i) = s;
  logd = logdet(bBI);
  y(i) = (s.'*a - logd)/2;
end

back = @back_this;

  function [dA,db,dB] = back_this(dy)
    dA = zeros(size(A));
    db = zeros(size(b));
    dB = zeros(size(B));
    for ii=1:n
      s = S(:,ii);
      a = A(:,ii);
      da = (dy(ii)/2)*s;
      ds = (dy(ii)/2)*a;
      dlogd = -dy(ii)/2;
      bBI = I+b(ii)*B;
      dbBI = dlogd*inv(bBI);   %#ok<MINV>
      da2 = bBI.'\ds;
      dA(:,ii) = da + da2;
      dbBI = dbBI - (da2)*s.';
      dB = dB + b(ii)*dbBI;
      db(ii) = dbBI(:).'*B(:);
    end
  end

end

function y = logdet(M)
[~,U] = lu(M);
y = sum(log(diag(U).^2))/2;
end

function test_this()

m = 3;
n = 5;

A = randn(m,n);
b = rand(1,n);
B = randn(m,m+1); B = B*B.';

testBackprop(@SGME_logexpectation_slow,{A,b,B},{1,1,1});

end
github
bsxfan/meta-embeddings-master
sampleARG.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/sampleARG.m
1,140
utf_8
4246b530bc1f2bc4fd550c3aa048a900
function Z = sampleARG(a,B,n,X)
% Draws n samples intended to match the Gaussian with natural parameters
% (a,B), i.e. mean B\a and covariance inv(B), using an autoregressive
% scheme; the embedded test compares against Cholesky sampling (sampleChol).

if nargin==0
  test_this();
  return;
end

dim = length(a);
if ~exist('X','var') || isempty(X)
  X = randn(dim,n);
end

diagB = diag(B);
mu = a./diagB;
Z = bsxfun(@plus,mu,bsxfun(@rdivide,X,sqrt(diagB)));
B0 = bsxfun(@rdivide,B,diagB);
for i=2:dim
  jj = 1:i-1;
  Z(i,:) = Z(i,:) - B0(i,jj)*Z(jj,:);
end

end

function test_this()

dim = 2;
n = 5000;
a = randn(dim,1);
R = randn(dim,dim+1);
B = R*R.';
X = randn(dim,n);

tic;Z1 = sampleARG(a,B,n,X);toc
tic;Z2 = sampleChol(a,B,n,X);toc

mu = B\a,
mu1 = mean(Z1,2),
mu2 = mean(Z2,2),

C = inv(B),
C1 = cov(Z1.',1),
C2 = cov(Z2.',1),

close all;
mx = max(max(Z1(:)),max(Z2(:)));
mn = min(min(Z1(:)),min(Z2(:)));
subplot(1,2,1);plot(Z1(1,:),Z1(2,:),'.');title('AR');axis([mn,mx,mn,mx]);axis('square');
subplot(1,2,2);plot(Z2(1,:),Z2(2,:),'.');title('Chol');axis([mn,mx,mn,mx]);axis('square');

end
github
bsxfan/meta-embeddings-master
SGME_train_BXE.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/SGME_train_BXE.m
2,434
utf_8
4fb4ed77b580dc09d69346bc07a2cd16
function model = SGME_train_BXE(R,labels,nu,zdim,niters,timeout,test)

if nargin==0
  test_this();
  return;
end

[rdim,n] = size(R);

spoi = sparse(labels);
tar = bsxfun(@eq,spoi,spoi.');
ntar = 0;
nnon = 0;
for k=1:n-1
  jj = k+1:n;
  tari = full(tar(k,jj));
  ntari = sum(tari);
  ntar = ntar + ntari;
  nnon = nnon + length(jj) - ntari;
end
prior = ntar/(ntar+nnon);
plo = log(prior) - log1p(-prior);
wt = prior/ntar;
wn = (1-prior)/nnon;

delta = rdim - zdim;
assert(delta>0);

%initialize
P0 = randn(zdim,rdim);
H0 = randn(delta,rdim);
sqrtd0 = rand(zdim,1);

szP = numel(P0);
szH = numel(H0);

w0 = pack(P0,H0,sqrtd0);

if exist('test','var') && test
  testBackprop(@objective,w0);
  return;
end

mem = 20;
stpsz0 = 1e-3;
%timeout = 5*60;
w = L_BFGS(@objective,w0,niters,timeout,mem,stpsz0);

[P,H,sqrtd] = unpack(w);
d = sqrtd.^2;

model.logexpectation = @(A,b) SGME_logexpectation(A,b,d);
model.extract = @(R) SGME_extract(P,H,nu,R);
model.d = d;

  function w = pack(P,H,d)
    w = [P(:);H(:);d(:)];
  end

  function [P,H,d] = unpack(w)
    at = 1:szP;
    P = reshape(w(at),zdim,rdim);
    at = szP + (1:szH);
    H = reshape(w(at),delta,rdim);
    at = szP + szH + (1:zdim);
    d = w(at);
  end

  function [y,back] = objective(w)
    [P,H,sqrtd] = unpack(w);
    [A,b,back1] = SGME_extract(P,H,nu,R);
    d = sqrtd.^2;
    [y,back2] = SGME_BXE(A,b,d,plo,wt,wn,tar);
    back = @back_this;

    function [dw] = back_this(dy)
      [dA,db,dd] = back2(dy);
      dsqrtd = 2*sqrtd.*dd;
      [dP,dH] = back1(dA,db);
      dw = pack(dP,dH,dsqrtd);
    end
  end

end

function test_this()

zdim = 2;
rdim = 4;
n = 10;
m = 3;
prior = create_PYCRP([],0,m,n);
while true
  labels = prior.sample(n);
  if max(labels) > 1
    break;
  end
end

nu = pi;
R = randn(rdim,n);

test = true;
niters = [];
timeout = [];
SGME_train_BXE(R,labels,nu,zdim,niters,timeout,test);

end
github
bsxfan/meta-embeddings-master
SGME_F2G.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/SGME_F2G.m
969
utf_8
e77bb8ead4c56cabb8bda3a5a1e59081
function [G,reg,back] = SGME_F2G(F)
% NOTE: the repository version of this function is unfinished: it computes
% B0 and its diagonal but assigns none of the declared outputs. The body
% below is a hedged, minimal completion: G is formed as in the
% commented-out line of SGME_extr.m, and reg combines the helper
% regularizers defined in this file. The backprop handle is deliberately
% left unimplemented in this sketch.

B0 = F.'*F;
d = diag(B0);
G = speye(size(F,1)) - F*bsxfun(@ldivide,d,F.');   % as in SGME_extr.m (assumed)
reg = regGG(G) + regDiag(B0,diag(d));              % assumed combination
back = @(varargin) error('SGME_F2G: backprop not implemented in this sketch');

end

function [y,back] = regGG(G)
Delta = G*G-G;
[y,back1] = regL2(Delta);
back = @back_this;
  function [dG] = back_this(dy)
    dDelta = back1(dy);
    % fixed: the original subtracted speye(dim) instead of dDelta; the
    % derivative of ||G*G-G||^2/2 w.r.t. G is G'*dDelta + dDelta*G' - dDelta
    dG = G.'*dDelta + dDelta*G.' - dDelta;
  end
end

function [y,back] = regDiag(B0,D)
Delta = B0-D;
[y,back1] = regL2(Delta);
back = @back_this;
  function [dB0,dD] = back_this(dy)
    dDelta = back1(dy);
    dB0 = dDelta;
    dD = -dDelta;
  end
end

function [y,back] = regGF(G,F)
Delta = G*F;
[y,back1] = regL2(Delta);
back = @back_this;
  function [dG,dF] = back_this(dy)
    dDelta = back1(dy);
    dG = dDelta*F.';
    dF = G.'*dDelta;
  end
end

function [y,back] = regL2(Delta)
y = (Delta(:).'*Delta(:))/2;
back = @(dy) dy*Delta;   % fixed: the original ignored the dy scale factor
end
github
bsxfan/meta-embeddings-master
SGME_extr.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/SGME_extr.m
3,822
utf_8
b9fd252ce11aa8487b51f175440693a3
function [A,b,d,reg,back] = SGME_extr(T,F,H,nu,R)

if nargin==0
  test_this();
  return;
end

[rdim,zdim] = size(F);
nuprime = nu + rdim - zdim;
TR = T*R;
A0 = F.'*TR;
B0 = F.'*F;
d = diag(B0);
%G = speye(rdim) - F*bsxfun(@ldivide,d,F.');
%HH = H.'*H  This must be regularized to equal G

%B0 must be diagonal
delta1 = mdot(B0) - d.'*d;
reg1 = 0.5*delta1^2;   %tr([B0-diag(d)]^2)

%G=HH
BF = bsxfun(@ldivide,d,F.');   % inv(B)*F'
FFB = F.'*BF.';                % F'*F*inv(B)
HF = H*F;
BFH = BF*H.';                  % inv(B)*F.'*H.'
HH = H*H.';
trGG = rdim - 2*mdot(F,BF.') + mdot(FFB);
trGHH = mdot(H) - mdot(HF,BFH.');
trHHHH = mdot(HH);
reg2 = (trGG - 2*trGHH + trHHHH)/2;

%H'H*F = 0
HHHF = HH*HF;
reg3 = mdot(HHHF,HF);

%tr(H'H)=D-d
delta4 = mdot(H) - rdim + zdim;
reg4 = 0.5*delta4^2;

reg = reg1 + reg2 + reg3 + reg4;

HTR = H*TR;
q = sum(HTR.^2,1);
den = nu + q;
b = nuprime./den;
A = bsxfun(@times,b,A0);

back = @back_this;

  function [dT,dF,dH] = back_this(dA,db,dd,dreg)

    %A = bsxfun(@times,b,A0)
    db = db + sum(dA.*A0,1);
    dA0 = bsxfun(@times,b,dA);

    %b = nuprime./den
    dden = -db.*b./den;

    %den = nu + q
    dq = dden;

    %q = sum(HTR.^2,1)
    dHTR = bsxfun(@times,2*dq,HTR);

    %HTR = H*TR
    dH = dHTR*TR.';
    dTR = H.'*dHTR;

    %reg4 = 0.5*delta4^2;
    ddelta4 = dreg*delta4;

    %delta4 = mdot(H) - D + d;
    dH = dH + (2*ddelta4)*H;

    %reg3 = mdot(HHHF,HF);
    dHHHF = dreg*HF;
    dHF = dreg*HHHF;

    %HHHF = HH*HF;
    dHH = dHHHF*HF.';
    dHF = dHF + HH.'*dHHHF;

    %reg2 = (trGG - 2*trGHH + trHHHH)/2
    dtrGG = dreg/2;
    dtrGHH = -dreg;
    dtrHHHH = dreg/2;

    %trHHHH = mdot(HH)
    dHH = dHH + (2*dtrHHHH)*HH;

    %trGHH = mdot(H) - mdot(HF,BFH.')
    dH = dH + (2*dtrGHH)*H;
    dHF = dHF - dtrGHH*BFH.';
    dBFH = (-dtrGHH)*HF.';

    %trGG = rdim - 2*mdot(F,BF.') + mdot(FFB)
    dF = (-2*dtrGG)*BF.';
    dBF = (-2*dtrGG)*F.';
    dFFB = (2*dtrGG)*FFB;

    %HH = H*H.'
    dH = dH + (2*dHH)*H;

    %BFH = BF*H.'
    dBF = dBF + dBFH*H;
    dH = dH + dBFH.'*BF;

    %HF = H*F
    dH = dH + dHF*F.';
    dF = dF + H.'*dHF;

    %FFB = F.'*BF.'
    dF = dF + BF.'*dFFB.';
    dBF = dBF + dFFB.'*F.';

    %BF = bsxfun(@ldivide,d,F.')
    dF = dF + bsxfun(@ldivide,d,dBF).';
    dd = dd - sum(bsxfun(@ldivide,d,BF.*dBF),2);

    %reg1 = 0.5*delta1^2;
    ddelta1 = dreg*delta1;

    %delta1 = mdot(B0) - d.'*d;
    dB0 = (2*ddelta1)*B0;
    dd = dd - (2*ddelta1)*d;

    %d = diag(B0);
    dB0 = dB0 + diag(dd);

    %B0 = F.'*F;
    dF = dF + 2*F*dB0;

    %A0 = F.'*TR;
    dF = dF + TR*dA0.';
    dTR = dTR + F*dA0;

    %TR = T*R;
    dT = dTR*R.';

  end

end

%trace(A*B.')
function y = mdot(A,B)
if exist('B','var')
  assert(all(size(A)==size(B)));
  y = A(:).'*B(:);
else
  y = sum(A(:).^2,1);
end
end

function test_this()

zdim = 2;
rdim = 5;
n = 4;

F = randn(rdim,zdim);
T = randn(rdim,rdim);
H = randn(rdim-zdim,rdim);
nu = pi;
R = randn(rdim,n);

f = @(T,F,H) SGME_extr(T,F,H,nu,R);

testBackprop_multi(f,4,{T,F,H});

end
github
bsxfan/meta-embeddings-master
SGME_extract.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/SGME_extract.m
1,065
utf_8
b9106e80e9a78235222680c566b510fd
function [A,b,back] = SGME_extract(P,H,nu,R)

if nargin==0
  test_this();
  return;
end

[zdim,rdim] = size(P);
nuprime = nu + rdim - zdim;
HR = H*R;
q = sum(HR.^2,1);
den = nu + q;
b = nuprime./den;
M = P*R;
A = bsxfun(@times,b,M);

back = @back_this;

  function [dP,dH] = back_this(dA,db)

    %A = bsxfun(@times,b,M);
    db = db + sum(dA.*M,1);
    dM = bsxfun(@times,b,dA);

    %M = P*R;
    dP = dM*R.';

    %b = nuprime./den;
    dden = -db.*b./den;

    %den = nu + q;
    dq = dden;

    %q = sum(HR.^2,1);
    dHR = bsxfun(@times,2*dq,HR);

    %HR = H*R;
    dH = dHR*R.';

  end

end

function test_this()

zdim = 2;
rdim = 4;
n = 5;

P = randn(zdim,rdim);
H = randn(rdim-zdim,rdim);
nu = pi;
R = randn(rdim,n);

f = @(P,H) SGME_extract(P,H,nu,R);

testBackprop_multi(f,2,{P,H});

end
github
bsxfan/meta-embeddings-master
SGME_extr_full.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/SGME_extr_full.m
1,736
utf_8
a9fbe07b13ba1fd948fdfdfc334f7d48
function [A,b,B0,back] = SGME_extr_full(T,F,nu,R)

if nargin==0
  test_this();
  return;
end

[rdim,zdim] = size(F);
nuprime = nu + rdim - zdim;
TR = T*R;
A0 = F.'*TR;
B0 = F.'*F;
if isreal(F)
  cholB0 = chol(B0);
  solveB = @(A) cholB0\(cholB0.'\A);
else
  solveB = @(A) B0\A;
end
S = solveB(A0);
den = nu + sum(TR.^2,1) - sum(A0.*S,1);
b = nuprime ./ den;
A = bsxfun(@times,b,A0);

back = @back_this;

  function [dT,dF] = back_this(dA,db,dB0)

    %A = bsxfun(@times,b,A0)
    db = db + sum(dA.*A0,1);
    dA0 = bsxfun(@times,b,dA);

    %b = nuprime ./ den
    dden = -(db.*b)./den;

    %den = nu + sum(TR.^2,1) - sum(A0.*S,1)
    dTR = bsxfun(@times,(2*dden),TR);
    dA0 = dA0 - bsxfun(@times,dden,S);
    dS = -bsxfun(@times,dden,A0);

    %S = B0\A0
    dA0_2 = solveB(dS);
    dA0 = dA0 + dA0_2;
    dB0 = dB0 - dA0_2*S.';

    %B0 = F.'*F
    dF = F*(dB0+dB0.');

    %A0 = F.'*TR
    dF = dF + TR*dA0.';
    dTR = dTR + F*dA0;

    %TR = T*R
    dT = dTR*R.';

  end

end

function test_this()

zdim = 2;
rdim = 5;
n = 4;

F = randn(rdim,zdim);
T = randn(rdim,rdim);
nu = pi;
R = randn(rdim,n);

f = @(T,F) SGME_extr_full(T,F,nu,R);

testBackprop_multi(f,3,{T,F},{1,1});

end
github
bsxfan/meta-embeddings-master
sumlogsumexp.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/sumlogsumexp.m
455
utf_8
cccd5f3ae0b7894b95682910eba4a060
function [y,back] = sumlogsumexp(X)

if nargin==0
  test_this();
  return;
end

mx = max(real(X),[],1);
yy = mx + log(sum(exp(bsxfun(@minus,X,mx)),1));
y = sum(yy,2);

back = @back_this;

  function dX = back_this(dy)
    dX = dy*exp(bsxfun(@minus,X,yy));
  end

end

function test_this()

m = 3;
n = 5;
X = randn(m,n);
testBackprop(@(X)sumlogsumexp(X),X);

end
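A quick numerical check (values illustrative): the function matches the naive expression but remains stable when entries are large, because the column maxima are factored out before exponentiation.

X = randn(3,5);
abs(sumlogsumexp(X) - sum(log(sum(exp(X),1))))   % ~1e-15
sumlogsumexp(X + 1000)   % no overflow: equals sumlogsumexp(X) + 5*1000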
github
bsxfan/meta-embeddings-master
SGME_logexpectation.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/SGME_logexpectation.m
1,902
utf_8
81c7aa33f6446ddaf811422fbffe00d6
function [y,back] = SGME_logexpectation(A,b,d)
% log expected values (w.r.t. standard normal) of diagonalized SGMEs
% Inputs:
%   A: dim-by-n, natural parameters (precision *mean) for n SGMEs
%   b: 1-by-n, precision scale factors for these SGMEs
%   d: dim-by-1, common diagonal precision
%
% Note:
%   bsxfun(@times,b,d) is dim-by-n precision diagonals for the n SGMEs
%
% Outputs:
%   y: 1-by-n, log expectations
%   back: backpropagation handle, [dA,db,dd] = back(dy)

if nargin==0
  test_this();
  return;
end

assert(isreal(A));
assert(isreal(b));
assert(isreal(d));

bd = bsxfun(@times,b,d);
logdets = sum(log1p(bd),1);
den = 1 + bd;
Aden = A./den;
Q = sum(A.*Aden,1);
%Q = sum((A.^2)./den,1);
y = (Q-logdets)/2;

back = @back_this;

assert(isreal(y));

  function [dA,db,dd] = back_this(dy)
    dQ = dy/2;
    %dlogdets = - dQ;
    dAden = bsxfun(@times,dQ,A);
    dA = bsxfun(@times,dQ,Aden);
    dA2 = dAden./den;
    dA = dA + dA2;
    dden = -Aden.*dA2;
    dbd = dden - bsxfun(@rdivide,dQ,den);   %dlogdets = -dQ
    db = d.' * dbd;
    dd = dbd * b.';
  end

end

function test_this0()
m = 3;
n = 5;
A = randn(m,n);
b = rand(1,n);
d = rand(m,1);
testBackprop(@SGME_logexpectation,{A,b,d},{1,1,1});
end

function test_this()

%em = 4;
n = 7;
dim = 2;

%prior = create_PYCRP([],0,em,n);
%poi = prior.sample(n);
%m = max(poi);
%blocks = sparse(poi,1:n,true,m+1,n);
%num = find(blocks(:));
%logPrior = prior.GibbsMatrix(poi);

d = rand(dim,1);
A = randn(dim,n);
b = rand(1,n);

f = @(A,b,d) SGME_logexpectation(A,b,d);
testBackprop(f,{A,b,d},{1,1,1});

end
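A single-trial scoring sketch: the same-speaker LLR is the log-expectation of the pooled meta-embedding minus those of the individual ones, the same expression used in create_BXE_calculator.m (values illustrative):

d = rand(2,1);
A = randn(2,2); b = rand(1,2);   % two recordings
LE = SGME_logexpectation(A,b,d);
LE12 = SGME_logexpectation(A(:,1)+A(:,2),b(1)+b(2),d);
llr = LE12 - LE(1) - LE(2)       % log-likelihood-ratio for the pair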
github
bsxfan/meta-embeddings-master
SGME_train_MXE.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/SGME_train_MXE.m
2,514
utf_8
939eef34cb61a4493dfe9c98a11d633c
function model = SGME_train_MXE(R,labels,nu,zdim,niters,timeout,test)

if nargin==0
  test_this();
  return;
end

[rdim,n] = size(R);
m = max(labels);
blocks = sparse(labels,1:n,true,m,n);
counts = sum(blocks,2);
logPrior = [log(counts);-inf];

delta = rdim - zdim;
assert(delta>0);

%initialize
P0 = randn(zdim,rdim);
H0 = randn(delta,rdim);
sqrtd0 = rand(zdim,1);
As0 = randn(zdim,m);
sqrtBs0 = randn(1,m);

szP = numel(P0);
szH = numel(H0);
szd = numel(sqrtd0);
szAs = numel(As0);
szBs = numel(sqrtBs0);

w0 = pack(P0,H0,sqrtd0,As0,sqrtBs0);

if exist('test','var') && test
  testBackprop(@objective,w0);
  return;
end

mem = 20;
stpsz0 = 1e-3;
%timeout = 5*60;
w = L_BFGS(@objective,w0,niters,timeout,mem,stpsz0);

[P,H,sqrtd,As,sqrtBs] = unpack(w);
d = sqrtd.^2;

model.logexpectation = @(A,b) SGME_logexpectation(A,b,d);
model.extract = @(R) SGME_extract(P,H,nu,R);
model.d = d;

  function w = pack(P,H,d,As,Bs)
    w = [P(:);H(:);d(:);As(:);Bs(:)];
  end

  function [P,H,d,As,Bs] = unpack(w)
    at = 1:szP;
    P = reshape(w(at),zdim,rdim);
    at = szP + (1:szH);
    H = reshape(w(at),delta,rdim);
    at = szP + szH + (1:szd);
    d = w(at);
    at = szP + szH + szd + (1:szAs);
    As = reshape(w(at),zdim,m);
    at = szP + szH + szd + szAs + (1:szBs);
    Bs = w(at).';
  end

  function [y,back] = objective(w)
    [P,H,sqrtd,As,sqrtBs] = unpack(w);
    [A,b,back1] = SGME_extract(P,H,nu,R);
    d = sqrtd.^2;
    Bs = sqrtBs.^2;
    [y,back2] = SGME_MXE(A,b,d,As,Bs,labels,logPrior);
    back = @back_this;

    function [dw] = back_this(dy)
      [dA,db,dd,dAs,dBs] = back2(dy);
      dsqrtd = 2*sqrtd.*dd;
      dsqrtBs = 2*sqrtBs.*dBs;
      [dP,dH] = back1(dA,db);
      dw = pack(dP,dH,dsqrtd,dAs,dsqrtBs);
    end
  end

end

function test_this()

zdim = 2;
rdim = 4;
n = 5;
m = 3;
prior = create_PYCRP([],0,m,n);
labels = prior.sample(n);

nu = pi;
R = randn(rdim,n);

test = true;
niters = [];
timeout = [];
SGME_train_MXE(R,labels,nu,zdim,niters,timeout,test);

end
github
bsxfan/meta-embeddings-master
SGME_BXE.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/SGME_BXE.m
1,927
utf_8
43f8a07c46e1df00ef02abdfbbc38dde
function [y,back] = SGME_BXE(A,B,D,plo,wt,wn,tar)

if nargin==0
  test_this();
  return;
end

n = size(A,2);

[LEc,back1] = SGME_logexpectation(A,B,D);

y = 0;
dA = zeros(size(A));
dB = zeros(size(B));
dLEc = zeros(size(LEc));
dD = zeros(size(D));
for i=1:n-1
  jj = i+1:n;
  AA = bsxfun(@plus,A(:,i),A(:,jj));
  BB = bsxfun(@plus,B(:,i),B(:,jj));
  tari = full(tar(i,jj));
  [LE2,back2] = SGME_logexpectation(AA,BB,D);
  llr = LE2 - LEc(i) - LEc(jj);
  arg_tar = -plo - llr(tari);
  noni = ~tari;
  arg_non = plo + llr(noni);
  y = y + wt*sum(softplus(arg_tar));
  y = y + wn*sum(softplus(arg_non));

  dllr = zeros(size(llr));
  dllr(tari) = (-wt)*sigmoid(arg_tar);
  dllr(noni) = wn*sigmoid(arg_non);

  dLE2 = dllr;
  dLEc(i) = dLEc(i) - sum(dllr);
  dLEc(jj) = dLEc(jj) - dllr;

  [dAA,dBB,dD2] = back2(dLE2);
  dD = dD + dD2;
  dA(:,i) = dA(:,i) + sum(dAA,2);
  dB(:,i) = dB(:,i) + sum(dBB,2);
  dA(:,jj) = dA(:,jj) + dAA;
  dB(:,jj) = dB(:,jj) + dBB;
end

back = @(dy) back_this(dy,dA,dB,dLEc,dD);

  function [dA,dB,dD] = back_this(dy,dA,dB,dLEc,dD)
    [dA1,dB1,dD1] = back1(dLEc);
    dA = dy*(dA + dA1);
    dB = dy*(dB + dB1);
    dD = dy*(dD + dD1);
  end

end

function y = sigmoid(x)
y = 1./(1+exp(-x));
end

function y = softplus(x)
% y = log(1+exp(x));
y = x;
f = find(x<30);
y(f) = log1p(exp(x(f)));
end

function test_this()

zdim = 2;
n = 5;

A = randn(zdim,n);
B = rand(1,n);

plo = randn;
wt = rand;
wn = rand;

tar = sparse(randn(n)>0);

D = rand(zdim,1);

f = @(A,B,D) SGME_BXE(A,B,D,plo,wt,wn,tar);

testBackprop(f,{A,B,D});

end
github
bsxfan/meta-embeddings-master
plotGaussian.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/plotGaussian.m
1,323
utf_8
16ea9cd804af31a79f3ccd3cf5687a49
function tikz = plotGaussian(mu,C,colr,c)

if nargin==0
  test_this();
  return;
end

if isempty(C)   %assume mu is a GME
  [mu,C] = mu.get_mu_cov();
end

[V,D] = eig(C);
v1 = V(:,1);
v2 = V(:,2);
if all(v1>=0)
  r1 = sqrt(D(1,1));
  r2 = sqrt(D(2,2));
  rotate = acos(v1(1))*180/pi;
elseif all(-v1>=0)
  r1 = sqrt(D(1,1));
  r2 = sqrt(D(2,2));
  rotate = acos(-v1(1))*180/pi;
elseif all(v2>=0)
  r1 = sqrt(D(2,2));
  r2 = sqrt(D(1,1));
  rotate = acos(v2(1))*180/pi;
else
  r1 = sqrt(D(2,2));
  r2 = sqrt(D(1,1));
  rotate = acos(-v2(1))*180/pi;
end

if ~isempty(colr)
  tikz = sprintf('\\draw[rotate around ={%4.3g:(%4.3g,%4.3g)},%s] (%4.3g,%4.3g) ellipse [x radius=%4.3g, y radius=%4.3g];\n',rotate,mu(1),mu(2),colr,mu(1),mu(2),r1,r2);
  fprintf('%s',tikz);
end

theta = (0:100)*2*pi/100;
circle = [cos(theta);sin(theta)];
ellipse = bsxfun(@plus,mu,V*sqrt(D)*circle);
plot(ellipse(1,:),ellipse(2,:),c);

end

function test_this()

close all;

%B = 2*eye(2) + ones(2);
B = 2*eye(2) + [1,-1;-1,1];
mu = [1;2];

figure;hold;
axis('equal');
axis('square');
plotGaussian(mu,B,'blue','b')

end
github
bsxfan/meta-embeddings-master
create_HTPLDA_extractor.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/create_HTPLDA_extractor.m
5,994
utf_8
90dafde6ab2c52c45680a2fece86f0f9
function HTPLDA = create_HTPLDA_extractor(F,nu,W)

if nargin==0
  test_PsL();
  %test_this();
  return;
end

[rdim,zdim] = size(F);
assert(rdim>zdim);
nu_prime = nu + rdim - zdim;

if ~exist('W','var') || isempty(W)
  W = speye(rdim);
end

E = F.'*W*F;
G = W - W*F*(E\F.')*W;
SGME = create_SGME_calculator(E);
V = SGME.V;   % E = VDV'
VFW = V.'*F.'*W;

HTPLDA.extractSGMEs = @extractSGMEs;
HTPLDA.SGME = SGME;
HTPLDA.plot_database = @plot_database;
HTPLDA.getPHd = @getPHd;

  function [A,b,d] = extractSGMEs(R,precisions)
    q = sum(R.*(G*R),1);
    if ~exist('precisions','var') || isempty(precisions)
      b = nu_prime./(nu+q);
    else
      b = precisions;
    end
    A = bsxfun(@times,b,VFW*R);
    d = SGME.d;
  end

matlab_colours = {'r','g','b','m','c','k',':r',':g',':b',':m',':c',':k'};
tikz_colours = {'red','green','blue','magenta','cyan','black','red, dotted','green, dotted','blue, dotted','magenta, dotted','cyan, dotted','black, dotted'};

  function plot_database(R,labels,Z)
    assert(max(labels) <= length(matlab_colours),'not enough colours to plot all speakers');
    [A,b] = extractSGMEs(R);
    %SGME.plotAll(A,b,matlab_colours(labels), tikz_colours(labels));
    SGME.plotAll(A,b,matlab_colours(labels), []);
    if exist('Z','var') && ~isempty(Z)
      for i=1:size(Z,2)
        plot(Z(1,i),Z(2,i),[matlab_colours{i},'*']);
      end
    end
  end

end

function test_this()

zdim = 2;
xdim = 20;   %required: xdim > zdim
nu = 3;      %required: nu >= 1, integer, DF
fscal = 3;   %increase fscal to move speakers apart

F = randn(xdim,zdim)*fscal;

HTPLDA = create_HTPLDA_extractor(F,nu);
SGME = HTPLDA.SGME;

%labels = [1,2,2];
%[R,Z,precisions] = sample_HTPLDA_database(nu,F,labels);

n = 8;
m = 5;
%prior = create_PYCRP(0,[],m,n);
prior = create_PYCRP([],0,m,n);
[R,Z,precisions,labels] = sample_HTPLDA_database(nu,F,prior,n);
fprintf('there are %i speakers\n',max(labels));

[A,b] = HTPLDA.extractSGMEs(R);
rotate = true;
[Ap,Bp] = SGME.SGME2GME(A,b,rotate);

close all;

figure;hold;
plotGaussian(zeros(zdim,1),eye(zdim),'black, dashed','k--');
%matlab_colours = {'b','r','r'};
%tikz_colours = {'blue','red','red'};
%SGME.plotAll(A,b,matlab_colours, tikz_colours, rotate);
HTPLDA.plot_database(R,labels,Z);
axis('square');axis('equal');

calc1 = create_partition_posterior_calculator(SGME.log_expectations,prior,labels);
calc2 = create_pseudolikelihood_calculator(SGME.log_expectations,prior,labels);
calc3 = create_BXE_calculator(SGME.log_expectations,[],labels);

scale = exp(-5:0.1:5);
MCL = zeros(size(scale));
PsL = zeros(size(scale));
slowPsL = zeros(size(scale));
BXE = zeros(size(scale));

tic;
for i=1:length(scale)
  MCL(i) = - calc1.logPostPoi(scale(i)*A,scale(i)*b);
end
toc

tic;
for i=1:length(scale)
  BXE(i) = calc3.BXE(scale(i)*A,scale(i)*b);
end
toc

tic;
for i=1:length(scale)
  slowPsL(i) = - calc2.slow_log_pseudo_likelihood(scale(i)*A,scale(i)*b);
end
toc

tic;
for i=1:length(scale)
  PsL(i) = - calc2.log_pseudo_likelihood(scale(i)*A,scale(i)*b);
end
toc

figure;
%subplot(2,1,1);semilogx(scale,MCL);title('MCL')
%subplot(2,1,2);semilogx(scale,PsL);title('PsL');
subplot(2,1,1);semilogx(scale,MCL,scale,slowPsL,scale,PsL,'--');legend('MCL','slowPsL','PsL');
subplot(2,1,2);semilogx(scale,BXE);legend('BXE');

%[precisions;b]
%[plain_GME_log_expectations(Ap,Bp);SGME.log_expectations(A,b)]

end

function test_PsL()

zdim = 2;
xdim = 20;   %required: xdim > zdim
nu = 3;      %required: nu >= 1, integer, DF
fscal = 3;   %increase fscal to move speakers apart

F = randn(xdim,zdim)*fscal;

HTPLDA = create_HTPLDA_extractor(F,nu);
SGME = HTPLDA.SGME;

n = 1000;
m = 100;
%prior = create_PYCRP(0,[],m,n);
prior = create_PYCRP([],0,m,n);
[R,Z,precisions,labels] = sample_HTPLDA_database(nu,F,prior,n);
fprintf('there are %i speakers\n',max(labels));

[A,b] = HTPLDA.extractSGMEs(R);
rotate = true;
[Ap,Bp] = SGME.SGME2GME(A,b,rotate);

close all;

if zdim==2 && max(labels)<=12
  figure;hold;
  plotGaussian(zeros(zdim,1),eye(zdim),'black, dashed','k--');
  HTPLDA.plot_database(R,labels,Z);
  axis('square');axis('equal');
end

tic;calc0 = create_pseudolikelihood_calculator_old(SGME.log_expectations,prior,labels);toc
tic;calc1 = create_pseudolikelihood_calculator(SGME.log_expectations,prior,labels);toc;
tic;calc2 = create_BXE_calculator(SGME.log_expectations,[],labels);toc

scale = exp(-5:0.1:5);
oldPsL = zeros(size(scale));
PsL = zeros(size(scale));
BXE = zeros(size(scale));

% tic;
% for i=1:length(scale)
%   slowPsL(i) = - calc1.slow_log_pseudo_likelihood(scale(i)*A,scale(i)*b);
% end
% toc

tic;
for i=1:length(scale)
  oldPsL(i) = - calc0.log_pseudo_likelihood(scale(i)*A,scale(i)*b);
end
toc

tic;
for i=1:length(scale)
  PsL(i) = - calc1.log_pseudo_likelihood(scale(i)*A,scale(i)*b);
end
toc

% tic;
% for i=1:length(scale)
%   BXE(i) = calc2.BXE(scale(i)*A,scale(i)*b);
% end
% toc

figure;
subplot(2,1,1);semilogx(scale,oldPsL,scale,PsL,'r--');legend('oldPsL','PsL');
subplot(2,1,2);semilogx(scale,BXE);title('BXE');

%[precisions;b]
%[plain_GME_log_expectations(Ap,Bp);SGME.log_expectations(A,b)]

end
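A minimal end-to-end sketch, mirroring the embedded tests (sample_HTPLDA_database and create_PYCRP are elsewhere in this repository; sizes illustrative):

zdim = 2; xdim = 20; nu = 3; fscal = 3;
F = randn(xdim,zdim)*fscal;
HTPLDA = create_HTPLDA_extractor(F,nu);
prior = create_PYCRP([],0,5,8);
[R,Z,precisions,labels] = sample_HTPLDA_database(nu,F,prior,8);
[A,b] = HTPLDA.extractSGMEs(R);
LE = HTPLDA.SGME.log_expectations(A,b);   % per-recording log-expectations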
github
bsxfan/meta-embeddings-master
SGME_MXE2.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/SGME_MXE2.m
1,787
utf_8
353320c477be13a9cd785ec811fdd210
function [y,back] = SGME_MXE2(A,B,D,As,Bs,labels,logPrior)

if nargin==0
  test_this();
  return;
end

dA = zeros(size(A));
dB = zeros(size(B));
dD = zeros(size(D));
dAs = zeros(size(As));
dBs = zeros(size(Bs));

[LEs,back2] = SGME_logexpectation(As,Bs,D);
dLEs = zeros(size(LEs));

m = length(LEs);   % #speakers
n = size(A,2);     % #recordings
scal = 1/(n*log(m));

y = 0;
for j=1:n
  AA = bsxfun(@plus,As,A(:,j));
  BB = bsxfun(@plus,Bs,B(:,j));
  [LEboth,back3] = SGME_logexpectation(AA,BB,D);
  logPost = logPrior + LEboth.' - LEs.';
  [yj,back4] = sumlogsoftmax(logPost,labels(j));
  y = y - yj;

  dlogPost = back4(-1);
  dLEs = dLEs - dlogPost.';
  dLEboth = dlogPost.';
  [dAA,dBB,dDj] = back3(dLEboth);
  dD = dD + dDj;
  dAs = dAs + dAA;
  dBs = dBs + dBB;
  dA(:,j) = sum(dAA,2);
  dB(:,j) = sum(dBB,2);
end
y = y*scal;

back = @(dy) back_this(dy,dA,dB,dD,dAs,dBs);

  function [dA,dB,dD,dAs,dBs] = back_this(dy,dA,dB,dD,dAs,dBs)
    %[LEs,back2] = SGME_logexpectation(As,Bs,D).';
    [dAs2,dBs2,dD2] = back2(dLEs);
    dA = (dy*scal) * dA;
    dB = (dy*scal) * dB;
    dD = (dy*scal) * (dD + dD2);
    dAs = (dy*scal) * (dAs + dAs2);
    dBs = (dy*scal) * (dBs + dBs2);
  end

end

function test_this()

m = 3;
n = 5;
dim = 2;
A = randn(dim,n);
As = randn(dim,m);
B = rand(1,n);
Bs = rand(1,m);
D = rand(dim,1);
logPrior = randn(m,1);
labels = randi(m,1,n);

f = @(A,B,D,As,Bs) SGME_MXE2(A,B,D,As,Bs,labels,logPrior);
testBackprop(f,{A,B,D,As,Bs});

end
github
bsxfan/meta-embeddings-master
asEig.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/asEig.m
2,397
utf_8
c32c0c7a9a0290a98e03a32ad9077c80
function CA = asEig(A)

if nargin==0
  test_this();
  return;
end

if isreal(A)
  [V,D] = eig(A);   %V*D*V' = A
  D = diag(D);
  r = true;
else
  [L,U] = lu(A);    % LU = A
  r = false;
end
dim = size(A,1);

CA.logdet = @logdet;
CA.solve = @solve;

  function [y,back] = logdet()
    if r
      y = sum(log(D));
    else
      y = sum(log(diag(U).^2))/2;
    end
    back = @(dy) solve(dy*speye(dim));
  end

  function [Y,back] = solve(RHS)
    if r
      Y = V*bsxfun(@ldivide,D,V.'*RHS);
    else
      Y = U\(L\RHS);
    end
    back = @(dY) back_solve(dY,Y);
  end

  function Y = solveT(RHS)   %A'\RHS, for LU case
    Y = L.'\(U.'\RHS);
  end

  function [dRHS,dA] = back_solve(dY,Y)
    if r
      dRHS = solve(dY);
      if nargout >= 2
        dA = (-dRHS)*Y.';
      end
    else
      dRHS = solveT(dY);
      if nargout >= 2
        dA = (-dRHS)*Y.';
      end
    end
  end

end

function [y,back] = logdettestfun(A)
  CA = asEig(A*A.');
  [y,back1] = CA.logdet();
  sym = @(DY) DY + DY.';
  back = @(dy) sym(back1(dy))*A;
end

function [Y,back] = solvetestfun(RHS,A)
  CA = asEig(A*A.');
  [Y,back1] = CA.solve(RHS);
  back = @(dY) back_solvetestfun(dY);
  function [dRHS,dA] = back_solvetestfun(dY)
    [dRHS,dAA] = back1(dY);
    dA = (dAA+dAA.')*A;
  end
end

function test_this()

fprintf('Test function values:\n');
dim = 5;
RHS = rand(dim,1);
A = randn(dim);A = A*A';
CA = asEig(A);
[log(det(A)),CA.logdet()]
[A\RHS,CA.solve(RHS)]

A = complex(randn(dim),zeros(dim));
CA = asEig(A);
[log(abs(det(A))),CA.logdet()]
[A\RHS,CA.solve(RHS)]

A = randn(dim,2*dim);A = A*A';

fprintf('\n\n\nTest logdet backprop (complex step) :\n');
testBackprop(@logdettestfun,A);

fprintf('\n\n\nTest logdet backprop (real step) :\n');
testBackprop_rs(@logdettestfun,A,1e-4);

fprintf('\n\n\nTest solve backprop (complex step) :\n');
testBackprop(@solvetestfun,{RHS,A},{1,1});

fprintf('\n\n\nTest solve backprop (real step) :\n');
testBackprop_rs(@solvetestfun,{RHS,A},1e-4,{1,1});

end
github
bsxfan/meta-embeddings-master
SGME_train2.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/SGME_train2.m
2,448
utf_8
e9d4f6112d2c067b5d1fb88a8d941e38
function model = SGME_train2(R,labels,nu,zdim,reg_weight,niters,test)

if nargin==0
  test_this();
  return;
end

[rdim,n] = size(R);
m = max(labels);
blocks = sparse(labels,1:n,true,m+1,n);
num = find(blocks(:));

%Can we choose maximum likelihood prior parameters, given labels?
%For now: prior expected number of speakers = m
prior = create_PYCRP([],0,m,n);
logPrior = prior.GibbsMatrix(labels);

delta = rdim - zdim;
assert(delta>0);

%initialize
T0 = randn(rdim,rdim);
F0 = randn(rdim,zdim);
H0 = randn(rdim-zdim,rdim);

szT = numel(T0);
szF = numel(F0);
szH = numel(H0);

w0 = pack(T0,F0,H0);

if exist('test','var') && test
  testBackprop(@objective,w0);
  return;
end

mem = 20;
stpsz0 = 1e-3;
timeout = 5*60;
w = L_BFGS(@objective,w0,niters,timeout,mem,stpsz0);

[T,F,H] = unpack(w);
d = diag(F.'*F);

% model.logexpectation = @(A,b) SGME_logexpectation(A,b,d);
% model.extract = @(R) SGME_extr(T,F,nu,R);
% model.objective = @(T,F) objective(pack(T,F));
% model.d = d;
model = create_HTPLDA_SGME_backend2(nu,T,F,H);

  function w = pack(T,F,H)
    w = [T(:);F(:);H(:)];
  end

  function [T,F,H] = unpack(w)
    at = 1:szT;
    T = reshape(w(at),rdim,rdim);
    at = szT + (1:szF);
    F = reshape(w(at),rdim,zdim);
    at = szT + szF + (1:szH);
    H = reshape(w(at),rdim-zdim,rdim);
  end

  function [y,back] = objective(w)
    [T,F,H] = unpack(w);
    [A,b,d,reg,back1] = SGME_extr(T,F,H,nu,R);
    [PsL,back2] = SGME_logPsL(A,b,d,blocks,labels,num,logPrior);
    y = reg_weight*reg - PsL;
    back = @back_this;

    function [dw] = back_this(dy)
      %dPsL = -dy;
      [dA,db,dd] = back2(-dy);
      [dT,dF,dH] = back1(dA,db,dd,reg_weight*dy);
      dw = pack(dT,dF,dH);
    end
  end

end

function test_this()

zdim = 2;
rdim = 4;
n = 5;
m = 3;
prior = create_PYCRP([],0,m,n);
labels = prior.sample(n);

nu = pi;
R = randn(rdim,n);
reg_weight = exp(1);

test = true;
niters = [];
SGME_train2(R,labels,nu,zdim,reg_weight,niters,test);

end
github
bsxfan/meta-embeddings-master
SGME_extr_full_slightly_slower.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/SGME_extr_full_slightly_slower.m
1,645
utf_8
6e09f32760de243166ccff8266563fe0
function [A,b,B0,back] = SGME_extr_full_slightly_slower(T,F,nu,R)

if nargin==0
  test_this();
  return;
end

[rdim,zdim] = size(F);
nuprime = nu + rdim - zdim;
TR = T*R;
A0 = F.'*TR;
B0 = F.'*F;
S = B0\A0;
den = nu + sum(TR.^2,1) - sum(A0.*S,1);
b = nuprime ./ den;
A = bsxfun(@times,b,A0);

back = @back_this;

  function [dT,dF] = back_this(dA,db,dB0)

    %A = bsxfun(@times,b,A0)
    db = db + sum(dA.*A0,1);
    dA0 = bsxfun(@times,b,dA);

    %b = nuprime ./ den
    dden = -(db.*b)./den;

    %den = nu + sum(TR.^2,1) - sum(A0.*S,1)
    dTR = bsxfun(@times,(2*dden),TR);
    dA0 = dA0 - bsxfun(@times,dden,S);
    dS = -bsxfun(@times,dden,A0);

    %S = B0\A0
    dA0_2 = B0\dS;
    dA0 = dA0 + dA0_2;
    dB0 = dB0 - dA0_2*S.';

    %B0 = F.'*F
    dF = F*(dB0+dB0.');

    %A0 = F.'*TR
    dF = dF + TR*dA0.';
    dTR = dTR + F*dA0;

    %TR = T*R
    dT = dTR*R.';

  end

end

function test_this()

zdim = 2;
rdim = 5;
n = 4;

F = randn(rdim,zdim);
T = randn(rdim,rdim);
nu = pi;
R = randn(rdim,n);

f = @(T,F) SGME_extr_full_slightly_slower(T,F,nu,R);

testBackprop_multi(f,3,{T,F},{1,1});

end
github
bsxfan/meta-embeddings-master
SGME_train_MXE2.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/SGME_train_MXE2.m
2,510
utf_8
b71a75273c325f1e45edf8af7e971f30
function model = SGME_train_MXE2(R,labels,nu,zdim,niters,timeout,test)

if nargin==0
  test_this();
  return;
end

[rdim,n] = size(R);
m = max(labels);
blocks = sparse(labels,1:n,true,m,n);
counts = sum(blocks,2);
logPrior = log(counts);

delta = rdim - zdim;
assert(delta>0);

%initialize
P0 = randn(zdim,rdim);
H0 = randn(delta,rdim);
sqrtd0 = rand(zdim,1);
As0 = randn(zdim,m);
sqrtBs0 = randn(1,m);

szP = numel(P0);
szH = numel(H0);
szd = numel(sqrtd0);
szAs = numel(As0);
szBs = numel(sqrtBs0);

w0 = pack(P0,H0,sqrtd0,As0,sqrtBs0);

if exist('test','var') && test
  testBackprop(@objective,w0);
  return;
end

mem = 20;
stpsz0 = 1e-3;
%timeout = 5*60;
w = L_BFGS(@objective,w0,niters,timeout,mem,stpsz0);

[P,H,sqrtd,As,sqrtBs] = unpack(w);
d = sqrtd.^2;

model.logexpectation = @(A,b) SGME_logexpectation(A,b,d);
model.extract = @(R) SGME_extract(P,H,nu,R);
model.d = d;

  function w = pack(P,H,d,As,Bs)
    w = [P(:);H(:);d(:);As(:);Bs(:)];
  end

  function [P,H,d,As,Bs] = unpack(w)
    at = 1:szP;
    P = reshape(w(at),zdim,rdim);
    at = szP + (1:szH);
    H = reshape(w(at),delta,rdim);
    at = szP + szH + (1:szd);
    d = w(at);
    at = szP + szH + szd + (1:szAs);
    As = reshape(w(at),zdim,m);
    at = szP + szH + szd + szAs + (1:szBs);
    Bs = w(at).';
  end

  function [y,back] = objective(w)
    [P,H,sqrtd,As,sqrtBs] = unpack(w);
    [A,b,back1] = SGME_extract(P,H,nu,R);
    d = sqrtd.^2;
    Bs = sqrtBs.^2;
    [y,back2] = SGME_MXE2(A,b,d,As,Bs,labels,logPrior);
    back = @back_this;

    function [dw] = back_this(dy)
      [dA,db,dd,dAs,dBs] = back2(dy);
      dsqrtd = 2*sqrtd.*dd;
      dsqrtBs = 2*sqrtBs.*dBs;
      [dP,dH] = back1(dA,db);
      dw = pack(dP,dH,dsqrtd,dAs,dsqrtBs);
    end
  end

end

function test_this()

zdim = 2;
rdim = 4;
n = 5;
m = 3;
prior = create_PYCRP([],0,m,n);
labels = prior.sample(n);

nu = pi;
R = randn(rdim,n);

test = true;
niters = [];
timeout = [];
SGME_train_MXE2(R,labels,nu,zdim,niters,timeout,test);

end
github
bsxfan/meta-embeddings-master
asChol.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/asChol.m
2,365
utf_8
ea86b12ae1d2edfe698ac2881861b35f
function CA = asChol(A)

if nargin==0
  test_this();
  return;
end

if isreal(A)
  C = chol(A);     %C'C = A
  r = true;
else
  [L,U] = lu(A);   % LU = A
  r = false;
end
dim = size(A,1);

CA.logdet = @logdet;
CA.solve = @solve;

  function [y,back] = logdet()
    if r
      y = 2*sum(log(diag(C)));
    else
      y = sum(log(diag(U).^2))/2;
    end
    back = @(dy) solve(dy*speye(dim));
  end

  function [Y,back] = solve(RHS)
    if r
      Y = C\(C'\RHS);
    else
      Y = U\(L\RHS);
    end
    back = @(dY) back_solve(dY,Y);
  end

  function Y = solveT(RHS)   %A'\RHS, for LU case
    Y = L.'\(U.'\RHS);
  end

  function [dRHS,dA] = back_solve(dY,Y)
    if r
      dRHS = solve(dY);
      if nargout >= 2
        dA = (-dRHS)*Y.';
      end
    else
      dRHS = solveT(dY);
      if nargout >= 2
        dA = (-dRHS)*Y.';
      end
    end
  end

end

function [y,back] = logdettestfun(A)
  CA = asChol(A*A.');
  [y,back1] = CA.logdet();
  sym = @(DY) DY + DY.';
  back = @(dy) sym(back1(dy))*A;
end

function [Y,back] = solvetestfun(RHS,A)
  CA = asChol(A*A.');
  [Y,back1] = CA.solve(RHS);
  back = @(dY) back_solvetestfun(dY);
  function [dRHS,dA] = back_solvetestfun(dY)
    [dRHS,dAA] = back1(dY);
    dA = (dAA+dAA.')*A;
  end
end

function test_this()

fprintf('Test function values:\n');
dim = 5;
RHS = rand(dim,1);
A = randn(dim);A = A*A';
CA = asChol(A);
[log(det(A)),CA.logdet()]
[A\RHS,CA.solve(RHS)]

A = complex(randn(dim),zeros(dim));
CA = asChol(A);
[log(abs(det(A))),CA.logdet()]
[A\RHS,CA.solve(RHS)]

A = randn(dim,2*dim);A = A*A';

fprintf('\n\n\nTest logdet backprop (complex step) :\n');
testBackprop(@logdettestfun,A);

fprintf('\n\n\nTest logdet backprop (real step) :\n');
testBackprop_rs(@logdettestfun,A,1e-4);

fprintf('\n\n\nTest solve backprop (complex step) :\n');
testBackprop(@solvetestfun,{RHS,A},{1,1});

fprintf('\n\n\nTest solve backprop (real step) :\n');
testBackprop_rs(@solvetestfun,{RHS,A},1e-4,{1,1});

end
github
bsxfan/meta-embeddings-master
SGME_logPsL.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/SGME_logPsL.m
4,123
utf_8
ab3281c77517b744131e5f2929860d04
function [y,back] = SGME_logPsL(A,B,d,blocks,poi,num,logPrior)

assert(isreal(A));
assert(isreal(B));
assert(isreal(d));
assert(isreal(logPrior));

if nargin==0
  test_this();
  return;
end

if isempty(blocks)
  m = max(poi);
  n = length(poi);
  blocks = sparse(poi,1:n,true,m+1,n);
  num = find(blocks(:));
else
  m = size(blocks,1) - 1;
end

if isstruct(logPrior)   % then it is prior
  prior = logPrior;
  logPrior = prior.GibbsMatrix(poi);
end

At = A*blocks.';
Bt = B*blocks.';
[LEt,back1] = SGME_logexpectation(At,Bt,d);
[LEc,back2] = SGME_logexpectation(A,B,d);
Amin = At(:,poi) - A;
Bmin = Bt(:,poi) - B;
[LEmin,back3] = SGME_logexpectation(Amin,Bmin,d);

LLR = zeros(size(blocks));
for i=1:m
  tar = full(blocks(i,:));
  LLR(i,tar) = LEt(i) - LEmin(tar) - LEc(tar);

  non = ~tar;
  Aplus = bsxfun(@plus,A(:,non),At(:,i));
  Bplus = bsxfun(@plus,B(:,non),Bt(:,i));
  LLR(i,non) = SGME_logexpectation(Aplus,Bplus,d) - LEt(i) - LEc(non);
end
%y = LLR;

[y,back5] = sumlogsoftmax(LLR + logPrior,num);

assert(isreal(y));

back = @back_this;

  function [dA,dB,dd] = back_this(dy)

    dA = zeros(size(A));
    dB = zeros(size(B));
    dd = zeros(size(d));
    dLEt = zeros(size(LEt));
    dLEmin = zeros(size(LEmin));
    dLEc = zeros(size(LEmin));
    dAt = zeros(size(At));
    dBt = zeros(size(Bt));

    %[y,back5] = sumlogsoftmax(LLR + logPrior,num);
    dLLR = back5(dy);

    for k=1:m
      tar = full(blocks(k,:));

      %LLR(k,tar) = LEt(k) - LEmin(tar) - LEc(tar);
      row = dLLR(k,tar);
      dLEt(k) = dLEt(k) + sum(row);
      dLEmin(tar) = dLEmin(tar) - row;
      dLEc(tar) = dLEc(tar) - row;

      non = ~tar;
      Aplus = bsxfun(@plus,A(:,non),At(:,k));
      Bplus = bsxfun(@plus,B(:,non),Bt(:,k));

      %LLR(k,non) = SGME_logexpectation(Aplus,Bplus,d) - LEt(k) - LEc(non);
      [~,back4] = SGME_logexpectation(Aplus,Bplus,d);
      row = dLLR(k,non);
      [dAplus,dBplus,dd4] = back4(row);
      dLEt(k) = dLEt(k) - sum(row);
      dLEc(non) = dLEc(non) - row;
      dd = dd + dd4;
      dA(:,non) = dA(:,non) + dAplus;
      dB(:,non) = dB(:,non) + dBplus;
      dAt(:,k) = dAt(:,k) + sum(dAplus,2);
      dBt(:,k) = dBt(:,k) + sum(dBplus,2);
    end

    %[LEmin,back3] = SGME_logexpectation(Amin,Bmin,d);
    [dAmin,dBmin,dd3] = back3(dLEmin);
    dd = dd + dd3;

    %Amin = At(:,poi) - A;
    %Bmin = Bt(:,poi) - B;
    dA = dA - dAmin;
    dB = dB - dBmin;
    dAt = dAt + dAmin*blocks.';
    dBt = dBt + dBmin*blocks.';

    %[LEc,back2] = SGME_logexpectation(A,B,d);
    [dA2,dB2,dd2] = back2(dLEc);
    dA = dA + dA2;
    dB = dB + dB2;
    dd = dd + dd2;

    %[LEt,back1] = SGME_logexpectation(At,Bt,d);
    [dAt1,dBt1,dd1] = back1(dLEt);
    dAt = dAt + dAt1;
    dBt = dBt + dBt1;
    dd = dd + dd1;

    %At = A*blocks.';
    %Bt = B*blocks.';
    dA = dA + dAt*blocks;
    dB = dB + dBt*blocks;

    assert(isreal(dA));
    assert(isreal(dB));
    assert(isreal(dd));

  end

end

function test_this()

em = 4;
n = 7;
dim = 2;

prior = create_PYCRP([],0,em,n);
poi = prior.sample(n);
m = max(poi);
blocks = sparse(poi,1:n,true,m+1,n);
num = find(blocks(:));
logPrior = prior.GibbsMatrix(poi);

d = rand(dim,1);
A = randn(dim,n);
b = rand(1,n);

%f = @(A,b,d) SGME_logexpectation(A,b,d);
%testBackprop(f,{A,b,d},{1,1,1});

g = @(A,b,d) SGME_logPsL(A,b,d,blocks,poi,num,logPrior);
testBackprop(g,{A,b,d},{1,1,1});

end
github
bsxfan/meta-embeddings-master
sumlogsoftmax.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/sumlogsoftmax.m
517
utf_8
5591b4f9a440f97900ac26aefd1faf62
function [y,back] = sumlogsoftmax(X,num)

if nargin==0
  test_this();
  return;
end

[den,back1] = sumlogsumexp(X);
y = sum(X(num)) - den;

back = @back_this;

  function dX = back_this(dy)
    dX = back1(-dy);
    dX(num) = dX(num) + dy;
  end

end

function test_this()

m = 3;
n = 5;
X = randn(m,n);
labels = randi(m,1,n);
num = sub2ind(size(X),labels,1:n);
testBackprop(@(X)sumlogsoftmax(X,num),X);

end
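A quick numerical check (values illustrative): the function returns the sum of per-column log-softmax values at the indexed entries, e.g. the total log-probability of the true labels.

X = randn(3,5);
labels = randi(3,1,5);
num = sub2ind(size(X),labels,1:5);
LS = bsxfun(@minus,X,log(sum(exp(X),1)));   % log-softmax per column
abs(sumlogsoftmax(X,num) - sum(LS(num)))    % ~1e-15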
github
bsxfan/meta-embeddings-master
create_SGME_calculator.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/create_SGME_calculator.m
3,098
utf_8
22c43d447699e600cb1e2c8a1f4c4a2d
function [SGME,LEfun] = create_SGME_calculator(E)

if nargin==0
  test_this();
  return;
end

[V,D] = eig(E);   % E = VDV'
d = diag(D);      % eigenvalues
dd = zeros(size(d));   %gradient w.r.t. d backpropagated from log_expectations
zdim = length(d);
ii = reshape(logical(eye(zdim)),[],1);

SGME.SGME2GME = @SGME2GME;
SGME.log_expectations = @log_expectations;
SGME.logLR = @logLR;
SGME.plotAll = @plotAll;
SGME.V = V;
SGME.d = d;
LEfun = @LE;

SGME.reset_parameter_gradient = @reset_parameter_gradient;
SGME.get_parameter_gradient = @get_parameter_gradient;

  function reset_parameter_gradient()
    dd(:) = 0;
  end

  function dd1 = get_parameter_gradient()
    dd1 = dd;
  end

  function plotAll(A,b,matlab_colours, tikz_colours, rotate)
    if ~exist('rotate','var') || isempty(rotate)
      rotate = true;
    end
    if ~exist('tikz_colours','var')
      tikz_colours = [];
    end
    [A,B] = SGME2GME(A,b,rotate);
    n = length(b);
    for i=1:n
      Bi = reshape(B(:,i),zdim,zdim);
      mu = Bi\A(:,i);
      if ~isempty(tikz_colours)
        plotGaussian(mu,inv(Bi),tikz_colours{i},matlab_colours{i});
      else
        plotGaussian(mu,inv(Bi),[],matlab_colours{i});
      end
    end
  end

  function [A,B] = SGME2GME(A,b,rotate)
    B = zeros(zdim*zdim,length(b));
    B(ii,:) = bsxfun(@times,b,d);
    if ~exist('rotate','var') || isempty(rotate) || rotate   %rotate by default
      A = V*A;
      for j = 1:size(B,2)
        BR = V*reshape(B(:,j),zdim,zdim)*V.';
        B(:,j) = BR(:);
      end
    end
  end

  function [y,back] = log_expectations(A,b)
    [y,back0] = LE(A,b,d);
    back = @back_this;
    function [dA,db] = back_this(dy)
      [dA,db,dd0] = back0(dy);
      dd = dd + dd0;
    end
  end

  function Y = logLR(left,right)
    B = bsxfun(@plus,left.b.',right.b);
    [m,n] = size(B);
    Y = zeros(m,n);
    for i=1:m
      AA = bsxfun(@plus,left.A(:,i),right.A);
      Y(i,:) = log_expectations(AA,B(i,:));
    end
  end

end

function [y,back] = LE(A,b,d)
bd = bsxfun(@times,b,d);
logdets = sum(log1p(bd),1);
den = 1 + bd;
Aden = A./den;
Q = sum(A.*Aden,1);
%Q = sum((A.^2)./den,1);
y = (Q-logdets)/2;

back = @back_LE;

  function [dA,db,dd] = back_LE(dy)
    dQ = dy/2;
    %dlogdets = - dQ;
    dAden = bsxfun(@times,dQ,A);
    dA = bsxfun(@times,dQ,Aden);
    dA2 = dAden./den;
    dA = dA + dA2;
    dden = -Aden.*dA2;
    dbd = dden - bsxfun(@rdivide,dQ,den);   %dlogdets = -dQ
    db = d.' * dbd;
    dd = dbd * b.';
  end

end

function test_this()

m = 3;
n = 5;
A = randn(m,n);
b = rand(1,n);
d = rand(m,1);
testBackprop(@LE,{A,b,d},{1,1,1});

end
github
bsxfan/meta-embeddings-master
logsumexp.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/logsumexp.m
456
utf_8
ba0f6dd080d4fa7a7cd270a5055c5980
function [y,back] = logsumexp(X)

if nargin==0
  test_this();
  return;
end

mx = max(X,[],1);
y = bsxfun(@plus,log(sum(exp(bsxfun(@minus,X,mx)),1)),mx);

back = @back_this;

  function dX = back_this(dy)
    dX = bsxfun(@times,dy,exp(bsxfun(@minus,X,y)));
  end

end

function test_this()

m = 3;
n = 5;
X = randn(m,n);
testBackprop(@(X)logsumexp(X),X);

end
github
bsxfan/meta-embeddings-master
create_GPLDA_extractor.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/create_GPLDA_extractor.m
710
utf_8
2bd9a131a04f720c722e16dd00900f44
function PLDA = create_GPLDA_extractor(F,W)

if nargin==0
  test_this();
  return;
end

[rdim,zdim] = size(F);
assert(rdim>zdim);

if ~exist('W','var') || isempty(W)
  W = speye(rdim);
end

E = F.'*W*F;
SGME = create_SGME_calculator(E);
V = SGME.V;   % E = VDV'
VFW = V.'*F.'*W;

PLDA.extractSGMEs = @extractSGMEs;
PLDA.SGME = SGME;
PLDA.getPd = @getPd;

  function [P,d] = getPd()
    P = VFW;
    d = SGME.d;
  end

  function [A,b] = extractSGMEs(R)
    b = ones(1,size(R,2));
    A = VFW*R;
  end

end

function test_this()
error('test_this not implemented');
end
github
bsxfan/meta-embeddings-master
VB_vs_PsL_demo.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/VB_vs_PsL_demo.m
7,289
utf_8
0d789457d9783ea1df326fff1db09cb6
function VB_vs_PsL_demo
% Demo and test code for VB training and SGME scoring of the HT-PLDA model.
%
% Training and evaluation data are (independently) sampled from a model with
% randomly generated parameters. A VB algorithm is used to estimate the
% parameters of this model from the training data. The accuracy of the
% trained (VB) model is compared (on both train and evaluation data) against
% the (oracle) model that generated the data.
%
% The accuracy is given in terms of the calibration-sensitive binary cross
% entropy (BXE) and (if available) also equal-error-rate EER.
%
% If the BOSARIS Toolkit (https://sites.google.com/site/bosaristoolkit/) is
% available, BXE is shown not only for the 'raw' scores as given by this
% model, but also for PAV-recalibrated scores, to give 'minBXE'. The latter
% is what BXE could have been if calibration had been ideal.

    % Assemble model to generate data
    zdim = 2;   % speaker identity variable size
    rdim = 20;  % i-vector size. required: rdim > zdim
    nu = 3;     % required: nu >= 1, integer, degrees of freedom for heavy-tailed channel noise
    fscal = 3;  % increase fscal to move speakers apart

    % zdim = 100;    % speaker identity variable size
    % rdim = 512;    % i-vector size. required: rdim > zdim
    % nu = 3;        % required: nu >= 1, integer, degrees of freedom for heavy-tailed channel noise
    % fscal = 1/100; % increase fscal to move speakers apart

    F = randn(rdim,zdim)*fscal;
    W = randn(rdim,rdim+1); W = W*W.'; W = (rdim/trace(W))*W;

    model1 = create_HTPLDA_SGME_backend(nu,F,W);  % oracle model

    % Generate synthetic labels
    nspeakers = 1000;
    recordings_per_speaker = 10;
    N = nspeakers*recordings_per_speaker;
    ilabels = repmat(1:nspeakers,recordings_per_speaker,1);
    ilabels = ilabels(:).';  % integer speaker labels
    hlabels = sparse(ilabels,1:N,true,nspeakers,N);  % speaker label matrix with one-hot columns

    % and some training data
    Z = randn(zdim,nspeakers);
    Train = F*Z*hlabels + sample_HTnoise(nu,rdim,N,W);

    % train
    fprintf('*** Training on %i i-vectors of %i speakers ***\n',N,nspeakers);
    niters = 10;
    % Weights can be used to change relative importance of subsets of the training data
    % weights = 1 + rand(1,N); %In practice, obviously not like this! This is just a quick and dirty test.
    % [model2,obj] = HTPLDA_SGME_train_VB(Train,hlabels,nu,zdim,niters,[],[],weights);
    [model2,obj] = HTPLDA_SGME_train_VB(Train,hlabels,nu,zdim,niters);
    close all;
    plot(obj); title('VB lower bound');

    niters = 1000;
    reg_weight = 100;
    model3 = SGME_train2(Train,ilabels,nu,zdim,reg_weight,niters);

    % Generate independent evaluation data with new speakers
    nspeakers = 300;

    % Generate target speakers
    ntar = nspeakers;
    Ztar = randn(zdim,ntar);

    % and some single enrollment data for them
    Enroll1 = F*Ztar + sample_HTnoise(nu,rdim,ntar,W);  % 1 enrollment / speaker

    % and some double enrollments
    ne = 2;
    Flags = repmat(1:ntar,ne,1);
    Flags = sparse(Flags(:),1:ne*ntar,true,ntar,ne*ntar);
    Enroll2 = F*Ztar*Flags + sample_HTnoise(nu,rdim,ne*ntar,W);  % 2 enrollments / speaker

    % and some test data
    recordings_per_speaker = 10;
    N = nspeakers*recordings_per_speaker;
    ilabels = repmat(1:nspeakers,recordings_per_speaker,1);
    ilabels = ilabels(:).';  % integer speaker labels
    hlabels = sparse(ilabels,1:N,true,nspeakers,N);  % speaker label matrix with one-hot columns
    Test = F*Ztar*hlabels + sample_HTnoise(nu,rdim,N,W);

    fprintf('\n\n*** Evaluation on %i target speakers with single/double enrollments and %i test recordings ***\n',nspeakers,N);

    useBOSARIS = exist('opt_loglr','file');

    if useBOSARIS
        fprintf('  minBXE in brackets\n')
        BXE = zeros(3,2);
        minBXE = zeros(3,2);
        EER = zeros(3,2);

        Scores = model1.score_trials(Enroll1,[],Test);
        [BXE(1,1),minBXE(1,1),EER(1,1)] = calcBXE(Scores,hlabels);
        Scores = model1.score_trials(Enroll2,Flags,Test);
        [BXE(1,2),minBXE(1,2),EER(1,2)] = calcBXE(Scores,hlabels);

        Scores = model2.score_trials(Enroll1,[],Test);
        [BXE(2,1),minBXE(2,1),EER(2,1)] = calcBXE(Scores,hlabels);
        Scores = model2.score_trials(Enroll2,Flags,Test);
        [BXE(2,2),minBXE(2,2),EER(2,2)] = calcBXE(Scores,hlabels);

        Scores = model3.score_trials(Enroll1,[],Test);
        [BXE(3,1),minBXE(3,1),EER(3,1)] = calcBXE(Scores,hlabels);
        Scores = model3.score_trials(Enroll2,Flags,Test);
        [BXE(3,2),minBXE(3,2),EER(3,2)] = calcBXE(Scores,hlabels);

        fprintf('oracle: single enroll BXE = %g (%g), double enroll BXE = %g (%g)\n',BXE(1,1),minBXE(1,1),BXE(1,2),minBXE(1,2));
        fprintf('VB    : single enroll BXE = %g (%g), double enroll BXE = %g (%g)\n',BXE(2,1),minBXE(2,1),BXE(2,2),minBXE(2,2));
        fprintf('PsL   : single enroll BXE = %g (%g), double enroll BXE = %g (%g)\n',BXE(3,1),minBXE(3,1),BXE(3,2),minBXE(3,2));
        fprintf('oracle: single enroll EER = %g, double enroll EER = %g\n',EER(1,1),EER(1,2));
        fprintf('VB    : single enroll EER = %g, double enroll EER = %g\n',EER(2,1),EER(2,2));
        fprintf('PsL   : single enroll EER = %g, double enroll EER = %g\n',EER(3,1),EER(3,2));

    else  % no BOSARIS
        tic
        BXE = zeros(2,2);
        Scores = model1.score_trials(Enroll1,[],Test);
        BXE(1,1) = calcBXE(Scores,hlabels);
        Scores = model1.score_trials(Enroll2,Flags,Test);
        BXE(1,2) = calcBXE(Scores,hlabels);
        Scores = model2.score_trials(Enroll1,[],Test);
        BXE(2,1) = calcBXE(Scores,hlabels);
        Scores = model2.score_trials(Enroll2,Flags,Test);
        BXE(2,2) = calcBXE(Scores,hlabels);
        toc
        fprintf('oracle: single enroll BXE = %g, double enroll BXE = %g\n',BXE(1,1),BXE(1,2));
        fprintf('VB    : single enroll BXE = %g, double enroll BXE = %g\n',BXE(2,1),BXE(2,2));
    end

end

function [bxe,min_bxe,EER] = calcBXE(Scores,labels)
% Binary cross-entropy, with operating point at target prior at true
% proportion, normalized so that llr = 0 gives bxe = 1.
    tar = Scores(labels);
    non = Scores(~labels);
    ofs = log(length(tar)) - log(length(non));
    bxe = mean([softplus(-tar - ofs).',softplus(non + ofs).']) / log(2);
    if nargout>=2
        [tar,non] = opt_loglr(tar.',non.','raw');
        tar = tar';
        non = non.';
        min_bxe = mean([softplus(-tar - ofs).',softplus(non + ofs).']) / log(2);
    end
    if nargout>=3
        EER = eer(tar,non);
    end
end

function y = softplus(x)
% y = log(1+exp(x));
    y = x;
    f = find(x<30);
    y(f) = log1p(exp(x(f)));
end

function X = sample_HTnoise(nu,dim,n,W)
% Sample n heavy-tailed dim-dimensional variables. (Only for integer nu.)
%
% Inputs:
%   nu: integer nu >= 1, degrees of freedom of resulting t-distribution
%   n: number of samples
%   W: precision matrix for t-distribution
%
% Output:
%   X: dim-by-n samples
    cholW = chol(W);
    precisions = mean(randn(nu,n).^2,1);
    std = 1./sqrt(precisions);
    X = cholW\bsxfun(@times,std,randn(dim,n));
end
github
bsxfan/meta-embeddings-master
LinvSR.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/MLNDA/LinvSR.m
562
utf_8
cc684a75c7911f7d518550620e6b8fc7
function [Y,back] = LinvSR(L,S,R)

    if nargin==0
        test_this();
        return;
    end

    Z = S\R;
    Y = L*Z;

    back = @back_this;
    function [dL,dS,dR] = back_this(dY)
        % Y = L*Z
        dL = dY*Z.';
        dZ = L.'*dY;
        % Z = S\R
        dR = S.'\dZ;
        dS = -dR*Z.';
    end

end

function test_this()
    m = 3;
    n = 4;
    L = randn(m,n);
    R = randn(n,m);
    S = randn(n,n);
    fprintf('test slow derivatives:\n');
    testBackprop(@LinvSR,{L,S,R});
end
github
bsxfan/meta-embeddings-master
test_MLNDA4.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/MLNDA/test_MLNDA4.m
2,622
utf_8
506e855474d332d8f87a49df75807345
function test_MLNDA4()

    % Assemble model to generate data
    big = true;
    nu = inf;  % required: nu >= 1, integer, degrees of freedom for heavy-tailed channel noise
    if ~big
        zdim = 2;     % speaker identity variable size
        rdim = 20;    % i-vector size. required: rdim > zdim
        fscal = 3;    % increase fscal to move speakers apart
    else
        zdim = 100;   % speaker identity variable size
        rdim = 512;   % i-vector size. required: rdim > zdim
        fscal = 1/20; % increase fscal to move speakers apart
    end
    F = randn(rdim,zdim)*fscal;
    W = randn(rdim,2*rdim); W = W*W.'; W = (rdim/trace(W))*W;
    %model1 = create_HTPLDA_SGME_backend(nu,F,W); %oracle model

    % Generate synthetic labels
    nspeakers = 1000;
    recordings_per_speaker = 10;
    N = nspeakers*recordings_per_speaker;
    ilabels = repmat(1:nspeakers,recordings_per_speaker,1);
    ilabels = ilabels(:).';  % integer speaker labels
    hlabels = sparse(ilabels,1:N,true,nspeakers,N);  % speaker label matrix with one-hot columns

    % and some training data
    Z = randn(zdim,nspeakers);
    R = F*Z*hlabels + sample_HTnoise(nu,rdim,N,W);

    rank = 3;
    [f,fi,paramsz] = create_nice_Trans(rdim,rank);
    oracle = randn(paramsz,1);
    oracle(1) = sqrt(pi);

    r = randn(rdim,1);
    t = f(oracle,r);
    rr = fi(oracle,t);
    [r,t,rr]

    T = f(oracle,R);

    Rtrace = trace(F*F.'+W);
    Ttrace = sum(T(:).^2)/size(T,2);
    offset = mean(T,2);
    %params0 = [log(sqrt(Rtrace/Ttrace));randn(rdim*rank,1)/100;ones(rank,1);offset];
    params0 = [sqrt(sqrt(Rtrace/Ttrace));randn(rdim*rank,1)/100;ones(rank,1);offset];

    obj = @(params) MLNDAobj(T,hlabels,F,W,fi,params);

    obj_oracle = obj(oracle),
    obj_init = obj(params0),

    maxiters = 1000;
    timeout = 5*60;
    [trans,params] = train_ML_trans(F,W,T,hlabels,fi,params0,maxiters,timeout);

    obj_oracle = obj(oracle),
    obj_init = obj(params0),
    obj_final = obj(params),

end

function X = sample_HTnoise(nu,dim,n,W)
% Sample n heavy-tailed dim-dimensional variables. (Only for integer nu.)
%
% Inputs:
%   nu: integer nu >= 1, degrees of freedom of resulting t-distribution
%   n: number of samples
%   W: precision matrix for t-distribution
%
% Output:
%   X: dim-by-n samples
    cholW = chol(W);
    if isinf(nu)
        precisions = ones(1,n);
    else
        precisions = mean(randn(nu,n).^2,1);
    end
    std = 1./sqrt(precisions);
    X = cholW\bsxfun(@times,std,randn(dim,n));
end
github
bsxfan/meta-embeddings-master
test_MLNDA2.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/MLNDA/test_MLNDA2.m
2,499
utf_8
e3f3b9a0246c9e728b4717bef4a9f5cf
function test_MLNDA2()

    % Assemble model to generate data
    big = true;
    nu = inf;  % required: nu >= 1, integer, degrees of freedom for heavy-tailed channel noise
    if ~big
        zdim = 2;     % speaker identity variable size
        rdim = 20;    % i-vector size. required: rdim > zdim
        fscal = 3;    % increase fscal to move speakers apart
    else
        zdim = 100;   % speaker identity variable size
        rdim = 512;   % i-vector size. required: rdim > zdim
        fscal = 1/20; % increase fscal to move speakers apart
    end
    F = randn(rdim,zdim)*fscal;
    W = randn(rdim,2*rdim); W = W*W.'; W = (rdim/trace(W))*W;
    %model1 = create_HTPLDA_SGME_backend(nu,F,W); %oracle model

    % Generate synthetic labels
    nspeakers = 100;
    recordings_per_speaker = 10;
    N = nspeakers*recordings_per_speaker;
    ilabels = repmat(1:nspeakers,recordings_per_speaker,1);
    ilabels = ilabels(:).';  % integer speaker labels
    hlabels = sparse(ilabels,1:N,true,nspeakers,N);  % speaker label matrix with one-hot columns

    % and some training data
    Z = randn(zdim,nspeakers);
    R = F*Z*hlabels + sample_HTnoise(nu,rdim,N,W);

    rank = 3;
    [f,fi,paramsz] = create_sandwich_trans(rdim,rank);
    oracle = randn(paramsz,1);

    r = randn(rdim,1);
    t = f(oracle,r);
    rr = fi(oracle,t);
    [r,t,rr]

    T = f(oracle,R);

    Rtrace = trace(F*F.'+W);
    Ttrace = sum(T(:).^2)/size(T,2);
    sigma = 1;
    L = randn(rdim,rank)/100;
    RR = L.';
    D = ones(rdim,1)*Ttrace/Rtrace;
    offset = mean(T,2);
    params0 = [sigma;L(:);RR(:);offset;D];

    obj = @(params) MLNDAobj(T,hlabels,F,W,fi,params);

    obj_oracle = obj(oracle),
    obj_init = obj(params0),

    maxiters = 10000;
    timeout = 5*60;
    trans = train_ML_trans(F,W,T,hlabels,fi,params0,maxiters,timeout);

    obj_oracle = obj(oracle),

end

function X = sample_HTnoise(nu,dim,n,W)
% Sample n heavy-tailed dim-dimensional variables. (Only for integer nu.)
%
% Inputs:
%   nu: integer nu >= 1, degrees of freedom of resulting t-distribution
%   n: number of samples
%   W: precision matrix for t-distribution
%
% Output:
%   X: dim-by-n samples
    cholW = chol(W);
    if isinf(nu)
        precisions = ones(1,n);
    else
        precisions = mean(randn(nu,n).^2,1);
    end
    std = 1./sqrt(precisions);
    X = cholW\bsxfun(@times,std,randn(dim,n));
end
github
bsxfan/meta-embeddings-master
logdetNice.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/MLNDA/logdetNice.m
864
utf_8
3dbf0e9b2c0ef0c688e5d05ea7956379
function [logdet,back] = logdetNice(sigma,R,d)

    if nargin==0
        test_this();
        return;
    end

    RR = R.'*R;
    S = RR/sigma + diag(1./d);
    [L,U] = lu(S);
    dim = size(R,1);
    logdet = ( sum(log(diag(U).^2)) + sum(log(d.^2)) + dim*log(sigma^2) )/2;

    back = @back_this;
    function [dsigma,dR,dd] = back_this(dlogdet)
        dS = dlogdet*(inv(U)/L).';
        dd = dlogdet./d;
        dsigma = dim*dlogdet/sigma;
        dR = R*(dS + dS.')/sigma;
        dsigma = dsigma - (RR(:).'*dS(:))/sigma^2;
        dd = dd - diag(dS)./d.^2;
    end

end

function test_this()
    dim = 5;
    rank = 2;
    sigma = randn;
    R = randn(dim,rank);
    d = randn(rank,1);
    M = sigma*eye(dim) + R*diag(d)*R.';
    [log(abs(det(M))),logdetNice(sigma,R,d)]
    testBackprop(@logdetNice,{sigma,R,d})
end
github
bsxfan/meta-embeddings-master
mvn_obj.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/MLNDA/mvn_obj.m
1,588
utf_8
d63d45665a8d9f7c779a316af8d9ceb1
function [y,back] = mvn_obj(T,fi,params)

    if nargin==0
        test_this2();
        return;
    end

    [R,logdetJ,back2] = fi(params,T);
    [llh,back1] = smvn_llh(R);
    y = logdetJ - llh;

    back = @back_this;
    function dparams = back_this(dy)
        dlogdetJ = dy;
        dR = back1(-dy);
        dparams = back2(dR,dlogdetJ);
    end

end

function test_this()
    dim = 5;
    n = 20;
    rank = 2;
    R = randn(dim,n);
    [f,fi,sz] = create_nice_Trans(dim,rank);
    params = randn(sz,1);
    T = f(params,R);
    Ri = fi(params,T);
    test_inverse = max(abs(Ri(:)-R(:))),
    testBackprop(@(params)mvn_obj(T,fi,params),{params});
end

function test_this2()
    dim = 5;
    n = 10000;
    rank = 2;
    %[f,fi,sz] = create_nice_Trans(dim,rank);
    %[f,fi,sz] = create_linTrans2(dim);
    [f,fi,sz] = create_affineTrans2(dim);
    R = randn(dim,n);
    oracle = randn(sz,1)/10;
    oracle(1) = pi;
    T = f(oracle,R);

    %params0 = randn(sz,1)/10;
    %params0(1) = 1/sqrt(sum(T(:).^2)/n);
    mu = mean(T,2);
    C = cov(T.',1);
    M = eye(dim)/sqrt(trace(C)/dim);
    params0 = [M(:);mu];

    %params0 = eye(dim)/sqrt(sum(T(:).^2)/n);
    %params0 = params0(:);

    obj = @(params) mvn_obj(T,fi,params);
    oracle_obj = obj(oracle),
    init_obj = obj(params0),

    mem = 20;
    stpsz0 = 1;
    [params,obj_final] = L_BFGS(obj,params0,100,2*60,20,1/100);

    init_obj = obj(params0),
    oracle_obj = obj(oracle),
    final_obj = obj(params),
end
github
bsxfan/meta-embeddings-master
diag2full.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/MLNDA/diag2full.m
351
utf_8
292558cc287da6cff2303e230a2032c5
function [M,back] = diag2full(d)

    if nargin==0
        test_this();
        return;
    end

    dim = length(d);
    M = sparse(1:dim,1:dim,d,dim,dim);

    back = @back_this;
    function [dd] = back_this(dM)
        dd = diag(dM);
    end

end

function test_this()
    d = randn(5,1);
    testBackprop(@diag2full,{d});
end
github
bsxfan/meta-embeddings-master
smvn_llh.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/MLNDA/smvn_llh.m
235
utf_8
77365e553286261a9c21061b658bc8a9
function [y,back] = smvn_llh(R)

    if nargin==0
        test_this();
        return;
    end

    y = (-0.5)*R(:).'*R(:);
    back = @(dy) (-dy)*R;

end

function test_this()
    R = randn(3,5);
    testBackprop(@smvn_llh,{R});
end
github
bsxfan/meta-embeddings-master
create_shiftTrans.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/MLNDA/create_shiftTrans.m
723
utf_8
bdf9737fc803c43ac9954fd9a59e4dd8
function [f,fi,paramsz] = create_shiftTrans(dim)

    if nargout==0
        test_this();
        return;
    end

    f = @f_this;
    fi = @fi_this;
    paramsz = dim;

    function T = f_this(P,R)
        T = bsxfun(@plus,P,R);
    end

    function [R,logdetJ,back] = fi_this(P,T)
        logdetJ = 0;
        R = bsxfun(@minus,T,P);
        back = @back_this;
        function [dP,dT] = back_this(dR,dlogdetJ)
            dP = -sum(dR,2);
            dT = dR;
        end
    end

end

function test_this()
    dim = 3;
    [f,fi,sz] = create_shiftTrans(dim);
    P = randn(sz,1);
    R = randn(dim,5);
    T = f(P,R);
    testBackprop_multi(fi,2,{P,T});
end
github
bsxfan/meta-embeddings-master
splda_adaptation_obj.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/MLNDA/splda_adaptation_obj.m
2,197
utf_8
68929c4f01703cb12cd542b7b824ee2f
function [y,back] = splda_adaptation_obj(newData,labels,oldF,oldW,params,num_new_Fcols,W_adj_rank,slow)

    if nargin==0
        test_this();
        return;
    end

    if ~exist('slow','var')
        slow = false;
    end

    [dim,Frank] = size(oldF);
    [Fcols,Fscal,Cfac] = unpack(params,dim,Frank,num_new_Fcols,W_adj_rank);
    [newF,newW,back1] = adaptSPLDA(Fcols,Fscal,Cfac,oldF,oldW);
    [llh,back2] = splda_llh_full(labels,newF,newW,newData,slow);
    y = -llh;

    back = @back_this;
    function dparams = back_this(dy)
        dllh = -dy;
        [dnewF,dnewW] = back2(dllh);
        [dFcols,dFscal,dCfac] = back1(dnewF,dnewW);
        dparams = [dFcols(:);dFscal(:);dCfac(:)];
    end

end

function [Fcols,Fscal,Cfac] = unpack(params,dim,Frank,num_new_Fcols,W_adj_rank)
    at = 0;

    sz = dim*num_new_Fcols;
    Fcols = reshape(params(at+(1:sz)),dim,num_new_Fcols);
    at = at + sz;

    sz = Frank;
    Fscal = reshape(params(at+(1:sz)),1,Frank);
    at = at + sz;

    sz = dim*W_adj_rank;
    Cfac = reshape(params(at+(1:sz)),dim,W_adj_rank);
    at = at + sz;

    assert( at == length(params) );
end

function test_this()
    dim = 20;
    Frank = 5;
    num_new_Fcols = 2;
    W_adj_rank = 3;
    n = 100;
    K = 15;
    newData = randn(dim,n);
    labels = sparse(randi(K,1,n),1:n,true,K,n);
    oldF = randn(dim,Frank);
    oldW = randn(dim,dim+1); oldW = oldW * oldW.';

    Fcols = randn(dim,num_new_Fcols);
    Cfac = randn(dim,W_adj_rank);
    Fscal = randn(1,Frank);
    params = [Fcols(:);Fscal(:);Cfac(:)];

    f_slow = @(params) splda_adaptation_obj(newData,labels,oldF,oldW,params,num_new_Fcols,W_adj_rank,true);
    f_fast = @(params) splda_adaptation_obj(newData,labels,oldF,oldW,params,num_new_Fcols,W_adj_rank,false);

    fprintf('test function value equality:\n');
    delta = abs(f_slow(params)-f_fast(params)),

    fprintf('test slow derivatives:\n');
    testBackprop(f_slow,params);

    [~,back] = f_slow(params);
    dparams_slow = back(pi);
    [~,back] = f_fast(params);
    dparams_fast = back(pi);
    fprintf('compare fast and slow derivatives:\n');
    delta = max(abs(dparams_slow-dparams_fast)),
end
github
bsxfan/meta-embeddings-master
create_iso_lowrank_trans.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/MLNDA/create_iso_lowrank_trans.m
1,971
utf_8
38ba025f97172778544cfbd48dd497e7
function [f,fi,paramsz,fe] = create_iso_lowrank_trans(dim,rank)

    if nargin==0
        test_this();
        return;
    end

    paramsz = 1 + 2*dim*rank + dim;
    [f0,fi0] = create_affineTrans(dim);

    f = @f_this;
    fi = @fi_this;
    fe = @expand;

    function T = f_this(P,R)
        Q = expand(P);
        T = f0(Q,R);
    end

    function [R,logdetJ,back] = fi_this(P,T)
        [Q,back1] = expand(P);
        [R,logdetJ,back2] = fi0(Q,T);
        back = @back_that;
        function [dP,dT] = back_that(dR,dlogdetJ)
            if nargout==2
                [dQ,dT] = back2(dR,dlogdetJ);
            else
                dQ = back2(dR,dlogdetJ);
            end
            dP = back1(dQ);
        end
    end

    function [Q,back] = expand(P)
        at = 1;

        sz = 1;
        sigma = P(at);
        at = at + sz;

        sz = dim*rank;
        L = reshape(P(at:at+sz-1),dim,rank);
        at = at + sz;

        sz = dim*rank;
        R = reshape(P(at:at+sz-1),rank,dim);
        at = at + sz;

        sz = dim;
        offset = P(at:at+sz-1);
        at = at + sz;

        assert(at==length(P)+1);

        [LR,back1] = matmul(L,R);
        M = sigma*speye(dim) + LR;
        Q = [M(:);offset];

        back = @back_this;
        function dP = back_this(dQ)
            dQ = reshape(dQ,dim,dim+1);
            doffset = dQ(:,end);
            dM = dQ(:,1:end-1);
            dsigma = trace(dM);
            [dL,dR] = back1(dM);
            dP = [dsigma;dL(:);dR(:);doffset];
        end
    end

end

function test_this()
    dim = 5;
    rank = 2;
    n = 6;
    [f,fi,sz] = create_iso_lowrank_trans(dim,rank);
    P = randn(sz,1);
    R = randn(dim,n);
    T = f(P,R);
    testBackprop_multi(fi,2,{P,T});
    %testBackprop(fe,{P});
end
github
bsxfan/meta-embeddings-master
logdetLU.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/MLNDA/logdetLU.m
393
utf_8
f89f69d0255592e7c1933492e7894c79
function [y,back] = logdetLU(M)

    if nargin==0
        test_this();
        return;
    end

    [L,U] = lu(M);
    y = sum(log(diag(U).^2))/2;

    back = @back_this;
    function dM = back_this(dy)
        %dM = dy*(inv(U)/L).';
        dM = dy*(L.'\inv(U.'));
    end

end

function test_this()
    dim = 5;
    M = randn(dim);
    testBackprop(@logdetLU,M);
end
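% A value check with a made-up input (not part of the original file): with LU
% pivoting, |det(L)| = 1, so sum(log(diag(U).^2))/2 recovers log|det(M)|:
%
%     M = randn(4);
%     delta = abs(logdetLU(M) - log(abs(det(M))))   % should be near zero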
github
bsxfan/meta-embeddings-master
splda_map_adaptation_obj.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/MLNDA/splda_map_adaptation_obj.m
2,841
utf_8
7240f1d5e773584fcf41dd21677665cb
function [y,back] = splda_map_adaptation_obj(newData,newLabels,...
                                             oldData,oldLabels,...
                                             old_weighting,...
                                             params,Frank,Wfac_numcols,slow)

    if nargin==0
        test_this();
        return;
    end

    error('derivatives are not working yet');

    if ~exist('slow','var')
        slow = false;
    end

    [dim,~] = size(oldData);
    [F,Wfac] = unpack(params,dim,Frank,Wfac_numcols);
    W = Wfac*Wfac.';
    [llh_old,back1] = splda_llh_full(oldLabels,F,W,oldData,slow);
    [llh_new,back2] = splda_llh_full(newLabels,F,W,newData,slow);
    y = -llh_new - old_weighting*llh_old;

    back = @back_this;
    function dparams = back_this(dy)
        % y = -llh_new - old_weighting*llh_old
        dllh_new = -dy;
        dllh_old = -old_weighting*dy;

        % [llh_new,back2] = splda_llh_full(newLabels,F,W,newData,slow)
        [dF,dW] = back2(dllh_new);

        % [llh_old,back1] = splda_llh_full(oldLabels,F,W,oldData,slow)
        [dF1,dW1] = back1(dllh_old);
        dF = dF + dF1;
        dW = dW + dW1;

        % W = Wfac*Wfac.'
        dWfac = 2*dW*Wfac;

        dparams = [dF(:);dWfac(:)];
    end

end

function [F,Wfac] = unpack(params,dim,Frank,Wfac_numcols)
    at = 0;

    sz = dim*Frank;
    F = reshape(params(at+(1:sz)),dim,Frank);
    at = at + sz;

    sz = dim*Wfac_numcols;
    Wfac = reshape(params(at+(1:sz)),dim,Wfac_numcols);
    at = at + sz;

    assert( at == length(params) );
end

function test_this()
    dim = 20;
    Frank = 5;
    Wfac_numcols = 25;
    Nold = 100;
    Kold = 15;
    Nnew = 50;
    Knew = 10;

    newData = randn(dim,Nnew);
    newLabels = sparse(randi(Knew,1,Nnew),1:Nnew,true,Knew,Nnew);

    oldData = randn(dim,Nold);
    oldLabels = sparse(randi(Kold,1,Nold),1:Nold,true,Kold,Nold);

    old_weighting = 1/4;

    F = randn(dim,Frank);
    Wfac = randn(dim,Wfac_numcols);
    params = [F(:);Wfac(:)];

    f_slow = @(params) splda_map_adaptation_obj(newData,newLabels,...
                                                oldData,oldLabels,...
                                                old_weighting,...
                                                params,Frank,Wfac_numcols,true);
    f_fast = @(params) splda_map_adaptation_obj(newData,newLabels,...
                                                oldData,oldLabels,...
                                                old_weighting,...
                                                params,Frank,Wfac_numcols,false);

    fprintf('test function value equality:\n');
    delta = abs(f_slow(params)-f_fast(params)),

    fprintf('test slow derivatives:\n');
    testBackprop(f_slow,params);
end
github
bsxfan/meta-embeddings-master
simulateSPLDA.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/MLNDA/simulateSPLDA.m
1,980
utf_8
ca9fb57a7960abc9195c92f2efad43a1
function [X,hlabels,F,W] = simulateSPLDA(big)
% Inputs:
%   big: flag to make low or high-dimensional data, each with realistic,
%        single-digit EERs
%
% Outputs:
%   X: i-vectors, dim-by-N
%   hlabels: sparse label matrix, with one hot columns
%   F,W: SPLDA parameters

    % Assemble model to generate data
    nu = inf;  % required: nu >= 1, integer, degrees of freedom for heavy-tailed channel noise
    if ~big
        zdim = 2;     % speaker identity variable size
        rdim = 20;    % i-vector size. required: rdim > zdim
        fscal = 3;    % increase fscal to move speakers apart
    else
        zdim = 100;   % speaker identity variable size
        rdim = 512;   % i-vector size. required: rdim > zdim
        fscal = 1/20; % increase fscal to move speakers apart
    end
    F = randn(rdim,zdim)*fscal;
    W = randn(rdim,2*rdim); W = W*W.'; W = (rdim/trace(W))*W;
    %model1 = create_HTPLDA_SGME_backend(nu,F,W); %oracle model

    % Generate synthetic labels
    nspeakers = 1000;
    recordings_per_speaker = 10;
    N = nspeakers*recordings_per_speaker;
    ilabels = repmat(1:nspeakers,recordings_per_speaker,1);
    ilabels = ilabels(:).';  % integer speaker labels
    hlabels = sparse(ilabels,1:N,true,nspeakers,N);  % speaker label matrix with one-hot columns

    % and some training data
    Z = randn(zdim,nspeakers);
    X = F*Z*hlabels + sample_HTnoise(nu,rdim,N,W);

end

function X = sample_HTnoise(nu,dim,n,W)
% Sample n heavy-tailed dim-dimensional variables. (Only for integer nu.)
%
% Inputs:
%   nu: integer nu >= 1, degrees of freedom of resulting t-distribution
%   n: number of samples
%   W: precision matrix for t-distribution
%
% Output:
%   X: dim-by-n samples
    cholW = chol(W);
    if isinf(nu)
        precisions = ones(1,n);
    else
        precisions = mean(randn(nu,n).^2,1);
    end
    std = 1./sqrt(precisions);
    X = cholW\bsxfun(@times,std,randn(dim,n));
end
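% A usage sketch for the simulator above (the printed shapes follow from the
% hard-coded settings in the code: 1000 speakers x 10 recordings):
%
%     [X,hlabels,F,W] = simulateSPLDA(false);  % small case: 20-dim i-vectors
%     size(X)                   % 20-by-10000
%     full(sum(hlabels,2)).'    % 10 recordings per speaker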
github
bsxfan/meta-embeddings-master
create_scalTrans.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/MLNDA/create_scalTrans.m
669
utf_8
f35ab355133b6c35d64e89119ca70ae6
function [f,fi] = create_scalTrans()

    if nargout==0
        test_this();
        return;
    end

    f = @(scal,R) scal*R;
    fi = @fi_this;

    function [R,logdetJ,back] = fi_this(scal,T)
        [dim,N] = size(T);
        R = T/scal;
        logdetJ = (N*dim/2)*log(scal^2);
        back = @back_this;
        function dscal = back_this(dR,dlogdetJ)
            dscal = N*dim*dlogdetJ/scal;
            dscal = dscal - (R(:).'*dR(:))/scal;
        end
    end

end

function test_this()
    [f,fi] = create_scalTrans();
    R = randn(3,5);
    ff = @(scal) fi(scal,R);
    scal = pi;
    testBackprop_multi(ff,2,{scal});
end
github
bsxfan/meta-embeddings-master
test_MLNDA.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/MLNDA/test_MLNDA.m
2,084
utf_8
2b1078bd2cf9d5c3e78fc9481c8065a8
function test_MLNDA()

    % Assemble model to generate data
    big = false;
    nu = inf;  % required: nu >= 1, integer, degrees of freedom for heavy-tailed channel noise
    if ~big
        zdim = 2;     % speaker identity variable size
        rdim = 20;    % i-vector size. required: rdim > zdim
        fscal = 3;    % increase fscal to move speakers apart
    else
        zdim = 100;   % speaker identity variable size
        rdim = 512;   % i-vector size. required: rdim > zdim
        fscal = 1/20; % increase fscal to move speakers apart
    end
    F = randn(rdim,zdim)*fscal;
    W = randn(rdim,2*rdim); W = W*W.'; W = (rdim/trace(W))*W;
    %model1 = create_HTPLDA_SGME_backend(nu,F,W); %oracle model

    % Generate synthetic labels
    nspeakers = 100;
    recordings_per_speaker = 10;
    N = nspeakers*recordings_per_speaker;
    ilabels = repmat(1:nspeakers,recordings_per_speaker,1);
    ilabels = ilabels(:).';  % integer speaker labels
    hlabels = sparse(ilabels,1:N,true,nspeakers,N);  % speaker label matrix with one-hot columns

    % and some training data
    Z = randn(zdim,nspeakers);
    R = F*Z*hlabels + sample_HTnoise(nu,rdim,N,W);

    [f,fi] = create_scalTrans();
    scal = pi;
    T = f(scal,R);

    ss = scal*exp(-1:0.1:1);
    y = zeros(size(ss));
    for i=1:length(ss)
        y(i) = MLNDAobj(T,hlabels,F,W,fi,ss(i));
    end
    ystar = MLNDAobj(T,hlabels,F,W,fi,scal);

    close all;
    semilogx(ss,y); hold;
    semilogx(scal,ystar,'*r');

end

function X = sample_HTnoise(nu,dim,n,W)
% Sample n heavy-tailed dim-dimensional variables. (Only for integer nu.)
%
% Inputs:
%   nu: integer nu >= 1, degrees of freedom of resulting t-distribution
%   n: number of samples
%   W: precision matrix for t-distribution
%
% Output:
%   X: dim-by-n samples
    cholW = chol(W);
    if isinf(nu)
        precisions = ones(1,n);
    else
        precisions = mean(randn(nu,n).^2,1);
    end
    std = 1./sqrt(precisions);
    X = cholW\bsxfun(@times,std,randn(dim,n));
end
github
bsxfan/meta-embeddings-master
create_nice_Trans.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/MLNDA/create_nice_Trans.m
2,960
utf_8
20a716c7c9c49f1d36b52dd0e286b40e
function [f,fi,paramsz,fe] = create_nice_Trans(dim,rank)

    if nargin==0
        test_this();
        return;
    end

    paramsz = 1 + dim*rank + rank + dim;

    f = @f_this;
    fi = @fi_this;
    fe = @expand;

    function T = f_this(P,R)
        [sigma,L,d,offset] = expand(P);
        M = sigma*eye(dim) + L*diag(d)*L.';
        T = bsxfun(@plus,offset,M\R);
    end

    function [R,logdetJ,back] = fi_this(P,T)
        [sigma,L,d,offset,back0] = expand(P);
        Delta = bsxfun(@minus,T,offset);
        DL = bsxfun(@times,d,L.');
        DLDelta = DL*Delta;
        R = sigma*Delta + L*DLDelta;
        [logdet,back1] = logdetNice(sigma,L,d);
        n = size(T,2);
        logdetJ = -n*logdet;
        back = @back_that;
        function [dP,dT] = back_that(dR,dlogdetJ)
            % [logdetJ,back1] = logdetNice(sigma,L,d)
            [dsigma,dL,dd] = back1(-n*dlogdetJ);

            % R = sigma*Delta + L*DLDelta
            dsigma = dsigma + dR(:).'*Delta(:);
            dDelta = sigma*dR;
            dL = dL + dR*DLDelta.';
            dDLDelta = L.'*dR;

            % DLDelta = DL*Delta
            dDL = dDLDelta*Delta.';
            dDelta = dDelta + DL.'*dDLDelta;

            % DL = bsxfun(@times,d,L.')
            dd = dd + sum(dDL.*L.',2);
            dL = dL + bsxfun(@times,dDL.',d');

            % Delta = bsxfun(@minus,T,offset)
            dT = dDelta;
            doffset = -sum(dDelta,2);

            dP = back0(dsigma,dL,dd,doffset);
        end
    end

    function [sigma,L,d,offset,back] = expand(P)
        at = 1;

        sz = 1;
        %logsigma = P(at);
        %sigma = exp(logsigma);
        %sigma = P(at);
        sqrtsigma = P(at);
        sigma = sqrtsigma^2;
        at = at + sz;

        sz = dim*rank;
        L = reshape(P(at:at+sz-1),dim,rank);
        at = at + sz;

        sz = rank;
        d = P(at:at+sz-1);
        at = at + sz;

        sz = dim;
        offset = P(at:at+sz-1);
        at = at + sz;

        assert(at==length(P)+1);

        back = @back_this;
        function dP = back_this(dsigma,dL,dd,doffset)
            %dlogsigma = sigma*dsigma;
            %dP = [dlogsigma;dL(:);dd;doffset];
            %dP = [dsigma;dL(:);dd;doffset];
            dsqrtsigma = 2*dsigma*sqrtsigma;
            dP = [dsqrtsigma;dL(:);dd;doffset];
        end
    end

end

function test_this()
    dim = 5;
    rank = 2;
    n = 6;
    [f,fi,sz,fe] = create_nice_Trans(dim,rank);
    P = randn(sz,1);
    R = randn(dim,n);
    T = f(P,R);
    Ri = fi(P,T);
    test_inverse = max(abs(R(:)-Ri(:))),
    testBackprop_multi(fi,2,{P,T});
    %testBackprop_multi(fe,4,{P});
end
github
bsxfan/meta-embeddings-master
adaptSPLDA.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/MLNDA/adaptSPLDA.m
1,791
utf_8
fce312e9e18adccb87760f2788ced832
function [Ft,Wt,back] = adaptSPLDA(Fcols,Fscal,Cfac,F,W)

    if nargin==0
        test_this();
        return;
    end

    Frank = length(Fscal);
    Ft = [bsxfun(@times,F,Fscal), Fcols];

    % Wt = inv(Ct), Ct = inv(W) + Cfac*Cfac'
    % Wt = W - W*Cfac*inv(I + Cfac'*W*Cfac)*Cfac'*W
    WCfac = W*Cfac;
    S = eye(size(Cfac,2)) + WCfac.'*Cfac;
    [Adj,back1] = LinvSR(WCfac,S,WCfac.');
    Wt = W - (Adj + Adj.')/2;  % numerically symmetrize

    back = @back_this;
    function [dFcols,dFscal,dCfac,dF,dW] = back_this(dFt,dWt)
        % Wt = W - (Adj + Adj.')/2
        dAdj = -(dWt+dWt.')/2;
        dW = dWt;

        % [Adj,back1] = LinvSR(WCfac,S,WCfac.')
        [dL,dS,dR] = back1(dAdj);
        dWCfac = dL + dR.';

        % S = eye(size(Cfac,2)) + WCfac.'*Cfac
        dWCfac = dWCfac + Cfac*dS.';
        dCfac = WCfac*dS;

        % WCfac = W*Cfac
        if nargout>=5
            dW = dW + dWCfac*Cfac.';
        end
        dCfac = dCfac + W.'*dWCfac;

        % Ft = [bsxfun(@times,F,Fscal), Fcols]
        dFcols = dFt(:,Frank+1:end);              %OK
        dFscal = sum(dFt(:,1:Frank).*F,1);        %OK
        if nargout>=4
            dF = bsxfun(@times,dFt(:,1:Frank),Fscal);  %OK
        end
    end

end

function test_this()
    dim = 10;
    Frank = 2;
    extra = 3;
    F = randn(dim,Frank);
    Fcols = randn(dim,extra);
    Fscal = randn(1,Frank);
    W = randn(dim,dim);
    Cfac = randn(dim,4);

    f1 = @(Fcols,Fscal,Cfac,F,W) adaptSPLDA(Fcols,Fscal,Cfac,F,W);
    f2 = @(Fcols,Fscal,Cfac) adaptSPLDA(Fcols,Fscal,Cfac,F,W);

    testBackprop_multi(f1,2,{Fcols,Fscal,Cfac,F,W});
    testBackprop_multi(f2,2,{Fcols,Fscal,Cfac});
end
github
bsxfan/meta-embeddings-master
compose_trans.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/MLNDA/compose_trans.m
1,502
utf_8
a3a3ae4f9ea19c4b508b6c9b089842db
function [f,fi,paramsz] = compose_trans(outer_paramsz,outer_f,outer_fi,inner_paramsz,inner_f,inner_fi)

    if nargout==0
        test_this();
        return;
    end

    f = @f_this;
    fi = @fi_this;
    paramsz = outer_paramsz + inner_paramsz;

    function T = f_this(P,R)
        [outerP,innerP] = unpack(P);
        T = outer_f(outerP,inner_f(innerP,R));
    end

    function [X,logdetJ,back] = fi_this(P,Y)
        [outerP,innerP] = unpack(P);
        [Z,logdet1,back1] = outer_fi(outerP,Y);
        [X,logdet2,back2] = inner_fi(innerP,Z);
        logdetJ = logdet1 + logdet2;
        back = @back_this;
        function [dParams,dT] = back_this(dR,dlogdetJ)
            [dParam2,dT] = back2(dR,dlogdetJ);
            if nargout>=2
                [dParam1,dT] = back1(dT,dlogdetJ);
            else
                [dParam1] = back1(dT,dlogdetJ);
            end
            dParams = [dParam1(:);dParam2(:)];
        end
    end

    function [outerP,innerP] = unpack(P)
        outerP = P(1:outer_paramsz);
        innerP = P(outer_paramsz+1:paramsz);
    end

end

function test_this()
    dim = 3;
    [inner_f,inner_fi,szi] = create_affineTrans(dim);
    [outer_f,outer_fi,szo] = create_affineTrans(dim);
    [f,fi,sz] = compose_trans(szo,outer_f,outer_fi,szi,inner_f,inner_fi);
    n = 5;
    P = randn(sz,1);
    R = randn(dim,n);
    T = f(P,R);
    testBackprop_multi(fi,2,{P,T});
end
github
bsxfan/meta-embeddings-master
splda_llh.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/MLNDA/splda_llh.m
1,203
utf_8
08dc90a03b9be8bcf3987c73de370d43
function [llh,back] = splda_llh(R,labels,F,W)
% Inputs:
%   R: D-by-N, i-vectors
%   labels: sparse, logical K-by-N, one hot columns, K speakers, N recordings

    if nargin==0
        test_this();
        return;
    end

    FW = F.'*W;
    FWR = FW*R;
    S = FWR*labels.';  % first order natural parameter for speaker factor posterior
    n = full(sum(labels,2)');  % zero order stats (recordings per speaker)

    FWF = FW*F;
    [V,E] = eig(FWF);
    E = diag(E);
    nE = bsxfun(@times,n,E);
    Mu = V*bsxfun(@ldivide,1+nE,V'*S);  % posterior means

    RR = R*R.';

    llh = ( Mu(:).'*S(:) - RR(:).'*W(:) ) / 2;

    back = @back_this;
    function dR = back_this(dllh)
        dMu = (dllh/2)*S;
        dS = (dllh/2)*Mu;
        dRR = (-dllh/2)*W;
        dR = (2*dRR)*R;
        dS = dS + V*bsxfun(@ldivide,1+nE,V'*dMu);
        dFWR = dS*labels;
        dR = dR + FW.'*dFWR;
    end

end

function test_this()
    D = 4;
    d = 2;
    N = 10;
    K = 3;
    R = randn(D,N);
    labels = sparse(randi(K,1,N),1:N,true,K,N);
    F = randn(D,d);
    W = randn(D,D+1); W = W*W.';
    f = @(R) splda_llh(R,labels,F,W);
    testBackprop(f,{R});
end
github
bsxfan/meta-embeddings-master
create_linTrans2.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/MLNDA/create_linTrans2.m
1,061
utf_8
1c1bf3b7b215f991486454a53aee7d69
function [f,fi,paramsz] = create_linTrans2(dim)

    if nargout==0
        test_this();
        return;
    end

    f = @f_this;
    fi = @fi_this;
    paramsz = dim^2;

    function T = f_this(P,R)
        M = unpack(P);
        T = M\R;
    end

    function [R,logdetJ,back] = fi_this(P,T)
        M = unpack(P);
        [L,U] = lu(M);
        n = size(T,2);
        logdetJ = -n*sum(log(diag(U).^2))/2;
        Delta = T;
        R = M*Delta;
        back = @back_this;
        function [dP,dT] = back_this(dR,dlogdetJ)
            dM = (-n*dlogdetJ)*(U\inv(L)).';
            dDelta = M.'*dR;
            dM = dM + dR*Delta.';
            dP = dM(:);
            dT = dDelta;
        end
    end

    function P = unpack(P)
        P = reshape(P,dim,dim);
    end

end

function test_this()
    dim = 3;
    [f,fi,sz] = create_linTrans2(dim);
    R = randn(dim,5);
    P = randn(sz,1);
    T = f(P,R);
    Ri = fi(P,T);
    test_inverse = max(abs(R(:)-Ri(:)))
    testBackprop_multi(fi,2,{P,T});
end
github
bsxfan/meta-embeddings-master
create_affineTrans.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/MLNDA/create_affineTrans.m
1,306
utf_8
919b49f4e691b0f162612b8e658b8d07
function [f,fi,paramsz] = create_affineTrans(dim)

    if nargout==0
        test_this();
        return;
    end

    f = @f_this;
    fi = @fi_this;
    paramsz = dim*(dim+1);

    function T = f_this(P,R)
        [offset,M] = unpack(P);
        T = bsxfun(@plus,offset,M*R);
    end

    function [R,logdetJ,back] = fi_this(P,T)
        [offset,M] = unpack(P);
        [L,U] = lu(M);
        n = size(T,2);
        logdetJ = n*sum(log(diag(U).^2))/2;
        Delta = bsxfun(@minus,T,offset);
        R = U\(L\Delta);
        back = @back_this;
        function [dP,dT] = back_this(dR,dlogdetJ)
            dM = (n*dlogdetJ)*(U\inv(L)).';
            dDelta = L.'\(U.'\dR);
            dM = dM - dDelta*R.';
            doffset = -sum(dDelta,2);
            dP = [dM(:);doffset];
            dT = dDelta;
        end
    end

    function [offset,P] = unpack(P)
        P = reshape(P,dim,dim+1);
        offset = P(:,end);
        P(:,end) = [];
    end

end

function test_this()
    dim = 3;
    [f,fi,sz] = create_affineTrans(dim);
    R = randn(dim,5);
    offset = randn(dim,1);
    M = randn(dim);
    P = randn(sz,1);
    T = f(P,R);
    Ri = fi(P,T);
    test_inverse = max(abs(R(:)-Ri(:))),
    testBackprop_multi(fi,2,{P,T});
end
github
bsxfan/meta-embeddings-master
splda_llh_full.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/MLNDA/splda_llh_full.m
3,374
utf_8
9f2a6f749c0dfeb7e8f3ae83f26c362e
function [llh,back] = splda_llh_full(labels,F,W,R,slow)
% Like splda_llh(), but backpropagates into all of F, W and R.
%
% Inputs:
%   R: D-by-N, i-vectors
%   labels: sparse, logical K-by-N, one hot columns, K speakers, N recordings
%   F,W: SPLDA parameters
%   slow: [optional, default = false] logical, use slow = true to test derivatives
%
% Outputs:
%   llh: total log density: log P(R | F,W,labels)

    if nargin==0
        test_this();
        return;
    end

    if ~exist('slow','var')
        slow = false;
    end

    [nspeakers,ndata] = size(labels);

    FW = F.'*W;
    FWR = FW*R;
    S = FWR*labels.';  % first order natural parameter for speaker factor posterior
    n = full(sum(labels,2)).';  % zero order stats (recordings per speaker)

    FWF = FW*F;
    if slow
        [pn,back1] = posteriorNorm_slow(S,FWF,n);
    else
        [pn,back1] = posteriorNorm_fast(S,FWF,n);
    end

    RR = R*R.';

    % if ~slow
    %     cholW = chol(W);
    %     logdetW = 2*sum(log(diag(cholW)));
    % else
    %     [Lw,Uw] = lu(W);
    %     logdetW = sum(log(diag(Uw).^2))/2;
    % end
    [logdetW,back2] = logdetLU(W);

    llh = ( ndata*logdetW - RR(:).'*W(:) ) / 2 - sum(pn,2);

    back = @back_this;
    function [dF,dW,dR] = back_this(dllh)
        % llh = ( ndata*logdetW - RR(:).'*W(:) ) / 2 - sum(pn,2)
        dlogdetW = ndata*dllh/2;
        if nargout>=3
            dRR = (-dllh/2)*W;
        end
        dW = (-dllh/2)*RR;
        dpn = repmat(-dllh,1,nspeakers);

        % [logdetW,back2] = logdetLU(W)
        dW = dW + back2(dlogdetW);

        % RR = R*R.'
        if nargout>=3
            dR = (2*dRR)*R;
        end

        % [pn,back1] = posteriorNorm_fast(S,FWF,n)
        [dS,dFWF] = back1(dpn);

        % FWF = FW*F
        dFW = dFWF*F.';
        dF = FW.'*dFWF;

        % S = FWR*labels.'
        dFWR = dS*labels;

        % FWR = FW*R
        dFW = dFW + dFWR*R.';
        if nargout>=3
            dR = dR + FW.'*dFWR;
        end

        % FW = F.'*W
        dW = dW + F*dFW;
        dF = dF + W*dFW.';
    end

end

function test_this()
    D = 4;
    d = 2;
    N = 10;
    K = 3;
    R = randn(D,N);
    labels = sparse(randi(K,1,N),1:N,true,K,N);
    F = randn(D,d);
    W = randn(D,D+1); W = W*W.';

    slow = true;
    f_slow = @(F,W,R) splda_llh_full(labels,F,W,R,slow);
    f_fast = @(F,W,R) splda_llh_full(labels,F,W,R);
    f0 = @(F,W) splda_llh_full(labels,F,W,R);

    fprintf('test function value equality:\n');
    delta = abs(f_slow(F,W,R)-f_fast(F,W,R)),

    fprintf('test slow derivatives:\n');
    testBackprop(f_slow,{F,W,R},{1,1,1});

    [~,back] = f_slow(F,W,R);
    [dFs,dWs,dRs] = back(pi);
    [~,back] = f_fast(F,W,R);
    [dFf,dWf,dRf] = back(pi);
    [~,back] = f0(F,W);
    [dF0,dW0] = back(pi);

    fprintf('compare fast and slow derivatives:\n');
    deltaF = max(abs(dFs(:)-dFf(:))),
    deltaW = max(abs(dWs(:)-dWf(:))),
    deltaR = max(abs(dRs(:)-dRf(:))),
    deltaF0 = max(abs(dFs(:)-dF0(:))),
    deltaW0 = max(abs(dWs(:)-dW0(:))),
end
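% A hedged end-to-end sketch tying this to simulateSPLDA elsewhere in this
% repository; it evaluates the total SPLDA log-likelihood of simulated data
% under the generating parameters, then backpropagates a unit gradient:
%
%     [X,hlabels,F,W] = simulateSPLDA(false);
%     llh = splda_llh_full(hlabels,F,W,X);
%     [~,back] = splda_llh_full(hlabels,F,W,X);
%     [dF,dW,dR] = back(1);   % gradients w.r.t. F, W and the data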
github
bsxfan/meta-embeddings-master
create_affineTrans2.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/MLNDA/create_affineTrans2.m
1,233
utf_8
36c35821bc884e51c0de2591659dafda
function [f,fi,paramsz] = create_affineTrans2(dim)

    if nargout==0
        test_this();
        return;
    end

    f = @f_this;
    fi = @fi_this;
    paramsz = dim*(dim+1);

    function T = f_this(P,R)
        [offset,M] = unpack(P);
        T = bsxfun(@plus,offset,M\R);
    end

    function [R,logdetJ,back] = fi_this(P,T)
        [offset,M] = unpack(P);
        [L,U] = lu(M);
        n = size(T,2);
        logdetJ = -n*sum(log(diag(U).^2))/2;
        Delta = bsxfun(@minus,T,offset);
        R = M*Delta;
        back = @back_this;
        function [dP,dT] = back_this(dR,dlogdetJ)
            dM = (-n*dlogdetJ)*(U\inv(L)).';
            dDelta = M.'*dR;
            dM = dM + dR*Delta.';
            doffset = -sum(dDelta,2);
            dP = [dM(:);doffset];
            dT = dDelta;
        end
    end

    function [offset,P] = unpack(P)
        P = reshape(P,dim,dim+1);
        offset = P(:,end);
        P(:,end) = [];
    end

end

function test_this()
    dim = 3;
    [f,fi,sz] = create_affineTrans2(dim);
    R = randn(dim,5);
    P = randn(sz,1);
    T = f(P,R);
    Ri = fi(P,T);
    test_inverse = max(abs(R(:)-Ri(:))),
    testBackprop_multi(fi,2,{P,T});
end
github
bsxfan/meta-embeddings-master
test_MLNDA3.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/MLNDA/test_MLNDA3.m
2,423
utf_8
50050aa7eff426a79b6f954e2e549b3e
function test_MLNDA3()

    % Assemble model to generate data
    big = true;
    nu = inf;  % required: nu >= 1, integer, degrees of freedom for heavy-tailed channel noise
    if ~big
        zdim = 2;     % speaker identity variable size
        rdim = 20;    % i-vector size. required: rdim > zdim
        fscal = 3;    % increase fscal to move speakers apart
    else
        zdim = 100;   % speaker identity variable size
        rdim = 512;   % i-vector size. required: rdim > zdim
        fscal = 1/20; % increase fscal to move speakers apart
    end
    F = randn(rdim,zdim)*fscal;
    W = randn(rdim,2*rdim); W = W*W.'; W = (rdim/trace(W))*W;
    %model1 = create_HTPLDA_SGME_backend(nu,F,W); %oracle model

    % Generate synthetic labels
    nspeakers = 100;
    recordings_per_speaker = 10;
    N = nspeakers*recordings_per_speaker;
    ilabels = repmat(1:nspeakers,recordings_per_speaker,1);
    ilabels = ilabels(:).';  % integer speaker labels
    hlabels = sparse(ilabels,1:N,true,nspeakers,N);  % speaker label matrix with one-hot columns

    % and some training data
    Z = randn(zdim,nspeakers);
    R = F*Z*hlabels + sample_HTnoise(nu,rdim,N,W);

    [f,fi,paramsz] = create_affineTrans2(rdim);
    oracle = randn(paramsz,1);

    r = randn(rdim,1);
    t = f(oracle,r);
    rr = fi(oracle,t);
    [r,t,rr]

    T = f(oracle,R);

    Rtrace = trace(F*F.'+W);
    Ttrace = sum(T(:).^2)/size(T,2);
    D = eye(rdim)*Rtrace/Ttrace;
    offset = mean(T,2);
    params0 = [D(:);offset];

    obj = @(params) MLNDAobj(T,hlabels,F,W,fi,params);

    obj_oracle = obj(oracle),
    obj_init = obj(params0),

    maxiters = 10000;
    timeout = 5*60;
    [trans,params] = train_ML_trans(F,W,T,hlabels,fi,params0,maxiters,timeout);

    delta_obj = obj(params) - obj(oracle)

end

function X = sample_HTnoise(nu,dim,n,W)
% Sample n heavy-tailed dim-dimensional variables. (Only for integer nu.)
%
% Inputs:
%   nu: integer nu >= 1, degrees of freedom of resulting t-distribution
%   n: number of samples
%   W: precision matrix for t-distribution
%
% Output:
%   X: dim-by-n samples
    cholW = chol(W);
    if isinf(nu)
        precisions = ones(1,n);
    else
        precisions = mean(randn(nu,n).^2,1);
    end
    std = 1./sqrt(precisions);
    X = cholW\bsxfun(@times,std,randn(dim,n));
end
github
bsxfan/meta-embeddings-master
create_linTrans.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/MLNDA/create_linTrans.m
1,038
utf_8
3f9b4ff66b15309e91a3ab3ecd434e55
function [f,fi,paramsz] = create_linTrans(dim)

    if nargout==0
        test_this();
        return;
    end

    f = @f_this;
    fi = @fi_this;
    paramsz = dim^2;

    function T = f_this(P,R)
        M = unpack(P);
        T = M*R;
    end

    function [R,logdetJ,back] = fi_this(P,T)
        M = unpack(P);
        [L,U] = lu(M);
        n = size(T,2);
        logdetJ = n*sum(log(diag(U).^2))/2;
        Delta = T;
        R = U\(L\Delta);
        back = @back_this;
        function [dP,dT] = back_this(dR,dlogdetJ)
            dM = (n*dlogdetJ)*(U\inv(L)).';
            dDelta = L.'\(U.'\dR);
            dM = dM - dDelta*R.';
            dP = dM(:);
            dT = dDelta;
        end
    end

    function P = unpack(P)
        P = reshape(P,dim,dim);
    end

end

function test_this()
    dim = 3;
    [f,fi,sz] = create_linTrans(dim);
    R = randn(dim,5);
    P = randn(sz,1);
    T = f(P,R);
    ff = @(P,T) fi(P,T);
    testBackprop_multi(ff,2,{P,T});
end
github
bsxfan/meta-embeddings-master
create_diaglinTrans.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/MLNDA/create_diaglinTrans.m
925
utf_8
fdb319cbd9815c4182da56c378c191bd
function [f,fi,paramsz] = create_diaglinTrans(dim)

    if nargout==0
        test_this();
        return;
    end

    f = @f_this;
    fi = @fi_this;
    paramsz = dim;

    function T = f_this(P,R)
        T = bsxfun(@times,P,R);
    end

    function [R,logdetJ,back] = fi_this(P,T)
        n = size(T,2);
        logdetJ = n*sum(log(P.^2))/2;
        R = bsxfun(@ldivide,P,T);
        back = @back_this;
        function [dP,dT] = back_this(dR,dlogdetJ)
            dP = n*dlogdetJ./P;
            dT = bsxfun(@ldivide,P,dR);
            %dP = dP - diag(dT*R.');
            dP = dP - sum(dT.*R,2);
        end
    end

end

function test_this()
    dim = 3;
    [f,fi,sz] = create_diaglinTrans(dim);
    R = randn(dim,5);
    P = randn(sz,1);
    T = f(P,R);
    Ri = fi(P,T);
    test_inverse = max(abs(R(:)-Ri(:))),
    testBackprop_multi(fi,2,{P,T});
end
github
bsxfan/meta-embeddings-master
testTransform_obj.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/MLNDA/testTransform_obj.m
727
utf_8
c525ecb0f1fd8749b67f7140bc4e9ee1
function [y,back] = testTransform_obj(T,fi,params)

    if nargin==0
        test_this();
        return;
    end

    [R,logdetJ,back2] = fi(params,T);
    [llh,back1] = smvn_llh(R);
    y = logdetJ - llh;

    back = @back_this;
    function dparams = back_this(dy)
        dlogdetJ = dy;
        dR = back1(-dy);
        dparams = back2(dR,dlogdetJ);
    end

end

function test_this()
    R = randn(3,100);
    [f,fi] = create_scalTrans();
    scal = pi;
    T = f(scal,R);

    ss = scal*exp(-1:0.01:5);
    y = zeros(size(ss));
    for i=1:length(ss)
        y(i) = testTransform_obj(T,fi,ss(i));
    end

    close all;
    semilogx(ss,y); hold;
    semilogx(scal,testTransform_obj(T,fi,scal),'*r');
end
github
bsxfan/meta-embeddings-master
create_sandwich_trans.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/MLNDA/create_sandwich_trans.m
2,506
utf_8
f40de59ee0d3622632aadb88d82f5db2
function [f,fi,paramsz,fe] = create_sandwich_trans(dim,rank)

    if nargin==0
        test_this();
        return;
    end

    [f1,fi1] = create_affineTrans(dim);
    [f2,fi2] = create_diaglinTrans(dim);
    [f3,fi3] = create_linTrans2(dim);

    paramsz = 1 + 2*dim*rank + 2*dim;

    f = @f_this;
    fi = @fi_this;
    fe = @expand;

    function T = f_this(P,R)
        [A,D,L] = expand(P);
        T = f1(A,f2(D,f3(L,R)));
    end

    function [R,logdetJ,back] = fi_this(P,T)
        [A,D,L,back4] = expand(P);
        [T1,logdetJ1,back1] = fi1(A,T);
        [T2,logdetJ2,back2] = fi2(D,T1);
        [R,logdetJ3,back3] = fi3(L,T2);
        logdetJ = logdetJ1 + logdetJ2 + logdetJ3;
        back = @back_that;
        function [dP,dT] = back_that(dR,dlogdetJ)
            [dL,dT2] = back3(dR,dlogdetJ);
            [dD,dT1] = back2(dT2,dlogdetJ);
            if nargout==2
                [dA,dT] = back1(dT1,dlogdetJ);
            else
                dA = back1(dT1,dlogdetJ);
            end
            dP = back4(dA,dD,dL);
        end
    end

    function [A,D,L,back] = expand(P)
        at = 1;

        sz = 1;
        sigma = P(at);
        at = at + sz;

        sz = dim*rank;
        L = reshape(P(at:at+sz-1),dim,rank);
        at = at + sz;

        sz = dim*rank;
        R = reshape(P(at:at+sz-1),rank,dim);
        at = at + sz;

        sz = dim;
        offset = P(at:at+sz-1);
        at = at + sz;

        sz = dim;
        D = P(at:at+sz-1);
        at = at + sz;

        assert(at==length(P)+1);

        [LR,back1] = matmul(L,R);
        L = sigma*speye(dim) + LR;
        L = L(:);
        A = [L;offset];

        back = @back_this;
        function dP = back_this(dA,dD,dL)
            dA = reshape(dA,dim,dim+1);
            dL = reshape(dL,dim,dim);
            doffset = dA(:,end);
            dM = dA(:,1:end-1) + dL;
            dsigma = trace(dM);
            [dL,dR] = back1(dM);
            dP = [dsigma;dL(:);dR(:);doffset;dD];
        end
    end

end

function test_this()
    dim = 5;
    rank = 2;
    n = 6;
    [f,fi,sz,fe] = create_sandwich_trans(dim,rank);
    P = randn(sz,1);
    R = randn(dim,n);
    T = f(P,R);
    Ri = fi(P,T);
    test_inverse = max(abs(R(:)-Ri(:))),
    testBackprop_multi(fi,2,{P,T});
    %testBackprop_multi(fe,3,{P});
end
github
bsxfan/meta-embeddings-master
posteriorNorm_slow.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/MLNDA/posteriorNorm_slow.m
1,815
utf_8
a9b32fbd06cb0f91327d57dac2f41bbd
function [y,back] = posteriorNorm_slow(A,B,b)
% Computes, for every i: log N( 0 | Pi\A(:,i), inv(Pi) ), where
% precisions are Pi = I + b(i)*B
%
% This is the slow version, used only to verify correctness of the function
% value and derivatives of the fast version, posteriorNorm_fast().
%
% Inputs:
%   A: dim-by-n, natural parameters (precision *mean) for n Gaussians
%   B: dim-by-dim, common precision matrix factor (full, positive semi-definite)
%   b: 1-by-n, precision scale factors
%
% Outputs:
%   y: 1-by-n, log densities, evaluated at zero
%   back: backpropagation handle, [dA,dB,db] = back(dy)

    if nargin==0
        test_this();
        return;
    end

    [dim,n] = size(A);
    I = speye(dim);
    y = zeros(1,n);
    S = zeros(dim,n);
    for i=1:n
        a = A(:,i);
        bBI = I+b(i)*B;
        s = bBI\a;
        S(:,i) = s;
        logd = logdet(bBI);
        y(i) = (logd - s.'*a)/2;
    end

    back = @back_this;
    function [dA,dB,db] = back_this(dy)
        dA = zeros(size(A));
        db = zeros(size(b));
        dB = zeros(size(B));
        for ii=1:n
            s = S(:,ii);
            a = A(:,ii);
            da = -(dy(ii)/2)*s;
            ds = -(dy(ii)/2)*a;
            dlogd = dy(ii)/2;
            bBI = I+b(ii)*B;
            dbBI = dlogd*inv(bBI);  %#ok<MINV>
            da2 = bBI.'\ds;
            dA(:,ii) = da + da2;
            dbBI = dbBI - (da2)*s.';
            dB = dB + b(ii)*dbBI;
            db(ii) = dbBI(:).'*B(:);
        end
    end

end

function y = logdet(M)
    [~,U] = lu(M);
    y = sum(log(diag(U).^2))/2;
end

function test_this()
    m = 3;
    n = 5;
    A = randn(m,n);
    b = rand(1,n);
    B = randn(m,m+1); B = B*B.';
    testBackprop(@posteriorNorm_slow,{A,B,b},{1,1,1});
end
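% A direct check of one column against the formula in the header comment
% (made-up input, not part of the original file; note the returned density
% omits the additive constant -dim*log(2*pi)/2):
%
%     m = 3; n = 4;
%     A = randn(m,n); B = randn(m); B = B*B.'; b = rand(1,n);
%     y = posteriorNorm_slow(A,B,b);
%     i = 2; Pi = eye(m) + b(i)*B;
%     yi = (log(det(Pi)) - A(:,i).'*(Pi\A(:,i)))/2;   % matches y(i)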
github
bsxfan/meta-embeddings-master
create_nice_Trans2.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/MLNDA/create_nice_Trans2.m
3,145
utf_8
0c8c4c15e2c5338c0237235ede630f48
function [f,fi,paramsz,fe] = create_nice_Trans2(dim,rank)
% Creates affine transform, having a matrix: M = sigma I + L*D*L.', where
% L is of low rank (tall) and D is small and square, but otherwise
% unconstrained. The forward transform is:
%   f(X) = M \ X + offset

    if nargin==0
        test_this();
        return;
    end

    paramsz = 1 + dim*rank + rank*rank + dim;

    f = @f_this;
    fi = @fi_this;
    fe = @expand;

    function T = f_this(P,R)
        [sigma,L,D,offset] = expand(P);
        M = sigma*eye(dim) + L*D*L.';
        T = bsxfun(@plus,offset,M\R);
    end

    function [R,logdetJ,back] = fi_this(P,T)
        [sigma,L,D,offset,back0] = expand(P);
        Delta = bsxfun(@minus,T,offset);
        DL = D*L.';
        DLDelta = DL*Delta;
        R = sigma*Delta + L*DLDelta;
        [logdet,back1] = logdetNice2(sigma,L,D);
        n = size(T,2);
        logdetJ = -n*logdet;
        back = @back_that;
        function [dP,dT] = back_that(dR,dlogdetJ)
            % [logdetJ,back1] = logdetNice(sigma,L,d)
            [dsigma,dL,dD] = back1(-n*dlogdetJ);

            % R = sigma*Delta + L*DLDelta
            dsigma = dsigma + dR(:).'*Delta(:);
            dDelta = sigma*dR;
            dL = dL + dR*DLDelta.';
            dDLDelta = L.'*dR;

            % DLDelta = DL*Delta
            dDL = dDLDelta*Delta.';
            dDelta = dDelta + DL.'*dDLDelta;

            % DL = D*L.'
            dD = dD + dDL*L;
            dL = dL + dDL.'*D;

            % Delta = bsxfun(@minus,T,offset)
            dT = dDelta;
            doffset = -sum(dDelta,2);

            dP = back0(dsigma,dL,dD,doffset);
        end
    end

    function [sigma,L,D,offset,back] = expand(P)
        at = 1;

        sz = 1;
        %logsigma = P(at);
        %sigma = exp(logsigma);
        %sigma = P(at);
        sqrtsigma = P(at);
        sigma = sqrtsigma^2;
        at = at + sz;

        sz = dim*rank;
        L = reshape(P(at:at+sz-1),dim,rank);
        at = at + sz;

        sz = rank*rank;
        D = reshape(P(at:at+sz-1),rank,rank);
        at = at + sz;

        sz = dim;
        offset = P(at:at+sz-1);
        at = at + sz;

        assert(at==length(P)+1);

        back = @back_this;
        function dP = back_this(dsigma,dL,dD,doffset)
            %dlogsigma = sigma*dsigma;
            %dP = [dlogsigma;dL(:);dd;doffset];
            %dP = [dsigma;dL(:);dd;doffset];
            dsqrtsigma = 2*dsigma*sqrtsigma;
            dP = [dsqrtsigma;dL(:);dD(:);doffset];
        end
    end

end

function test_this()
    dim = 5;
    rank = 2;
    n = 6;
    [f,fi,sz,fe] = create_nice_Trans2(dim,rank);
    P = randn(sz,1);
    R = randn(dim,n);
    T = f(P,R);
    Ri = fi(P,T);
    test_inverse = max(abs(R(:)-Ri(:))),
    testBackprop_multi(fi,2,{P,T});
    %testBackprop_multi(fe,4,{P});
end
github
bsxfan/meta-embeddings-master
matmul.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/MLNDA/matmul.m
373
utf_8
d1aa5614f293a88c05d079f2d4426c1e
function [M,back] = matmul(A,B)

    if nargin==0
        test_this();
        return;
    end

    M = A*B;

    back = @back_this;
    function [dA,dB] = back_this(dM)
        dA = dM*B.';
        dB = A.'*dM;
    end

end

function test_this()
    m = 2;
    n = 3;
    k = 4;
    A = randn(m,k);
    B = randn(k,n);
    testBackprop(@matmul,{A,B});
end
github
bsxfan/meta-embeddings-master
posteriorNorm_fast.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/MLNDA/posteriorNorm_fast.m
2,131
utf_8
f950d84510798e37996a0f242fd5f9be
function [y,back] = posteriorNorm_fast(A,B,b)
% Computes, for every i: log N( 0 | Pi\A(:,i), inv(Pi) ), where
% precisions are Pi = I + b(i)*B
%
% This is the fast version, which simultaneously diagonalizes all the Pi,
% using eigenanalysis of B.
%
% Inputs:
%   A: dim-by-n, natural parameters (precision *mean) for n Gaussians
%   B: dim-by-dim, common precision matrix factor (full, positive semi-definite)
%   b: 1-by-n, precision scale factors
%
% Outputs:
%   y: 1-by-n, log densities, evaluated at zero
%   back: backpropagation handle, [dA,dB,db] = back(dy)

    if nargin==0
        test_this();
        return;
    end

    [V,L] = eig(B);  % V*L*V' = B
    L = diag(L);

    bL = bsxfun(@times,b,L);
    logdets = sum(log1p(bL),1);
    bL1 = 1 + bL;
    S = V*bsxfun(@ldivide,bL1,V.'*A);
    Q = sum(A.*S,1);
    y = (logdets - Q)/2;

    back = @back_this;
    function [dA,dB,db] = back_this(dy)
        hdy = dy/2;
        dA = bsxfun(@times,-hdy,S);
        dS = bsxfun(@times,-hdy,A);
        dlogdets = hdy;

        dA2 = V*bsxfun(@ldivide,bL1,V.'*dS);
        dA = dA + dA2;

        dBlogdet = V*bsxfun(@times,sum(bsxfun(@rdivide,b.*dlogdets,bL1),2),V.');
        dBsolve = bsxfun(@times,-b,dA2) * S.';
        dB = dBlogdet + (dBsolve+dBsolve.')/2;

        if nargout>=3
            db = L.'*bsxfun(@ldivide,bL1,dlogdets) - sum(S.*(B*dA2),1);
        end
    end

end

function test_this()
    m = 3;
    n = 5;
    A = randn(m,n);
    b = rand(1,n);
    B = randn(m,m+1); B = B*B.';

    fprintf('test function values:\n');
    err = max(abs(posteriorNorm_fast(A,B,b) - posteriorNorm_slow(A,B,b))),

    fprintf('test derivatives:\n');
    [y,back] = posteriorNorm_fast(A,B,b);
    dy = randn(size(y));
    [dAf,dBf,dbf] = back(dy);
    [~,back] = posteriorNorm_slow(A,B,b);
    [dAs,dBs,dbs] = back(dy);
    err_dA = max(abs(dAs(:)-dAf(:))),
    err_db = max(abs(dbs(:)-dbf(:))),
    err_dB = max(abs(dBs(:)-dBf(:))),
    % neither complex, nor real step differentiation seem to work through eig()
end
github
bsxfan/meta-embeddings-master
logdetNice2.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/MLNDA/logdetNice2.m
929
utf_8
469e8c40a8fb711c201f8fbeaf1c8b32
function [logdet,back] = logdetNice2(sigma,R,D)

    if nargin==0
        test_this();
        return;
    end

    RR = R.'*R;
    [Ld,Ud] = lu(D);
    invD = inv(Ud)/Ld;
    S = RR/sigma + invD;
    [L,U] = lu(S);
    dim = size(R,1);
    logdet = ( sum(log(diag(U).^2)) + sum(log(diag(Ud).^2)) + dim*log(sigma^2) )/2;

    back = @back_this;
    function [dsigma,dR,dD] = back_this(dlogdet)
        dS = dlogdet*(inv(U)/L).';
        dD = dlogdet*invD.';
        dsigma = dim*dlogdet/sigma;
        dR = R*(dS + dS.')/sigma;
        dsigma = dsigma - (RR(:).'*dS(:))/sigma^2;
        dinvD = dS;
        dD = dD - D.'\(dinvD/D.');
    end

end

function test_this()
    dim = 5;
    rank = 2;
    sigma = randn;
    R = randn(dim,rank);
    D = randn(rank);
    M = sigma*eye(dim) + R*D*R.';
    [log(abs(det(M))),logdetNice2(sigma,R,D)]
    testBackprop(@logdetNice2,{sigma,R,D})
end
github
bsxfan/meta-embeddings-master
create_semi_discrete_plda2.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/SDPLDA/create_semi_discrete_plda2.m
2,834
utf_8
9de983873f97d8473844c898559ed911
function model = create_semi_discrete_plda2(N,dim,scal)

    if nargin==0
        test_this();
        return;
    end

    prior = -2*log(N);  % flat prior on speaker identity variable
    Means = randn(dim,N);
    W = randn(dim,dim+2); W = W*W.'*(scal/(dim+2));
    cholW = chol(W);
    WMeans = W*Means;
    offs = -sum(Means.*WMeans,1)/2;

    thr = -30;  %-2*log(N); % to control sparsity

    B0 = -Means.'*WMeans;
    b = max(B0(:));
    B0 = B0-b;

    B1 = B0;
    B1(B1<thr) = -inf;
    B1 = sparse(exp(B1));
    B{1} = B1;

    B1 = 2*B0;
    B1(B1<thr) = -inf;
    B1 = sparse(exp(B1));
    B{2} = B1;

    F = [WMeans.',offs.'];

    model.sample = @sample;
    model.extract_me = @extract_me;
    model.log_expectations = @log_expectations;

    function [D,Z1,Z2] = sample(HL,Z1,Z2)
        % HL: K-by-T, label matrix, with 1-hot columns, for T samples from K speakers
        [K,T] = size(HL);
        if ~exist('Z1','var') || isempty(Z1)  % sample Z from flat prior
            Z1 = sparse(randi(N,1,K),1:K,true,N,K);  % N-by-K: speaker identity variables (1-hot columns)
            Z2 = sparse(randi(N,1,K),1:K,true,N,K);  % N-by-K: speaker identity variables (1-hot columns)
        end
        % generate data
        MZ1 = Means*Z1;  % dim-by-K
        MZ2 = Means*Z2;  % dim-by-K
        D = (MZ1+MZ2)*HL + cholW\randn(dim,T);
    end

    function E = extract_me(D)
        T = size(D,2);
        E = [D;ones(1,T)];
    end

    function L = log_expectations(E)
        V = F*E;
        mx = max(V,[],1);
        V = bsxfun(@minus,V,mx);
        V(V<thr) = -inf;
        V = exp(V);
        n = E(end,:);
        T = length(n);
        L = zeros(1,T);
        for i=1:max(n)
            f = n==i;
            Vi = V(:,f);
            L(f) = prior + i*b + 2*mx(f) + log(sum(Vi.*(B{i}*Vi),1));
        end
    end

end

function test_this()
    N = 1000;
    dim = 100;
    scal = 0.1;
    model = create_semi_discrete_plda2(N,dim,scal);
    llhfun = @model.log_expectations;
    extr = @model.extract_me;

    n = 10000;
    HL = logical(speye(n));
    [Enroll,Z1,Z2] = model.sample(HL);
    Enroll = extr(Enroll);
    Tar = extr(model.sample(HL,Z1,Z2));
    Non = extr(model.sample(HL));

    llr = @(enr,test) llhfun(enr + test) - llhfun(enr) - llhfun(test);
    tar = llr(Enroll,Tar);
    non = llr(Enroll,Non);

    fprintf('EER = %g%%\n',100*eer(tar,non));
    % fprintf('Cllr,minCllr = %g, %g\n',cllr(tar,non),min_cllr(tar,non));
    %
    % hist([tar,non],300);
    %
    % plot_type = Det_Plot.make_plot_window_from_string('old');
    % plot_obj = Det_Plot(plot_type,'SEMI-DISCRETE-PLDA');
    %
    % plot_obj.set_system(tar,non,'sys1');
    % plot_obj.plot_steppy_det({'b','LineWidth',2},' ');
end
github
bsxfan/meta-embeddings-master
create_semi_discrete_plda.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/SDPLDA/create_semi_discrete_plda.m
2,051
utf_8
9b3b1423eb400e63b6083807faed3bfa
function model = create_semi_discrete_plda(N,dim,scal)

    if nargin==0
        test_this();
        return;
    end

    prior = -log(N);  % flat prior on speaker identity variable
    Means = randn(dim,N);
    W = randn(dim,dim+2); W = W*W.'*(scal/(dim+2));
    cholW = chol(W);
    WMeans = W*Means;
    offs = sum(Means.*WMeans,1)/2;

    model.sample = @sample;
    model.extract_me = @extract_me;
    model.log_expectations = @log_expectations;

    function [D,Z] = sample(HL,Z)
        % HL: K-by-T, label matrix, with 1-hot columns, for T samples from K speakers
        [K,T] = size(HL);
        if ~exist('Z','var') || isempty(Z)  % sample Z from flat prior
            Z = sparse(randi(N,1,K),1:K,true,N,K);  % N-by-K: speaker identity variables (1-hot columns)
        end
        % generate data
        MZ = Means*Z;  % dim-by-K
        D = MZ*HL + cholW\randn(dim,T);
    end

    function E = extract_me(D)
        E = bsxfun(@minus,WMeans'*D,offs.');
    end

    function L = log_expectations(E)
        mx = max(E,[],1);
        L = prior + mx + log(sum(exp(bsxfun(@minus,E,mx)),1));
    end

end

function test_this()
    N = 1000;
    dim = 100;
    scal = 0.2;
    model = create_semi_discrete_plda(N,dim,scal);
    llhfun = @model.log_expectations;
    extr = @model.extract_me;

    n = 10000;
    HL = logical(speye(n));
    [Enroll,Z] = model.sample(HL);
    Enroll = extr(Enroll);
    Tar = extr(model.sample(HL,Z));
    Non = extr(model.sample(HL));

    llr = @(enr,test) llhfun(enr + test) - llhfun(enr) - llhfun(test);
    tar = llr(Enroll,Tar);
    non = llr(Enroll,Non);

    fprintf('EER = %g%%\n',100*eer(tar,non));
    fprintf('Cllr,minCllr = %g, %g\n',cllr(tar,non),min_cllr(tar,non));

    hist([tar,non],300);

    plot_type = Det_Plot.make_plot_window_from_string('old');
    plot_obj = Det_Plot(plot_type,'SEMI-DISCRETE-PLDA');
    plot_obj.set_system(tar,non,'sys1');
    plot_obj.plot_steppy_det({'b','LineWidth',2},' ');
end
github
bsxfan/meta-embeddings-master
create_semi_discrete_plda3.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/SDPLDA/create_semi_discrete_plda3.m
2,232
utf_8
f61c1837d29ad7680b5804570951eaa9
function model = create_semi_discrete_plda3(N,dim,scal)

    if nargin==0
        test_this();
        return;
    end

    prior = -log(N);  % flat prior on speaker identity variable
    Means = randn(dim,N);
    W = randn(dim,dim+2); W = W*W.'*(scal/(dim+2));
    cholW = chol(W);
    WMeans = W*Means;
    offs = sum(Means.*WMeans,1)/2;
    F = [WMeans.',-offs.'];

    model.sample = @sample;
    model.extract_me = @extract_me;
    model.log_expectations = @log_expectations;

    function [D,Z] = sample(HL,Z)
        % HL: K-by-T, label matrix, with 1-hot columns, for T samples from K speakers
        [K,T] = size(HL);
        if ~exist('Z','var') || isempty(Z)  % sample Z from flat prior
            Z = sparse(randi(N,1,K),1:K,true,N,K);  % N-by-K: speaker identity variables (1-hot columns)
        end
        % generate data
        MZ = Means*Z;  % dim-by-K
        D = MZ*HL + cholW\randn(dim,T);
    end

    function E = extract_me(D)
        %E = bsxfun(@minus,WMeans'*D,offs.');
        E = [D;ones(1,size(D,2))];
    end

    function L = log_expectations(E)
        E = F*E;
        mx = max(E,[],1);
        L = prior + mx + log(sum(exp(bsxfun(@minus,E,mx)),1));
    end

end

function test_this()
    close all;
    N = 1000;
    dim = 100;
    scal = 0.2;
    model = create_semi_discrete_plda3(N,dim,scal);
    llhfun = @model.log_expectations;
    extr = @model.extract_me;

    n = 10000;
    HL = logical(speye(n));
    [Enroll,Z] = model.sample(HL);
    Enroll = extr(Enroll);
    Tar = extr(model.sample(HL,Z));
    Non = extr(model.sample(HL));

    llr = @(enr,test) llhfun(enr + test) - llhfun(enr) - llhfun(test);
    tar = llr(Enroll,Tar);
    non = llr(Enroll,Non);

    fprintf('EER = %g%%\n',100*eer(tar,non));
    fprintf('Cllr,minCllr = %g, %g\n',cllr(tar,non),min_cllr(tar,non));

    subplot(1,3,1); hist([tar,non],300);
    subplot(1,3,2); hist(non,300);
    subplot(1,3,3); hist(tar,300);

    plot_type = Det_Plot.make_plot_window_from_string('old');
    plot_obj = Det_Plot(plot_type,'SEMI-DISCRETE-PLDA');
    plot_obj.set_system(tar,non,'sys1');
    plot_obj.plot_steppy_det({'b','LineWidth',2},' ');
end
github
bsxfan/meta-embeddings-master
create_diagonalized_precision.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/ParPLDA/create_diagonalized_precision.m
823
utf_8
ad8a12141df6c2eeb6adb87fbe93b6da
function dP = create_diagonalized_precision(P)

    if nargin==0
        test_this();
        return;
    end

    [V,E] = eig(P);
    E = diag(E);

    dP.logdet_I_plus_nP = @logdet_I_plus_nP;
    dP.solve_I_plus_nP = @solve_I_plus_nP;

    function y = logdet_I_plus_nP(n)
        nE = bsxfun(@times,n,E);
        y = sum(log1p(nE),1);
    end

    function X = solve_I_plus_nP(n,R)
        nE = bsxfun(@times,n,E);
        X = V*bsxfun(@ldivide,1+nE,V'*R);
    end

end

function test_this()

    P = randn(3,4); P = P*P.';
    I = eye(size(P));
    dP = create_diagonalized_precision(P);

    n = randi(5,1,2);
    M1 = I + n(1)*P;
    M2 = I + n(2)*P;
    R = randn(3,2);

    [log(det(M1)),log(det(M2)),dP.logdet_I_plus_nP(n)]
    [M1\R(:,1),M2\R(:,2),dP.solve_I_plus_nP(n,R)]

end
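A hypothetical check (not part of the original file) of the identity this factory exploits: with the eigendecomposition P = V*diag(E)*V', we have I + n*P = V*diag(1+n*E)*V', so both the log-determinant and the solve reduce to elementwise work on the eigenvalues.

% Hypothetical verification against the dense computations.
P = randn(5,6); P = P*P.';                                % a random PSD "precision"
dP = create_diagonalized_precision(P);
n = 3; b = ones(5,1);
err_logdet = log(det(eye(5)+n*P)) - dP.logdet_I_plus_nP(n)   % ~0
err_solve = norm((eye(5)+n*P)\b - dP.solve_I_plus_nP(n,b))   % ~0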
github
bsxfan/meta-embeddings-master
equip_with_diagble_GME_scoring.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/ParPLDA/equip_with_diagble_GME_scoring.m
4,780
utf_8
2ddf2f48c430475c24f987f18d07b8f4
function model = equip_with_diagble_GME_scoring(model)
% Equip any model with function handles for runtime scoring
% functionality for Gaussian meta-embeddings (GMEs).
%
% Inputs:
%   model: any struct. The struct members are not referenced in this code.
%          A number of method handles (described below) are added to the
%          struct on output.
%
% Meta-embeddings from the same speaker can be pooled, to form
% `speaker models', when multiple enrollment sessions are available.
% For example, for each of n2 speakers, let X1: m-by-n2
% represent the first enrollment sessions, in the form of i-vectors. Also
% let X2: m-by-n2 and Y2: d-by-n2 represent the second enrollment sessions
% for each speaker, but available in both forms. The enrollment
% meta-embeddings for these speakers can be formed as:
%
%   enrollments = model.poolME(model.extractME(X1,[]),model.extractME(X2,Y2));
%
% Note pooling is appropriate when data is independent, given the speaker. If
% X1 and Y1 are extracted from the same set of recordings, they are not
% independent. In this case: me = model.extractME(X1,Y1) is correct,
% while me = model.poolME(model.extractME(X1,[]),model.extractME([],Y1)) is
% incorrect. Conversely, if X1 and Y2 are extracted from different
% sets of recordings (of the same speakers), then pooling is correct:
% me = model.poolME(model.extractME(X1,[]),model.extractME([],Y2)).
%
% For scoring, there are two functions for efficiently scoring sets of trials:
%   - model.scoreTrials(enroll,test): scores n enrollment meta-embeddings
%     against the corresponding n test meta-embeddings. This returns a
%     vector of n scores.
%   - model.scoreMatrix(enroll,test): scores m enrollment meta-embeddings
%     against **each** of n test meta-embeddings. This returns an m-by-n
%     matrix of scores.
% Note the argument names 'enroll' and 'test' are arbitrary. Scoring
% is symmetric. These arguments are meta-embedding structs, extracted
% (and optionally pooled) as described above. This function adds the
% analogous D-suffixed handles (poolDME, scoreTrialsD, scoreMatrixD,
% logExpectationD, estimateZD), which operate on diagonalizable
% meta-embeddings, as produced for example by extractDME in the test below.

    if nargin==0
        test_this();
        return;
    end

    model.poolDME = @poolDME;
    model.logExpectationD = @logExpectationD;
    model.scoreMatrixD = @scoreMatrixD;
    model.scoreTrialsD = @scoreTrialsD;
    model.estimateZD = @estimateZD;

    function me = poolDME(me1,me2)
        %assert(isequal(me1.P,me2.P));  how to test for handle equality?
        me.n = me1.n + me2.n;
        me.F = me1.F + me2.F;
        me.dP = me1.dP;
    end

    function y = logExpectationD(me)
        dP = me.dP;
        logdet = dP.logdet_I_plus_nP(me.n);
        Z = estimateZD(me);
        y = ( sum(Z.*me.F,1) - logdet )/2;
    end

    function Z = estimateZD(me)
        dP = me.dP;
        Z = dP.solve_I_plus_nP(me.n,me.F);
    end

    function LLR = scoreMatrixD(left,right)
        dP = left.dP;
        sleft = logExpectationD(left);
        sright = logExpectationD(right);
        m = length(sleft);
        n = length(sright);
        LLR = zeros(m,n);
        for i=1:m
            ni = left.n(i);
            Fi = left.F(:,i);
            nn = ni + right.n;
            FF = bsxfun(@plus,Fi,right.F);
            logdet = dP.logdet_I_plus_nP(nn);
            ZZ = dP.solve_I_plus_nP(nn,FF);
            LLR(i,:) = ( sum(ZZ.*FF,1) - logdet )/2 - sleft(i) - sright;
        end
    end

    function llr = scoreTrialsD(enroll,test)
        llr = logExpectationD(poolDME(enroll,test)) - logExpectationD(enroll) - logExpectationD(test);
    end

end

function test_this()

    dim = 10;
    zdim = 2;
    mu = randn(dim,1);
    V = randn(dim,zdim);
    W = randn(dim,dim+1); W = W * W.';

    model.mu = mu;
    model.V = V;
    model.W = W;
    model = SPLDA_equip_with_extractor(model);
    model = SPLDA_equip_with_diagble_extractor(model);
    model = equip_with_GME_scoring(model,zdim);
    model = equip_with_diagble_GME_scoring(model);

    m = 3;
    %n = m;
    n = 4;
    enroll1 = randn(dim,m);
    enroll2 = randn(dim,m);
    test = randn(dim,n);

    E1 = model.extractME(enroll1);
    E2 = model.extractME(enroll2);
    E = model.poolME(E1,E2);
    T = model.extractME(test);
    %model.scoreTrials(E1,T)
    %model.logExpectation(E)
    model.scoreMatrix(E,T)

    E1 = model.extractDME(enroll1);
    E2 = model.extractDME(enroll2);
    E = model.poolDME(E1,E2);
    T = model.extractDME(test);
    %model.scoreTrialsD(E1,T)
    %model.logExpectationD(E)
    model.scoreMatrixD(E,T)

end
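A hedged usage sketch (not from the repo): the struct fields a diagonalizable meta-embedding needs are visible from poolDME and logExpectationD above — n, F and dP, the latter from create_diagonalized_precision. The data values below are arbitrary illustrations, not the real extractor output.

% Hypothetical sketch: scoring with hand-built diagonalizable GMEs.
zdim = 2; k = 5;
P = randn(zdim,zdim+1); P = P*P.';           % shared speaker-space precision
dP = create_diagonalized_precision(P);
enroll = struct('n',ones(1,k),'F',P*randn(zdim,k),'dP',dP);
test = struct('n',ones(1,k),'F',P*randn(zdim,k),'dP',dP);
model = equip_with_diagble_GME_scoring(struct());
llr = model.scoreTrialsD(enroll,test)        % 1-by-k vector of LLR scores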
github
bsxfan/meta-embeddings-master
create_partition_posterior.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/stochastic_clustering/create_partition_posterior.m
13,237
utf_8
3d17173981da73f237fb493d8df5d67f
function pp = create_partition_posterior(alpha,beta,llhfun,Emb)

    if nargin==0
        test_this();
        return;
    end

    if isinf(alpha) || (beta==1) || (alpha==0 && beta==0)
        error('degenerate cases not handled');
    end

    if beta>0
        Kfactor = log(beta) - gammaln(1-beta);
    else
        Kfactor = log(alpha);
    end

    llh_fine = llhfun(Emb);  %may be useful for deciding which cluster to split
    [D,T] = size(Emb);

    % ignores all terms not dependent on K (i.e. terms dependent just on
    % alpha, beta and T)
    function C = CRP_Kterm(K)
        if beta>0
            C = K*Kfactor + gammaln(alpha/beta + K);
        else
            C = K*Kfactor;
        end
    end

    %unnormalized partition posterior
    function logPtilde = getlogPtilde(part)
        logPtilde = part.llh_K + sum(part.llh_counts) + sum(part.llh_subsets);
    end

    pp.create_partition = @create_partition;
    function po = create_partition(HL)
        if ~exist('HL','var') || isempty(HL)
            HL = logical(speye(T));
        end
        part.HL = HL;
        part.counts = full(sum(HL,2));
        part.K = length(part.counts);
        part.llh_K = CRP_Kterm(part.K);
        part.llh_counts = gammaln(part.counts-beta);
        part.PE = Emb*part.HL.';  % pooled embeddings
        part.llh_subsets = llhfun(part.PE);
        part.llr = part.llh_subsets - llh_fine*part.HL.';  % LLRs for each cluster: coarse vs fine
        part.logPtilde = getlogPtilde(part);
        po = wrap(part);
    end

    function po = wrap(part)
        po.part = part;
        po.test_merge = @(i,j) test_merge(part,i,j);
        po.test_split = @(i,labels) test_split(part,i,labels);
        po.dumb_merge = @(i,j) dumb_merge(part,i,j);
        po.smart_merge = @(i,j) smart_merge(part,i,j);
        po.dumb_split = @(i,labels) dumb_split(part,i,labels);
        po.smart_split = @(i,labels) smart_split(part,i,labels);
    end

    %tentative merge for MH acceptance test
    function [logPtilde,commit] = test_merge(part,i,j)
        if i==j
            logPtilde = part.logPtilde;
            state = [];
        else
            w = true(part.K,1);
            w([i,j]) = false;
            state.llh_counts = gammaln(part.counts(i)+part.counts(j)-beta);
            state.PE = part.PE(:,i) + part.PE(:,j);
            state.llh_subsets = llhfun(state.PE);
            state.llh_K = CRP_Kterm(part.K-1);
            logPtilde = state.llh_K + part.llh_counts.'*w + sum(state.llh_counts) ...
                        + part.llh_subsets*w + sum(state.llh_subsets);
            state.logPtilde = logPtilde;
        end
        commit = @() commit_merge(part,i,j,state);
    end

    function [logPtilde,commit] = test_split(part,i,labels)
    % i: cluster to split
    % labels: one-hot label matrix:
    %         row indices are new cluster indices (starting at 1)
    %         column indices are original recording (embedding) indices
        if isempty(labels)  % no split
            logPtilde = part.logPtilde;
            state = [];
        else  % split
            w = true(part.K,1);
            w(i) = false;
            state.counts = full(sum(labels,2));
            state.K = part.K + 1;
            state.llh_K = CRP_Kterm(state.K);
            state.PE = Emb*labels.';
            state.llh_subsets = llhfun(state.PE);
            state.llh_counts = gammaln(state.counts-beta);
            logPtilde = state.llh_K + part.llh_counts.'*w + part.llh_subsets*w + ...
                        sum(state.llh_counts) + sum(state.llh_subsets);
            state.logPtilde = logPtilde;
        end
        commit = @() commit_split(part,i,labels,state);
    end

    function mpo = commit_merge(part,i,j,state)
        if i == j  %no merge
            mpo = wrap(part);
            return;
        end
        mpart.K = part.K - 1;
        mpart.llh_K = state.llh_K;

        k = min(i,j);    %put merged cluster here
        ell = max(i,j);  %delete this cluster

        counts = part.counts;
        counts(k) = counts(i) + counts(j);
        counts(ell) = [];
        mpart.counts = counts;

        PE = part.PE;
        PE(:,k) = state.PE;
        PE(:,ell) = [];
        mpart.PE = PE;

        llh_subsets = part.llh_subsets;
        llh_subsets(k) = state.llh_subsets;
        llh_subsets(ell) = [];
        mpart.llh_subsets = llh_subsets;

        llh_counts = part.llh_counts;
        llh_counts(k) = state.llh_counts;
        llh_counts(ell) = [];
        mpart.llh_counts = llh_counts;

        HL = part.HL;
        HL(k,:) = HL(i,:) | HL(j,:);
        HL(ell,:) = [];
        mpart.HL = HL;

        mpart.logPtilde = state.logPtilde;
        mpart.llr = llh_subsets - llh_fine*HL.';

        mpo = wrap(mpart);
    end

    function spo = commit_split(part,i,labels,state)
        if isempty(labels)  % no split
            spo = wrap(part);
            return;
        end
        HL = part.HL;
        HL(i,:) = [];
        HL = [HL;labels];
        spart.HL = HL;

        PE = part.PE;
        PE(:,i) = [];
        PE = [PE,state.PE];
        spart.PE = PE;

        llh_subsets = part.llh_subsets;
        llh_subsets(i) = [];
        llh_subsets = [llh_subsets,state.llh_subsets];
        spart.llh_subsets = llh_subsets;

        llh_counts = part.llh_counts;
        llh_counts(i) = [];
        llh_counts = [llh_counts;state.llh_counts];
        spart.llh_counts = llh_counts;

        counts = part.counts;
        counts(i) = [];
        counts = [counts;state.counts];
        spart.counts = counts;

        spart.llh_K = state.llh_K;
        spart.K = state.K;
        spart.logPtilde = state.logPtilde;
        spart.llr = llh_subsets - llh_fine*HL.';

        spo = wrap(spart);
    end

    function [logQ,i,j] = dumb_merge(part,i,j)
        K = part.K;
        if nargin==1 || isempty(i)
            i = randi(K);
            j = randi(K);
        end
        logQ = -2*log(K);  %K^2 equiprobable states
    end

    function [logQ,i,j] = smart_merge(part,i,j)
        K = part.K;
        sample = nargin==1 || isempty(i);
        if sample
            i = randi(K);
        end
        logQ = -log(K);
        scounts = (part.counts(i)-beta) + part.counts;
        scounts(i) = part.counts(i);
        log_prior = gammaln(scounts).';
        lpi = log_prior(i);
        log_prior = log_prior + CRP_Kterm(K-1);
        log_prior(i) = lpi + CRP_Kterm(K);
        sEi = part.PE(:,i);
        sE = bsxfun(@plus,part.PE(:,i),part.PE);
        sE(:,i) = sEi;
        log_post = log_prior + llhfun(sE);
        mx = max(log_post);
        norm = mx + log(sum(exp(log_post-mx)));
        if sample
            [~,j] = max(log_post + randgumbel(1,K));
        end
        logQ = logQ + log_post(j) - norm;
    end

    function [logQ,i,labels] = dumb_split(part,i,labels)
        K = part.K;
        if nargin==1 || isempty(i)
            i = randi(K);  %K equiprobable choices for i
            n = part.counts(i);
            r = part.HL(i,:);
            kk = randi(2,1,n);
            labels = sparse(kk,find(r),true,2,T);
            % 1 + S(n,2) equiprobable states, where S(n,2) = 2^(n-1) - 1 is
            % the Stirling number of the 2nd kind: the number of ways to
            % partition n items into 2 non-empty subsets. We add 1 because
            % we allow one subset to be empty.
            if min(sum(labels,2))==0  % one cluster is empty
                labels = [];  %signal no split
            end
        else
            n = part.counts(i);
        end
        logQ = -log(K) - (n-1)*log(2);  % Q = (1/K) / ( S(n,2) + 1 )
    end

    function [logQ,i,labels] = smart_split(part,i,labels)
        K = part.K;

        % choose cluster to be split
        llh = -part.llh_subsets;  %alternative is to use llr
        sample = nargin==1 || isempty(i);
        if sample
            [~,i] = max( llh + randgumbel(1,K) );
        end
        mx = max(llh);
        norm = mx + log(sum(exp(llh-mx)));
        logQ = llh(i) - norm;

        %split
        n = part.counts(i);
        r = part.HL(i,:);
        if n==1  %no split possible
            if sample
                labels = [];  %signal no split
            end
            return;
        end
        E = Emb(:,r);

        %assign first one arbitrarily (no effect on Q)
        PE = [E(:,1),zeros(D,1)];
        counts = [1;0];
        nt = 1;  %number of tables
        if sample
            kk = zeros(1,n);
            kk(1) = 1;
        else
            kk = ones(1,n);
            kk(labels(2,r)) = 2;
        end
        for j=2:n
            if nt == 1  % table 2 still empty
                logPrior = log([counts(1)-beta;nt*beta+alpha]);
            else
                logPrior = log(counts-beta);
            end
            SE = bsxfun(@plus,E(:,j),PE);
            logPost = logPrior.' + llhfun(SE);
            mx = max(logPost);
            norm = mx + log(sum(exp(logPost-mx)));
            if sample
                [~,k] = max(logPost + randgumbel(1,2));
                kk(j) = k;
            else
                k = kk(j);
            end
            logQ = logQ + logPost(k) - norm;
            PE(:,k) = SE(:,k);
            counts(k) = counts(k) + 1;
            if k==2
                nt = 2;
            end
        end
        if sample && nt==2
            labels = sparse(kk,find(r),true,2,T);
        elseif sample && nt==1
            labels = [];  %signal no split
        end
    end

    % The two Metropolis-Hastings moves below are not yet exposed via wrap();
    % they take and return wrapped partitions.
    function po1 = smart_split_dumb_merge(po0)
        part0 = po0.part;
        logP0 = part0.logPtilde;
        if flip_coin()  % split
            %fwd
            [logQ,i,labels] = smart_split(part0);
            logQfwd = logQ - log(2);
            [logP1,commit] = test_split(part0,i,labels);
            po1 = commit();
            %reverse
            [i,j] = merge_from_split(part0,i,labels);
            logQrev = dumb_merge(po1.part,i,j) - log(2);
        else  % merge
            %fwd
            [logQ,i,j] = dumb_merge(part0);
            logQfwd = logQ - log(2);
            [logP1,commit] = test_merge(part0,i,j);
            po1 = commit();
            %reverse
            [i,labels] = split_from_merge(part0,i,j);
            logQrev = smart_split(po1.part,i,labels) - log(2);
        end
        MH = exp(logP1 - logP0 + logQrev - logQfwd);
        if rand > MH
            po1 = po0;  % reject
        end
    end

    function po1 = smart_merge_dumb_split(po0)
        part0 = po0.part;
        logP0 = part0.logPtilde;
        if flip_coin()  % split
            %fwd
            [logQ,i,labels] = dumb_split(part0);
            logQfwd = logQ - log(2);
            [logP1,commit] = test_split(part0,i,labels);
            po1 = commit();
            %reverse
            [i,j] = merge_from_split(part0,i,labels);
            logQrev = smart_merge(po1.part,i,j) - log(2);
        else  % merge
            %fwd
            [logQ,i,j] = smart_merge(part0);
            logQfwd = logQ - log(2);
            [logP1,commit] = test_merge(part0,i,j);
            po1 = commit();
            %reverse
            [i,labels] = split_from_merge(part0,i,j);
            logQrev = dumb_split(po1.part,i,labels) - log(2);
        end
        MH = exp(logP1 - logP0 + logQrev - logQfwd);
        if rand > MH
            po1 = po0;  % reject
        end
    end

    function [i,j] = merge_from_split(part,i,labels)
        if isempty(labels)  % no split
            j = i;  % no merge
        else
            % after commit_split, the two new clusters sit at the end
            K = part.K;
            i = K;
            j = K+1;
        end
    end

    function [i,labels] = split_from_merge(part,i,j)
        if i==j  % no split
            labels = [];
        else
            labels = part.HL([min(i,j),max(i,j)],:);
            i = min(i,j);
        end
    end

end

function heads = flip_coin()
    heads = rand >= 0.5;
end

function test_this()

    dim = 1;
    N = 10;
    Emb = randn(dim,N);
    llhfun = @(Emb) zeros(1,size(Emb,2));
    alpha = pi;
    beta = 1/pi;
    pp = create_partition_posterior(alpha,beta,llhfun,Emb);

    part = pp.create_partition();
    counts = part.part.counts.'

    for iter=1:100
        if rand>0.5  % split
            [~,i,labels] = part.dumb_split([],[]);
            [~,commit] = part.test_split(i,labels);
            if rand>0.5
                part = commit();
            end
        else  %merge
            [~,i,j] = part.dumb_merge([],[]);
            [~,commit] = part.test_merge(i,j);
            if rand>0.5
                part = commit();
            end
        end
        counts = part.part.counts.'
    end

end
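A hypothetical cross-check (not part of the original file): the CRP terms in create_partition_posterior are the two-parameter (Pitman-Yor) exchangeable partition probability up to an additive constant in (alpha, beta, T). With a trivial llhfun, logPtilde differences between partitions should therefore match exact EPPF differences; the eppf handle below is my own rendering of that formula, not repo code.

% Hypothetical sketch: compare unnormalized posterior differences to the EPPF.
alpha = pi; beta = 1/pi; T = 6;
eppf = @(c) (length(c)-1)*log(beta) + gammaln(alpha/beta+length(c)) ...
          - gammaln(alpha/beta+1) + sum(gammaln(c-beta)) ...
          - length(c)*gammaln(1-beta) - gammaln(alpha+T) + gammaln(alpha+1);
pp = create_partition_posterior(alpha,beta,@(E) zeros(1,size(E,2)),randn(2,T));
p1 = pp.create_partition();  % all-singleton partition
p2 = pp.create_partition(logical(sparse([1 1 1 2 2 3],1:T,true)));
delta = (p1.part.logPtilde - p2.part.logPtilde) - (eppf(ones(T,1)) - eppf([3;2;1]))  % ~0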
github
bsxfan/meta-embeddings-master
sample_speaker.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/synthdata/sample_speaker.m
1,520
utf_8
f0f62cb9af06dc368f90cf9c9d6c92d3
function [X,precisions] = sample_speaker(z,F,k,n,chi_sq)
% Sample n heavy-tailed observations of speaker with identity variable z.
% Inputs:
%   z: d-by-1 speaker identity variable
%   F: D-by-d factor loading matrix
%   k: integer, k>=1, where nu=2k is degrees of freedom of the resulting
%      t-distribution
%   n: number of samples
%   chi_sq: [optional] If given and true, then precisions are sampled from
%           chi^2 with DF: nu = k*2. In this case, k*2 must be an integer,
%           so for example k=0.5 is valid and gives Cauchy samples.
%
% Output:
%   X: D-by-n samples
%   precisions: 1-by-n, the hidden precisions

    if nargin==0
        test_this();
        return;
    end

    if ~exist('n','var') || isempty(n)
        n = size(z,2);
    end

    if exist('chi_sq','var') && ~isempty(chi_sq) && chi_sq
        % sample Chi^2, with DF = nu = 2k, scaled by 1/nu, so that mean = 1.
        nu = 2*k;
        precisions = mean(randn(nu,n).^2,1);
    else  %Gamma
        % Sample n precisions independently from Gamma(k,k), which has mean = 1,
        % mode = (k-1)/k.
        precisions = -mean(log(rand(k,n)),1);
    end
    std = 1./sqrt(precisions);
    dim = size(F,1);
    Y = bsxfun(@times,std,randn(dim,n));
    X = bsxfun(@plus,F*z,Y);

end

function test_this()

    close all;
    z = 0;
    F = zeros(100,1);
    k = 5;
    [X,precisions] = sample_speaker(z,F,k,1000);

    figure;
    plot(X(1,:),X(2,:),'.');

    figure;
    plot(sum(X.^2,1),1./precisions,'.');

end
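A hypothetical sanity check (not in the original file): scaling i.i.d. normals by 1/sqrt of Gamma(k,k) precisions yields Student's t with nu = 2k degrees of freedom, so the excess kurtosis of each coordinate should be approximately 6/(nu-4), i.e. about 1 for k = 5.

% Hypothetical sketch: empirical excess kurtosis of 1-D heavy-tailed samples.
k = 5; nu = 2*k; n = 1e6;
X = sample_speaker(0,zeros(1,1),k,n);       % 1-D, zero-mean t_nu samples
xk = mean(X.^4)/mean(X.^2)^2 - 3            % ~ 6/(nu-4) = 1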
github
bsxfan/meta-embeddings-master
sample_HTnoise.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/synthdata/sample_HTnoise.m
925
utf_8
f33058cc3a73b7eacb30c87af34db71e
function [X,precisions] = sample_HTnoise(nu,dim,n,W)
% Sample n heavy-tailed noise vectors.
% Inputs:
%   nu: integer nu >= 1, degrees of freedom of the resulting t-distribution
%   dim: the dimensionality of the samples
%   n: number of samples
%   W: [optional] precision matrix (default: identity)
%
% Output:
%   X: dim-by-n samples
%   precisions: 1-by-n, the hidden precisions

    if nargin==0
        test_this();
        return;
    end

    if ~exist('W','var') || isempty(W)
        cholW = speye(dim);
    else
        cholW = chol(W);
    end

    precisions = mean(randn(nu,n).^2,1);
    std = 1./sqrt(precisions);
    X = cholW\bsxfun(@times,std,randn(dim,n));

end

function test_this()

    close all;
    dim = 2;
    nu = 2;
    W = randn(2,3); W = W*W.';
    [X,precisions] = sample_HTnoise(nu,dim,1000,W);

    figure;
    plot(X(1,:),X(2,:),'.'); axis('equal'); axis('square');

    %figure;
    %plot(sum(X.^2,1),1./precisions,'.');

end
github
bsxfan/meta-embeddings-master
qfuser_linear.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/fusion/funcs/qfuser_linear.m
2,337
utf_8
0fe31df563db3c6f4f08ea791e83c340
function [fusion,w0] = qfuser_linear(w,scores,scrQ,ndx,w_init)
% This function does the actual quality fusion (and is passed to
% the training function when training the quality fusion weights).
% The scores from the linear fusion are added to the combined
% quality measure for each trial to produce the final score.
% Inputs:
%   w: The trained quality fusion weights. If empty, this function
%     returns a function handle.
%   scores: A matrix of scores where the number of rows is the
%     number of systems to be fused and the number of columns
%     is the number of scores.
%   scrQ: An object of type Quality containing the quality measures
%     for models and segments.
%   ndx: A Key or Ndx object indicating trials.
%   w_init: The trained weights from the linear fusion (without
%     quality measures) training.
% Outputs:
%   fusion: If w is supplied, fusion is a vector of fused scores.
%     If w is not supplied, fusion is a function handle to a
%     function that takes w as input and produces a vector of fused
%     scores as output. This function wraps the scores and quality
%     measures.
%   w0: Initial weights for starting the quality fusion training.

if nargin==0
    test_this();
    return
end

assert(isa(scrQ,'Quality'))
assert(isa(ndx,'Ndx')||isa(ndx,'Key'))

if ~exist('w_init','var')
    assert(~isempty(w),'If w=[], then w_init must be supplied.');
    w_init = w;
end

[m,n] = size(scores);
wlin_sz = m+1;

% linear fuser
f1 = linear_fuser([],scores);
w1 = w_init(1:wlin_sz);
[wlin,wq] = splitvec_fh(wlin_sz);
f1 = f1(wlin);

[q,n1] = size(scrQ.modelQ);
[q2,n2] = size(scrQ.segQ);
assert(q==q2);
scrQ.modelQ = [scrQ.modelQ;ones(1,n1)];
scrQ.segQ = [scrQ.segQ;ones(1,n2)];
q = q + 1;
f2 = AWB_sparse(scrQ,ndx,tril_to_symm_fh(q));
f2 = f2(wq);
wq_sz = q*(q+1)/2;
w3 = zeros(wq_sz,1);

% assemble
fusion = sum_of_functions(w,[1,1],f1,f2);
w0 = [w1;w3];

end

function test_this()

k = 2;
m = 3;
n = 4;
q = 2;

qual = Quality();
qual.modelQ = randn(q,m);
qual.segQ = randn(q,n);

ndx = Ndx();
ndx.trialmask = false(m,n);
ndx.trialmask(1,1:2) = true;
ndx.trialmask(2:3,3:4) = true;

scores = randn(k,sum(ndx.trialmask(:)));

w_init = randn(k+1,1);  % linear init

[fusion,w0] = qfuser_linear([],scores,qual,ndx,w_init);
test_MV2DF(fusion,w0);
[fusion(w0),linear_fuser(w_init,scores)]

end
github
bsxfan/meta-embeddings-master
AWB_sparse.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/fusion/funcs/AWB_sparse.m
2,062
utf_8
dcb6e85fdcca1dfb1b5cdee3eb6ab112
function fh = AWB_sparse(qual,ndx,w)
% Produces trial quality measures from segment quality measures
% using the weighting matrix 'w'.
% This is almost an MV2DF, but it does not return derivatives on numeric
% input, w.
%
% Algorithm: Y = A*reshape(w,..)*B
% Inputs:
%   qual: A Quality object containing quality measures in modelQ
%     and segQ fields.
%   ndx: A Key or Ndx object indicating trials.
%   w: The combination weights for making trial quality measures.
% Outputs:
%   fh: If 'w' is given, vector of quality scores --- one for each
%     trial. If 'w' is empty, a function handle that produces
%     these scores given a 'w'.

if nargin==0
    test_this();
    return
end

assert(isa(qual,'Quality'))
assert(isa(ndx,'Ndx')||isa(ndx,'Key'))

[q,m] = size(qual.modelQ);
[q1,n] = size(qual.segQ);
assert(q==q1);

if isa(ndx,'Ndx')
    trials = ndx.trialmask;
else
    trials = ndx.tar | ndx.non;
end

ftrials = find(trials(:));
k = length(ftrials);
assert(m==size(trials,1) && n==size(trials,2));
[ii,jj] = ind2sub([m,n],ftrials);

    function y = map_this(w)
        WR = reshape(w,q,q)*qual.segQ;
        y = zeros(1,k);
        done = 0;
        for j=1:n
            right = WR(:,j);
            col = right.'*qual.modelQ(:,trials(:,j));
            len = length(col);
            y(done+1:done+len) = col;
            done = done + len;
        end
        assert(done==k);
    end

    function w = transmap_this(y)
        Y = sparse(ii,jj,y,m,n);
        w = qual.modelQ*Y*qual.segQ.';
    end

map = @(y) map_this(y);
transmap = @(y) transmap_this(y);
fh = linTrans([],map,transmap);

if exist('w','var') && ~isempty(w)
    fh = fh(w);
end

end

function test_this()

m = 3;
n = 4;
q = 2;

qual = Quality();
qual.modelQ = randn(q,m);
qual.segQ = randn(q,n);

ndx = Ndx();
ndx.trialmask = false(m,n);
ndx.trialmask(1,1:2) = true;
ndx.trialmask(2:3,3:4) = true;
ndx.trialmask

f = AWB_sparse(qual,ndx);
w = randn(q*q,1);
test_MV2DF(f,w);

W = reshape(w,q,q)
AWB = qual.modelQ'*W*qual.segQ
[f(w),AWB(ndx.trialmask(:))]

end
github
bsxfan/meta-embeddings-master
dcfplot.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/plotting/dcfplot.m
1,889
utf_8
9fbbba6b08ba70f285386536481e29d5
function dcfplot(devkeyname,evalkeyname,devscrfilename,evalscrfilename,outfilename,plot_title,xmin,xmax,ymin,ymax,prior)
% Makes a Norm_DCF plot of the dev and eval scores for a system.
% Inputs:
%   devkeyname: The name of the file containing the Key for
%     the dev scores.
%   evalkeyname: The name of the file containing the Key for
%     the eval scores.
%   devscrfilename: The name of the file containing the Scores
%     for the dev trials.
%   evalscrfilename: The name of the file containing the Scores
%     for the eval trials.
%   outfilename: The name for the PDF file that the plot will be
%     written in.
%   plot_title: A string for the plot title. (optional)
%   xmin, xmax, ymin, ymax: The boundaries of the plot. (optional)
%   prior: The effective target prior. (optional)

assert(isa(devkeyname,'char'))
assert(isa(evalkeyname,'char'))
assert(isa(devscrfilename,'char'))
assert(isa(evalscrfilename,'char'))
assert(isa(outfilename,'char'))

if ~exist('plot_title','var') || isempty(plot_title)
    plot_title = '';
end

if ~exist('xmin','var')
    xmin = -10;
    xmax = 0;
    ymin = 0;
    ymax = 1.2;
    prior = 0.001;
end

[dev_tar,dev_non] = get_tar_non_scores(devscrfilename,devkeyname);
[eval_tar,eval_non] = get_tar_non_scores(evalscrfilename,evalkeyname);

close all
plot_obj = Norm_DCF_Plot([xmin,xmax,ymin,ymax],plot_title);

plot_obj.set_system(dev_tar,dev_non,'dev')
plot_obj.plot_operating_point(logit(prior),'m--','new DCF point')
plot_obj.plot_curves([0 0 0 1 1 1 0 0],{{'b--'},{'g--'},{'r--'}})

plot_obj.set_system(eval_tar,eval_non,'eval')
plot_obj.plot_curves([0 0 1 1 1 1 0 1],{{'r','LineWidth',2},{'b'},{'g'},{'r'},{'k*'}})

plot_obj.display_legend()
plot_obj.save_as_pdf(outfilename)

end

function [tar,non] = get_tar_non_scores(scrfilename,keyname)
key = Key.read(keyname);
scr = Scores.read(scrfilename);
[tar,non] = scr.get_tar_non(key);
end
github
bsxfan/meta-embeddings-master
fast_actDCF.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/stats/fast_actDCF.m
3,032
utf_8
5e62c5e1058f0ba3f5a59149249da2a9
function [dcf,Pmiss,Pfa] = fast_actDCF(tar,non,plo,normalize)
% Computes the actual average cost of making Bayes decisions with scores
% calibrated to act as log-likelihood-ratios. The average cost (DCF) is
% computed for a given range of target priors and for unity cost of error.
% If un-normalized, DCF is just the Bayes error-rate.
%
% Usage examples:  dcf = fast_actDCF(tar,non,-10:0.01:0)
%                  norm_dcf = fast_actDCF(tar,non,-10:0.01:0,true)
%                  [dcf,pmiss,pfa] = fast_actDCF(tar,non,-10:0.01:0)
%
% Inputs:
%   tar: a vector of T calibrated target scores
%   non: a vector of N calibrated non-target scores
%        Both are assumed to be of the form
%
%                      P(data | target)
%        llr = log ------------------------
%                    P(data | non-target)
%
%        where log is the natural logarithm.
%
%   plo: an ascending vector of log-prior-odds, plo = logit(Ptar)
%        = log(Ptar) - log(1-Ptar)
%
%   normalize: (optional, default false) return normalized dcf if true.
%
% Outputs:
%   dcf: a vector of DCF values, one for every value of plo.
%
%        dcf(plo) = Ptar(plo)*Pmiss(plo) + (1-Ptar(plo))*Pfa(plo)
%
%        where Ptar(plo) = sigmoid(plo) = 1./(1+exp(-plo)) and
%        where Pmiss and Pfa are computed by counting miss and false-alarm
%        rates, when comparing 'tar' and 'non' scores to the Bayes decision
%        threshold, which is just -plo. If 'normalize' is true, then dcf is
%        normalized by dividing by min(Ptar,1-Ptar).
%
%   Pmiss: empirical actual miss rate, one value per element of plo.
%          Pmiss is not altered by parameter 'normalize'.
%
%   Pfa: empirical actual false-alarm rate, one value per element of plo.
%        Pfa is not altered by parameter 'normalize'.
%
% Note, the decision rule applied here is to accept if
%
%    llr >= Bayes threshold,
%
% or reject otherwise. The >= is a consequence of the stability of the
% sort algorithm, where equal values remain in the original order.

if nargin==0
    test_this();
    return
end

assert(isvector(tar))
assert(isvector(non))
assert(isvector(plo))
assert(issorted(plo),'Parameter plo must be in ascending order.');

tar = tar(:)';
non = non(:)';
plo = plo(:)';

if ~exist('normalize','var') || isempty(normalize)
    normalize = false;
end

D = length(plo);
T = length(tar);
N = length(non);

[~,ii] = sort([-plo,tar]);  % -plo are thresholds
r = zeros(1,T+D);
r(ii) = 1:T+D;
r = r(1:D);                 % rank of thresholds
Pmiss = r-(D:-1:1);

[~,ii] = sort([-plo,non]);  % -plo are thresholds
r = zeros(1,N+D);
r(ii) = 1:N+D;
r = r(1:D);                 % rank of thresholds
Pfa = N - r + (D:-1:1);

Pmiss = Pmiss / T;
Pfa = Pfa / N;

Ptar = sigmoid(plo);
Pnon = sigmoid(-plo);
dcf = Ptar.*Pmiss + Pnon.*Pfa;

if normalize
    dcf = dcf ./ min(Ptar,Pnon);
end

end

function test_this()

tar = [1 2 5 7];
non = [-7 -5 -2 -1];
plo = -6:6;
[dcf,Pmiss,Pfa] = fast_actDCF(tar,non,plo)

end
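A hypothetical cross-check (not part of the toolkit): the sort/rank trick above should agree with a naive per-threshold count of misses and false alarms under the rule "accept iff llr >= -plo".

% Hypothetical sketch: compare the fast implementation to brute-force counting.
tar = randn(1,1000) + 2; non = randn(1,1000);
plo = -3:0.5:3;
[dcf,Pmiss,Pfa] = fast_actDCF(tar,non,plo);
naivePmiss = arrayfun(@(t) mean(tar < -t), plo);   % miss: tar below threshold
naivePfa = arrayfun(@(t) mean(non >= -t), plo);    % fa: non at/above threshold
[max(abs(Pmiss-naivePmiss)), max(abs(Pfa-naivePfa))]   % both ~0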
github
bsxfan/meta-embeddings-master
fast_minDCF.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/stats/fast_minDCF.m
2,585
utf_8
6a709a2b121037d7919f57c87d835531
function [minDCF,Pmiss,Pfa,prbep,eer] = fast_minDCF(tar,non,plo,normalize)
% Inputs:
%   tar: vector of target scores
%   non: vector of non-target scores
%   plo: vector of prior-log-odds: plo = logit(Ptar)
%        = log(Ptar) - log(1-Ptar)
%   normalize: if true, return normalized minDCF, else un-normalized.
%              (optional, default = false)
%
% Output:
%   minDCF: a vector with one value for every element of plo.
%           Note that minDCF is parametrized by plo:
%
%           minDCF(Ptar) = min_t  Ptar * Pmiss(t) + (1-Ptar) * Pfa(t)
%
%           where t is the adjustable decision threshold and
%           Ptar = sigmoid(plo) = 1./(1+exp(-plo)).
%           If normalize == true, then the returned value is
%           minDCF(Ptar) / min(Ptar,1-Ptar).
%
%   Pmiss: a vector with one value for every element of plo.
%          This is Pmiss(tmin), where tmin is the minimizing threshold
%          for minDCF, at every value of plo. Pmiss is not altered by
%          parameter 'normalize'.
%
%   Pfa: a vector with one value for every element of plo.
%        This is Pfa(tmin), where tmin is the minimizing threshold for
%        minDCF, at every value of plo. Pfa is not altered by
%        parameter 'normalize'.
%
%   prbep: precision-recall break-even point: where #FA == #miss
%
%   eer: the equal error rate.
%
% Note, for the un-normalized case:
%   minDCF(plo) = sigmoid(plo).*Pfa(plo) + sigmoid(-plo).*Pmiss(plo)

if nargin==0
    test_this();
    return
end

assert(isvector(tar))
assert(isvector(non))
assert(isvector(plo))

if ~exist('normalize','var') || isempty(normalize)
    normalize = false;
end

plo = plo(:);
[Pmiss,Pfa] = rocch(tar,non);

if nargout > 3
    Nmiss = Pmiss * length(tar);
    Nfa = Pfa * length(non);
    prbep = rocch2eer(Nmiss,Nfa);
end

if nargout > 4
    eer = rocch2eer(Pmiss,Pfa);
end

Ptar = sigmoid(plo);
Pnon = sigmoid(-plo);
cdet = [Ptar,Pnon]*[Pmiss(:)';Pfa(:)'];
[minDCF,ii] = min(cdet,[],2);

if nargout>1
    Pmiss = Pmiss(ii);
    Pfa = Pfa(ii);
end

if normalize
    minDCF = minDCF ./ min(Ptar,Pnon);
end

end

function test_this()

close all;
plo = -20:0.01:20;

tar = randn(1,1e4)+4;
non = randn(1,1e4);
minDCF = fast_minDCF(tar,non,plo,true);
%sminDCF = slow_minDCF(tar,non,plo,true);
%plot(plo,minDCF,'r',plo,sminDCF,'k');
plot(plo,minDCF,'r');
hold on;

tar = randn(1,1e5)+4;
non = randn(1,1e5);
minDCF = fast_minDCF(tar,non,plo,true);
plot(plo,minDCF,'g')

tar = randn(1,1e6)+4;
non = randn(1,1e6);
minDCF = fast_minDCF(tar,non,plo,true);
plot(plo,minDCF,'b')
hold off;

end
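A hedged sketch (not part of the toolkit) relating the last two functions: since fast_minDCF minimizes over the ROC convex hull, it lower-bounds the actual DCF of fast_actDCF at every prior; the gap between the two curves is a measure of calibration loss.

% Hypothetical sketch: actual DCF is never below the ROCCH-based minimum DCF.
tar = randn(1,1e4) + 2; non = randn(1,1e4);
plo = -5:0.1:5;
mindcf = fast_minDCF(tar,non,plo,true);
actdcf = fast_actDCF(tar,non,plo,true);
all(actdcf(:) - mindcf(:) > -1e-10)   % true: act >= min everywhere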