platform: stringclasses (1 value)
repo_name: stringlengths (13 to 113)
name: stringlengths (3 to 74)
ext: stringclasses (1 value)
path: stringlengths (12 to 229)
size: int64 (23 to 843k)
source_encoding: stringclasses (9 values)
md5: stringlengths (32 to 32)
text: stringlengths (23 to 843k)
github
bsxfan/meta-embeddings-master
rocch.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/det/rocch.m
2,725
utf_8
68aaac9f8a1f40d0d5eac901abc533d5
function [pmiss,pfa] = rocch(tar_scores,nontar_scores)
% ROCCH: ROC Convex Hull.
% Usage: [pmiss,pfa] = rocch(tar_scores,nontar_scores)
% (This function has the same interface as compute_roc.)
%
% Note: pmiss and pfa contain the coordinates of the vertices of the
%       ROC Convex Hull.
%
% For a demonstration that plots ROCCH against ROC for a few cases, just
% type 'rocch' at the MATLAB command line.
%
% Inputs:
%   tar_scores: scores for target trials
%   nontar_scores: scores for non-target trials

if nargin==0
    test_this();
    return
end

assert(nargin==2)
assert(isvector(tar_scores))
assert(isvector(nontar_scores))

Nt = length(tar_scores);
Nn = length(nontar_scores);
N = Nt+Nn;
scores = [tar_scores(:)',nontar_scores(:)'];
Pideal = [ones(1,Nt),zeros(1,Nn)]; %ideal, but non-monotonic posterior

%It is important here that scores that are the same (i.e. already in order) should NOT be swapped.
%MATLAB's sort algorithm has this property.
[scores,perturb] = sort(scores);
Pideal = Pideal(perturb);

[Popt,width] = pavx(Pideal);

nbins = length(width);
pmiss = zeros(1,nbins+1);
pfa = zeros(1,nbins+1);

%threshold leftmost: accept everything, miss nothing
left = 0; %0 scores to left of threshold
fa = Nn;
miss = 0;

for i=1:nbins
    pmiss(i) = miss/Nt;
    pfa(i) = fa/Nn;
    left = left + width(i);
    miss = sum(Pideal(1:left));
    fa = N - left - sum(Pideal(left+1:end));
end
pmiss(nbins+1) = miss/Nt;
pfa(nbins+1) = fa/Nn;

end

function test_this()

figure();

subplot(2,3,1);
tar = [1]; non = [0];
[pmiss,pfa] = rocch(tar,non);
[pm,pf] = compute_roc(tar,non);
plot(pfa,pmiss,'r-^',pf,pm,'g--v');
axis('square');grid;legend('ROCCH','ROC');
title('2 scores: non < tar');

subplot(2,3,2);
tar = [0]; non = [1];
[pmiss,pfa] = rocch(tar,non);
[pm,pf] = compute_roc(tar,non);
plot(pfa,pmiss,'r-^',pf,pm,'g-v');
axis('square');grid;
title('2 scores: tar < non');

subplot(2,3,3);
tar = [0]; non = [-1,1];
[pmiss,pfa] = rocch(tar,non);
[pm,pf] = compute_roc(tar,non);
plot(pfa,pmiss,'r-^',pf,pm,'g--v');
axis('square');grid;
title('3 scores: non < tar < non');

subplot(2,3,4);
tar = [-1,1]; non = [0];
[pmiss,pfa] = rocch(tar,non);
[pm,pf] = compute_roc(tar,non);
plot(pfa,pmiss,'r-^',pf,pm,'g--v');
axis('square');grid;
title('3 scores: tar < non < tar');
xlabel('P_{fa}');
ylabel('P_{miss}');

subplot(2,3,5);
tar = randn(1,100)+1; non = randn(1,100);
[pmiss,pfa] = rocch(tar,non);
[pm,pf] = compute_roc(tar,non);
plot(pfa,pmiss,'r-^',pf,pm,'g');
axis('square');grid;
title('45^{\circ} DET');

subplot(2,3,6);
tar = randn(1,100)*2+1; non = randn(1,100);
[pmiss,pfa] = rocch(tar,non);
[pm,pf] = compute_roc(tar,non);
plot(pfa,pmiss,'r-^',pf,pm,'g');
axis('square');grid;
title('flatter DET');

end
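The hull vertices returned above are all that is needed for hull-based operating-point metrics. The following is a minimal sketch (not part of the toolkit file) of how minDCF at a chosen target prior could be read off the vertices, mirroring the computation inside rocchdet.m further down; the synthetic scores and the names ptar and mindcf are illustrative only.

tar = randn(1,1000) + 2;        % synthetic target scores (illustrative)
non = randn(1,1000);            % synthetic non-target scores (illustrative)
[pmiss,pfa] = rocch(tar,non);   % vertices of the ROC convex hull
ptar = 0.1;                     % assumed operating prior, with Cmiss = Cfa = 1
mindcf = min(ptar*pmiss + (1-ptar)*pfa);  % minimize DCF over hull vertices only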
github
bsxfan/meta-embeddings-master
compute_roc.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/det/compute_roc.m
1,956
utf_8
16907ef9816ee330ac64b4eeb708366b
function [Pmiss, Pfa] = compute_roc(true_scores, false_scores)
% compute_roc computes the (observed) miss/false_alarm probabilities
% for a set of detection output scores.
%
% true_scores (false_scores) are detection output scores for a set of
% detection trials, given that the target hypothesis is true (false).
% (By convention, the more positive the score,
% the more likely is the target hypothesis.)
%
% This code is matlab-tized for speed.
% Speedup: old routine 54 secs -> new routine 5.71 secs
% for 109776 points.

%-------------------------
% Compute the miss/false_alarm error probabilities

assert(nargin==2)
assert(isvector(true_scores))
assert(isvector(false_scores))

num_true = length(true_scores);
num_false = length(false_scores);
assert(num_true>0)
assert(num_false>0)

total=num_true+num_false;

Pmiss = zeros(num_true+num_false+1, 1); %preallocate for speed
Pfa = zeros(num_true+num_false+1, 1);   %preallocate for speed

scores(1:num_false,1) = false_scores;
scores(1:num_false,2) = 0;
scores(num_false+1:total,1) = true_scores;
scores(num_false+1:total,2) = 1;

scores=DETsort(scores);

sumtrue=cumsum(scores(:,2),1);
sumfalse=num_false - ([1:total]'-sumtrue);

Pmiss(1) = 0;
Pfa(1) = 1.0;
Pmiss(2:total+1) = sumtrue ./ num_true;
Pfa(2:total+1) = sumfalse ./ num_false;

end

function [y,ndx] = DETsort(x,col)
% DETsort Sorts rows: the first column ascending, the remaining descending,
% thereby postponing the false alarms on like scores.
% Based on SORTROWS.

if nargin<1, error('Not enough input arguments.'); end
if ndims(x)>2, error('X must be a 2-D matrix.'); end

if nargin<2, col = 1:size(x,2); end
if isempty(x), y = x; ndx = []; return, end

ndx = (1:size(x,1))';

% sort 2nd column ascending
[v,ind] = sort(x(ndx,2));
ndx = ndx(ind);

% reverse to descending order
ndx(1:size(x,1)) = ndx(size(x,1):-1:1);

% now sort first column ascending
[v,ind] = sort(x(ndx,1));
ndx = ndx(ind);
y = x(ndx,:);

end
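For orientation, a rough way to read an observed equal-error rate off the curve returned by compute_roc is sketched below. This is illustrative only (not toolkit code): the crossing is approximated at the sample where |Pmiss - Pfa| is smallest, which is adequate for plotting but coarser than the ROCCH EER computed by rocch/rocchdet.

tar = randn(1,1000) + 2; non = randn(1,1000);   % synthetic scores (illustrative)
[Pmiss,Pfa] = compute_roc(tar,non);
[~,i] = min(abs(Pmiss - Pfa));                  % sample closest to the Pmiss = Pfa line
eer_approx = (Pmiss(i) + Pfa(i))/2;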
github
bsxfan/meta-embeddings-master
rocchdet.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/det/rocchdet.m
5,471
utf_8
2452dd1f98aad313c79879d410214cb2
function [x,y,eer,mindcf] = rocchdet(tar,non,dcfweights,pfa_min,pfa_max,pmiss_min,pmiss_max,dps)
% ROCCHDET: Computes ROC Convex Hull and then maps that to the DET axes.
%
% (For demo, type 'rocchdet' on command line.)
%
% Inputs:
%
%   tar: vector of target scores
%   non: vector of non-target scores
%
%   dcfweights: 2-vector, such that: DCF = [pmiss,pfa]*dcfweights(:).
%               (Optional, provide only if mindcf is desired, otherwise
%               omit or use [].)
%
%   pfa_min,pfa_max,pmiss_min,pmiss_max: limits of DET-curve rectangle.
%       The DET-curve is infinite, non-trivial limits (away from 0 and 1)
%       are mandatory.
%       (Uses min = 0.0005 and max = 0.5 if omitted.)
%
%   dps: number of returned (x,y) dots (arranged in a curve) in DET space,
%        for every straight line-segment (edge) of the ROC Convex Hull.
%        (Uses dps = 100 if omitted.)
%
% Outputs:
%
%   x: probit(Pfa)
%   y: probit(Pmiss)
%   eer: ROCCH EER = max_p mindcf(dcfweights=[p,1-p]), which is also
%        equal to the intersection of the ROCCH with the line pfa = pmiss.
%
%   mindcf: Identical to result using traditional ROC, but
%           computed by minimizing over the ROCCH vertices, rather than
%           over all the ROC points.

if nargin==0
    test_this();
    return
end

assert(isvector(tar))
assert(isvector(non))

if ~exist('pmiss_max','var') || isempty(pmiss_max)
    pfa_min = 0.0005;
    pfa_max = 0.5;
    pmiss_min = 0.0005;
    pmiss_max = 0.5;
end

if ~exist('dps','var') || isempty(dps)
    dps = 100;
end

assert(pfa_min>0 && pfa_max<1 && pmiss_min>0 && pmiss_max<1,'limits must be strictly inside (0,1)');
assert(pfa_min<pfa_max && pmiss_min < pmiss_max);

[pmiss,pfa] = rocch(tar,non);

if nargout>3
    dcf = dcfweights(:)'*[pmiss(:)';pfa(:)'];
    mindcf = min(dcf);
end

%pfa is decreasing
%pmiss is increasing
box.left = pfa_min;
box.right = pfa_max;
box.top = pmiss_max;
box.bottom = pmiss_min;

x = [];
y = [];
eer = 0;
for i=1:length(pfa)-1
    xx = pfa(i:i+1);
    yy = pmiss(i:i+1);
    [xdots,ydots,eerseg] = plotseg(xx,yy,box,dps);
    x = [x,xdots];
    y = [y,ydots];
    eer = max(eer,eerseg);
end

end

function [x,y,eer] = plotseg(xx,yy,box,dps)

%xx and yy should be sorted:
assert(xx(2)<=xx(1)&&yy(1)<=yy(2));

XY = [xx(:),yy(:)];
dd = [1,-1]*XY;
if min(abs(dd))==0
    eer = 0;
else
    %find line coefficients seg s.t. seg'[xx(i);yy(i)] = 1,
    %when xx(i),yy(i) is on the line.
    seg = XY\[1;1];
    eer = 1/(sum(seg)); %candidate for EER, eer is highest candidate
end

%segment completely outside of box
if xx(1)<box.left || xx(2)>box.right || yy(2)<box.bottom || yy(1)>box.top
    x = [];
    y = [];
    return
end

if xx(2)<box.left
    xx(2) = box.left;
    yy(2) = (1-seg(1)*box.left)/seg(2);
end

if xx(1)>box.right
    xx(1) = box.right;
    yy(1) = (1-seg(1)*box.right)/seg(2);
end

if yy(1)<box.bottom
    yy(1) = box.bottom;
    xx(1) = (1-seg(2)*box.bottom)/seg(1);
end

if yy(2)>box.top
    yy(2) = box.top;
    xx(2) = (1-seg(2)*box.top)/seg(1);
end

dx = xx(2)-xx(1);
xdots = xx(1)+dx*(0:dps)/dps;
ydots = (1-seg(1)*xdots)/seg(2);
x = probit(xdots);
y = probit(ydots);

end

function test_this

subplot(2,3,1);
hold on;
make_det_axes();
tar = randn(1,100)+2;
non = randn(1,100);
[x,y,eer] = rocchdet(tar,non);
[pmiss,pfa] = compute_roc(tar,non);
plot(x,y,'g',probit(pfa),probit(pmiss),'r');
legend(sprintf('ROCCH-DET (EER = %3.1f%%)',eer*100),'classical DET',...
       'Location','SouthWest');
title('EER read off ROCCH-DET');

subplot(2,3,2);
show_eer(pmiss,pfa,eer);

subplot(2,3,3);
[pmiss,pfa] = rocch(tar,non);
show_eer(pmiss,pfa,eer);

subplot(2,3,4);
hold on;
make_det_axes();
tar = randn(1,100)*2+3;
non = randn(1,100);
[x,y,eer] = rocchdet(tar,non);
[pmiss,pfa] = compute_roc(tar,non);
plot(x,y,'b',probit(pfa),probit(pmiss),'k');
legend(sprintf('ROCCH-DET (EER = %3.1f%%)',eer*100),'classical DET',...
       'Location','SouthWest');
title('EER read off ROCCH-DET');

subplot(2,3,5);
show_eer(pmiss,pfa,eer);

subplot(2,3,6);
[pmiss,pfa] = rocch(tar,non);
show_eer(pmiss,pfa,eer);

end

function show_eer(pmiss,pfa,eer)

p = 0:0.001:1;
x = p;
y = zeros(size(p));
for i=1:length(p);
    %y(i) = minCdet @ ptar = p(i), cmiss = cfa = 1
    y(i) = min(p(i)*pmiss+(1-p(i))*pfa);
end
plot([min(x),max(x)],[eer,eer],x,y);
grid;
legend('EER','minDCF(P_{tar},C_{miss}=C_{fa}=1)','Location','South');
xlabel('P_{tar}');
title('EER via minDCF on classical DET');

end

function make_det_axes()
% make_det_axes creates a plot for displaying detection performance
% with the axes scaled and labeled so that a normal Gaussian
% distribution will plot as a straight line.
%
% The y axis represents the miss probability.
% The x axis represents the false alarm probability.
%
% Creates a new figure, switches hold on, embellishes and returns handle.

pROC_limits = [0.0005 0.5];
pticks = [0.001 0.002 0.005 0.01 0.02 0.05 0.1 0.2 0.3 0.4];
ticklabels = ['0.1';'0.2';'0.5';' 1 ';' 2 ';' 5 ';'10 ';'20 ';'30 ';'40 '];

axis('square');
set(gca, 'xlim', probit(pROC_limits));
set(gca, 'xtick', probit(pticks));
set(gca, 'xticklabel', ticklabels);
set(gca, 'xgrid', 'on');
xlabel('False Alarm probability (in %)');
set(gca, 'ylim', probit(pROC_limits));
set(gca, 'ytick', probit(pticks));
set(gca, 'yticklabel', ticklabels);
set(gca, 'ygrid', 'on')
ylabel('Miss probability (in %)')

end
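A minimal call sketch for the function above (illustrative values only; assumes probit.m and rocch.m from the same toolkit are on the path):

tar = randn(1,1000)*2 + 2; non = randn(1,1000);   % synthetic scores (illustrative)
dcfweights = [0.1; 0.9];                          % [Ptar*Cmiss; (1-Ptar)*Cfa] for Ptar = 0.1
[x,y,eer,mindcf] = rocchdet(tar,non,dcfweights);  % default DET box and dps
plot(x,y); xlabel('probit(P_{fa})'); ylabel('probit(P_{miss})');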
github
bsxfan/meta-embeddings-master
map_mod_names.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/manip/map_mod_names.m
3,127
utf_8
6aa97cdf9b5df6095e803bd14f612e52
function ndx = map_mod_names(ndx,src_map,dst_map) % Changes the model names in an index using two maps. The one map % lists the training segment for each model name and the other map % lists the new model name for each training segment. Existing % model names are replaced by new model names that are mapped to % the same training segment. If a model name is not present in the % src_map, it is left unchanged in the output ndx. If a train seg % is not present in the dst_map, the source model is dropped from % the output ndx (along with all its trials). % Inputs: % ndx: the Key or Ndx for which the model names must be changed % scr_map: the map from current model names to trn seg names % dst_map: the map from trn seg names to new model names % Outputs: % ndx: the Key or Ndx with a modified modelset field if nargin == 0 test_this() return end assert(nargin==3) assert(isa(ndx,'Ndx')||isa(ndx,'Key')) assert(isstruct(src_map)) assert(isstruct(dst_map)) assert(isfield(src_map,'keySet')) assert(isfield(dst_map,'keySet')) assert(isfield(src_map,'values')) assert(isfield(dst_map,'values')) [trnsegs,is_present1] = maplookup(src_map,ndx.modelset); num_unchanged = length(is_present1) - sum(is_present1); if num_unchanged ~= 0 log_warning('Keeping %d model name(s) unchanged.\n',num_unchanged); end [newnames,is_present2] = maplookup(dst_map,trnsegs); num_dropped = length(is_present2) - sum(is_present2); if num_dropped ~= 0 log_warning('Discarding %d row(s) in score matrix.\n',num_dropped); end keepndxs = true(length(ndx.modelset),1); keepndxs(is_present1) = is_present2; newmodnames = cell(length(is_present2),1); newmodnames(is_present2) = newnames; ndx.modelset(is_present1) = newmodnames; ndx.modelset = ndx.modelset(keepndxs); if isa(ndx,'Ndx') ndx.trialmask = ndx.trialmask(keepndxs,:); else ndx.tar = ndx.tar(keepndxs,:); ndx.non = ndx.non(keepndxs,:); end function test_this() src_map.keySet = {'mod1','mod2','mod3','mod4','mod8'}; src_map.values = {'seg1','seg2','seg3','seg5','seg8'}; dst_map.keySet = {'seg1','seg2','seg3','seg4','seg5','seg6'}; dst_map.values = {'new1','new2','new3','new4','new5','new6'}; ndx = Ndx(); fprintf('Test1\n'); ndx.modelset = {'mod2','mod3','mod4'}; ndx.trialmask = true(3,4); fprintf('Input:\n'); disp(ndx.modelset) fprintf('Output should be:\n'); out = {'new2','new3','new5'}; disp(out) fprintf('Output is:\n'); newndx = map_mod_names(ndx,src_map,dst_map); disp(newndx.modelset) fprintf('Test2\n'); ndx.modelset = {'mod2','mod3','mod10','mod4','mod6'}; ndx.trialmask = true(5,4); fprintf('Input:\n'); disp(ndx.modelset) fprintf('Output should be:\n'); out = {'new2','new3','mod10','new5','mod6'}; disp(out) fprintf('Output is:\n'); newndx = map_mod_names(ndx,src_map,dst_map); disp(newndx.modelset) fprintf('Test3\n'); ndx.modelset = {'mod2','mod3','mod10','mod4','mod8','mod6'}; ndx.trialmask = true(6,4); fprintf('Input:\n'); disp(ndx.modelset) fprintf('Output should be:\n'); out = {'new2','new3','mod10','new5','mod6'}; disp(out) fprintf('Output is:\n'); newndx = map_mod_names(ndx,src_map,dst_map); disp(newndx.modelset)
github
bsxfan/meta-embeddings-master
maplookup.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/manip/maplookup.m
3,084
utf_8
9e8a55e6a2201b6a0e975469dfe9c299
function [values,is_present] = maplookup(map,keys)
% Does a map lookup, to map multiple keys to multiple values in one call.
% The parameter 'map' represents a function, where each key maps to a
% unique value. Each value may be mapped to by one or more keys.
%
% Inputs:
%   map.keySet: a one-dimensional cell array;
%               or one-dimensional numeric array;
%               or a two dimensional char array, where each row is an
%               element.
%               The elements should be unique. If there are repeated elements,
%               the last one of each will be used.
%
%   map.values: The values that each member of keySet maps to, in the same
%               order.
%
%   keys: The array of keys to look up in the map. The class should agree
%         with map.keySet.
%
% Outputs:
%   values: a one-dimensional cell array; or one dimensional numeric array;
%           or a two dimensional char array, where rows are string values.
%           Each value corresponds to one of the elements in keys.
%
%   is_present: logical array of same size as keys, indicating which keys
%               are in map.keySet.
%               Optional: if not asked, then maplookup crashes if one or
%               more keys are not in the map. If is_present is asked,
%               then maplookup does not crash for missing keys. The keys
%               that are in the map are: keys(is_present).

if nargin==0
    test_this();
    return;
end

assert(nargin==2)
assert(isstruct(map))
assert(isfield(map,'keySet'))
assert(isfield(map,'values'))

if ischar(map.keySet)
    keySetSize = size(map.keySet,1);
else
    keySetSize = length(map.keySet);
end

if ischar(map.values)
    valueSize = size(map.values,1);
else
    valueSize = length(map.values);
end

% the number of values must match the number of keys
if valueSize~=keySetSize
    error('bad map: sizes of keySet and values are different')
end

if ~strcmp(class(map.keySet),class(keys))
    error('class(keys) = ''%s'', should be class(map.keySet) = ''%s''',class(keys),class(map.keySet));
end

if ischar(keys)
    [is_present,at] = ismember(keys,map.keySet,'rows');
else
    [is_present,at] = ismember(keys,map.keySet);
end

missing = length(is_present) - sum(is_present);
if missing>0
    if nargout<2
        error('%i of keys not in map',missing);
    else
        if ischar(map.values)
            values = map.values(at(is_present),:);
        else
            values = map.values(at(is_present));
        end
    end
else
    if ischar(map.values)
        values = map.values(at,:);
    else
        values = map.values(at);
    end
end

end

function test_this()

map.keySet = ['one  ';'two  ';'three'];
map.values = ['a';'b';'c'];
maplookup(map,['one  ';'one  ';'three'])

map.keySet = {'one','two','three'};
map.values = [1,2,3];
maplookup(map,{'one','one','three'})

map.values = {'a','b','c'};
maplookup(map,{'one','one','three'})

map.keySet = [1 2 3];
maplookup(map,[1 1 3])
%maplookup(map,{1 2 3})
[values,is_present] = maplookup(map,[1 1 3 4 5])

fprintf('Now testing error message:\n');
maplookup(map,[1 1 3 4 5])

end
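A small usage sketch for the two-output form (the keys and values below are illustrative, not from the toolkit):

map.keySet = {'alice','bob'};
map.values = {'spk01','spk02'};
[vals,found] = maplookup(map,{'bob','carol'});
% found = [true false]; vals = {'spk02'}.
% With a single output argument, the missing key 'carol' would raise an error instead.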
github
bsxfan/meta-embeddings-master
test_binary_classifier.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/test_binary_classifier.m
1,332
utf_8
9683ce2757d7eb67c8a8ec37954cbab4
function obj_val = test_binary_classifier(objective_function,classf, ... prior,system,input_data) % Returns the result of the objective function evaluated on the % scores. % % Inputs: % objective_function: a function handle to the objective function % to feed the scores into % classf: length T vector where T is the number of trials with entries +1 for target scores; -1 % for non-target scores % prior: the prior (given to the system that produced the scores) % system: a function handle to the system to be run % input_data: the data to run the system on (to produce scores) % % Outputs % obj_val: the value returned by the objective function if nargin==0 test_this(); return; end scores = system(input_data); obj_val = evaluate_objective(objective_function,scores,classf,prior); end function test_this() num_trials = 100; input_data = randn(20,num_trials); prior = 0.5; maxiters = 1000; classf = [ones(1,num_trials/2),-ones(1,num_trials/2)]; tar = input_data(:,1:num_trials/2); non = input_data(:,num_trials/2+1:end); [sys,run_sys,w0] = linear_fusion_factory(tar,non); w = train_binary_classifier(@cllr_obj,classf,sys,[],w0,[],maxiters,[],prior,[],true); system = @(data) run_sys(w,data); test_binary_classifier(@cllr_obj,classf,prior,system,input_data) end
github
bsxfan/meta-embeddings-master
evaluate_objective.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/evaluate_objective.m
1,417
utf_8
70262971965caac5629612bd125dd0a2
function obj_val = evaluate_objective(objective_function,scores,classf, ... prior) % Returns the result of the objective function evaluated on the % scores. % % Inputs: % objective_function: a function handle to the objective function % to feed the scores into % scores: length T vector of scores to be evaluated where T is % the number of trials % classf: length T vector with entries +1 for target scores; -1 % for non-target scores % prior: the prior (given to the system that produced the scores) % % Outputs % obj_val: the value returned by the objective function if nargin==0 test_this(); return; end if ~exist('objective_function','var') || isempty(objective_function) objective_function = @(w,T,weights,logit_prior) cllr_obj(w,T,weights,logit_prior); end logit_prior = logit(prior); prior_entropy = objective_function([0;0],[1,-1],[prior,1-prior],logit_prior); ntar = length(find(classf>0)); nnon = length(find(classf<0)); N = nnon+ntar; weights = zeros(1,N); weights(classf>0) = prior/(ntar*prior_entropy); weights(classf<0) = (1-prior)/(nnon*prior_entropy); obj_val = objective_function(scores,classf,weights,logit_prior); end function test_this() num_trials = 20; scores = randn(1,num_trials); classf = [ones(1,num_trials/2),-ones(1,num_trials/2)]; prior = 0.5; res = evaluate_objective(@cllr_obj,scores,classf,prior) end
github
bsxfan/meta-embeddings-master
train_binary_classifier.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/train_binary_classifier.m
3,938
utf_8
de96b98d88aa8e3d0c36785a2f9a3a94
function [w,cxe,w_pen,optimizerState,converged] = ... train_binary_classifier(classifier,classf,w0,objective_function,prior,... penalizer,lambda,maxiters,maxCG,optimizerState,... quiet,cstepHessian) % % Supervised training of a regularized fusion. % % % Inputs: % % classifier: MV2DF function handle that maps parameters to llr-scores. % Note: The training data is already wrapped in this handle. % % classf: 1-by-N row of class labels: % -1 for non_target, % +1 for target, % 0 for ignore % % w0: initial parameters. This is NOT optional. % % objective_function: A function handle to an Mv2DF function that % maps the output (llr-scores) of classifier, to % the to-be-minimized objective (called cxe). % optional, use [] to invoke 'cllr_obj'. % % prior: a prior probability for target to set the 'operating point' % of the objective function. % optional: use [] to invoke default of 0.5 % % penalizer: MV2DF function handle that maps parameters to a positive % regularization penalty. % % lambda: a weighting for the penalizer % % maxiters: the maximum number of Newton Trust Region optimization % iterations to perform. Note, the user can make maxiters % small, examine the solution and then continue training: % -- see w0 and optimizerState. % % % % optimizerState: In this implementation, it is the trust region radius. % optional: % omit or use [] % If not supplied when resuming iteration, % this may cost some extra iterations. % Resume further iteration thus: % [w1,...,optimizerState] = train_binary_classifier(...); % ... examine solution w1 ... % [w2,...,optimizerState] = train_binary_classifier(...,w1,...,optimizerState); % % % quiet: if false, outputs more info during training % % % Outputs: % w: the solution. % cxe: normalized multiclass cross-entropy of the solution. % The range is 0 (good) to 1(useless). % % optimizerState: see above, can be used to resume iteration. % if nargin==0 test_this(); return; end if ~exist('maxCG','var') || isempty(maxCG) maxCG = 100; end if ~exist('optimizerState','var') optimizerState=[]; end if ~exist('prior','var') || isempty(prior) prior = 0.5; end if ~exist('objective_function','var') || isempty(objective_function) objective_function = @(w,T,weights,logit_prior) cllr_obj(w,T,weights,logit_prior); end %prior_entropy = -prior*log(prior)-(1-prior)*log(1-prior); prior_entropy = objective_function([0;0],[1,-1],[prior,1-prior],logit(prior)); classf = classf(:)'; ntar = length(find(classf>0)); nnon = length(find(classf<0)); N = nnon+ntar; weights = zeros(size(classf)); weights(classf>0) = prior/(ntar*prior_entropy); weights(classf<0) = (1-prior)/(nnon*prior_entropy); %weights remain 0, where classf==0 w=[]; if exist('penalizer','var') && ~isempty(penalizer) obj1 = objective_function(classifier,classf,weights,logit(prior)); obj2 = penalizer(w); obj = sum_of_functions(w,[1,lambda],obj1,obj2); else obj = objective_function(classifier,classf,weights,logit(prior)); end w0 = w0(:); if exist('cstepHessian','var') &&~ isempty(cstepHessian) obj = replace_hessian([],obj,cstepHessian); end [w,y,optimizerState,converged] = trustregion_newton_cg(obj,w0,maxiters,maxCG,optimizerState,[],1,quiet); if exist('penalizer','var') && ~isempty(penalizer) w_pen = lambda*obj2(w); else w_pen = 0; end cxe = y-w_pen; if ~quiet fprintf('cxe = %g, pen = %g\n',cxe,w_pen); end function test_this() %invoke test for linear_fuser, which calls train_binary_classifier linear_fuser();
github
bsxfan/meta-embeddings-master
qfuser_v5.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/systems/qfuser_v5.m
921
utf_8
f82cbe0c178dae2a667496466b612770
function [fusion,w0] = qfuser_v5(w,scores,wfuse)

if nargin==0
    test_this();
    return;
end

% block 1
f1 = linear_fuser([],scores.scores);
w1 = wfuse;
[whead,wtail] = splitvec_fh(length(w1));
f1 = f1(whead);

% block 2
modelQ = scores.modelQ;
[q,n1] = size(modelQ);
modelQ = [modelQ;ones(1,n1)];
segQ = scores.segQ;
[q2,n2] = size(segQ);
segQ = [segQ;ones(1,n2)];
assert(q==q2);
q = q + 1;
wq = q*(q+1)/2;
f2 = AWB_fh(modelQ',segQ,tril_to_symm_fh(q,wtail));
w2 = zeros(wq,1);

% assemble
fusion = sum_of_functions(w,[1,1],f1,f2);
w0 = [w1;w2];

end

function test_this()

m = 5;
k = 2;
n1 = 4;
n2 = 5;
scores.sindx = [1,2,3];
scores.qindx = [4,5];
scores.scores = randn(m,n1*n2);
scores.modelQ = randn(k,n1);
scores.segQ = randn(k,n2);
wfuse = [1,2,3,4]';
[fusion,w0] = qfuser_v4([],scores,wfuse);
%test_MV2DF(fusion,w0);
[fusion(w0),linear_fuser(wfuse,scores.scores(scores.sindx,:))]
%fusion(w0)

end
github
bsxfan/meta-embeddings-master
qfuser_v2.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/systems/qfuser_v2.m
1,166
utf_8
e10bf159cbd2dacaf85be8d4a90554f6
function [fusion,params] = qfuser_v2(w,scores) % % Inputs: % % scores: the primary detection scores, for training % D-by-T matrix of T scores for D input systems % % quality_input: K-by-T matrix of quality measures % % Output: % fusion: is numeric if w is numeric, or a handle to an MV2DF, representing: % % y= (alpha'*scores+beta) * sigmoid( gamma'*quality_inputs + delta) % if nargin==0 test_this(); return; end % Create building blocks [Cal,params1] = parallel_cal_augm([],scores.scores); m = size(scores.scores,1)+1; [P,params2] = QQtoP(params1.tail,scores.modelQ,scores.segQ,m); %params.get_w0 = @(wfuse) [params1.get_w0() ;params2.get_w0()]; params.get_w0 = @(wfuse) [params1.get_w0(wfuse) ;params2.get_w0()]; params.tail = params2.tail; % Assemble building blocks % modulate linear fusion with quality fusion = sumcolumns_fh(m,dottimes_of_functions(w,P,Cal)); end function test_this() m = 3; k = 2; n1 = 4; n2 = 5; scores.scores = randn(m,n1*n2); scores.modelQ = randn(k,n1); scores.segQ = randn(k,n2); [fusion,params] = qfuser_v2([],scores); w0 = params.get_w0(); test_MV2DF(fusion,w0); fusion(w0) end
github
bsxfan/meta-embeddings-master
linear_fuser.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/systems/linear_fuser.m
2,654
utf_8
627fab3e121d1d87d9fad2a3234d26f8
function [fusion,params] = linear_fuser(w,scores) % % Does affine fusion of scores: It does a weighted sum of scores and adds % an offset. % % Inputs: % scores: M-by-N matrix of N scores for each of M input systems. % w: Optional: % - when supplied, the output 'fusion' is the vector of fused scores. % - when w=[], the output 'fusion' is a function handle, to be used % for training the fuser. % w is a (K+1)-vector, with one weight per system, followed by the % offset. % % fusion: if w is given, fusion is a vector of N fused scores. % if w is not given, fusion is a function handle, so that % fusion(w) = @(w) linear_fusion(scores,w). % w0: default values for w, to initialize training. % % For training use: % [fuser,params] = linear_fuser(train_scores); % w0 = get_w0(); % w = train_binary_classifier(fuser,...,w0,...); % % For test use: % fused_scores = linear_fuser(test_scores,w); % if nargin==0 test_this(); return; end if ~exist('scores','var') || isempty(scores) fusion = sprintf(['linear fuser:',repmat(' %g',1,length(w))],w); return; end wsz = size(scores,1)+1; [whead,wtail] = splitvec_fh(wsz,w); params.get_w0 = @() zeros(wsz,1); %params.get_w0 = @() randn(wsz,1); params.tail = wtail; fusion = fusion_mv2df(whead,scores); end function test_this() N = 100; dim = 2; % number of used systems % ----------------synthesize training data ------------------- randn('state',0); means = randn(dim,2)*8; %signal [tar,non] = make_data(N,means); % ------------- create system ------------------------------ [fuser,params] = linear_fuser([],[tar,non]); % ------------- train it ------------------------------ ntar = size(tar,2); nnon = size(non,2); classf = [ones(1,ntar),-ones(1,nnon)]; prior = 0.1; maxiters = 50; quiet = true; objfun = []; w0 = params.get_w0(); [w,cxe] = train_binary_classifier(fuser,classf,w0,objfun,prior,[],0,maxiters,[],[],quiet); fprintf('train Cxe = %g\n',cxe); % ------------- test it ------------------------------ [tar,non] = make_data(N,means); scores = [tar,non]; tail = [1;2;3]; wbig = [w;tail]; [fused_scores,params] = linear_fuser(wbig,scores); check_tails = [tail,params.tail], cxe = evaluate_objective(objfun,fused_scores,classf,prior); fprintf('test Cxe = %g\n',cxe); plot(fused_scores); end function [tar,non] = make_data(N,means) [dim,K] = size(means); X = 5*randn(dim,K*N); % noise ii = 1:N; for k=1:K X(:,ii) = bsxfun(@plus,means(:,k),X(:,ii)); ii = ii+N; end N = K*N; tar = X(:,1:N/2); non = X(:,N/2+(1:N/2)); end
github
bsxfan/meta-embeddings-master
qfuser_v3.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/systems/qfuser_v3.m
1,290
utf_8
a2245f6284afa9f203096fc932e8cf07
function [fusion,params] = qfuser_v3(w,scores) % % Inputs: % % scores: the primary detection scores, for training % D-by-T matrix of T scores for D input systems % % quality_input: K-by-T matrix of quality measures % % Output: % fusion: is numeric if w is numeric, or a handle to an MV2DF, representing: % % y= (alpha'*scores+beta) * sigmoid( gamma'*quality_inputs + delta) % if nargin==0 test_this(); return; end % Create building blocks [Cal,params1] = parallel_cal([],scores.scores); m = size(scores.scores,1); [LLH,params2] = QQtoLLH(params1.tail,scores.modelQ,scores.segQ,m); P = LLH; %P = exp_mv2df(logsoftmax_trunc_mv2df(LLH,m)); W = reshape(params2.get_w0(),[],m); W(:) = 0; W(end,:) = 0.5/(m+1); %params.get_w0 = @(wfuse) [params1.get_w0(wfuse) ;params2.get_w0()]; params.get_w0 = @(wfuse) [params1.get_w0(wfuse) ;W(:)]; params.tail = params2.tail; % Assemble building blocks % modulate linear fusion with quality fusion = sumcolumns_fh(m,dottimes_of_functions(w,P,Cal)); end function test_this() m = 3; k = 2; n1 = 4; n2 = 5; scores.scores = randn(m,n1*n2); scores.modelQ = randn(k,n1); scores.segQ = randn(k,n2); [fusion,params] = qfuser_v3([],scores); w0 = params.get_w0([1 2 3 4]'); test_MV2DF(fusion,w0); fusion(w0) end
github
bsxfan/meta-embeddings-master
qfuser_v6.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/systems/qfuser_v6.m
1,013
utf_8
0bcb6e5fbd79494afd1c1c36eff1e95c
function [fusion,w0] = qfuser_v6(w,scores,wfuse) if nargin==0 test_this(); return; end % block 1 f1 = linear_fuser([],scores.scores); w1 = wfuse; [whead,wtail] = splitvec_fh(length(w1)); f1 = f1(whead); % block 2 modelQ = scores.modelQ; [q,n1] = size(modelQ); modelQ = [modelQ;ones(1,n1)]; segQ = scores.segQ; [q2,n2] = size(segQ); segQ = [segQ;ones(1,n2)]; assert(q==q2); q = q + 1; wq = q*(q+1)/2; r = AWB_fh(modelQ',segQ,tril_to_symm_fh(q)); [whead,wtail] = splitvec_fh(wq,wtail); r = r(whead); w2 = zeros(wq,1);w2(end) = -5; % block 3 s = AWB_fh(modelQ',segQ,tril_to_symm_fh(q,wtail)); w3 = w2; % assemble rs = stack([],r,s); fusion = scalibration_fh(stack(w,f1,rs)); w0 = [w1;w2;w3]; end function test_this() m = 3; k = 2; n1 = 4; n2 = 5; scores.scores = randn(m,n1*n2); scores.modelQ = randn(k,n1); scores.segQ = randn(k,n2); wfuse = [1,2,3,4]'; [fusion,w0] = qfuser_v6([],scores,wfuse); test_MV2DF(fusion,w0); [fusion(w0),linear_fuser(wfuse,scores.scores)] %fusion(w0) end
github
bsxfan/meta-embeddings-master
qfuser_v1.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/systems/qfuser_v1.m
1,137
utf_8
8dcda09e63d0f7e6a3f1fc2298b84d7e
function [fusion,params] = qfuser_v1(w,scores) % % Inputs: % % scores: the primary detection scores, for training % D-by-T matrix of T scores for D input systems % % quality_input: K-by-T matrix of quality measures % % Output: % fusion: is numeric if w is numeric, or a handle to an MV2DF, representing: % % y= (alpha'*scores+beta) * sigmoid( gamma'*quality_inputs + delta) % if nargin==0 test_this(); return; end % Create building blocks [linfusion,params1] = linear_fuser([],scores.scores); [Q,params2] = outerprod_of_sigmoids(params1.tail,scores.modelQ,scores.segQ); params.get_w0 = @(ssat) [params1.get_w0(); params2.get_w0(ssat)]; params.tail = params2.tail; % Assemble building blocks % modulate linear fusion with quality fusion = dottimes_of_functions([],Q,linfusion); if ~isempty(w) fusion = fusion(w); end end function test_this() m = 3; k = 2; n1 = 4; n2 = 5; scores.scores = randn(m,n1*n2); scores.modelQ = randn(k,n1); scores.segQ = randn(k,n2); ssat = 0.99; [fusion,params] = qfuser_v1([],scores); w0 = params.get_w0(ssat); test_MV2DF(fusion,w0); fusion(w0) end
github
bsxfan/meta-embeddings-master
qfuser_v7.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/systems/qfuser_v7.m
1,107
utf_8
8d156ad2d97a7aa1b90d702cb2f0a195
function [fusion,w0] = qfuser_v7(w,scores,wfuse) if nargin==0 test_this(); return; end % block 1 f1 = linear_fuser([],scores.scores); w1 = wfuse; [whead,wtail] = splitvec_fh(length(w1)); f1 = f1(whead); % block 2 modelQ = scores.modelQ; [q,n1] = size(modelQ); modelQ = [modelQ;ones(1,n1)]; segQ = scores.segQ; [q2,n2] = size(segQ); segQ = [segQ;ones(1,n2)]; assert(q==q2); q = q + 1; wq = q*(q+1)/2; f2 = AWB_fh(modelQ',segQ,tril_to_symm_fh(q)); w2 = zeros(wq,1); [whead,rs] = splitvec_fh(wq,wtail); f2 = f2(whead); % block 3 n = size(scores.scores,2); map = @(rs) repmat(rs,n,1); transmap =@(RS) sum(reshape(RS,2,[]),2); RS = linTrans(rs,map,transmap); w3 = [-10;-10]; % assemble f12 = sum_of_functions([],[1,1],f1,f2); XRS = stack(w,f12,RS); fusion = scalibration_fh(XRS); w0 = [w1;w2;w3]; end function test_this() m = 3; k = 2; n1 = 4; n2 = 5; scores.scores = randn(m,n1*n2); scores.modelQ = randn(k,n1); scores.segQ = randn(k,n2); wfuse = [1,2,3,4]'; [fusion,w0] = qfuser_v7([],scores,wfuse); test_MV2DF(fusion,w0); [fusion(w0),linear_fuser(wfuse,scores.scores)] end
github
bsxfan/meta-embeddings-master
qfuser_v4.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/systems/qfuser_v4.m
1,388
utf_8
cd65aea99057c92c142fc7e024dc1d53
function [fusion,w0] = qfuser_v4(w,scores,wfuse) % qindx: index set for rows of scores.scores which are per-trial quality % measures. % % sindx: index set for rows of scores.scores which are normal discriminative % scores. if nargin==0 test_this(); return; end sindx = scores.sindx; qindx = scores.qindx; m =length(sindx); % Create building blocks [Cal,w1] = parallel_cal([],scores.scores(sindx,:),wfuse); [whead,wtail] = splitvec_fh(length(w1)); Cal = Cal(whead); [LLH1,w2] = QQtoLLH([],scores.modelQ,scores.segQ,m); [whead,wtail] = splitvec_fh(length(w2),wtail); LLH1 = LLH1(whead); W2 = reshape(w2,[],m); W2(:) = 0; W2(end,:) = 0.5/(m+1); w2 = W2(:); [LLH2,w3] = QtoLLH([],scores.scores(qindx,:),m); LLH2 = LLH2(wtail); LLH = sum_of_functions([],[1,1],LLH1,LLH2); %LLH = LLH1; P = LLH; %P = exp_mv2df(logsoftmax_trunc_mv2df(LLH,m)); w0 = [w1;w2;w3]; % Assemble building blocks % modulate linear fusion with quality fusion = sumcolumns_fh(m,dottimes_of_functions(w,P,Cal)); end function test_this() m = 5; k = 2; n1 = 4; n2 = 5; scores.sindx = [1,2,3]; scores.qindx = [4,5]; scores.scores = randn(m,n1*n2); scores.modelQ = randn(k,n1); scores.segQ = randn(k,n2); wfuse = [1,2,3,4]'; [fusion,w0] = qfuser_v4([],scores,wfuse); %test_MV2DF(fusion,w0); [fusion(w0),linear_fuser(wfuse,scores.scores(scores.sindx,:))] %fusion(w0) end
github
bsxfan/meta-embeddings-master
scal_fuser.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/systems/scalibration/scal_fuser.m
2,918
utf_8
7e49185b74a064be721d9c243a08c07f
function [fusion,params] = scal_fuser(w,scores) % % Does scal calibration % % Inputs: % scores: M-by-N matrix of N scores for each of M input systems. % w: Optional: % - when supplied, the output 'fusion' is the vector of fused scores. % - when w=[], the output 'fusion' is a function handle, to be used % for training the fuser. % w is a (K+1)-vector, with one weight per system, followed by the % offset. % % fusion: if w is given, fusion is a vector of N fused scores. % if w is not given, fusion is a function handle, so that % fusion(w) = @(w) linear_fusion(scores,w). % w0: default values for w, to initialize training. % % For training use: % [fuser,params] = scal_fuser(train_scores); % w0 = params.get_w0(); % w = train_binary_classifier(fuser,...,w0,...); % % For test use: % fused_scores = scal_fuser(test_scores,w); % if nargin==0 test_this(); return; end if ~exist('scores','var') || isempty(scores) fusion = sprintf(['scal fuser:',repmat(' %g',1,length(w))],w); return; end [m,n] = size(scores); wsz = size(scores,1)+1; [wlin,wtail] = splitvec_fh(wsz); [rs,wtail] = splitvec_fh(2,wtail); x = fusion_mv2df(wlin,scores); xrs = stack([],x,rs); fusion = scal_simple_fh(xrs); if ~isempty(w) fusion = fusion(w); wtail = wtail(w); end params.get_w0 = @() [zeros(wsz,1);-10;-10]; params.tail = wtail; end function test_this() N = 10; dim = 2; % number of used systems % ----------------synthesize training data ------------------- randn('state',0); means = randn(dim,2)*8; %signal [tar,non] = make_data(N,means); tar = [tar,[min(non(1,:));min(non(2,:))]]; non = [non,[max(tar(1,:));max(tar(2,:))]]; % ------------- create system ------------------------------ [fuser,params] = scal_fuser([],[tar,non]); w0 = params.get_w0(); test_mv2df(fuser,w0); return; % ------------- train it ------------------------------ ntar = size(tar,2); nnon = size(non,2); classf = [ones(1,ntar),-ones(1,nnon)]; prior = 0.1; maxiters = 50; quiet = false; objfun = []; w0 = params.get_w0(); [w,cxe] = train_binary_classifier(fuser,classf,w0,objfun,prior,[],0,maxiters,[],[],quiet); fprintf('train Cxe = %g\n',cxe); % ------------- test it ------------------------------ [tar,non] = make_data(N,means); ntar = size(tar,2); nnon = size(non,2); classf = [ones(1,ntar),-ones(1,nnon)]; scores = [tar,non]; tail = [1;2;3]; wbig = [w;tail]; [fused_scores,params] = scal_fuser(wbig,scores); check_tails = [tail,params.tail], cxe = evaluate_objective(objfun,fused_scores,classf,prior); fprintf('test Cxe = %g\n',cxe); plot(fused_scores); end function [tar,non] = make_data(N,means) [dim,K] = size(means); X = 5*randn(dim,K*N); % noise ii = 1:N; for k=1:K X(:,ii) = bsxfun(@plus,means(:,k),X(:,ii)); ii = ii+N; end N = K*N; tar = X(:,1:N/2); non = X(:,N/2+(1:N/2)); end
github
bsxfan/meta-embeddings-master
scal_fuser_slow.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/systems/scalibration/scal_fuser_slow.m
2,972
utf_8
abc2a78dc2b6cf08cfdd508f4dabdb71
function [fusion,params] = scal_fuser_slow(w,scores) % % Does scal calibration % % Inputs: % scores: M-by-N matrix of N scores for each of M input systems. % w: Optional: % - when supplied, the output 'fusion' is the vector of fused scores. % - when w=[], the output 'fusion' is a function handle, to be used % for training the fuser. % w is a (K+1)-vector, with one weight per system, followed by the % offset. % % fusion: if w is given, fusion is a vector of N fused scores. % if w is not given, fusion is a function handle, so that % fusion(w) = @(w) linear_fusion(scores,w). % w0: default values for w, to initialize training. % % For training use: % [fuser,params] = scal_fuser(train_scores); % w0 = params.get_w0(); % w = train_binary_classifier(fuser,...,w0,...); % % For test use: % fused_scores = scal_fuser(test_scores,w); % if nargin==0 test_this(); return; end if ~exist('scores','var') || isempty(scores) fusion = sprintf(['scal fuser:',repmat(' %g',1,length(w))],w); return; end [m,n] = size(scores); wsz = size(scores,1)+1; [wlin,wtail] = splitvec_fh(wsz); [rs,wtail] = splitvec_fh(2,wtail); map = @(rs) repmat(rs,n,1); transmap =@(RS) sum(reshape(RS,2,[]),2); RS = linTrans(rs,map,transmap); X = fusion_mv2df(wlin,scores); XRS = stack([],X,RS); fusion = scalibration_fh(XRS); if ~isempty(w) fusion = fusion(w); wtail = wtail(w); end params.get_w0 = @() [zeros(wsz,1);-5;-5]; params.tail = wtail; end function test_this() N = 1000; dim = 2; % number of used systems % ----------------synthesize training data ------------------- randn('state',0); means = randn(dim,2)*8; %signal [tar,non] = make_data(N,means); tar = [tar,[min(non(1,:));min(non(2,:))]]; non = [non,[max(tar(1,:));max(tar(2,:))]]; % ------------- create system ------------------------------ [fuser,params] = scal_fuser([],[tar,non]); % ------------- train it ------------------------------ ntar = size(tar,2); nnon = size(non,2); classf = [ones(1,ntar),-ones(1,nnon)]; prior = 0.1; maxiters = 50; quiet = false; objfun = []; w0 = params.get_w0(); [w,cxe] = train_binary_classifier(fuser,classf,w0,objfun,prior,[],0,maxiters,[],[],quiet); fprintf('train Cxe = %g\n',cxe); % ------------- test it ------------------------------ [tar,non] = make_data(N,means); ntar = size(tar,2); nnon = size(non,2); classf = [ones(1,ntar),-ones(1,nnon)]; scores = [tar,non]; tail = [1;2;3]; wbig = [w;tail]; [fused_scores,params] = scal_fuser(wbig,scores); check_tails = [tail,params.tail], cxe = evaluate_objective(objfun,fused_scores,classf,prior); fprintf('test Cxe = %g\n',cxe); plot(fused_scores); end function [tar,non] = make_data(N,means) [dim,K] = size(means); X = 5*randn(dim,K*N); % noise ii = 1:N; for k=1:K X(:,ii) = bsxfun(@plus,means(:,k),X(:,ii)); ii = ii+N; end N = K*N; tar = X(:,1:N/2); non = X(:,N/2+(1:N/2)); end
github
bsxfan/meta-embeddings-master
logsumexp_special.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/systems/scalibration/logsumexp_special.m
1,102
utf_8
a15ffa60b181fdc8b0a1e3fb4bcfd403
function [y,deriv] = logsumexp_special(w) % This is a MV2DF. See MV2DF_API_DEFINITION.readme. % % If w = [x;r], where r is scalar and x vector, then % y = log(exp(x)+exp(r)) if nargin==0 test_this(); return; end if isempty(w) y = @(w)logsumexp_special(w); return; end if isa(w,'function_handle') outer = logsumexp_special([]); y = compose_mv(outer,w,[]); return; end [r,x] = get_rx(w); rmax = (r>x); rnotmax = ~rmax; y = zeros(size(x)); y(rmax) = log(exp(x(rmax)-r)+1)+r; y(rnotmax) = log(exp(r-x(rnotmax))+1)+x(rnotmax); if nargout>1 deriv = @(Dy) deriv_this(Dy,r,x,y); end end function [r,x] = get_rx(w) w = w(:); r = w(end); x = w(1:end-1); end function [g,hess,linear] = deriv_this(dy,r,x,y) gr = exp(r-y); gx = exp(x-y); g = [gx.*dy(:);gr.'*dy(:)]; linear = false; hess = @(dw) hess_this(dw,dy,gr,gx); end function [h,Jv] = hess_this(dw,dy,gr,gx) [dr,dx] = get_rx(dw); p = gr.*gx.*dy; h = [p.*(dx-dr);dr*sum(p)-dx.'*p]; if nargout>1 Jv = gx.*dx+dr*gr; end end function test_this() f = logsumexp_special([]); test_MV2DF(f,randn(5,1)); end
github
bsxfan/meta-embeddings-master
scalibration_fh.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/systems/scalibration/scalibration_fh.m
1,735
utf_8
b9918a8e2a9fa07dfcef33933013931b
function f = scalibration_fh(w) % This is a factory for a function handle to an MV2DF, which represents % the vectorization of the s-calibration function. The whole mapping works like % this, in MATLAB-style pseudocode: % % If y = f([x;r;s]), where x,r,s are column vectors of size m, then y % is a column vector of size m and % % y = log( exp(x) + exp(r) ) + log( exp(-s) + 1 ) % - log( exp(x) + exp(-s) ) - log( exp(r) + 1 ) % % Viewed as a data-dependent calibration transform from x to y, with % parameters r and s, then: % % r: is the log-odds that x is a typical non-target score, given that % there really is a target. % % s: is the log-odds that x is a typical target score, given that % there really is a non-target. % % Ideally r and s should be large negative, in which case this is almost % an identity transform from x to y, but with saturation at large % positive and negative values. Increasing r increases the lower % saturation level. Increasing s decreases the upper saturation level. if nargin==0 test_this(); return; end x = columnJofN_fh(1,3); r = columnJofN_fh(2,3); s = columnJofN_fh(3,3); neg = @(x)-x; negr = linTrans(r,neg,neg); negs = linTrans(s,neg,neg); num1 = logsumexp_fh(2,2,stack([],x,r)); num2 = neglogsigmoid_fh(s); den1 = neglogsigmoid_fh(negr); den2 = logsumexp_fh(2,2,stack([],x,negs)); f = sum_of_functions([],[1 1],num1,num2); f = sum_of_functions([],[1 -1],f,den1); f = sum_of_functions([],[1 -1],f,den2); if exist('w','var') && ~isempty(w) f = f(w); end end function test_this() n = 3; x = randn(n,1); r = randn(n,1); s = randn(n,1); X = [x;r;s]; f = scalibration_fh([]); test_MV2DF(f,X(:)); end
github
bsxfan/meta-embeddings-master
scalibration_fragile_fh.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/systems/scalibration/scalibration_fragile_fh.m
2,389
utf_8
8eec3ccf6bcd5f130a3d399194acd676
function f = scalibration_fragile_fh(direction,w) % % Don't use this function, it is just for reference. It will break for % large argument values. % % This is a factory for a function handle to an MV2DF, which represents % the vectorization of the logsumexp function. The whole mapping works like % this, in MATLAB-style psuedocode: % % F: R^(m*n) --> R^n, where y = F(x) is computed thus: % % n = length(x)/m % If direction=1, X = reshape(x,m,n), or % if direction=1, X = reshape(x,n,m). % y = log(sum(exp(X),direction)) % % Inputs: % m: the number of inputs to each individual logsumexp calculation. % direction: 1 sums down columns, or 2 sums accross rows. % w: optional, if ssupplied % % Outputs: % f: a function handle to the MV2DF described above. % % see: MV2DF_API_DEFINITION.readme if nargin==0 test_this(); return; end f = vectorized_function([],@(X)F0(X,direction),3,direction); if exist('w','var') && ~isempty(w) f = f(w); end end function [y,f1] = F0(X,dr) if dr==1 x = X(1,:); p = X(2,:); q = X(3,:); else x = X(:,1); p = X(:,2); q = X(:,3); end expx = exp(x); num = (expx-1).*p+1; den = (expx-1).*q+1; y = log(num)-log(den); f1 = @() F1(expx,p,q,num,den,dr); end function [J,f2,linear] = F1(expx,p,q,num,den,dr) linear = false; if dr==1 J = [expx.*(p-q)./(num.*den);(expx-1)./num;-(expx-1)./den]; else J = [expx.*(p-q)./(num.*den),(expx-1)./num,-(expx-1)./den]; end f2 = @(dX) F2(dX,expx,p,q,num,den,dr); end function H = F2(dX,expx,p,q,num,den,dr) d2dx2 = -expx.*(p-q).*(p+q+p.*q.*(expx.^2-1)-1)./(num.^2.*den.^2); d2dxdp = expx./num.^2; d2dxdq = -expx./den.^2; d2dp2 = -(expx-1).^2./num.^2; d2dq2 = (expx-1).^2./den.^2; if dr==1 dx = dX(1,:); dp = dX(2,:); dq = dX(3,:); H = [ dx.*d2dx2+dp.*d2dxdp+dq.*d2dxdq; ... dx.*d2dxdp+dp.*d2dp2; ... dx.*d2dxdq+dq.*d2dq2... ]; else dx = dX(:,1); dp = dX(:,2); dq = dX(:,3); H = [ dx.*d2dx2+dp.*d2dxdp+dq.*d2dxdq, ... dx.*d2dxdp+dp.*d2dp2, ... dx.*d2dxdq+dq.*d2dq2... ]; end end function test_this() n = 10; x = randn(1,n); p = rand(1,n); q = rand(1,n); X = [x;p;q]; fprintf('testing dir==1:\n'); f = scalibration_fragile_fh(1); test_MV2DF(f,X(:)); fprintf('\n\n\ntesting dir==2:\n'); f = scalibration_fragile_fh(2); X = X'; test_MV2DF(f,X(:)); end
github
bsxfan/meta-embeddings-master
scal_simple_fh.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/systems/scalibration/scal_simple_fh.m
1,903
utf_8
b6e3992c13b4424d2129302a3c51424c
function f = scal_simple_fh(w) % This is a factory for a function handle to an MV2DF, which represents % the vectorization of the s-calibration function. The whole mapping works like % this, in MATLAB-style pseudocode: % % If y = f([x;r;s]), where r,s are scalar, x is column vector of size m, % then y is a column vector of size m and % % y_i = log( exp(x_i) + exp(r) ) + log( exp(-s) + 1 ) % - log( exp(x_i) + exp(-s) ) - log( exp(r) + 1 ) % % Viewed as a data-dependent calibration transform from x to y, with % parameters r and s, then: % % r: is the log-odds that x is a typical non-target score, given that % there really is a target. % % s: is the log-odds that x is a typical target score, given that % there really is a non-target. % % Ideally r and s should be large negative, in which case this is almost % an identity transform from x to y, but with saturation at large % positive and negative values. Increasing r increases the lower % saturation level. Increasing s decreases the upper saturation level. if nargin==0 test_this(); return; end [x,rs] = splitvec_fh(-2); [r,s] = splitvec_fh(-1,rs); neg = @(t)-t; negr = linTrans(r,neg,neg); negs = linTrans(s,neg,neg); linmap = linTrans([],@(x)map(x),@(y)transmap(y)); %add last element to others num1 = logsumexp_special(stack([],x,r)); num2 = neglogsigmoid_fh(s); num = linmap(stack([],num1,num2)); den1 = neglogsigmoid_fh(negr); den2 = logsumexp_special(stack([],x,negs)); den = linmap(stack([],den2,den1)); f = sum_of_functions([],[1 -1],num,den); if exist('w','var') && ~isempty(w) f = f(w); end end function y = map(x) y = x(1:end-1)+x(end); end function x = transmap(y) x = [y(:);sum(y)]; end function test_this() n = 3; x = randn(n,1); r = randn(1,1); s = randn(1,1); X = [x;r;s]; f = scal_simple_fh([]); test_MV2DF(f,X(:)); end
github
bsxfan/meta-embeddings-master
quality_fuser_v3.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/systems/aside/quality_fuser_v3.m
1,843
utf_8
1be42594eb854e9b0b4d89daa27c0759
function [fusion,params] = quality_fuser_v3(w,scores,train_vecs,test_vecs,train_ndx,test_ndx,ddim) % % Inputs: % % scores: the primary detection scores, for training % D-by-T matrix of T scores for D input systems % % train_vecs: K1-by-M matrix, one column-vector for each of M training % segemnts % % test_vecs: K2-by-N matrix, one column-vector for each of N training % segemnts % % train_ndx: 1-by-T index where train_ndx(t) is the index into train_vecs % for trial t. % % test_ndx: 1-by-T index where test_ndx(t) is the index into test_vecs % for trial t. % ddim: dimension of subspace for quality distandce calculation, % where ddim <= min(K1,K2) % % Outputs: % if nargin==0 test_this(); return; end % Check data dimensions [K1,M] = size(train_vecs); [K2,N] = size(test_vecs); assert(ddim<min(K1,K2)); [D,T] = size(scores); assert(T == length(train_ndx)); assert(T == length(test_ndx)); assert(max(train_ndx)<=M); assert(max(test_ndx)<=N); % Create building blocks [linfusion,params1] = linear_fuser([],scores); [quality,params2] = sigmoid_log_sumsqdist(params1.tail,train_vecs,test_vecs,train_ndx,test_ndx,ddim); params.get_w0 = @(ssat) [params1.get_w0(); params2.get_w0(ssat)]; params.tail = params2.tail; % Assemble building blocks % modulate linear fusion with quality fusion = dottimes_of_functions([],quality,linfusion); if ~isempty(w) fusion = fusion(w); end end function test_this() D = 2; N = 5; T = 3; Q = 4; ndx = ceil(T.*rand(1,N)); scores = randn(D,N); train = randn(Q,T); test = randn(Q,T); ddim = 2; ssat = 0.99; [fusion,params] = quality_fuser_v3([],scores,train,test,ndx,ndx,ddim); w0 = params.get_w0(ssat); test_MV2DF(fusion,w0); quality_fuser_v3(w0,scores,train,test,ndx,ndx,ddim), end
github
bsxfan/meta-embeddings-master
quality_fuser_v1.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/systems/aside/quality_fuser_v1.m
2,225
utf_8
64802a2f8ee68bcd9f60a31166e64fdb
function [fusion,params] = quality_fuser_v1(w,scores,train_vecs,test_vecs,train_ndx,test_ndx,ddim) % % Inputs: % % scores: the primary detection scores, for training % D-by-T matrix of T scores for D input systems % % train_vecs: K1-by-M matrix, one column-vector for each of M training % segments % % test_vecs: K2-by-N matrix, one column-vector for each of N training % segemnts % % train_ndx: 1-by-T index where train_ndx(t) is the index into train_vecs % for trial t. % % test_ndx: 1-by-T index where test_ndx(t) is the index into test_vecs % for trial t. % ddim: dimension of subspace for quality distandce calculation, % where ddim <= min(K1,K2) if nargin==0 test_this(); return; end % Check data dimensions [K1,M] = size(train_vecs); [K2,N] = size(test_vecs); assert(ddim<min(K1,K2)); [D,T] = size(scores); assert(T == length(train_ndx)); assert(T == length(test_ndx)); assert(max(train_ndx)<=M); assert(max(test_ndx)<=N); % Create building blocks [linfusion,params1] = linear_fuser([],scores); [train_quality,params2] = sigmoid_logdistance(params1.tail,train_vecs,ddim); train_distributor = duplicator_fh(train_ndx,size(train_vecs,2)); train_quality = train_distributor(train_quality); [test_quality,params3] = sigmoid_logdistance(params2.tail,test_vecs,ddim); test_distributor = duplicator_fh(test_ndx,size(test_vecs,2)); test_quality = test_distributor(test_quality); params.get_w0 = @(ssat) [params1.get_w0(); params2.get_w0(ssat); params3.get_w0(ssat)]; params.tail = params3.tail; % Assemble building blocks % combine train and test quality quality = dottimes_of_functions([],train_quality,test_quality); % modulate linear fusion with quality fusion = dottimes_of_functions([],quality,linfusion); if ~isempty(w) fusion = fusion(w); end end function test_this() D = 2; N = 5; T = 3; Q = 4; ndx = ceil(T.*rand(1,N)); scores = randn(D,N); train = randn(Q,T); test = randn(Q,T); ddim = 2; ssat = 0.999; [fusion,params] = quality_fuser_v1([],scores,train,test,ndx,ndx,ddim); w0 = params.get_w0(ssat); test_MV2DF(fusion,w0); quality_fuser_v1(w0,scores,train,test,ndx,ndx,ddim), end
github
bsxfan/meta-embeddings-master
quality_fuser_v2.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/systems/aside/quality_fuser_v2.m
1,827
utf_8
702d7721d2e75e4164bd1fc9b7ba4c57
function [fusion,params] = quality_fuser_v2(w,scores,train_vecs,test_vecs,train_ndx,test_ndx,ddim) % % Inputs: % % scores: the primary detection scores, for training % D-by-T matrix of T scores for D input systems % % train_vecs: K1-by-M matrix, one column-vector for each of M training % segemnts % % test_vecs: K2-by-N matrix, one column-vector for each of N training % segemnts % % train_ndx: 1-by-T index where train_ndx(t) is the index into train_vecs % for trial t. % % test_ndx: 1-by-T index where test_ndx(t) is the index into test_vecs % for trial t. % ddim: dimension of subspace for quality distandce calculation, % where ddim <= min(K1,K2) if nargin==0 test_this(); return; end % Check data dimensions [K1,M] = size(train_vecs); [K2,N] = size(test_vecs); assert(ddim<min(K1,K2)); [D,T] = size(scores); assert(T == length(train_ndx)); assert(T == length(test_ndx)); assert(max(train_ndx)<=M); assert(max(test_ndx)<=N); % Create building blocks [linfusion,params1] = linear_fuser([],scores); [quality,params2] = prod_sigmoid_logdist(params1.tail,train_vecs,test_vecs,train_ndx,test_ndx,ddim); params.get_w0 = @(ssat) [params1.get_w0(); params2.get_w0(ssat)]; params.tail = params2.tail; % Assemble building blocks % modulate linear fusion with quality fusion = dottimes_of_functions([],quality,linfusion); if ~isempty(w) fusion = fusion(w); end end function test_this() D = 2; N = 5; T = 3; Q = 4; ndx = ceil(T.*rand(1,N)); scores = randn(D,N); train = randn(Q,T); test = randn(Q,T); ddim = 2; ssat = 0.99; [fusion,params] = quality_fuser_v2([],scores,train,test,ndx,ndx,ddim); w0 = params.get_w0(ssat); test_MV2DF(fusion,w0); quality_fuser_v2(w0,scores,train,test,ndx,ndx,ddim), end
github
bsxfan/meta-embeddings-master
quality_fuser_v4.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/systems/aside/quality_fuser_v4.m
1,217
utf_8
d5315c3a78ba4e1277f9493c119d0cc8
function [fusion,params] = quality_fuser_v4(w,scores,quality_inputs) % % Inputs: % % scores: the primary detection scores, for training % D-by-T matrix of T scores for D input systems % % quality_input: K-by-T matrix of quality measures % % Output: % fusion: is numeric if w is numeric, or a handle to an MV2DF, representing: % % y= (alpha'*scores+beta) * sigmoid( gamma'*quality_inputs + delta) % if nargin==0 test_this(); return; end % Check data dimensions [D,T] = size(scores); [K,T2] = size(quality_inputs); assert(T==T2); % Create building blocks [linfusion,params1] = linear_fuser([],scores); [quality,params2] = fused_sigmoid(params1.tail,quality_inputs); params.get_w0 = @(ssat) [params1.get_w0(); params2.get_w0(ssat)]; params.tail = params2.tail; % Assemble building blocks % modulate linear fusion with quality fusion = dottimes_of_functions([],quality,linfusion); if ~isempty(w) fusion = fusion(w); end end function test_this() D = 4; T = 5; K = 3; scores = randn(D,T); Q = randn(K,T); ssat = 0.99; [fusion,params] = quality_fuser_v4([],scores,Q); w0 = params.get_w0(ssat); test_MV2DF(fusion,w0); w0(D+1)=1; quality_fuser_v4(w0,scores,Q), end
github
bsxfan/meta-embeddings-master
sigmoid_logdistance.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/quality_modules/sigmoid_logdistance.m
1,561
utf_8
b124d29ef74e835d894f8dd7de72c760
function [sld,params] = sigmoid_logdistance(w,input_data,ddim) % % Algorithm: sld = sigmoid( % log( % sum(bsxfun(@minus,M*input_data,c).^2,1) % )) % % % Inputs: % w: is vec([M,c]), where M is ddim-by-D and c is ddim-by-1 % Use w=[] to let output sld be an MV2DF function handle. % % input_data: D-by-T matrix % % ddim: the first dimension of the W matrix given as the first % parameter to run_sys % % Outputs: % sld: function handle (if w=[]), or numeric % params.get_w0(ssat): returns w0 for optimization initialization, % 0<ssat<1 is required average sigmoid output. % params.tail: is tail of parameter w, which is not consumed by this % function. if nargin==0 test_this(); return; end datadim = size(input_data,1); wsz = ddim*(datadim+1); [whead,wtail] = splitvec_fh(wsz,w); params.get_w0 = @(ssat) init_w0(ssat); params.tail = wtail; dist = square_distance_mv2df(whead,input_data,ddim); sld = one_over_one_plus_w_mv2df(dist); function w0 = init_w0(ssat) W0 = randn(ddim,datadim+1); W0(:,end) = 0; % centroid from which distances are computed d0 = (1-ssat)/ssat; d = square_distance_mv2df(W0(:),input_data,ddim); W0 = sqrt(d0/median(d))*W0; w0 = W0(:); end end function test_this() K = 5; N = 10; data = randn(N,K); ddim = 3; ssat = 0.99; [sys,params] = sigmoid_logdistance([],data,ddim); w0 = params.get_w0(ssat); test_MV2DF(sys,w0); dist = sigmoid_logdistance(w0,data,ddim), end
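The composed MV2DF returns 1./(1+d), where d is the squared distance after projection; this equals sigmoid(-log(d)), so small distances map to quality values near 1. A minimal stand-alone sketch of that quantity, assuming w = vec([M c]) with local stand-in names:

% Minimal numeric sketch of the sigmoid-of-log-distance quality measure.
D = 5; T = 10; ddim = 3;
X = randn(D,T);                                          % input_data
M = randn(ddim,D); c = randn(ddim,1);                    % stand-ins for the slices of w
d = sum(bsxfun(@minus,M*X,c).^2,1);                      % squared distance per segment
sld = 1./(1+d);                                          % what the composed MV2DF returns
assert(max(abs(sld - 1./(1+exp(log(d))))) < 1e-12);      % equals sigmoid(-log(d))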
github
bsxfan/meta-embeddings-master
QtoLLH.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/quality_modules/QtoLLH.m
612
utf_8
e0bc4e7d0bfd4082fc37fb474bc44c8c
function [LLH,w0] = QtoLLH(w,Q,n) % if nargin==0 test_this(); return; end if ~exist('Q','var') || isempty(Q) LLH = sprintf(['QtoLLH:',repmat(' %g',1,length(w))],w); return; end [m,k] = size(Q); wsz = m*n; if nargout>1, w0 = zeros(wsz,1); end LLH = linTrans(w,@(w)map_this(w),@(w)transmap_this(w)); function y = map_this(w) w = reshape(w,n,m); y = w*Q; end function w = transmap_this(y) y = reshape(y,n,k); w = y*Q.'; end end function test_this() Q = randn(2,10); [sys,w0] = QtoLLH([],Q,3); test_MV2DF(sys,w0); end
github
bsxfan/meta-embeddings-master
fused_sigmoid.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/quality_modules/fused_sigmoid.m
1,293
utf_8
1f35e45a3c945008307dd1222a281bb8
function [ps,params] = fused_sigmoid(w,input_data) % % Algorithm: ps = sigmoid( alpha'*input_data +beta) % % % Inputs: % w: is [alpha; beta], where alpha is D-by-1 and beta is scalar. % Use w=[] to let output ps be an MV2DF function handle. % If w is a function handle to an MV2DF then ps is the function handle % to the composition of w and this function. % % input_data: D-by-T matrix % % % Outputs: % ps: function handle (if w=[], or w is handle), or numeric T-by-1 % params.get_w0(ssat): returns w0 for optimization initialization, % 0<ssat<1 is required average sigmoid output. % params.tail: is tail of parameter w, which is not consumed by this % function. if nargin==0 test_this(); return; end [dim,n] = size(input_data); wsz = dim+1; [whead,wtail] = splitvec_fh(wsz,w); params.get_w0 = @(ssat) init_w0(ssat,dim); params.tail = wtail; y = fusion_mv2df(whead,input_data); ps = sigmoid_mv2df(y); function w0 = init_w0(ssat,dim) alpha = zeros(dim,1); beta = logit(ssat); w0 = [alpha;beta]; end end function test_this() K = 5; T = 10; data = randn(K,T); ssat = 0.99; [sys,params] = fused_sigmoid([],data); w0 = params.get_w0(ssat); test_MV2DF(sys,w0); ps = fused_sigmoid(w0,data), end
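The get_w0 contract is easiest to see numerically: with alpha set to zero and beta = logit(ssat), every sigmoid output equals ssat at the initial point. A minimal sketch, using a local logistic/logit pair rather than the toolkit's helpers:

% Sketch of the w0 contract: alpha = 0, beta = logit(ssat) gives ps == ssat.
sig = @(x) 1./(1+exp(-x));
logit_ = @(p) log(p./(1-p));                             % local stand-in for logit
D = 5; T = 10; ssat = 0.99;
X = randn(D,T);
alpha = zeros(D,1); beta = logit_(ssat);                 % assumed w0 layout [alpha;beta]
ps = sig(alpha.'*X + beta);                              % 1-by-T, all equal to ssat
assert(max(abs(ps - ssat)) < 1e-12);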
github
bsxfan/meta-embeddings-master
sigmoid_log_sumsqdist.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/quality_modules/sigmoid_log_sumsqdist.m
1,638
utf_8
0b9f81df4bc93fe52ac7dbaa98594160
function [sig,params] = sigmoid_log_sumsqdist(w,data1,data2,ndx1,ndx2,ddim) % % Similar to prod_sigmoid_logdist, but adds square distances from two sides % before doing sigmoid(log()). % if nargin==0 test_this(); return; end datadim = size(data1,1); assert(datadim==size(data2,1),'data1 and data2 must have same number of rows'); assert(length(ndx1)==length(ndx2)); assert(max(ndx1)<=size(data1,2)); assert(max(ndx2)<=size(data2,2)); wsz = ddim*(datadim+1); [whead,wtail] = splitvec_fh(wsz,w); params.get_w0 = @(ssat) init_w0(ssat); params.tail = wtail; sqd1 = square_distance_mv2df([],data1,ddim); %Don't put whead in here, sqd2 = square_distance_mv2df([],data2,ddim); %or here. Will cause whead to be called twice. % distribute over trials distrib = duplicator_fh(ndx1,size(data1,2)); sqd1 = distrib(sqd1); distrib = duplicator_fh(ndx2,size(data2,2)); sqd2 = distrib(sqd2); sumsq_dist = sum_of_functions([],[1,1],sqd1,sqd2); sig = one_over_one_plus_w_mv2df(sumsq_dist); sig = sig(whead); %Finally plug whead in here. function w0 = init_w0(ssat) W0 = randn(ddim,datadim+1); %subspace projector W0(:,end) = 0; % centroid from which distances are computed d0 = (1-ssat)/ssat; d = sumsq_dist(W0(:)); W0 = sqrt(d0/median(d))*W0; w0 = W0(:); end end function test_this() K = 5; N1 = 10; N2 = 2*N1; data1 = randn(K,N1); data2 = randn(K,N2); ndx1 = [1:N1,1:N1]; ndx2 = 1:N2; ddim = 3; ssat = 0.99; [sys,params] = sigmoid_log_sumsqdist([],data1,data2,ndx1,ndx2,ddim); w0 = params.get_w0(ssat); test_MV2DF(sys,w0); dist = sigmoid_log_sumsqdist(w0,data1,data2,ndx1,ndx2,ddim), end
github
bsxfan/meta-embeddings-master
prmtrzd_sig_log_dist.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/quality_modules/prmtrzd_sig_log_dist.m
1,767
utf_8
edf7387841000e5933bda732bca5b79b
function [ps,params] = prmtrzd_sig_log_dist(w,input_data,ddim) % % Algorithm: ps = sigmoid( % offs+scal*log( % sum(bsxfun(@minus,M*input_data,c).^2,1) % )) % % % Inputs: % w: is [ vec(M); c; scal; offs], where M is ddim-by-D; c is ddim-by-1; % and scal and offs are scalar. % Use w=[] to let output ps be an MV2DF function handle. % If w is a function handle to an MV2DF then ps is the function handle % to the composition of w and this function. % % input_data: D-by-T matrix % % ddim: the first dimension of the M matrix % % Outputs: % ps: function handle (if w=[], or w is handle), or numeric T-by-1 % params.get_w0(ssat): returns w0 for optimization initialization, % 0<ssat<1 is required average sigmoid output. % params.tail: is tail of parameter w, which is not consumed by this % function. if nargin==0 test_this(); return; end [datadim,n] = size(input_data); Mc_sz = ddim*(datadim+1); wsz = Mc_sz + 2; [whead,wtail] = splitvec_fh(wsz,w); params.get_w0 = @(ssat) init_w0(ssat); params.tail = wtail; [M_c,scal_offs] = splitvec_fh(Mc_sz,w); ld = log_distance_mv2df(M_c,input_data,ddim); tld = scale_and_translate(w,ld,scal_offs,1,n); ps = sigmoid_mv2df(tld); function w0 = init_w0(ssat) M = randn(ddim,datadim); c = zeros(ddim,1); scal = 1; offs = 0; w0 = [M(:);c;scal;offs]; x = ld(w0); y = logit(ssat); scal = y/median(x); w0(end-1) = scal; end end function test_this() K = 5; N = 10; data = randn(N,K); ddim = 3; ssat = 0.99; [sys,params] = prmtrzd_sig_log_dist([],data,ddim); w0 = params.get_w0(ssat); test_MV2DF(sys,w0); ps = prmtrzd_sig_log_dist(w0,data,ddim), end
github
bsxfan/meta-embeddings-master
QQtoLLH.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/quality_modules/QQtoLLH.m
623
utf_8
73d37a5922aafaa7dd4dd6e5a2cfbe51
function [LLH,w0] = QQtoLLH(w,qleft,qright,n) % if nargin==0 test_this(); return; end qleft = [qleft;ones(1,size(qleft,2))]; qright = [qright;ones(1,size(qright,2))]; qdim = size(qleft,1); qdim2 = size(qright,1); assert(qdim==qdim2); q2 = qdim*(qdim+1)/2; wsz = n*q2; if nargout>1, w0 = zeros(wsz,1); end lh = cell(1,n); tail = w; for i=1:n [wi,tail] = splitvec_fh(q2,tail); lh{i} = AWB_fh(qleft',qright,tril_to_symm_fh(qdim,wi)); end LLH = interleave(w,lh); end function test_this() qleft = randn(3,3); qright = randn(3,2); [sys,w0] = QQtoLLH([],qleft,qright,2); test_MV2DF(sys,w0); end
github
bsxfan/meta-embeddings-master
QQtoP.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/quality_modules/QQtoP.m
771
utf_8
0a940f8a8a56510a32ad6a45accddc02
function [P,params] = QQtoP(w,qleft,qright,n) % if nargin==0 test_this(); return; end qleft = [qleft;ones(1,size(qleft,2))]; qright = [qright;ones(1,size(qright,2))]; [qdim,nleft] = size(qleft); [qdim2,nright] = size(qright); assert(qdim==qdim2); q2 = qdim*(qdim+1)/2; wsz = n*q2; [whead,wtail] = splitvec_fh(wsz); params.get_w0 = @() zeros(wsz,1); params.tail = wtail; lh = cell(1,n); for i=1:n [wi,whead] = splitvec_fh(q2,whead); lh{i} = AWB_fh(qleft',qright,tril_to_symm_fh(qdim,wi)); end P = exp_mv2df(logsoftmax_mv2df(interleave(w,lh),n)); %P = interleave(w,lh); end function test_this() qleft = randn(3,3); qright = randn(3,2); [sys,params] = QQtoP([],qleft,qright,2); w0 = params.get_w0(); test_MV2DF(sys,w0); P = sys(w0), end
github
bsxfan/meta-embeddings-master
prod_sigmoid_logdist.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/quality_modules/prod_sigmoid_logdist.m
2,506
utf_8
f31a256c5434fca4b6c0641d23a2ebc1
function [sig,params] = prod_sigmoid_logdist(w,data1,data2,ndx1,ndx2,ddim) % % Algorithm: sig = distribute(ndx1,sigmoid( % log( % sum(bsxfun(@minus,M*data_1,c).^2,1) % ))) % * % distribute(ndx2,sigmoid( % log( % sum(bsxfun(@minus,M*data_2,c).^2,1) % ))) % % % Inputs: % w: is vec([M,c]), where M is ddim-by-D and c is ddim-by-1 % Use w=[] to let output sld be an MV2DF function handle. % % data_1: D-by-T1 matrix % data_2: D-by-T2 matrix % ndx1,ndx2: indices of size 1 by T to distribute T1 and T2 segs over T % trials % % ddim: the first dimension of the M matrix % % Outputs: % sig: function handle (if w=[]), or numeric % params.get_w0(ssat): returns w0 for optimization initialization, % 0<ssat<1 is required average sigmoid output. % params.tail: is tail of parameter w, which is not consumed by this % function. if nargin==0 test_this(); return; end datadim = size(data1,1); assert(datadim==size(data2,1),'data1 and data2 must have same number of rows'); assert(length(ndx1)==length(ndx2)); assert(max(ndx1)<=size(data1,2)); assert(max(ndx2)<=size(data2,2)); wsz = ddim*(datadim+1); [whead,wtail] = splitvec_fh(wsz,w); params.get_w0 = @(ssat) init_w0(ssat); params.tail = wtail; sqd1 = square_distance_mv2df([],data1,ddim); %Don't put whead in here, sqd2 = square_distance_mv2df([],data2,ddim); %or here. Will cause whead to be called twice. sig1 = one_over_one_plus_w_mv2df(sqd1); sig2 = one_over_one_plus_w_mv2df(sqd2); % distribute over trials distrib = duplicator_fh(ndx1,size(data1,2)); sig1 = distrib(sig1); distrib = duplicator_fh(ndx2,size(data2,2)); sig2 = distrib(sig2); sigh = dottimes_of_functions([],sig1,sig2); sig = sigh(whead); function w0 = init_w0(ssat) W0 = randn(ddim,datadim+1); %subspace projector W0(:,end) = 0; % centroid from which distances are computed d0 = (1-ssat)/ssat; s = sigh(W0(:)); d = (1-s)./s; W0 = sqrt(d0/median(d))*W0; w0 = W0(:); end end function test_this() K = 5; N1 = 10; N2 = 2*N1; data1 = randn(K,N1); data2 = randn(K,N2); ndx1 = [1:N1,1:N1]; ndx2 = 1:N2; ddim = 3; ssat = 0.01; [sys,params] = prod_sigmoid_logdist([],data1,data2,ndx1,ndx2,ddim); w0 = params.get_w0(ssat); test_MV2DF(sys,w0); sig = prod_sigmoid_logdist(w0,data1,data2,ndx1,ndx2,ddim), end
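Written out as plain arithmetic, the two-sided quality is one 1./(1+d) factor per enrollment segment and one per test segment, each indexed out to trial level and multiplied. The sketch below is illustrative only; it assumes a single w = vec([M c]) shared by both sides, as in the construction above.

% Hypothetical numeric sketch of the two-sided quality product.
D = 5; ddim = 3; N1 = 4; N2 = 8; T = 8;
data1 = randn(D,N1); data2 = randn(D,N2);
ndx1 = randi(N1,1,T); ndx2 = randi(N2,1,T);
M = randn(ddim,D); c = randn(ddim,1);                    % shared projection and centroid
q1 = 1./(1+sum(bsxfun(@minus,M*data1,c).^2,1));          % per enrollment segment
q2 = 1./(1+sum(bsxfun(@minus,M*data2,c).^2,1));          % per test segment
sig = q1(ndx1).*q2(ndx2);                                % 1-by-T, one factor per side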
github
bsxfan/meta-embeddings-master
outerprod_of_sigmoids.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/quality_modules/outerprod_of_sigmoids.m
1,033
utf_8
2577209cada6747a9615d0ce3b375b7f
function [Q,params] = outerprod_of_sigmoids(w,qleft,qright) % if nargin==0 test_this(); return; end [qdim,nleft] = size(qleft); [qdim2,nright] = size(qright); assert(qdim==qdim2); wsz = qdim+1; [whead,wtail] = splitvec_fh(wsz,w); params.get_w0 = @(ssat) init_w0(ssat); params.tail = wtail; % fleft = sigmoid_mv2df(fusion_mv2df([],qleft)); %Don't put whead in here, % fright = sigmoid_mv2df(fusion_mv2df([],qright)); %or here. Will cause whead to be called twice. fleft = fusion_mv2df([],qleft); %Don't put whead in here, fright = fusion_mv2df([],qright); %or here. Will cause whead to be called twice. Q = outerprod_of_functions(whead,fleft,fright,nleft,nright); function w0 = init_w0(ssat) w0 = zeros(wsz,1); offs = logit(ssat); w0(end) = offs; end end function test_this() qleft = randn(3,10); qright = randn(3,20); ssat = 0.99; [sys,params] = outerprod_of_sigmoids([],qleft,qright); w0 = params.get_w0(ssat); test_MV2DF(sys,w0); sig = outerprod_of_sigmoids(w0,qleft,qright), end
github
bsxfan/meta-embeddings-master
parallel_cal.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/quality_modules/parallel_cal.m
983
utf_8
252822b934d5469fc96448f80d2f3e90
function [calscores,w0] = parallel_cal(w,scores,wfuse) % if nargin==0 test_this(); return; end if ~exist('scores','var') || isempty(scores) calscores = sprintf(['parallel calibration:',repmat(' %g',1,length(w))],w); return; end [m,n] = size(scores); if nargout>1, w0 = init_w0(wfuse); end calscores = linTrans(w,@(w)map_this(w),@(w)transmap_this(w)); function w0 = init_w0(wfuse) assert(length(wfuse)-1==m); scal = wfuse(1:end-1); offs = wfuse(end); W = [scal*(m+1);((m+1)/m)*offs*ones(m,1)]; w0 = W(:); end function y = map_this(w) w = reshape(w,m,2); y = bsxfun(@times,scores,w(:,1)); y = bsxfun(@plus,y,w(:,2)); end function w = transmap_this(y) y = reshape(y,m,n); w = [sum(y.*scores,2),sum(y,2)]; end end function test_this() scores = randn(4,10); [sys,w0] = parallel_cal([],scores,(1:5)'); test_MV2DF(sys,w0); end
github
bsxfan/meta-embeddings-master
parallel_cal_augm.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/quality_modules/parallel_cal_augm.m
1,115
utf_8
f5a8bba6d164ab5577c8429ce5835305
function [calscores,params] = parallel_cal_augm(w,scores) % if nargin==0 test_this(); return; end if ~exist('scores','var') || isempty(scores) calscores = sprintf(['parallel calibration:',repmat(' %g',1,length(w))],w); return; end [m,n] = size(scores); scores = [scores;zeros(1,n)]; wsz = 2*m; [whead,wtail] = splitvec_fh(wsz,w); params.get_w0 = @(wfuse) init_w0(wfuse); params.tail = wtail; waugm = augmentmatrix_fh(m,0,whead); calscores = linTrans(waugm,@(w)map_this(w),@(w)transmap_this(w)); function w0 = init_w0(wfuse) scal = wfuse(1:end-1); offs = wfuse(end); W = [scal*(m+1);((m+1)/m)*offs*ones(m,1)]; w0 = W(:); end function y = map_this(w) w = reshape(w,m+1,2); y = bsxfun(@times,scores,w(:,1)); y = bsxfun(@plus,y,w(:,2)); end function w = transmap_this(y) y = reshape(y,m+1,n); w = [sum(y.*scores,2),sum(y,2)]; end end function test_this() scores = randn(4,10); [sys,params] = parallel_cal_augm([],scores); w0 = params.get_w0(); test_MV2DF(sys,w0); end
github
bsxfan/meta-embeddings-master
prod_of_prmtrzd_sigmoids.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/quality_modules/prod_of_prmtrzd_sigmoids.m
1,537
utf_8
6c18de0879f128ada38d483ace60b57f
function [ps,params] = prod_of_prmtrzd_sigmoids(w,input_data) % % Algorithm: ps = prod_i sigmoid( alpha_i*input_data(i,:) + beta_i) % % % Inputs: % w: is vec([alpha; beta]), where alpha and beta are 1-by-D. % Use w=[] to let output ps be an MV2DF function handle. % If w is a function handle to an MV2DF then ps is the function handle % to the composition of w and this function. % % input_data: D-by-T matrix % % % Outputs: % ps: function handle (if w=[], or w is handle), or numeric T-by-1 % params.get_w0(ssat): returns w0 for optimization initialization, % 0<ssat<1 is required average sigmoid output. % params.tail: is tail of parameter w, which is not consumed by this % function. if nargin==0 test_this(); return; end m = size(input_data,1); prms = cell(1,m); [ps,prms{1}] = prmtrzd_sigmoid([],input_data(1,:)); for i=2:m [ps2,prms{i}] = prmtrzd_sigmoid(prms{i-1}.tail,input_data(i,:)); ps = dottimes_of_functions([],ps,ps2); end if ~isempty(w) ps = ps(w); end params.get_w0 = @(ssat) init_w0(ssat,m); params.tail = prms{m}.tail; function w0 = init_w0(ssat,m) ssat = ssat^(1/m); w0 = zeros(m*2,1); at = 1; for j=1:m w0(at:at+1) = prms{j}.get_w0(ssat); at = at + 2; end end end function test_this() K = 3; T = 10; data = randn(K,T); ssat = 0.99; [sys,params] = prod_of_prmtrzd_sigmoids([],data); w0 = params.get_w0(ssat); test_MV2DF(sys,w0); ps = prod_of_prmtrzd_sigmoids(w0,data), end
github
bsxfan/meta-embeddings-master
prmtrzd_sigmoid.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/quality_modules/prmtrzd_sigmoid.m
1,312
utf_8
616d28e7f84f188fcd7759c98a9c3c66
function [ps,params] = prmtrzd_sigmoid(w,input_data) % % Algorithm: ps = sigmoid( w0+w1'*input_data ), where % w = [w1;w0]; w0 is scalar; and w1 is vector % % % Inputs: % w = [w1;w0]; w0 is scalar; and w1 is vector. % Use w=[] to let output ps be an MV2DF function handle. % If w is a function handle to an MV2DF then ps is the function handle % to the composition of w and this function. % % input_data: D-by-T matrix % % % Outputs: % ps: function handle (if w=[], or w is handle), or numeric T-by-1 % params.get_w0(ssat): returns w0 for optimization initialization, % 0<ssat<1 is required average sigmoid output. % params.tail: is tail of parameter w, which is not consumed by this % function. if nargin==0 test_this(); return; end m = size(input_data,1); wsz = m+1; [whead,wtail] = splitvec_fh(wsz,w); params.get_w0 = @(ssat) init_w0(ssat); params.tail = wtail; y = fusion_mv2df(whead,input_data); ps = sigmoid_mv2df(y); function w0 = init_w0(ssat) w0 = zeros(wsz,1); offs = logit(ssat); w0(end) = offs; end end function test_this() D = 3; K = 5; data = randn(D,K); ssat = 0.99; [sys,params] = prmtrzd_sigmoid([],data); w0 = params.get_w0(ssat); test_MV2DF(sys,w0); ps = prmtrzd_sigmoid(w0,data), end
github
bsxfan/meta-embeddings-master
augmentmatrix_fh.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/mv2df_function_library/augmentmatrix_fh.m
826
utf_8
d2182cb06b78c3d43519f09b297ddad2
function fh = augmentmatrix_fh(m,value,w) % This is almost an MV2DF, but it does not return derivatives on numeric % input, w. % % Algorithm: y = [reshape(w,m,n);value*ones(1,n)](:) if nargin==0 test_this(); return; end function y = map_this(w) n = length(w)/m; y = [reshape(w,m,n);value*ones(1,n)]; end function y = linmap_this(w) n = length(w)/m; y = [reshape(w,m,n);zeros(1,n)]; end function w = transmap_this(y) y = reshape(y,m+1,[]); w = y(1:m,:); end map = @(w) map_this(w); linmap = @(w) linmap_this(w); transmap = @(y) transmap_this(y); fh = affineTrans([],map,linmap,transmap); if exist('w','var') && ~isempty(w) fh = fh(w); end end function test_this() m = 3; f = augmentmatrix_fh(m,1); test_MV2DF(f,randn(m*4,1)); end
github
bsxfan/meta-embeddings-master
bsx_col_plus_row.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/mv2df_function_library/bsx_col_plus_row.m
912
utf_8
59d5f6f7ea9d75509fbc6bf64b63b465
function fh = bsx_col_plus_row(m,n,w) % This is almost an MV2DF, but it does not return derivatives on numeric % input, w. % % Algorithm: col = w(1:m) % row = w(m+1:end) % y = bsxfun(@plus,col(:),row(:)'), % if nargin==0 test_this(); return; end function y = map_this(w) if isempty(m), m = length(w)-n; end col = w(1:m); row = w(m+1:end); y = bsxfun(@plus,col(:),row(:).'); end function w = transmap_this(y,sz) if isempty(m), m = sz-n; end y = reshape(y,m,[]); w=zeros(sz,1); w(1:m) = sum(y,2); w(m+1:end) = sum(y,1).'; end map = @(w) map_this(w); transmap = @(y,sz) transmap_this(y,sz); fh = linTrans_adaptive([],map,transmap); if exist('w','var') && ~isempty(w) fh = fh(w); end end function test_this() m = 2; n = 3; w = randn(m+n,1); f = bsx_col_plus_row(m,n); test_MV2DF(f,w); end
github
bsxfan/meta-embeddings-master
duplicator_fh.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/mv2df_function_library/duplicator_fh.m
805
utf_8
890c37f077bde2305a0f1c545b71c36a
function f = duplicator_fh(duplication_indices,xdim,w) % % This factory creates a function handle to an MV2DF, which represents the % function: % % y = x(duplication_indices) % if nargin==0 test_this(); return; end map = @(x) x(duplication_indices); %xdim = max(duplication_indices); ydim = length(duplication_indices); c = zeros(1,ydim); r = zeros(1,ydim); at = 1; for i=1:xdim ci = find(duplication_indices==i); n = length(ci); r(at:at+n-1) = i; c(at:at+n-1) = ci; at = at + n; end reverse = sparse(r,c,1,xdim,ydim); transmap = @(y) reverse*y; f = @(w) linTrans(w,map,transmap); if exist('w','var') && ~isempty(w) f = f(w); end end function test_this() dup = [ 1 3 1 3]; x = [ 1 2 3 4]; f = duplicator_fh(dup,length(x)); y = f(x), test_MV2DF(f,x); end
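The only subtle point in this factory is the transpose map: where an element of x is duplicated into several positions of y, the sparse reverse matrix must accumulate the corresponding contributions. A small stand-alone check of the adjoint property <map(x),v> = <x,transmap(v)>, building the same reverse matrix directly:

% Adjoint check for the duplication map y = x(dup) and its transpose.
dup = [1 3 1 3 2];                                       % example duplication indices
xdim = 3; ydim = length(dup);
reverse = sparse(dup,1:ydim,1,xdim,ydim);                % reverse map, built in one call
x = randn(xdim,1); v = randn(ydim,1);
y = x(dup); y = y(:);                                    % forward map
xt = reverse*v;                                          % transpose map (accumulates duplicates)
assert(abs(y.'*v - x.'*xt) < 1e-12);                     % <map(x),v> == <x,transmap(v)>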
github
bsxfan/meta-embeddings-master
splitvec_fh.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/mv2df_function_library/splitvec_fh.m
1,343
utf_8
aff993bc1037dc1d6673762983fd5497
function [head,tail] = splitvec_fh(head_size,w) % % % If head_size <0 then tail_size = - head_size if nargin==0 test_this(); return; end tail_size = - head_size; function w = transmap_head(y,sz) w=zeros(sz,1); w(1:head_size)=y; end function w = transmap_tail(y,sz) w=zeros(sz,1); w(head_size+1:end)=y; end function w = transmap_head2(y,sz) w=zeros(sz,1); w(1:end-tail_size) = y; end function w = transmap_tail2(y,sz) w=zeros(sz,1); w(1+end-tail_size:end) = y; end if head_size>0 map_head = @(w) w(1:head_size); map_tail = @(w) w(head_size+1:end); head = linTrans_adaptive([],map_head,@(y,sz)transmap_head(y,sz)); tail = linTrans_adaptive([],map_tail,@(y,sz)transmap_tail(y,sz)); elseif head_size<0 map_head = @(w) w(1:end-tail_size); map_tail = @(w) w(1+end-tail_size:end); head = linTrans_adaptive([],map_head,@(y,sz)transmap_head2(y,sz)); tail = linTrans_adaptive([],map_tail,@(y,sz)transmap_tail2(y,sz)); else error('head size cannot be 0') end if exist('w','var') && ~isempty(w) head = head(w); tail = tail(w); end end function test_this() [head,tail] = splitvec_fh(2); fprintf('testing head:\n'); test_MV2DF(head,[1 2 3 4 5]); fprintf('\n\n\ntesting tail:\n'); test_MV2DF(tail,[1 2 3 4 5]); end
github
bsxfan/meta-embeddings-master
log_distance_mv2df.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/mv2df_function_library/log_distance_mv2df.m
1,968
utf_8
ab190182251a8ee9a8cce755c6615e99
function [y,deriv] = log_distance_mv2df(w,input_data,new_dim) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % % The function projects each column of input_data to a subspace and then % computes log distance from a centroid. The input_data is fixed, but % the projection and centroid parameters are variable. % % W = reshape(w); % y.' = log sum((W(:,1:end-1).'*input_data - W(:,end)).^2,1) % % W is the augmented matrix [M c] where M maps an input vector % to a lower dimensional space and c is the centroid in % the lower dimensional space. % % Parameters: % w: the vectorized version of the W matrix % input_data: is an K-by-T matrix of input vectors of length K, for % each of T trials. % new_dim: the dimension of vectors in the lower dimensional space. % if nargin==0 test_this(); return; end if isempty(w) [dim, num_trials] = size(input_data); map = @(w) map_this(w,input_data,dim,new_dim); transmap = @(w) transmap_this(w,input_data,num_trials,new_dim); delta = linTrans(w,map,transmap); y = logsumsquares_fh(new_dim,1,delta); return; end if isa(w,'function_handle') f = log_distance_mv2df([],input_data,new_dim); y = compose_mv(f,w,[]); return; end f = log_distance_mv2df([],input_data,new_dim); if nargout==1 y = f(w); else [y,deriv] = f(w); end function y = map_this(w,input_data,dim,new_dim) % V = [input_data; ones(1,num_trials)]; W = reshape(w,new_dim,dim+1); y = bsxfun(@minus,W(:,1:end-1)*input_data,W(:,end)); y = y(:); function dx = transmap_this(dy,input_data,num_trials,new_dim) dY = reshape(dy,new_dim,num_trials); % V = [input_data; ones(1,num_trials)]; % Vt = V.'; % dX = dY*Vt; dYt = dY.'; dYtSum = sum(dYt,1); dX = [input_data*dYt;-dYtSum].'; dx = dX(:); function test_this() K = 5; N = 10; P = 3; M = randn(P,N); c = randn(P,1); W = [M c]; w = W(:); input_data = randn(N,K); f = log_distance_mv2df([],input_data,P); test_MV2DF(f,w);
github
bsxfan/meta-embeddings-master
AWB_fh.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/mv2df_function_library/AWB_fh.m
675
utf_8
3ab5ec4ad82fe2f901f95abf30fb3193
function fh = AWB_fh(A,B,w) % This is almost an MV2DF, but it does not return derivatives on numeric % input, w. % % Algorithm: Y = A*reshape(w,..)*B if nargin==0 test_this(); return; end [m,n] = size(A); [r,s] = size(B); function y = map_this(w) w = reshape(w,n,r); y = A*w*B; end function w = transmap_this(y) y = reshape(y,m,s); w = A.'*y*B.'; end map = @(y) map_this(y); transmap = @(y) transmap_this(y); fh = linTrans([],map,transmap); if exist('w','var') && ~isempty(w) fh = fh(w); end end function test_this() A = randn(2,3); B = randn(4,5); f = AWB_fh(A,B); test_MV2DF(f,randn(3*4,1)); end
github
bsxfan/meta-embeddings-master
xoverxplusalpha.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/mv2df_function_library/xoverxplusalpha.m
792
utf_8
9fbd612d42a50cee70f2b05dce2bf16c
function [y,deriv] = xoverxplusalpha(w,x) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % % alpha --> x./(x+alpha) % if nargin==0 test_this(); return; end if isempty(w) y = @(w)xoverxplusalpha(w,x); return; end if isa(w,'function_handle') f = xoverxplusalpha([],x); y = compose_mv(f,w,[]); return; end x = x(:); assert(numel(w)==1); y = x./(x+w); deriv = @(Dy) deriv_this(Dy,x,w); end function [g,hess,linear] = deriv_this(Dy,x,w) g0 = -x./(x+w).^2; g = Dy.'*g0; linear = false; hess = @(Dw) hess_this(Dw,Dy,x,w,g0); end function [h,Jv] = hess_this(Dw,Dy,x,w,g0) h = 2*Dw * Dy.'*(x./(x+w).^3); if nargin>1 Jv = Dw*g0; end end function test_this() x = randn(1,100); w = randn(1); f = xoverxplusalpha([],x); test_MV2DF(f,w); end
github
bsxfan/meta-embeddings-master
tril_to_symm_fh.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/mv2df_function_library/tril_to_symm_fh.m
786
utf_8
9ea53a1f6c15720e67c1c446d7dfad43
function fh = tril_to_symm_fh(m,w) % This is almost an MV2DF, but it does not return derivatives on numeric % input, w. % % Algorithm: w is vector of sizem*(m+1)/2 % w -> m-by-m lower triangular matrix Y % Y -> Y + Y' if nargin==0 test_this(); return; end indx = tril(true(m)); function y = map_this(w) y = zeros(m); y(indx(:)) = w; y = y + y.'; end function w = transmap_this(y) y = reshape(y,m,m); y = y + y.'; w = y(indx(:)); end map = @(w) map_this(w); transmap = @(y) transmap_this(y); fh = linTrans([],map,transmap); if exist('w','var') && ~isempty(w) fh = fh(w); end end function test_this() m=3; n = m*(m+1)/2; f = tril_to_symm_fh(m); test_MV2DF(f,randn(n,1)); end
github
bsxfan/meta-embeddings-master
square_distance_mv2df.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/mv2df_function_library/square_distance_mv2df.m
1,835
utf_8
a5d544c6956f70a3c3afdec634a2c891
function [y,deriv] = square_distance_mv2df(w,input_data,new_dim) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % % The function computes the squared distance from the centroid for each trial. % y.' = sum((W(:,1:end-1).'*input_data - W(:,end)).^2,1) % % W is the augmented matrix [M c] where M maps a score vector % to a lower dimensional space and c is the centroid in % the lower dimensional space. % % Parameters: % w: the vectorized version of the W matrix % input_data: is an M-by-T matrix of input vectors of length M, for each of T % trials. % new_dim: the dimension of vectors in the lower dimensional space. % if nargin==0 test_this(); return; end if isempty(w) [dim, num_trials] = size(input_data); map = @(w) map_this(w,input_data,dim,new_dim); transmap = @(w) transmap_this(w,input_data,num_trials,new_dim); delta = linTrans(w,map,transmap); y = sums_of_squares(delta,new_dim); return; end if isa(w,'function_handle') f = square_distance_mv2df([],input_data,new_dim); y = compose_mv(f,w,[]); return; end f = square_distance_mv2df([],input_data,new_dim); if nargout==1 y = f(w); else [y,deriv] = f(w); end function y = map_this(w,input_data,dim,new_dim) % V = [input_data; ones(1,num_trials)]; W = reshape(w,new_dim,dim+1); y = bsxfun(@minus,W(:,1:end-1)*input_data,W(:,end)); y = y(:); function dx = transmap_this(dy,input_data,num_trials,new_dim) dY = reshape(dy,new_dim,num_trials); % V = [input_data; ones(1,num_trials)]; % Vt = V.'; % dX = dY*Vt; dYt = dY.'; dYtSum = sum(dYt,1); dX = [input_data*dYt;-dYtSum].'; dx = dX(:); function test_this() K = 5; N = 10; P = 3; M = randn(P,N); c = randn(P,1); W = [M c]; w = W(:); input_data = randn(N,K); f = square_distance_mv2df([],input_data,P); test_MV2DF(f,w);
github
bsxfan/meta-embeddings-master
addtotranspose_fh.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/mv2df_function_library/addtotranspose_fh.m
493
utf_8
c009e482a302e2825fb3f59940bcc79e
function fh = addtotranspose_fh(m,w) % This is almost an MV2DF, but it does not return derivatives on numeric % input, w. if nargin==0 test_this(); return; end function y = map_this(w) w = reshape(w,m,m); y = w+w.'; end map = @(y) map_this(y); transmap = @(y) map_this(y); fh = linTrans([],map,transmap); if exist('w','var') && ~isempty(w) fh = fh(w); end end function test_this() m=3; f = addtotranspose_fh(3); test_MV2DF(f,randn(m*m,1)); end
github
bsxfan/meta-embeddings-master
subvec_fh.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/mv2df_function_library/subvec_fh.m
544
utf_8
a8942d310965ca178a123eb3f4a78f21
function fh = subvec_fh(first,len,w) % This is almost an MV2DF, but it does not return derivatives on numeric % input, w. if nargin==0 test_this(); return; end map = @(w) w(first:first+len-1); function w = transmap_this(y,sz) w=zeros(sz,1); w(first:first+len-1)=y; end transmap = @(y,sz) transmap_this(y,sz); fh = linTrans_adaptive([],map,transmap); if exist('w','var') && ~isempty(w) fh = fh(w); end end function test_this() first = 2; len = 3; f = subvec_fh(first,len); test_MV2DF(f,randn(5,1)); end
github
bsxfan/meta-embeddings-master
linTrans_adaptive.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/mv2df_function_library/templates/linTrans_adaptive.m
1,173
utf_8
66276c8cd337da71a4e14efc67112765
function [y,deriv] = linTrans_adaptive(w,map,transmap) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % % Applies linear transform y = map(w). It needs the transpose of map, % transmap for computing the gradient. map and transmap are function % handles. if nargin==0 test_this(); return; end if isempty(w) y = @(w)linTrans_adaptive(w,map,transmap); return; end if isa(w,'function_handle') outer = linTrans_adaptive([],map,transmap); y = compose_mv(outer,w,[]); return; end y = map(w); y = y(:); deriv = @(g2) deriv_this(g2,map,transmap,numel(w)); end function [g,hess,linear] = deriv_this(g2,map,transmap,wlen) g = transmap(g2,wlen); g = g(:); %linear = false; % use this to test linearity of map, if in doubt linear = true; hess = @(d) hess_this(map,d); end function [h,Jd] = hess_this(map,d) h = []; if nargout>1 Jd = map(d); Jd = Jd(:); end end function test_this() first = 2; len = 3; map = @(w) w(first:first+len-1); function w = transmap_test(y,sz) w=zeros(sz,1); w(first:first+len-1)=y; end transmap = @(y,sz) transmap_test(y,sz); f = linTrans_adaptive([],map,transmap); test_MV2DF(f,randn(5,1)); end
github
bsxfan/meta-embeddings-master
logsumexp_fh.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/vector/logsumexp_fh.m
1,287
utf_8
764511ba624a62ac12e572a26a5e7aa2
function f = logsumexp_fh(m,direction,w) % This is a factory for a function handle to an MV2DF, which represents % the vectorization of the logsumexp function. The whole mapping works like % this, in MATLAB-style pseudocode: % % F: R^(m*n) --> R^n, where y = F(x) is computed thus: % % n = length(x)/m % If direction=1, X = reshape(x,m,n), or % if direction=2, X = reshape(x,n,m). % y = log(sum(exp(X),direction)) % % Inputs: % m: the number of inputs to each individual logsumexp calculation. % direction: 1 sums down columns, or 2 sums across rows. % w: optional; if supplied, f is immediately applied to (or composed with) w. % % Outputs: % f: a function handle to the MV2DF described above. % % see: MV2DF_API_DEFINITION.readme if nargin==0 test_this(); return; end f = vectorized_function([],@(X)F0(X,direction),m,direction); if exist('w','var') && ~isempty(w) f = f(w); end end function [y,f1] = F0(X,dr) M = max(X,[],dr); y = log(sum(exp(bsxfun(@minus,X,M)),dr))+M; f1 = @() F1(X,y,dr); end function [J,f2,linear] = F1(X,y,dr) linear = false; J = exp(bsxfun(@minus,X,y)); f2 = @(dX) F2(dX,J,dr); end function H = F2(dX,J,dr) H = J.*bsxfun(@minus,dX,sum(dX.*J,dr)); end function test_this() m = 4;n = 10; f = logsumexp_fh(m,1); X = randn(n,m); test_MV2DF(f,X(:)); end
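F0 above is the usual max-shifted, numerically stable log-sum-exp, and the Jacobian rows produced by F1 are softmax weights. A minimal stand-alone sketch of both facts for direction = 1 (local code only, no toolkit calls):

% Stand-alone sketch of the stable log-sum-exp and its softmax Jacobian.
m = 4; n = 10;
X = randn(m,n);
Mx = max(X,[],1);
y = log(sum(exp(bsxfun(@minus,X,Mx)),1)) + Mx;           % 1-by-n, mirrors F0
J = exp(bsxfun(@minus,X,y));                             % mirrors F1: softmax weights
assert(max(abs(sum(J,1)-1)) < 1e-12);                    % each column of J sums to 1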
github
bsxfan/meta-embeddings-master
one_over_one_plus_w_mv2df.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/vector/one_over_one_plus_w_mv2df.m
717
utf_8
d735233c52193c323d03cdb85d0948f5
function [y,deriv] = one_over_one_plus_w_mv2df(w) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % y = 1 ./ (1 + w) if nargin==0 test_this(); return; end if isempty(w) y = @(w)one_over_one_plus_w_mv2df(w); return; end if isa(w,'function_handle') outer = one_over_one_plus_w_mv2df([]); y = compose_mv(outer,w,[]); return; end w = w(:); y = 1 ./ (1 + w); deriv = @(dy) deriv_this(dy,y); function [g,hess,linear] = deriv_this(dy,y) linear = false; g = -dy.*(y.^2); hess = @(d) hess_this(d,dy,y); function [h,Jv] = hess_this(d,dy,y) h = 2*dy.*d.*(y.^3); if nargout>1 Jv = -d.*(y.^2); end function test_this() f = one_over_one_plus_w_mv2df([]); test_MV2DF(f,randn(3,1));
github
bsxfan/meta-embeddings-master
sigmoid_mv2df.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/vector/sigmoid_mv2df.m
758
utf_8
e0591c88d68032fcf2a300fe7f2e8df0
function [y,deriv] = sigmoid_mv2df(w) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % y = sigmoid(w) = 1./(1+exp(-w)), vectorized as MATLAB usually does. if nargin==0 test_this(); return; end if isempty(w) y = @(w)sigmoid_mv2df(w); return; end if isa(w,'function_handle') outer = sigmoid_mv2df([]); y = compose_mv(outer,w,[]); return; end w = w(:); y = sigmoid(w); y1 = sigmoid(-w); deriv = @(dy) deriv_this(dy,y,y1); function [g,hess,linear] = deriv_this(dy,y,y1) linear = false; g = dy.*y.*y1; hess = @(d) hess_this(d,dy,y,y1); function [h,Jv] = hess_this(d,dy,y,y1) h = dy.*d.*(y.*y1.^2 - y.^2.*y1); if nargout>1 Jv = d.*y.*y1; end function test_this() f = sigmoid_mv2df([]); test_MV2DF(f,randn(3,1));
github
bsxfan/meta-embeddings-master
neglogsigmoid_fh.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/vector/neglogsigmoid_fh.m
1,075
utf_8
dc180d133fc039197aa99a5e4186c6a7
function f = neglogsigmoid_fh(w) % This is a factory for a function handle to an MV2DF, which represents % the vectorization of the negative logsigmoid function. The mapping is, in % MATLAB-style code: % % y = -log(sigmoid(w)) = -log(1./(1+exp(-w))) = log(1+exp(-w)) % % Inputs: % w: optional; if supplied, f is immediately applied to (or composed with) w. % % Outputs: % f: a function handle to the MV2DF described above. % % see: MV2DF_API_DEFINITION.readme if nargin==0 test_this(); return; end f = vectorized_function([],@(x)F0(x)); if exist('w','var') && ~isempty(w) f = f(w); end end function [y,f1] = F0(x) logp1 = -neglogsigmoid(x); logp2 = -neglogsigmoid(-x); y = -logp1; f1 = @() F1(logp1,logp2); end function [J,f2,linear] = F1(logp1,logp2) linear = false; J = -exp(logp2); f2 = @(dx) F2(dx,logp1,logp2); end function h = F2(dx,logp1,logp2) h = dx.*exp(logp1+logp2); end function test_this() n = 10; f = neglogsigmoid_fh([]); x = randn(n,1); test_MV2DF(f,x); end
github
bsxfan/meta-embeddings-master
logsumsquares_fh.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/vector/logsumsquares_fh.m
1,275
utf_8
c1e543f6680e7257b1f55ff61d967598
function f = logsumsquares_fh(m,direction,w) % This is a factory for a function handle to an MV2DF, which represents % the vectorization of the logsumsquares function. The whole mapping works like % this, in MATLAB-style pseudocode: % % F: R^(m*n) --> R^n, where y = F(x) is computed thus: % % n = length(x)/m % If direction=1, X = reshape(x,m,n), or % if direction=2, X = reshape(x,n,m). % y = log(sum(X.^2,direction)) % % Inputs: % m: the number of inputs to each individual logsumsquares calculation. % direction: 1 sums down columns, or 2 sums across rows. % % % Outputs: % f: a function handle to the MV2DF described above. % % see: MV2DF_API_DEFINITION.readme if nargin==0 test_this(); return; end f = vectorized_function([],@(X)F0(X,direction),m,direction); if exist('w','var') && ~isempty(w) f = f(w); end end function [y,f1] = F0(X,dr) ssq = sum(X.^2,dr); y = log(ssq); f1 = @() F1(X,ssq,dr); end function [J,f2,linear] = F1(X,s,dr) linear = false; J = bsxfun(@times,X,2./s); f2 = @(dX) F2(dX,X,s,dr); end function H = F2(dX,X,s,dr) H = bsxfun(@times,dX,2./s) - bsxfun(@times,X,4*sum(X.*dX,dr)./(s.^2)); end function test_this() m = 4;n = 10; f = logsumsquares_fh(m,1); X = randn(n,m); test_MV2DF(f,X(:)); end
github
bsxfan/meta-embeddings-master
expneg_mv2df.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/vector/expneg_mv2df.m
675
utf_8
f12485f16e7f66d9deb530df461bdcdc
function [y,deriv] = expneg_mv2df(w) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % y = exp(-w), vectorized as MATLAB usually does. if nargin==0 test_this(); return; end if isempty(w) y = @(w)expneg_mv2df(w); return; end if isa(w,'function_handle') outer = expneg_mv2df([]); y = compose_mv(outer,w,[]); return; end w = w(:); y = exp(-w); deriv = @(dy) deriv_this(dy,y); function [g,hess,linear] = deriv_this(dy,y) linear = false; g = -dy.*y; hess = @(d) hess_this(d,dy,y); function [h,Jv] = hess_this(d,dy,y) h = dy.*y.*d; if nargout>1 Jv = -d.*y; end function test_this() f = expneg_mv2df([]); test_MV2DF(f,randn(3,1));
github
bsxfan/meta-embeddings-master
square_mv2df.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/vector/square_mv2df.m
634
utf_8
f7604570a85ea6be67d98ae414127642
function [y,deriv] = square_mv2df(w) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % y = w.^2 if nargin==0 test_this(); return; end if isempty(w) y = @(w)square_mv2df(w); return; end if isa(w,'function_handle') outer = square_mv2df([]); y = compose_mv(outer,w,[]); return; end w = w(:); y = w.^2; deriv = @(dy) deriv_this(dy,w); function [g,hess,linear] = deriv_this(dy,w) linear = false; g = 2*dy.*w; hess = @(d) hess_this(d,dy,w); function [h,Jv] = hess_this(d,dy,w) h = 2*dy.*d; if nargout>1 Jv = 2*w.*d; end function test_this() f = square_mv2df([]); test_MV2DF(f,randn(3,1));
github
bsxfan/meta-embeddings-master
logsigmoid_fh.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/vector/logsigmoid_fh.m
1,068
utf_8
65bf6e2f03af50449d9492d02f7e3c98
function f = logsigmoid_fh(w) % This is a factory for a function handle to an MV2DF, which represents % the vectorization of the logsigmoid function. The mapping is, in % MATLAB-style code: % % y = log(sigmoid(w)) = log(1./(1+exp(-w))) = -log(1+exp(-w)) % % Inputs: % w: optional; if supplied, f is immediately applied to (or composed with) w. % % Outputs: % f: a function handle to the MV2DF described above. % % see: MV2DF_API_DEFINITION.readme if nargin==0 test_this(); return; end f = vectorized_function([],@(x)F0(x)); if exist('w','var') && ~isempty(w) f = f(w); end end function [y,f1] = F0(x) logp1 = -neglogsigmoid(x); logp2 = -neglogsigmoid(-x); y = logp1; f1 = @() F1(logp1,logp2); end function [J,f2,linear] = F1(logp1,logp2) linear = false; J = exp(logp2); f2 = @(dx) F2(dx,logp1,logp2); end function h = F2(dx,logp1,logp2) h = -dx.*exp(logp1+logp2); end function test_this() n = 10; f = logsigmoid_fh([]); x = randn(n,1); test_MV2DF(f,x); end
github
bsxfan/meta-embeddings-master
exp_mv2df.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/vector/exp_mv2df.m
659
utf_8
410b48565ed23cbda996866e44dfb2fa
function [y,deriv] = exp_mv2df(w) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % y = exp(w), vectorized as MATLAB usually does. if nargin==0 test_this(); return; end if isempty(w) y = @(w)exp_mv2df(w); return; end if isa(w,'function_handle') outer = exp_mv2df([]); y = compose_mv(outer,w,[]); return; end w = w(:); y = exp(w); deriv = @(dy) deriv_this(dy,y); function [g,hess,linear] = deriv_this(dy,y) linear = false; g = dy.*y; hess = @(d) hess_this(d,dy,y); function [h,Jv] = hess_this(d,dy,y) h = dy.*y.*d; if nargout>1 Jv = d.*y; end function test_this() f = exp_mv2df([]); test_MV2DF(f,randn(3,1));
github
bsxfan/meta-embeddings-master
vectorized_function.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/vector/templates/vectorized_function.m
4,600
utf_8
9c5431b821aa6587c3849945d31dd1fd
function [y,deriv] = vectorized_function(w,f,m,direction) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % % This template vectorizes the given function F: R^m -> R as follows: % k = length(w)/m; % If direction=1, X = reshape(w,m,k), y(j) = F(X(:,j)), or % if direction=2, X = reshape(w,k,m), y(i) = F(X(i,:)), % so that length(y) = k. % % Input parameters: % w: As with every MV2DF, w can be [], a vector, or a function handle to % another MV2DF. % f: is a function handle to an m-file that represents the function % F: R^m -> R, as well as its first and second derivatives. % % m: The input dimension to F. % (optional, default m = 1) % % direction: is used as explained above to determine whether columns, % or rows of X are processed by F. % (optional, default direction = 2) % % Function f works as follows: % (Note that f, f1 and f2 have to know the required direction, it is % not passed to them.) % [y,f1] = f(X), where X and y are as defined above. % % Function f1 works as follows: % [J,f2] = f1(), where size(J) = size(X). % Column/row i of J is the gradient of y(i) w.r.t. % column/row i of W. % f2 is a function handle to 2nd order derivatives. % If 2nd order derivatives are 0, then f2 should be []. % % Function f2 works as follows: % H = f2(dX), where size(dX) = size(X). % If direction=1, H(:,j) = H_i * dX(:,j), or % if direction=2, H(i,:) = dX(i,:)* H_i, where % H_i is Hessian of y(i), w.r.t. colum/row i of X. % % if nargin==0 test_this(); return; end if ~exist('m','var') m = 1; direction = 2; end if isempty(w) y = @(w)vectorized_function(w,f,m,direction); return; end if isa(w,'function_handle') outer = vectorized_function([],f,m,direction); y = compose_mv(outer,w,[]); return; end if direction==1 W = reshape(w,m,[]); elseif direction==2 W = reshape(w,[],m); else error('illegal direction %i',direction); end if nargout==1 y = f(W); else [y,f1] = f(W); deriv = @(dy) deriv_this(dy,f1,direction); end y = y(:); end function [g,hess,linear] = deriv_this(dy,f1,direction) if direction==1 dy = dy(:).'; else dy = dy(:); end if nargout==1 J = f1(); g = reshape(bsxfun(@times,J,dy),[],1); else [J,f2] = f1(); linear = isempty(f2); g = reshape(bsxfun(@times,J,dy),[],1); hess = @(d) hess_this(d,f2,J,dy,direction); end end function [h,Jv] = hess_this(dx,f2,J,dy,direction) dX = reshape(dx,size(J)); if isempty(f2) h = []; else h = reshape(bsxfun(@times,dy,f2(dX)),[],1); end if nargout>1 Jv = sum(dX.*J,direction); Jv = Jv(:); end end %%%%%%%%%%%%%%%%%%%% Example function: z = x^2 + y^3 %%%%%%%%%%%%%%%%%%%% % example function: z = x^2 + y^3 function [z,f1] = x2y3(X,direction) if direction==1 x = X(1,:); y = X(2,:); else x = X(:,1); y = X(:,2); end z = x.^2+y.^3; f1 = @() f1_x2y3(x,y,direction); end % example function 1st derivative: z = x^2 + y^2 function [J,f2] = f1_x2y3(x,y,direction) if direction==1 J = [2*x;3*y.^2]; else J = [2*x,3*y.^2]; end f2 = @(dxy) f2_x2y3(dxy,y,direction); end % example function 2nd derivative: z = x^2 + y^2 function H = f2_x2y3(dxy,y,direction) if direction==1 H = dxy.*[2*ones(size(y));6*y]; else H = dxy.*[2*ones(size(y)),6*y]; end end %%%%%%%%%%%%%%%%%%%% Example function: z = x*y^2 %%%%%%%%%%%%%%%%%%%% % example function: z = x*y^2 function [z,f1] = xy2(X,direction) if direction==1 x = X(1,:); y = X(2,:); else x = X(:,1); y = X(:,2); end y2 = y.^2; z = x.*+y2; f1 = @() f1_xy2(x,y,y2,direction); end % example function 1st derivative: z = x*y^2 function [J,f2] = f1_xy2(x,y,y2,direction) if direction==1 J = [y2;2*x.*y]; else J = [y2,2*x.*y]; end f2 = @(dxy) 
f2_xy2(dxy,x,y,direction); end % example function 2nd derivative: z = x*y^2 function H = f2_xy2(dxy,x,y,direction) if direction==1 dx = dxy(1,:); dy = dxy(2,:); H = [2*y.*dy;2*y.*dx+2*x.*dy]; else dx = dxy(:,1); dy = dxy(:,2); H = [2*y.*dy,2*y.*dx+2*x.*dy]; end end function test_this() k = 5; m = 2; dr = 1; fprintf('Testing x^2+y^2 in direction %i:\n\n',dr); f = vectorized_function([],@(X)x2y3(X,dr),2,dr); test_MV2DF(f,randn(k*m,1)); dr = 2; fprintf('\n\n\n\nTesting x*y^2 in direction %i:\n\n',dr); f = vectorized_function([],@(X)xy2(X,dr),2,dr); test_MV2DF(f,randn(k*m,1)); end
github
bsxfan/meta-embeddings-master
logdet_chol.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/scalar/logdet_chol.m
1,185
utf_8
706e5c1e5b5b660da50408bd221522a0
function [y,deriv] = logdet_chol(w) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % y = log(det(W)), where W is positive definite and W = reshape(w,...) if nargin==0 test_this(); return; end if isempty(w) y = @(w)logdet_chol(w); return; end if isa(w,'function_handle') outer = logdet_chol([]); y = compose_mv(outer,w,[]); return; end dim = sqrt(length(w)); W = reshape(w,dim,dim); if nargout>1 %[inv_map,bi_inv_map,logdet,iW] = invchol2(W); [inv_map,bi_inv_map,logdet,iW] = invchol_or_lu(W); y = logdet; deriv = @(dy) deriv_this(dy,bi_inv_map,iW); else %[inv_map,bi_inv_map,logdet] = invchol2(W); [inv_map,bi_inv_map,logdet] = invchol_or_lu(W); y = logdet; end function [g,hess,linear] = deriv_this(dy,bi_inv_map,iW) G = iW.'; grad = G(:); g = dy*grad; linear = false; hess = @(d) hess_this(grad,bi_inv_map,dy,d); function [h,Jd] = hess_this(grad,bi_inv_map,dy,d) dim = sqrt(length(d)); D = reshape(d,dim,dim); H = - dy*bi_inv_map(D).'; h = H(:); if nargout>1 Jd = grad.'*d(:); end function test_this() m = 3; n = 10; w = []; A = UtU(w,n,m); f = logdet_chol(A); w = randn(m*n,1); test_MV2DF(f,w,true);
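The value being differentiated here is simply log(det(W)) for a positive definite W; the toolkit obtains it through invchol_or_lu, but a Cholesky factor gives the same number and makes the gradient identity G = inv(W).' easy to check. A minimal sketch under the assumption that W is positive definite:

% log(det(W)) via Cholesky for a positive definite W.
n = 10; m = 3;
A = randn(n,m); W = A.'*A;                               % positive definite (n > m)
R = chol(W);                                             % W = R'*R
logdetW = 2*sum(log(diag(R)));
assert(abs(logdetW - log(det(W))) < 1e-10);
G = inv(W).';                                            % gradient of log(det(W)) w.r.t. W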
github
bsxfan/meta-embeddings-master
sumsquares_penalty.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/scalar/sumsquares_penalty.m
916
utf_8
8f40fb9f94c7424808c89e165ec9960c
function [y,deriv] = sumsquares_penalty(w,lambda) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % See code for details. if nargin==0 test_this(); return; end if isempty(w) y = @(w)sumsquares_penalty(w,lambda); return; end if isa(w,'function_handle') outer = sumsquares_penalty([],lambda); y = compose_mv(outer,w,[]); return; end w = w(:); if isscalar(lambda) lambda = lambda*ones(size(w)); else lambda = lambda(:); end y = 0.5*w.'*(lambda.*w); deriv = @(dy) deriv_this(dy,lambda,lambda.*w); function [g,hess,linear] = deriv_this(dy,lambda,lambda_w) linear = false; g = dy*lambda_w; hess = @(d) hess_this(d,dy,lambda,lambda_w); function [h,Jv] = hess_this(d,dy,lambda,lambda_w) h = dy*lambda.*d; if nargout>1 Jv = d(:).'*lambda_w; end function test_this() lambda = randn(10,1); f = sumsquares_penalty([],lambda); test_MV2DF(f,randn(size(lambda)));
github
bsxfan/meta-embeddings-master
wmlr_obj.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/scalar/wmlr_obj.m
2,299
utf_8
f450d5fdd89f4854a123b7d7947d32c3
function [y,deriv] = wmlr_obj(w,X,T,weights,logprior); % This is a SCAL2DF. See SCAL2DF_API_DEFINITION.readme. % Weighted multiclass linear logistic regression objective function. % w is vectorized D-by-K parameter matrix W (to be optimized) % X is D-by-N data matrix, for N trials % T is K-by-N, 0/1 class label matrix, with exactly one 1 per column. % weights is N-vector of objective function weights, one per trial. % logprior is logarithm of prior, % % The K-by-N log-likelihood matrix is % bsxfun(@plus,W'*X,logprior(:)); if nargin==0 test_this(); return; end if isempty(w) y = @(w)wmlr_obj(w,X,T,weights,logprior); return; end if isa(w,'function_handle') outer = wmlr_obj([],X,T,weights,logprior); y = compose_mv(outer,w,[]); return; end w = w(:); [K,N] = size(T); [dim,N2] = size(X); if N ~=N2 error('sizes of X and T incompatible'); end W = reshape(w,dim,K); % dim*K % make W double so that it works if X is sparse scores = double(W.')*X; % K*N scores = bsxfun(@plus,scores,logprior(:)); lsm = logsoftmax(scores); % K*N y = -sum(lsm.*T)*weights(:); deriv = @(dy) deriv_this(dy,lsm,X,T,weights); function [g,hess,linear] = deriv_this(dy,lsm,X,T,weights) sigma = exp(lsm); %posterior % K*N g0 = gradient(sigma,X,T,weights); g = g0*dy; hess = @(d) hess_this(d,dy,g0,sigma,X,weights); linear = false; function g = gradient(sigma,X,T,weights) E = sigma-T; %K*N G = X*double(bsxfun(@times,weights(:),E.')); %dim*K g = G(:); function [h,Jv] = hess_this(d,dy,g,sigma,X,weights) K = size(sigma,1); dim = length(d)/K; D = reshape(d,dim,K); P = double(D.')*X; % K*N sigmaP = sigma.*P; ssP = sum(sigmaP,1); % 1*N sssP = bsxfun(@times,sigma,ssP); %K*N h = X*double(bsxfun(@times,weights(:),(sigmaP-sssP).')); % dim*K h = dy*h(:); if nargout>1 Jv = d(:).'*g; end if nargin==0 test_this(); return; end function test_this() K = 3; N = 100; dim = 2; randn('state',0); means = randn(dim,K)*10; %signal X0 = randn(dim,K*N); % noise classf = zeros(1,K*N); ii = 1:N; T = zeros(K,N*K); for k=1:K X0(:,ii) = bsxfun(@plus,means(:,k),X0(:,ii)); classf(ii) = k; T(k,ii) = 1; ii = ii+N; end N = K*N; X = [X0;ones(1,N)]; weights = rand(1,N); obj = wmlr_obj([],X,T,weights,2); test_MV2DF(obj,randn((dim+1)*K,1));
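The objective is a weighted multiclass cross-entropy on prior-shifted log-likelihoods. The following sketch evaluates the same quantity with a local, max-shifted log-softmax in place of the toolkit's logsoftmax; the sizes and names are illustrative only.

% Hedged sketch of the weighted multiclass cross-entropy objective.
K = 3; N = 20; dim = 4;
X = [randn(dim,N); ones(1,N)];                           % augmented data, as in test_this
labels = randi(K,1,N);
T = full(sparse(labels,1:N,1,K,N));                      % 0/1 label matrix
weights = rand(1,N); logprior = log(ones(K,1)/K);
W = randn(dim+1,K);                                      % parameter matrix
S = bsxfun(@plus,W.'*X,logprior(:));                     % K-by-N scores plus log prior
lsm = bsxfun(@minus,S,max(S,[],1));
lsm = bsxfun(@minus,lsm,log(sum(exp(lsm),1)));           % stable log-softmax
y = -sum(lsm.*T,1)*weights(:);                           % weighted cross-entropy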
github
bsxfan/meta-embeddings-master
boost_obj.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/scalar/boost_obj.m
1,556
utf_8
eaa722fa0cd7b1b492401c4e6adf807b
function [y,deriv] = boost_obj(w,T,weights,logit_prior) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % % Weighted binary classifier cross-entropy objective, based on 'boosting' % proper scoring rule. This rule places more emphasis on extreme scores, % than the logariothmic scoring rule. % % Differentiable inputs: % w: is vector of N detection scores (in log-likelihood-ratio format) % % Fixed parameters: % T: is vector of N labels: 1 for target and -1 for non-target. % weights: is N-vector of objective function weights, one per trial. % logit_prior: is logit(prior), this controls the region of interest if nargin==0 test_this(); return; end if isempty(w) y = @(w)boost_obj(w,T,weights,logit_prior); return; end if isa(w,'function_handle') outer = boost_obj([],T,weights,logit_prior); y = compose_mv(outer,w,[]); return; end w = w(:); scores = w.'; arg = bsxfun(@plus,scores,logit_prior).*T; wobj = exp(-arg/2).*weights; % 1*N y = sum(wobj); if nargout>1 deriv = @(dy) deriv_this(dy,wobj(:),T); end function [g,hess,linear] = deriv_this(dy,wobj,T) g0 = -0.5*wobj.*T(:); g = dy*g0; linear = false; hess = @(d) hessianprod(d,dy,g0,wobj); function [h,Jv] = hessianprod(d,dy,g0,wobj) h = dy*(0.25*wobj(:).*d(:)); if nargout>1 Jv = d.'*g0; end function test_this() N = 30; T = [ones(1,N/3),-ones(1,N/3),zeros(1,N/3)]; scores = randn(1,N); weights = [rand(1,2*N/3),zeros(1,N/3)]; f = @(w) brier_obj(w,T,weights,-2.23); f = @(w) boost_obj(w,T,weights,-2.23); test_MV2DF(f,scores(:));
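The boosting rule reduces to a weighted sum of exp(-(score+logit_prior).*T/2), and its gradient with respect to the scores is -0.5*wobj.*T, as in deriv_this. A minimal numeric sketch with a one-sided finite-difference check of that gradient (sizes and step chosen arbitrarily):

% Hedged sketch of the boosting-rule objective and its gradient.
N = 9;
T = [ones(1,3), -ones(1,3), zeros(1,3)];                 % zero labels paired with zero weights
scores = randn(1,N); weights = [rand(1,6), zeros(1,3)];
logit_prior = -2.23;
arg = (scores + logit_prior).*T;
wobj = exp(-arg/2).*weights;
y = sum(wobj);                                           % objective value
g = -0.5*wobj.*T;                                        % gradient w.r.t. scores
e = 1e-6; s2 = scores; s2(1) = s2(1) + e;                % finite-difference check of g(1)
y2 = sum(exp(-((s2 + logit_prior).*T)/2).*weights);
assert(abs((y2-y)/e - g(1)) < 1e-4);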
github
bsxfan/meta-embeddings-master
neg_gaussll_taylor.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/scalar/neg_gaussll_taylor.m
1,332
utf_8
4efafbe09f47ca5947e223ca80f063c6
function [y,deriv] = neg_gaussll_taylor(w,x) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % This function represents the negative of the part of log N(x|0,W) that is dependent on % W = reshape(w,...), where w is variable and x is given. % % y = 0.5*x'*inv(W)*x + 0.5*log(det(W)), where W is positive definite and W = reshape(w,...) if nargin==0 test_this(); return; end if isempty(w) y = @(w)neg_gaussll_taylor(w,x); return; end if isa(w,'function_handle') outer = neg_gaussll_taylor([],x); y = compose_mv(outer,w,[]); return; end dim = length(x); W = reshape(w,dim,dim); [inv_map,logdet] = invchol_taylor(W); z = inv_map(x); y = 0.5*x'*z + 0.5*logdet; deriv = @(dy) deriv_this(dy,z,inv_map); end function [g,hess,linear] = deriv_this(dy,z,inv_map) G1 = z*z.'; G2 = inv_map(eye(length(z))); grad = 0.5*(G2(:)-G1(:)); g = dy*grad; linear = false; hess = @(d) hess_this(grad,z,inv_map,dy,d); end function [h,Jd] = hess_this(grad,z,inv_map,dy,d) dim = sqrt(length(d)); D = reshape(d,dim,dim); H1 = inv_map(D*z)*z' + z*inv_map(D'*z)'; H2 = inv_map(inv_map(D)'); h = 0.5*dy*(H1(:)-H2(:)); if nargout>1 Jd = grad.'*d(:); end end function test_this() m = 3; n = 10; w = []; A = UtU(w,n,m); %A is m-by-m x = randn(m,1); f = neg_gaussll_taylor(A,x); w = randn(m*n,1); test_MV2DF(f,w,true); end
github
bsxfan/meta-embeddings-master
brier_obj.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/scalar/brier_obj.m
1,722
utf_8
f68fae1776aa1a970b6f329e4c0d1027
function [y,deriv] = brier_obj(w,T,weights,logit_prior) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % % Weighted binary classifier cross-entropy objective, based on 'Brier' % quadratic proper scoring rule. This rule places less emphasis on extreme scores, % than the logariothmic scoring rule. % % Differentiable inputs: % w: is vector of N detection scores (in log-likelihood-ratio format) % % Fixed parameters: % T: is vector of N labels: 1 for target and -1 for non-target. % weights: is N-vector of objective function weights, one per trial. % logit_prior: is logit(prior), this controls the region of interest if nargin==0 test_this(); return; end if isempty(w) y = @(w)brier_obj(w,T,weights,logit_prior); return; end if isa(w,'function_handle') outer = brier_obj([],T,weights,logit_prior); y = compose_mv(outer,w,[]); return; end w = w(:); scores = w.'; arg = bsxfun(@plus,scores,logit_prior).*T; logp2 = -neglogsigmoid(-arg); wobj = 0.5*exp(2*logp2).*weights; % 1*N y = sum(wobj); if nargout>1 logp1 = -neglogsigmoid(arg); deriv = @(dy) deriv_this(dy,weights(:),T(:),logp1(:),logp2(:)); end function [g,hess,linear] = deriv_this(dy,weights,T,logp1,logp2) g0 = -exp(logp1+2*logp2).*weights.*T; g = dy*g0; linear = false; hess = @(d) hessianprod(d,dy,g0,weights,logp1,logp2); function [h,Jv] = hessianprod(d,dy,g0,weights,logp1,logp2) ddx = -exp(logp1+2*logp2); h = dy*(ddx.*(1-3*exp(logp1))).*weights.*d(:); if nargout>1 Jv = d.'*g0; end function test_this() N = 30; T = [ones(1,N/3),-ones(1,N/3),zeros(1,N/3)]; scores = randn(1,N); weights = [rand(1,2*N/3),zeros(1,N/3)]; f = @(w) brier_obj(w,T,weights,-2.23); test_MV2DF(f,scores(:));
github
bsxfan/meta-embeddings-master
gauss_ll.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/scalar/gauss_ll.m
1,461
utf_8
76707fbe20f1dae43305e2542e9644ce
function [y,deriv] = gauss_ll(w,x)
% This is an MV2DF. See MV2DF_API_DEFINITION.readme.
% This function represents the part of log N(x|0,W) that is dependent on
% W = reshape(w,...), where w is variable and x is given.
%
% y = -0.5*x'*inv(W)*x - 0.5*log(det(W)), where W is positive definite and W = reshape(w,...)

if nargin==0
test_this();
return;
end

if isempty(w)
y = @(w)gauss_ll(w,x);
return;
end

if isa(w,'function_handle')
outer = gauss_ll([],x);
y = compose_mv(outer,w,[]);
return;
end

dim = length(x);
W = reshape(w,dim,dim);
if nargout>1
[inv_map,bi_inv_map,logdet,iW] = invchol_or_lu(W);
z = inv_map(x);
y = -0.5*x'*z - 0.5*logdet;
deriv = @(dy) deriv_this(dy,z,inv_map,bi_inv_map,iW);
else
[inv_map,bi_inv_map,logdet] = invchol_or_lu(W);
z = inv_map(x);
y = -0.5*x'*z - 0.5*logdet;
end

function [g,hess,linear] = deriv_this(dy,z,inv_map,bi_inv_map,iW)
G1 = z*z.';
G2 = iW.';
grad = 0.5*(G1(:)-G2(:));
g = dy*grad;
linear = false;
hess = @(d) hess_this(grad,z,inv_map,bi_inv_map,dy,d);

function [h,Jd] = hess_this(grad,z,inv_map,bi_inv_map,dy,d)
dim = sqrt(length(d));
D = reshape(d,dim,dim);
H1 = inv_map(D*z)*z.' + z*inv_map(D.'*z).';
H2 = bi_inv_map(D).';
h = -0.5*dy*(H1(:)-H2(:));
if nargout>1
Jd = grad.'*d(:);
end

function test_this()
m = 3;
n = 10;
w = [];
A = UtU(w,n,m); %A is m-by-m
x = randn(m,1);
f = gauss_ll(A,x);
w = randn(m*n,1);
test_MV2DF(f,w,true);
github
bsxfan/meta-embeddings-master
cllr_obj.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/scalar/cllr_obj.m
1,611
utf_8
374952d66aa4641a000a48cc12baebad
function [y,deriv] = cllr_obj(w,T,weights,logit_prior)
% This is an MV2DF. See MV2DF_API_DEFINITION.readme.
%
% Weighted binary classifier cross-entropy objective, based on logarithmic
% cost function.
%
% Differentiable inputs:
%   w: is vector of N detection scores (in log-likelihood-ratio format)
%
% Fixed parameters:
%   T: is vector of N labels: 1 for target and -1 for non-target.
%   weights: is N-vector of objective function weights, one per trial.
%   logit_prior: is logit(prior), this controls the region of interest

if nargin==0
test_this();
return;
end

if isempty(w)
y = @(w)cllr_obj(w,T,weights,logit_prior);
return;
end

if isa(w,'function_handle')
outer = cllr_obj([],T,weights,logit_prior);
y = compose_mv(outer,w,[]);
return;
end

w = w(:);
scores = w.';
arg = bsxfun(@plus,scores,logit_prior).*T;
neglogp1 = neglogsigmoid(arg); % 1*N, p1 = p(tar)
y = neglogp1*weights(:);

if nargout>1
neglogp2 = neglogsigmoid(-arg); % 1*N, p2 = 1-p1 = p(non)
deriv = @(dy) deriv_this(dy,-neglogp1(:),-neglogp2(:),T(:),weights(:));
end

function [g,hess,linear] = deriv_this(dy,logp1,logp2,T,weights)
g0 = -exp(logp2).*weights.*T;
g = dy*g0;
linear = false;
hess = @(d) hessianprod(d,dy,g0,logp1,logp2,weights);

function [h,Jv] = hessianprod(d,dy,g0,logp1,logp2,weights)
h = dy*(exp(logp1+logp2).*weights(:).*d(:));
if nargout>1
Jv = d.'*g0;
end

function test_this()
N = 30;
T = [ones(1,N/3),-ones(1,N/3),zeros(1,N/3)];
W = randn(1,N);
weights = [rand(1,2*N/3),zeros(1,N/3)];
f = @(w) cllr_obj(w,T,weights,-2.23);
test_MV2DF(f,W(:));
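% ---------------------------------------------------------------------
% Editor's sketch (not part of the original file): a minimal usage check
% of the objective above. It relies only on functions already used in
% this file (neglogsigmoid, test_MV2DF); the variable names are
% illustrative.
function demo_cllr_obj_sketch()
N = 4;
T = [1,1,-1,-1]; % two target and two non-target trials
trial_weights = ones(1,N)/N;
prior_logit = 0; % logit(0.5)
f = @(w) cllr_obj(w,T,trial_weights,prior_logit);
w0 = [2;-1;-3;0.5]; % example llr scores
% the objective is the weighted sum of -log sigmoid((score+prior_logit).*T)
direct = neglogsigmoid((w0'+prior_logit).*T)*trial_weights(:);
assert(abs(f(w0)-direct)<1e-10);
test_MV2DF(f,w0); % derivative self-test, as in test_this above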
github
bsxfan/meta-embeddings-master
mce_obj.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/scalar/mce_obj.m
1,711
utf_8
93cfa59b8a57d279ebbdb02376bd696c
function [y,deriv] = mce_obj(w,T,weights,logprior)
% This is an MV2DF. See MV2DF_API_DEFINITION.readme.
%
% Weighted multiclass cross-entropy objective.
%   w is vectorized K-by-N score matrix W (to be optimized)
%   T is K-by-N, 0/1 class label matrix, with exactly one 1 per column.
%   weights is N-vector of objective function weights, one per trial.
%   logprior is logarithm of prior.

if nargin==0
test_this();
return;
end

if isempty(w)
y = @(w)mce_obj(w,T,weights,logprior);
return;
end

if isa(w,'function_handle')
outer = mce_obj([],T,weights,logprior);
y = compose_mv(outer,w,[]);
return;
end

w = w(:);
[K,N] = size(T);
scores = reshape(w,K,N);
scores = bsxfun(@plus,scores,logprior(:));
lsm = logsoftmax(scores); % K*N
y = -sum(lsm.*T)*weights(:);
deriv = @(dy) deriv_this(dy,lsm,T,weights);

function [g,hess,linear] = deriv_this(dy,lsm,T,weights)
sigma = exp(lsm); %posterior % K*N
g0 = gradient(sigma,T,weights);
g = dy*g0;
linear = false;
hess = @(d) hessianprod(d,dy,g0,sigma,weights);

function g = gradient(sigma,T,weights)
E = sigma-T; %K*N
G = bsxfun(@times,E,weights(:).'); %dim*K
g = G(:);

function [h,Jv] = hessianprod(d,dy,g0,sigma,weights)
K = size(sigma,1);
dim = length(d)/K;
P = reshape(d,K,dim);
sigmaP = sigma.*P;
ssP = sum(sigmaP,1); % 1*N
sssP = bsxfun(@times,sigma,ssP); %K*N
h = bsxfun(@times,(sigmaP-sssP),weights(:).'); % dim*K
h = dy*h(:);
if nargout>1
Jv = d.'*g0;
end

function test_this()
K = 3;
N = 30;
%T = [repmat([1;0;0],1,10),repmat([0;1;0],1,10),repmat([0;0;1],1,10)];
T = rand(K,N);
T = bsxfun(@times,T,1./sum(T,1));
W = randn(K,N);
weights = rand(1,N);
f = @(w) mce_obj(w,T,weights,-1);
test_MV2DF(f,W(:));
github
bsxfan/meta-embeddings-master
sum_ai_f_of_w_i.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/scalar/templates/sum_ai_f_of_w_i.m
1,367
utf_8
af9137c86c4b6c7456dbd1688c9ba0bb
function [y,deriv] = sum_ai_f_of_w_i(w,a,f,b)
% This is an MV2DF. See MV2DF_API_DEFINITION.readme.
%
% Does y = sum_i a_i f(w_i) + b, where f is non-linear.
%
% Notes:
%
% f is a function handle, with behaviour as demonstrated in the test code
% of this function.
%
% b is optional, defaults to 0 if omitted.

if nargin==0
test_this();
return;
end

if ~exist('b','var')
b = 0;
end

if isempty(w)
y = @(w)sum_ai_f_of_w_i(w,a,f,b);
return;
end

if isa(w,'function_handle')
outer = sum_ai_f_of_w_i([],a,f,b);
y = compose_mv(outer,w,[]);
return;
end

ntot = length(a);
nz = find(a~=0);
a = a(nz);

if nargout==1
y = f(w(nz));
else
[y,dfdw,f2] = f(w(nz));
deriv = @(Dy) deriv_this(Dy,dfdw.*a,f2,a,nz,ntot);
end
y = y(:);
y = a.'*y + b;

function [g,hess,linear] = deriv_this(Dy,g0,f2,a,nz,ntot)
g = zeros(ntot,1);
g(nz) = Dy*g0(:);
hess = @(d) hess_this(d,g0,f2,Dy,a,nz,ntot);
linear = false;

function [h,Jd] = hess_this(d,g0,f2,Dy,a,nz,ntot)
d = d(nz);
hnz = f2();
hnz = hnz(:).*d(:);
h = zeros(ntot,1);
h(nz) = Dy*(hnz.*a);
if nargout>1
Jd = g0.'*d(:);
end

function [y,ddx,f2] = test_f(x)
y = log(x);
if nargout>1
ddx = 1./x;
f2 = @() -1./(x.^2);
end

function test_this()
n = 10;
a = randn(n,1);
a = bsxfun(@max,a,0);
b = 5;
f = sum_ai_f_of_w_i([],a,@(x)test_f(x),b);
w = 1+rand(n,1);
test_MV2DF(f,w);
github
bsxfan/meta-embeddings-master
KtimesW.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/KtimesW.m
726
utf_8
d53d40345ce7668d43a1efa9eb621335
function [y,deriv] = KtimesW(w,K)
% This is an MV2DF. See MV2DF_API_DEFINITION.readme.
%
% w --> vec(K*W), where W = reshape(w,n,[]) and K is m-by-n.

if nargin==0
test_this();
return;
end

if isempty(w)
map = @(w) map_this(w,K);
transmap = @(y) transmap_this(y,K);
y = linTrans(w,map,transmap);
return;
end

if isa(w,'function_handle')
f = KtimesW([],K);
y = compose_mv(f,w,[]);
return;
end

f = KtimesW([],K);
if nargout==1
y = f(w);
else
[y,deriv] = f(w);
end

function y = map_this(w,K)
[m,n] = size(K);
y = K*reshape(w,n,[]);
y = y(:);

function w = transmap_this(y,K)
[m,n] = size(K);
w = K.'*reshape(y,m,[]);

function test_this()
m = 3;
n = 4;
K = randn(m,n);
r = 2;
W = randn(n,r);
f = KtimesW([],K);
test_MV2DF(f,W(:));
github
bsxfan/meta-embeddings-master
scaleRows.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/scaleRows.m
798
utf_8
c848225d200f35d733b8bb76c2495127
function [y,deriv] = scaleRows(w,scales)
% This is an MV2DF. See MV2DF_API_DEFINITION.readme.
%
% w --> bsxfun(@times,reshape(w,m,[]),scales(:))
%
% where m = length(scales);
%
% Note: this is a symmetric linear transform.

if nargin==0
test_this();
return;
end

if isempty(w)
map = @(w)map_this(w,scales);
y = linTrans(w,map,map);
return;
end

if isa(w,'function_handle')
f = scaleRows([],scales);
y = compose_mv(f,w,[]);
return;
end

f = scaleRows([],scales);
if nargout==1
y = f(w);
else
[y,deriv] = f(w);
end

function w = map_this(w,scales)
n = length(scales);
w = reshape(w,[],n);
w = bsxfun(@times,w,scales(:)');

function test_this()
K = 5;
N = 10;
M = randn(K,N);
scales = randn(1,N);
f = scaleRows([],scales);
test_MV2DF(f,M(:));
github
bsxfan/meta-embeddings-master
sumcolumns_fh.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/sumcolumns_fh.m
602
utf_8
7a2cd01c3b7076cda20fa6a96cae0069
function fh = sumcolumns_fh(m,w)
% This is almost an MV2DF, but it does not return derivatives on numeric
% input, w.
%
% w -> W = reshape(w,m,[]) -> sum(W,1)'

if nargin==0
test_this();
return;
end

map = @(w) map_this(w,m);
transmap = @(y) transmap_this(y,m);
fh = linTrans([],map,transmap);

if exist('w','var') && ~isempty(w)
fh = fh(w);
end

end

function w = transmap_this(y,m)
w = repmat(y(:).',m,1);
end

function s = map_this(w,m)
W = reshape(w,m,[]);
s = sum(W,1);
end

function test_this()
m = 3;
n = 4;
f = sumcolumns_fh(m);
W = randn(m,n);
test_MV2DF(f,W(:));
end
github
bsxfan/meta-embeddings-master
columnJofN_fh.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/columnJofN_fh.m
634
utf_8
23448c0cc436ac53b95d5e4ec48c7b35
function fh = columnJofN_fh(j,n,w)
% This is almost an MV2DF, but it does not return derivatives on numeric
% input, w.
%
% w -> W = reshape(w,[],n) -> W(:,j)

if nargin==0
test_this();
return;
end

map = @(w) map_this(w,j,n);
transmap = @(y) transmap_this(y,j,n);
fh = linTrans([],map,transmap);

if exist('w','var') && ~isempty(w)
fh = fh(w);
end

end

function w = transmap_this(y,j,n)
W = zeros(length(y),n);
W(:,j) = y;
w = W(:);
end

function col = map_this(w,j,n)
W = reshape(w,[],n);
col = W(:,j);
end

function test_this()
m = 3;
n = 4;
f = columnJofN_fh(2,4);
W = randn(m,n);
test_MV2DF(f,W(:));
end
github
bsxfan/meta-embeddings-master
scaleColumns.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/scaleColumns.m
811
utf_8
cacb0b80cb3f3871595674e741382d26
function [y,deriv] = scaleColumns(w,scales)
% This is an MV2DF. See MV2DF_API_DEFINITION.readme.
%
% w --> bsxfun(@times,reshape(w,[],n),scales(:)')
%
% where n = length(scales);
%
% Note: this is a symmetric linear transform.

if nargin==0
test_this();
return;
end

if isempty(w)
map = @(w)map_this(w,scales);
y = linTrans(w,map,map);
return;
end

if isa(w,'function_handle')
f = scaleColumns([],scales);
y = compose_mv(f,w,[]);
return;
end

f = scaleColumns([],scales);
if nargout==1
y = f(w);
else
[y,deriv] = f(w);
end

function w = map_this(w,scales)
n = length(scales);
w = reshape(w,[],n);
w = bsxfun(@times,w,scales(:)');

function test_this()
K = 5;
N = 10;
M = randn(K,N);
scales = randn(1,N);
f = scaleColumns([],scales);
test_MV2DF(f,M(:));
github
bsxfan/meta-embeddings-master
subvec.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/subvec.m
733
utf_8
ed189df10ecad63eca1130710c559631
function [y,deriv] = subvec(w,size,first,length)
% This is an MV2DF. See MV2DF_API_DEFINITION.readme.
%
% w --> w(first:first+length-1)

if nargin==0
test_this();
return;
end

last = first+length-1;

if isempty(w)
map = @(w) w(first:last);
transmap = @(w) transmap_this(w,size,first,last);
y = linTrans(w,map,transmap);
return;
end

if isa(w,'function_handle')
f = subvec([],size,first,length);
y = compose_mv(f,w,[]);
return;
end

f = subvec([],size,first,length);
if nargout==1
y = f(w);
else
[y,deriv] = f(w);
end

function g = transmap_this(w,size,first,last)
g = zeros(size,1);
g(first:last) = w;

function test_this()
f = subvec([],10,2,4);
test_MV2DF(f,randn(10,1));
github
bsxfan/meta-embeddings-master
identity_trans.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/identity_trans.m
495
utf_8
aec19df7ff1e1fa5079b22973d9122fc
function [y,deriv] = identity_trans(w)
% This is an MV2DF. See MV2DF_API_DEFINITION.readme.
%
% w --> w

if nargin==0
test_this();
return;
end

if isempty(w)
map = @(w) w;
y = linTrans(w,map,map);
return;
end

if isa(w,'function_handle')
f = identity_trans([]);
y = compose_mv(f,w,[]);
return;
end

f = identity_trans([]);
if nargout==1
y = f(w);
else
[y,deriv] = f(w);
end

function test_this()
f = identity_trans([]);
test_MV2DF(f,randn(5,1));
github
bsxfan/meta-embeddings-master
WtimesK.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/WtimesK.m
726
utf_8
20d6a4715d3fb8e2c51fc17f1a45e865
function [y,deriv] = WtimesK(w,K)
% This is an MV2DF. See MV2DF_API_DEFINITION.readme.
%
% w --> vec(W*K), where W = reshape(w,[],m) and K is m-by-n.

if nargin==0
test_this();
return;
end

if isempty(w)
map = @(w) map_this(w,K);
transmap = @(y) transmap_this(y,K);
y = linTrans(w,map,transmap);
return;
end

if isa(w,'function_handle')
f = WtimesK([],K);
y = compose_mv(f,w,[]);
return;
end

f = WtimesK([],K);
if nargout==1
y = f(w);
else
[y,deriv] = f(w);
end

function y = map_this(w,K)
[m,n] = size(K);
y = reshape(w,[],m)*K;
y = y(:);

function w = transmap_this(y,K)
[m,n] = size(K);
w = reshape(y,[],n)*K.';

function test_this()
m = 3;
n = 4;
K = randn(m,n);
r = 2;
W = randn(r,m);
f = WtimesK([],K);
test_MV2DF(f,W(:));
github
bsxfan/meta-embeddings-master
transpose_mv2df.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/transpose_mv2df.m
700
utf_8
58016f72134e4ccf6256f2ea1f952a43
function [y,deriv] = transpose_mv2df(w,M,N)
% This is an MV2DF. See MV2DF_API_DEFINITION.readme.
%
% vec(A) --> vec(A'),
%
% where A is M by N
%
% Note: this is an orthogonal linear transform.

if nargin==0
test_this();
return;
end

if isempty(w)
map = @(w) reshape(reshape(w,M,N).',[],1);
transmap = @(w) reshape(reshape(w,N,M).',[],1);
y = linTrans(w,map,transmap);
return;
end

if isa(w,'function_handle')
f = transpose_mv2df([],M,N);
y = compose_mv(f,w,[]);
return;
end

f = transpose_mv2df([],M,N);
if nargout==1
y = f(w);
else
[y,deriv] = f(w);
end

function test_this()
M = 4;
N = 5;
f = transpose_mv2df([],M,N);
test_MV2DF(f,randn(M*N,1));
github
bsxfan/meta-embeddings-master
fusion_mv2df.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/fusion_mv2df.m
1,182
utf_8
df0b186cde5dcc42aea6490f13d6d479
function [y,deriv] = fusion_mv2df(w,scores)
% This is an MV2DF. See MV2DF_API_DEFINITION.readme.
%
% The function is a 'score fusion' computed thus:
%   y.' = w(1:end-1).'*scores + w(end)
%
% Here w is the vector of fusion weights, one weight per system, plus
% an offset.
%
% Parameters:
%   scores: is an M-by-T matrix of scores from M systems, for each of T
%           trials.
%
% Note: even though the fusion is affine from input scores to output
% scores, this MV2DF is a linear transform from w to y.

if nargin==0
test_this();
return;
end

if isempty(w)
map = @(w) map_this(w,scores);
transmap = @(w) transmap_this(w,scores);
y = linTrans(w,map,transmap);
return;
end

if isa(w,'function_handle')
f = fusion_mv2df([],scores);
y = compose_mv(f,w,[]);
return;
end

f = fusion_mv2df([],scores);
if nargout==1
y = f(w);
else
[y,deriv] = f(w);
end

function y = map_this(w,scores)
y = w(1:end-1).'*scores + w(end);
y = y(:);

function y = transmap_this(x,scores)
y = [scores*x;sum(x)];

function test_this()
K = 5;
N = 10;
w = randn(N+1,1);
scores = randn(N,K);
f = fusion_mv2df([],scores);
test_MV2DF(f,w);
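% ---------------------------------------------------------------------
% Editor's sketch (not part of the original file): how this linear
% transform is typically composed with a scalar objective such as
% cllr_obj (defined elsewhere in this library) to obtain a trainable
% fusion objective. Sizes and names are illustrative.
function demo_fusion_objective_sketch()
M = 2; % number of systems
T = 20; % number of trials
scores = randn(M,T);
labels = [ones(1,T/2),-ones(1,T/2)]; % 1 = target, -1 = non-target
trial_weights = ones(1,T)/T;
fuser = fusion_mv2df([],scores); % MV2DF: w -> fused scores
obj = cllr_obj(fuser,labels,trial_weights,0); % composed MV2DF objective
w0 = [ones(M,1)/M;0]; % equal weights, zero offset
test_MV2DF(obj,w0); % derivative self-test of the composition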
github
bsxfan/meta-embeddings-master
addSigmaI.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/addSigmaI.m
771
utf_8
78b1a5b40a699c613a11ce64085abe6e
function [y,deriv] = addSigmaI(w)
% This is an MV2DF. See MV2DF_API_DEFINITION.readme.
%
% w = [vec(A);sigma] --> vec(A + sigma*I),
%
% where A is a square matrix and sigma is a scalar.

if nargin==0
test_this();
return;
end

if isempty(w)
map = @(w) map_this(w);
transmap = @(w) transmap_this(w);
y = linTrans(w,map,transmap);
return;
end

if isa(w,'function_handle')
f = addSigmaI([]);
y = compose_mv(f,w,[]);
return;
end

f = addSigmaI([]);
if nargout==1
y = f(w);
else
[y,deriv] = f(w);
end

function y = map_this(w)
w = w(:);
y = w(1:end-1);
sigma = w(end);
dim = sqrt(length(y));
ii = 1:dim+1:dim*dim;
y(ii) = w(ii)+sigma;

function w = transmap_this(y)
dim = sqrt(length(y));
ii = 1:dim+1:dim*dim;
w = [y;sum(y(ii))];

function test_this()
dim = 5;
f = addSigmaI([]);
test_MV2DF(f,randn(dim*dim+1,1));
github
bsxfan/meta-embeddings-master
addOffset.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/addOffset.m
1,057
utf_8
38390e8a3f92c5a6b760571e3ba340e3
function [y,deriv] = addOffset(w,K,N)
% This is an MV2DF. See MV2DF_API_DEFINITION.readme.
%
% w = [vec(A);b] --> vec(bsxfun(@plus,A,b))
%
% This function retrieves a K by N matrix as well as a K-vector from w,
% adds the K-vector to every column of the matrix
% and outputs the vectorized result.
% Note this is a linear transform.

if nargin==0
test_this();
return;
end

if isempty(w)
map = @(w) map_this(w,K,N);
transmap = @(w) transmap_this(w,K,N);
y = linTrans(w,map,transmap);
return;
end

if isa(w,'function_handle')
f = addOffset([],K,N);
y = compose_mv(f,w,[]);
return;
end

f = addOffset([],K,N);
if nargout==1
y = f(w);
else
[y,deriv] = f(w);
end

function y = map_this(w,K,N)
y = w(1:K*N);
y = reshape(y,K,N);
offs = w((K*N+1):end);
y = bsxfun(@plus,y,offs(:));
y = y(:);

function y = transmap_this(x,K,N)
M = reshape(x,K,N);
y = [x(1:K*N);sum(M,2)];

function test_this()
K = 5;
N = 10;
M = randn(K,N);
offs = randn(K,1);
w = [M(:);offs];
f = addOffset([],K,N);
test_MV2DF(f,w);
github
bsxfan/meta-embeddings-master
const_mv2df.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/templates/const_mv2df.m
856
utf_8
541e86c2041370727a8705935c4d575e
function [y,deriv] = const_mv2df(w,const)
% This is an MV2DF. See MV2DF_API_DEFINITION.readme.
%
% y = const(:);
%
% This wraps the given constant into an MV2DF. The output, y, is
% independent of input w. The derivatives are sparse zero vectors of the
% appropriate size.

if nargin==0
test_this();
return;
end

if isempty(w)
y = @(w)const_mv2df(w,const);
return;
end

if isa(w,'function_handle')
outer = const_mv2df([],const);
y = compose_mv(outer,w,[]);
return;
end

w = w(:);
y = const(:);
deriv = @(g2) deriv_this(length(w),length(y));

function [g,hess,linear] = deriv_this(wsz,ysz)
g = sparse(wsz,1);
linear = true;
hess = @(d) hess_this(ysz);

function [h,Jd] = hess_this(ysz)
h = [];
if nargout>1
Jd = sparse(ysz,1);
end

function test_this()
A = randn(4,5);
f = const_mv2df([],A);
test_MV2DF(f,randn(5,1));
github
bsxfan/meta-embeddings-master
linTrans.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/templates/linTrans.m
1,012
utf_8
5c26cd329441fa971c05127c464dfae5
function [y,deriv] = linTrans(w,map,transmap)
% This is an MV2DF. See MV2DF_API_DEFINITION.readme.
%
% Applies linear transform y = map(w). It needs the transpose of map,
% transmap, for computing the gradient. map and transmap are function
% handles.

if nargin==0
test_this();
return;
end

if isempty(w)
y = @(w)linTrans(w,map,transmap);
return;
end

if isa(w,'function_handle')
outer = linTrans([],map,transmap);
y = compose_mv(outer,w,[]);
return;
end

y = map(w);
y = y(:);
deriv = @(g2) deriv_this(g2,map,transmap);

function [g,hess,linear] = deriv_this(g2,map,transmap)
g = transmap(g2);
g = g(:);
%linear = false; % use this to test linearity of map, if in doubt
linear = true;
hess = @(d) hess_this(map,d);

function [h,Jd] = hess_this(map,d)
h = [];
if nargout>1
Jd = map(d);
Jd = Jd(:);
end

function test_this()
A = randn(4,5);
map = @(w) A*w;
transmap = @(y) (y.'*A).'; % faster than A'*y, if A is big
f = linTrans([],map,transmap);
test_MV2DF(f,randn(5,1));
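% ---------------------------------------------------------------------
% Editor's sketch (not part of the original file): a second usage
% example, wrapping a matrix-free linear operator (first differences)
% as an MV2DF. The transpose map is worked out by hand; names are
% illustrative.
function demo_linTrans_sketch()
n = 6;
map = @(w) diff(w(:)); % (n-1)-by-n difference operator, no matrix formed
transmap = @(y) [-y(1); -diff(y(:)); y(end)]; % its transpose
f = linTrans([],map,transmap);
test_MV2DF(f,randn(n,1));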
github
bsxfan/meta-embeddings-master
affineTrans.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/templates/affineTrans.m
1,378
utf_8
f1c4abd92c1dca63db5b0ccf3915a631
function [y,deriv] = affineTrans(w,affineMap,linMap,transMap)
% This is an MV2DF. See MV2DF_API_DEFINITION.readme.
%
% Applies affine transform y = affineMap(w). It also needs
% linMap, the linear part of the mapping, as well as transMap, the
% transpose of linMap. All of affineMap, linMap and transMap are function
% handles.
%
% Note, linMap(x) = J*x where J is the Jacobian of affineMap; and
% transMap(y) = J'y.

if nargin==0
test_this();
return;
end

if isempty(w)
y = @(w)affineTrans(w,affineMap,linMap,transMap);
return;
end

if isa(w,'function_handle')
outer = affineTrans([],affineMap,linMap,transMap);
y = compose_mv(outer,w,[]);
return;
end

y = affineMap(w);
y = y(:);
deriv = @(g2) deriv_this(g2,linMap,transMap);

function [g,hess,linear] = deriv_this(g2,linMap,transMap)
g = transMap(g2);
g = g(:);
%linear = false; % use this to test linearity of affineMap, if in doubt
linear = true;
hess = @(d) hess_this(linMap,d);

function [h,Jd] = hess_this(linMap,d)
%h = zeros(size(d)); % use this to test linearity of affineMap, if in doubt
h = [];
if nargout>1
Jd = linMap(d);
Jd = Jd(:);
end

function test_this()
A = randn(4,5);
k = randn(4,1);
affineMap = @(w) A*w+k;
linMap = @(w) A*w;
transMap = @(y) (y.'*A).'; % faster than A'*y, if A is big
f = affineTrans([],affineMap,linMap,transMap);
test_MV2DF(f,randn(5,1));
github
bsxfan/meta-embeddings-master
logsoftmax_trunc_mv2df.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/logsoftmax_trunc_mv2df.m
1,380
utf_8
7933852f24348cedbc4c8750142e51de
function [y,deriv] = logsoftmax_trunc_mv2df(w,m)
% This is a MV2DF. See MV2DF_API_DEFINITION.readme.
%
% Does:
% (i) Reshapes w to m-by-n.
% (ii) Effectively (not physically) appends a bottom row of zeros.
% (iii) Computes logsoftmax of each of n columns.
% (iv) Omits last row (effectively).

if nargin==0
test_this();
return;
end

if isempty(w)
y = @(w)logsoftmax_trunc_mv2df(w,m);
return;
end

if isa(w,'function_handle')
outer = logsoftmax_trunc_mv2df([],m);
y = compose_mv(outer,w,[]);
return;
end

w = reshape(w,m,[]);
y = logsoftmax_trunc(w);
if nargout>1
deriv = @(Dy) deriv_this(Dy,exp(y));
end
y = y(:);

function [g,hess,linear] = deriv_this(Dy,smax)
[m,n] = size(smax);
Dy = reshape(Dy,m,n);
sumDy = sum(Dy,1);
g = Dy - bsxfun(@times,smax,sumDy);
g = g(:);
linear = false;
hess = @(v) hess_this(v,sumDy,smax);

function [h,Jv] = hess_this(V,sumDy,smax)
[m,n] = size(smax);
V = reshape(V,m,n);
Vsmax = V.*smax;
sumVsmax = sum(Vsmax,1);
h = bsxfun(@times,smax,sumVsmax) - Vsmax;
h = bsxfun(@times,h,sumDy);
h = h(:);
if nargout>1
Jv = bsxfun(@minus,V,sumVsmax);
Jv = Jv(:);
end

function test_this()
m = 10;
n = 3;
%A = randn(m);
%map = @(x) reshape(A*reshape(x,m,[]),[],1);
%transmap = @(y) reshape(A'*reshape(y,m,[]),[],1);
%f = linTrans([],map,transmap);
f = logsoftmax_trunc_mv2df([],m);
test_MV2DF(f,randn(m*n,1));
github
bsxfan/meta-embeddings-master
mm_special.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/mm_special.m
1,465
utf_8
735b9c605bad33588197fcc0c0d59eb5
function [prod,deriv] = mm_special(w,extractA,extractB)
% This is an MV2DF. See MV2DF_API_DEFINITION.readme.
%
% [vec(A);vec(B)] --> vec(A*B)
%
% where
%   A is extractA(w)
%   B is extractB(w)

if nargin==0
test_this();
return;
end

if isempty(w)
prod = @(w)mm_special(w,extractA,extractB);
return;
end

if isa(w,'function_handle')
outer = mm_special([],extractA,extractB);
prod = compose_mv(outer,w,[]);
return;
end

w = w(:);
A = extractA(w);
[m,k] = size(A);
B = extractB(w);
[k2,n] = size(B);
assert(k==k2,'inner matrix dimensions must agree');
M = A*B;
prod = M(:);
deriv = @(g2) deriv_this(g2);

function [g,hess,linear] = deriv_this(g2)
g = vJ_this(g2,A,B);
linear = false;
hess = @(w) hess_this(g2,w);
end

function [h,Jv] = hess_this(g2,w)
h = vJ_this(g2,extractA(w),extractB(w));
if nargout>=2
Jv = Jv_this(w);
end
end

function prod = Jv_this(w)
Aw = extractA(w);
Bw = extractB(w);
M = Aw*B + A*Bw;
prod = M(:);
end

function w = vJ_this(prod,A,B)
M = reshape(prod,m,n);
Bp = A.'*M;
Ap = M*B.';
w = [Ap(:);Bp(:)];
end

end

function A = extractA_this(w,m,k)
A = w(1:m*k);
A = reshape(A,m,k);
end

function B = extractB_this(w,m,k,n)
B = w(m*k+(1:k*n));
B = reshape(B,k,n);
end

function test_this()
m = 4;
k = 5;
n = 6;
A = randn(m,k);
B = randn(k,n);
w = [A(:);B(:)];
extractA = @(w) extractA_this(w,m,k);
extractB = @(w) extractB_this(w,m,k,n);
f = mm_special([],extractA,extractB);
test_MV2DF(f,w);
end
github
bsxfan/meta-embeddings-master
sums_of_squares.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/sums_of_squares.m
898
utf_8
1fa8d45eea9355807d8ef47606407b36
function [y,deriv] = sums_of_squares(w,m)
% This is a MV2DF. See MV2DF_API_DEFINITION.readme.
% Does:
% (i) Reshapes w to m-by-n.
% (ii) Computes sum of squares of each of n columns.
% (iii) Transposes to output n-vector.

if nargin==0
test_this();
return;
end

if isempty(w)
y = @(w)sums_of_squares(w,m);
return;
end

if isa(w,'function_handle')
outer = sums_of_squares([],m);
y = compose_mv(outer,w,[]);
return;
end

M = reshape(w,m,[]);
y = sum(M.^2,1);
y = y(:);
deriv = @(g2) deriv_this(g2,M);

function [g,hess,linear] = deriv_this(g2,M)
g = 2*bsxfun(@times,M,g2.');
g = g(:);
linear = false;
hess = @(d) hess_this(d,g2,M);

function [h,Jv] = hess_this(d,g2,M)
h = deriv_this(g2,reshape(d,size(M)));
if nargout>1
Jv = 2*sum(reshape(d,size(M)).*M,1);
Jv = Jv(:);
end

function test_this()
f = sums_of_squares([],10);
test_MV2DF(f,randn(10*4,1));
github
bsxfan/meta-embeddings-master
gemm.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/gemm.m
1,283
utf_8
b9245303ab8248f450ad033cde69bf29
function [prod,deriv] = gemm(w,m,k,n)
% This is an MV2DF. See MV2DF_API_DEFINITION.readme.
%
% [vec(A);vec(B)] --> vec(A*B)
%
% where
%   A is m-by-k
%   B is k-by-n

if nargin==0
test_this();
return;
end

if isempty(w)
prod = @(w)gemm(w,m,k,n);
return;
end

if isa(w,'function_handle')
outer = gemm([],m,k,n);
prod = compose_mv(outer,w,[]);
return;
end

w = w(:);
A = extractA(w,m,k);
B = extractB(w,m,k,n);
M = A*B;
prod = M(:);
deriv = @(g2) deriv_this(g2,A,B,m,k,n);

function [g,hess,linear] = deriv_this(g2,A,B,m,k,n)
g = vJ_this(g2,A,B,m,n);
linear = false;
hess = @(w) hess_this(m,k,n,g2,A,B,w);

function [h,Jv] = hess_this(m,k,n,g2,A,B,w)
h = vJ_this(g2,...
extractA(w,m,k),...
extractB(w,m,k,n),...
m,n);
if nargout>=2
Jv = Jv_this(w,A,B,m,k,n);
end

function prod = Jv_this(w,A,B,m,k,n)
Aw = extractA(w,m,k);
Bw = extractB(w,m,k,n);
M = Aw*B + A*Bw;
prod = M(:);

function w = vJ_this(prod,A,B,m,n)
M = reshape(prod,m,n);
Bp = A.'*M;
Ap = M*B.';
w = [Ap(:);Bp(:)];

function A = extractA(w,m,k)
A = w(1:m*k);
A = reshape(A,m,k);

function B = extractB(w,m,k,n)
B = w(m*k+(1:k*n));
B = reshape(B,k,n);

function test_this()
A = randn(4,5);
B = randn(5,4);
w = [A(:);B(:)];
f = gemm([],4,5,4);
test_MV2DF(f,w);
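% ---------------------------------------------------------------------
% Editor's sketch (not part of the original file): gemm composed with
% subvec and stack (both defined elsewhere in this library), so that the
% two factors are sub-blocks of a larger parameter vector, following the
% pattern in the test code of solveChol_AXeqB. Sizes and names are
% illustrative.
function demo_gemm_composition_sketch()
m = 3; k = 4; n = 2;
extra = 5; % some unrelated trailing parameters
wsz = m*k + k*n + extra;
w = [];
A = subvec(w,wsz,1,m*k); % first factor, as MV2DF
B = subvec(w,wsz,m*k+1,k*n); % second factor, as MV2DF
f = gemm(stack(w,A,B),m,k,n); % vec(A*B) as a function of the full w
test_MV2DF(f,randn(wsz,1));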
github
bsxfan/meta-embeddings-master
XtKX.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/XtKX.m
849
utf_8
0298041dbd9ce1171c7cf66e0edb8a09
function [y,deriv] = XtKX(w,K)
% This is an MV2DF.
%
% vec(X) --> vec(X'KX)

if nargin==0
test_this();
return;
end

m = size(K,1);

if isempty(w)
y = @(w) XtKX(w,K);
return;
end

if isa(w,'function_handle')
outer = XtKX([],K);
y = compose_mv(outer,w,[]);
return;
end

X = reshape(w,m,[]);
n = size(X,2);
y = X.'*K*X;
y = y(:);
deriv = @(dy) deriv_this(dy,K,X,n);

function [g,hess,linear] = deriv_this(DY,K,X,n)
linear = false;
DY = reshape(DY,n,n).';
g = DY.'*X.'*K.' + DY*X.'*K;
g = g.';
g = g(:);
hess = @(dx) hess_this(dx,K,X,DY);

function [h,Jv] = hess_this(DX,K,X,DY)
m = size(K,1);
DX = reshape(DX,m,[]);
h = K*DX*DY + K.'*DX*DY.';
h = h(:);
if nargout<2
return;
end
Jv = DX.'*K*X + X.'*K*DX;
Jv = Jv(:);

function test_this()
K = randn(4);
X = randn(4,3);
f = XtKX([],K);
test_MV2DF(f,X(:));
github
bsxfan/meta-embeddings-master
UtU.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/UtU.m
945
utf_8
086256cb24f7c7b69d69614ceff1519b
function [prod,deriv] = UtU(w,m,n)
% This is a MV2DF. See MV2DF_API_DEFINITION.readme.
% U = reshape(w,m,n), M = U'*U, prod = M(:).

if nargin==0
test_this();
return;
end

if isempty(w)
prod = @(w)UtU(w,m,n);
return;
end

if isa(w,'function_handle')
outer = UtU([],m,n);
prod = compose_mv(outer,w,[]);
return;
end

w = w(:);
U = reshape(w,m,n);
M = U.'*U;
prod = M(:);
deriv = @(g2) deriv_this(g2,U,m,n);

function [g,hess,linear] = deriv_this(g2,U,m,n)
g = vJ_this(g2,U,n);
linear = false;
hess = @(w) hess_this(w,g2,U,m,n);

function [h,Jv] = hess_this(w,g2,U,m,n)
h = vJ_this(g2,reshape(w,m,n),n);
if nargout>=2
Jv = Jv_this(w,U,m,n);
end

function dy = Jv_this(dw,U,m,n)
dU = reshape(dw,m,n);
dM = U.'*dU;
dM = dM+dM.';
dy = dM(:);

function w = vJ_this(dy,U,n)
dY = reshape(dy,n,n);
dU = U*(dY+dY.');
w = dU(:);

function test_this()
m = 5;
n = 3;
f = UtU([],m,n);
U = randn(m,n);
test_MV2DF(f,U(:));
github
bsxfan/meta-embeddings-master
bsxtimes.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/bsxtimes.m
1,144
utf_8
599b65f85120f5dec9d1a62d06393c35
function [y,deriv] = bsxtimes(w,m,n)
% This is an MV2DF.
%
% w = [vec(A); vec(b)] --> vec(bsxfun(@times,A,b)),
%
% where A is an m-by-n matrix and
%       b is a 1-by-n row.

if nargin==0
test_this();
return;
end

if isempty(w)
y = @(w) bsxtimes(w,m,n);
return;
end

if isa(w,'function_handle')
f = bsxtimes([],m,n);
y = compose_mv(f,w,[]);
return;
end

[A,b] = extract(w,m,n);
y = bsxfun(@times,A,b);
y = y(:);
deriv = @(Dy) deriv_this(Dy,A,b);

function [g,hess,linear] = deriv_this(Dy,A,b)
g = gradient(Dy,A,b);
linear = false;
hess = @(v) hess_this(v,Dy,A,b);

function [h,Jv] = hess_this(v,Dy,A,b)
[m,n] = size(A);
[vA,vb] = extract(v,m,n);
h = gradient(Dy,vA,vb);
if nargout>1
Jv = bsxfun(@times,vA,b);
Jv = Jv + bsxfun(@times,A,vb);
Jv = Jv(:);
end

function [A,b] = extract(w,m,n)
A = reshape(w(1:m*n),m,n);
b = w(m*n+1:end).';

function g = gradient(Dy,A,b)
Dy = reshape(Dy,size(A));
gA = bsxfun(@times,Dy,b);
gb = sum(Dy.*A,1);
g = [gA(:);gb(:)];

function test_this()
m = 5;
n = 10;
A = randn(m,n);
b = randn(1,n);
w = [A(:);b(:)];
f = bsxtimes([],m,n);
test_MV2DF(f,w);
github
bsxfan/meta-embeddings-master
calibrateScores.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/calibrateScores.m
1,095
utf_8
36a554ff63a06324896dbea86ca33308
function [y,deriv] = calibrateScores(w,m,n)
% This is an MV2DF. See MV2DF_API_DEFINITION.readme.
%
% [vec(A);scal;offs] --> vec(bsxfun(@plus,scal*A,offs))
%
% This function retrieves from w:
% (i) an m-by-n matrix, 'scores'
% (ii) a scalar 'scal', and
% (iii) an m-vector, 'offs'
%
% Then it scales the scores and adds the offset vector to every column.

if nargin==0
test_this();
return;
end

if isempty(w)
scoreSz = m*n;
wSz = scoreSz+m+1;
at = 1;
scores = subvec(w,wSz,at,scoreSz);
at = at + scoreSz;
scal = subvec(w,wSz,at,1);
at = at + 1;
offs = subvec(w,wSz,at,m);
scores = gemm(stack(w,scores,scal),scoreSz,1,1);
scores = addOffset(stack(w,scores,offs),m,n);
y = scores;
return;
end

if isa(w,'function_handle')
f = calibrateScores([],m,n);
y = compose_mv(f,w,[]);
return;
end

f = calibrateScores([],m,n);
[y,deriv] = f(w);

function test_this()
m = 5;
n = 10;
scores = randn(m,n);
offs = randn(m,1);
scal = 3;
f = calibrateScores([],m,n);
test_MV2DF(f,[scores(:);scal;offs]);
github
bsxfan/meta-embeddings-master
solve_AXeqB.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/solve_AXeqB.m
1,054
utf_8
cff7830e92caa23fabdd038a4e53750d
function [y,deriv] = solve_AXeqB(w,m)
% This is an MV2DF.
%
% [A(:);B(:)] --> inv(A) * B

if nargin==0
test_this();
return;
end

if isempty(w)
y = @(w)solve_AXeqB(w,m);
return;
end

if isa(w,'function_handle')
outer = solve_AXeqB([],m);
y = compose_mv(outer,w,[]);
return;
end

[A,B,n] = extract(w,m);
y = A\B;
deriv = @(dy) deriv_this(dy,m,n,A,A.',y);
y = y(:);

function [g,hess,linear] = deriv_this(dy,m,n,A,At,X)
DXt = reshape(dy,m,n);
DBt = At\DXt;
DAt = -DBt*X.';
g = [DAt(:);DBt(:)];
linear = false;
hess = @(dw) hess_this(dw,m,A,At,X,DBt);

function [h,Jv] = hess_this(dw,m,A,At,X,DBt)
[dA,dB] = extract(dw,m);
D_DBt = -(At\dA.')*DBt;
DX = A\(dB-dA*X);
D_DAt = -(D_DBt*X.'+DBt*DX.');
h = [D_DAt(:);D_DBt(:)];
if nargout>1
Jv = A\(dB-dA*X);
Jv = Jv(:);
end

function [A,B,n] = extract(w,m)
mm = m^2;
A = w(1:mm);
A = reshape(A,m,m);
B = w(mm+1:end);
B = reshape(B,m,[]);
n = size(B,2);

function test_this()
A = randn(5);
B = randn(5,1);
f = solve_AXeqB([],5);
test_MV2DF(f,[A(:);B(:)]);
github
bsxfan/meta-embeddings-master
logsoftmax_mv2df.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/logsoftmax_mv2df.m
1,248
utf_8
1c29a9da21772e72c800bb7be4025fe6
function [y,deriv] = logsoftmax_mv2df(w,m)
% This is a MV2DF. See MV2DF_API_DEFINITION.readme.
%
% Does:
% (i) Reshapes w to m-by-n.
% (ii) Computes logsoftmax of each of n columns.

if nargin==0
test_this();
return;
end

if isempty(w)
y = @(w)logsoftmax_mv2df(w,m);
return;
end

if isa(w,'function_handle')
outer = logsoftmax_mv2df([],m);
y = compose_mv(outer,w,[]);
return;
end

w = reshape(w,m,[]);
y = logsoftmax(w);
if nargout>1
deriv = @(Dy) deriv_this(Dy,exp(y));
end
y = y(:);

function [g,hess,linear] = deriv_this(Dy,smax)
[m,n] = size(smax);
Dy = reshape(Dy,m,n);
sumDy = sum(Dy,1);
g = Dy - bsxfun(@times,smax,sumDy);
g = g(:);
linear = false;
hess = @(v) hess_this(v,sumDy,smax);

function [h,Jv] = hess_this(V,sumDy,smax)
[m,n] = size(smax);
V = reshape(V,m,n);
Vsmax = V.*smax;
sumVsmax = sum(Vsmax,1);
h = bsxfun(@times,smax,sumVsmax) - Vsmax;
h = bsxfun(@times,h,sumDy);
h = h(:);
if nargout>1
Jv = bsxfun(@minus,V,sumVsmax);
Jv = Jv(:);
end

function test_this()
m = 10;
n = 3;
%A = randn(m);
%map = @(x) reshape(A*reshape(x,m,[]),[],1);
%transmap = @(y) reshape(A'*reshape(y,m,[]),[],1);
%f = linTrans([],map,transmap);
f = logsoftmax_mv2df([],m);
test_MV2DF(f,randn(m*n,1));
github
bsxfan/meta-embeddings-master
sqdist.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/sqdist.m
1,170
utf_8
bfa9c639ce948848c20501fec93af59c
function [y,deriv] = sqdist(w,dim)
% This is an MV2DF. See MV2DF_API_DEFINITION.readme.
%
% If W = reshape(w,dim,n), then Y = vec of symmetric n-by-n matrix of
% 1/2 squared Euclidean distances between all columns of W.

if nargin==0
test_this();
return;
end

if isempty(w)
y = @(w)sqdist(w,dim);
return;
end

if isa(w,'function_handle')
outer = sqdist([],dim);
y = compose_mv(outer,w,[]);
return;
end

X = reshape(w,dim,[]);
N = size(X,2);
XX = 0.5*sum(X.^2,1);
y = bsxfun(@minus,XX.',X.'*X);
y = bsxfun(@plus,y,XX);
y = y(:);
deriv = @(dy) deriv_this(dy,X,N);

function [G,hess,linear] = deriv_this(DY,X,N)
DY = reshape(DY,N,N);
sumDY = sum(DY,1)+sum(DY,2).';
DYDY = DY+DY.';
G = bsxfun(@times,X,sumDY)-X*DYDY;
G = G(:);
linear = false;
hess = @(d) hess_this(d,DYDY,sumDY,X);

function [H,Jv] = hess_this(D,DYDY,sumDY,X)
D = reshape(D,size(X));
H = bsxfun(@times,D,sumDY)-D*DYDY;
H = H(:);
if nargout>=2
DtX = D.'*X;
xd = sum(X.*D,1);
Jv = bsxfun(@minus,xd,DtX + DtX.');
Jv = bsxfun(@plus,Jv,xd.');
Jv = Jv(:);
end

function test_this()
dim = 4;
X = randn(dim,5);
w = X(:);
f = sqdist([],dim);
test_MV2DF(f,w);
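% ---------------------------------------------------------------------
% Editor's sketch (not part of the original file): a quick numerical
% check of the forward map against an explicit double loop. Purely
% illustrative.
function demo_sqdist_check()
dim = 3;
N = 4;
X = randn(dim,N);
Y = reshape(sqdist(X(:),dim),N,N);
D = zeros(N);
for i=1:N
for j=1:N
D(i,j) = 0.5*sum((X(:,i)-X(:,j)).^2);
end
end
assert(max(abs(Y(:)-D(:)))<1e-10);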
github
bsxfan/meta-embeddings-master
dottimes.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/dottimes.m
884
utf_8
7e8e3dedc670c1f93364db61f3d2b41d
function [y,deriv] = dottimes(w)
% This is an MV2DF.
%
% [a; b] --> a.*b
%
% where length(a) == length(b)

if nargin==0
test_this();
return;
end

if isempty(w)
y = @(w) dottimes(w);
return;
end

if isa(w,'function_handle')
f = dottimes([]);
y = compose_mv(f,w,[]);
return;
end

w = w(:);
[a,b] = extract(w);
y = a.*b;
deriv = @(Dy) deriv_this(Dy,a,b);

function [g,hess,linear] = deriv_this(Dy,a,b)
g = gradient(Dy,a,b);
linear = false;
hess = @(v) hess_this(v,Dy,a,b);

function [h,Jv] = hess_this(v,Dy,a,b)
[va,vb] = extract(v);
h = gradient(Dy,va,vb);
if nargout>1
Jv = va.*b + a.*vb;
end

function [a,b] = extract(w)
h = length(w)/2;
a = w(1:h);
b = w(h+1:end);

function g = gradient(Dy,a,b)
g = [Dy.*b;Dy.*a];

function test_this()
n = 10;
a = randn(1,n);
b = randn(1,n);
w = [a(:);b(:)];
f = dottimes([]);
test_MV2DF(f,w);
github
bsxfan/meta-embeddings-master
solveChol_AXeqB.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/solveChol_AXeqB.m
1,391
utf_8
f2ded36f846a5e9904fd8299ba4a5ed1
function [y,deriv] = solveChol_AXeqB(w,m)
% This is an MV2DF.
%
% [A(:);B(:)] --> inv(A) * B
%
% We assume A is positive definite and we solve using Cholesky.

if nargin==0
test_this();
return;
end

if isempty(w)
y = @(w)solveChol_AXeqB(w,m);
return;
end

if isa(w,'function_handle')
outer = solveChol_AXeqB([],m);
y = compose_mv(outer,w,[]);
return;
end

[A,B,n] = extract(w,m);
if isreal(A)
R = chol(A);
solve = @(B) R\(R.'\B);
else %complex
solve = @(B) A\B;
end

y = solve(B);
deriv = @(dy) deriv_this(dy,m,n,solve,y);
y = y(:);

function [g,hess,linear] = deriv_this(dy,m,n,solve,X)
DXt = reshape(dy,m,n);
DBt = solve(DXt);
DAt = -DBt*X.';
g = [DAt(:);DBt(:)];
linear = false;
hess = @(dw) hess_this(dw,m,solve,X,DBt);

function [h,Jv] = hess_this(dw,m,solve,X,DBt)
[dA,dB] = extract(dw,m);
D_DBt = -solve(dA.'*DBt);
DX = solve(dB-dA*X);
D_DAt = -(D_DBt*X.'+DBt*DX.');
h = [D_DAt(:);D_DBt(:)];
if nargout>1
Jv = solve(dB-dA*X);
Jv = Jv(:);
end

function [A,B,n] = extract(w,m)
mm = m^2;
A = w(1:mm);
A = reshape(A,m,m);
B = w(mm+1:end);
B = reshape(B,m,[]);
n = size(B,2);

function test_this()
m = 3;
n = 10;
k = 8;
Usz = m*n;
Bsz = m*k;
Wsz = Usz+Bsz;
w = [];
U = subvec(w,Wsz,1,Usz);
B = subvec(w,Wsz,Usz+1,Bsz);
A = UtU(U,n,m);
AB = stack(w,A,B);
f = solveChol_AXeqB(AB,m);
w = randn(Wsz,1);
test_MV2DF(f,w,true);
github
bsxfan/meta-embeddings-master
test_MV2DF.m
.m
meta-embeddings-master/code/Niko/matlab/fous-y-tout/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/test/test_MV2DF.m
2,104
utf_8
1f7eea1823322c4c0741c86792fc73c4
function test_MV2DF(f,x0,do_cstep)

%id_in = identity_trans([]);
%id_out = identity_trans([]);
%f = f(id_in);
%f = id_out(f);

x0 = x0(:);

if ~exist('do_cstep','var')
do_cstep = 1;
end

if do_cstep
Jc = cstepJacobian(f,x0);
end
Jr = rstepJacobian(f,x0);

[y0,deriv] = f(x0);
m = length(y0);
n = length(x0);

J2 = zeros(size(Jr));
for i=1:m;
y = zeros(m,1);
y(i) = 1;
J2(i,:) = deriv(y)';
end

if do_cstep
c_err = max(max(abs(Jc-J2)));
else
c_err = nan;
end
r_err = max(max(abs(Jr-J2)));
fprintf('test gradient : cstep err = %g, rstep err = %g\n',c_err,r_err);

g2 = randn(m,1);
[dummy,hess,linear] = deriv(g2);

if true %~linear
rHess = @(dx) rstep_approxHess(dx,g2,f,x0);
if do_cstep
cHess = @(dx) cstep_approxHess(dx,g2,f,x0);
else
cHess = @(dx) nan(size(dx));
end
end

J1 = zeros(size(Jr));
if true %~linear
H1 = zeros(n,n);
H2 = zeros(n,n);
Hr = zeros(n,n);
Hc = zeros(n,n);
end

for j=1:n;
x = zeros(n,1);
x(j) = 1;
[h1,jx] = hess(x);
h2 = hess(x);
J1(:,j) = jx;
if ~linear
H1(:,j) = h1;
H2(:,j) = h2;
end
Hr(:,j) = rHess(x);
Hc(:,j) = cHess(x);
end

if do_cstep
c_err = max(max(abs(Jc-J1)));
else
c_err = nan;
end
r_err = max(max(abs(Jr-J1)));
fprintf('test Jacobian : cstep err = %g, rstep err = %g\n',c_err,r_err);

fprintf('test Jacobian-gradient'': %g\n',max(max(abs(J1-J2))));

if false %linear
fprintf('function claims to be linear, not testing Hessians\n');
return;
end

r_err = max(max(abs(H1-Hr)));
c_err = max(max(abs(H1-Hc)));
rc_err = max(max(abs(Hr-Hc)));
fprintf('test Hess prod: cstep err = %g, rstep err = %g, cstep-rstep = %g\n',c_err,r_err,rc_err);
fprintf('test H1-H2: %g\n',max(max(abs(H1-H2))));

function x = rstep_approxHess(dx,dy,f,x0)
alpha = sqrt(eps);
x2 = x0 + alpha*dx;
[dummy,deriv2] = f(x2);
x1 = x0 - alpha*dx;
[dummy,deriv1] = f(x1);
g2 = deriv2(dy);
g1 = deriv1(dy);
x = (g2-g1)/(2*alpha);

function p = cstep_approxHess(dx,dy,f,x0)
x = x0 + 1e-20i*dx;
[dummy,deriv] = f(x);
g = deriv(dy);
p = 1e20*imag(g);