plateform: stringclasses (1 value)
repo_name: stringlengths (13 to 113)
name: stringlengths (3 to 74)
ext: stringclasses (1 value)
path: stringlengths (12 to 229)
size: int64 (23 to 843k)
source_encoding: stringclasses (9 values)
md5: stringlengths (32 to 32)
text: stringlengths (23 to 843k)
github
bsxfan/meta-embeddings-master
quality_fuser_v1.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/systems/aside/quality_fuser_v1.m
2,225
utf_8
64802a2f8ee68bcd9f60a31166e64fdb
function [fusion,params] = quality_fuser_v1(w,scores,train_vecs,test_vecs,train_ndx,test_ndx,ddim)
%
% Inputs:
%
%   scores: the primary detection scores, for training
%           D-by-T matrix of T scores for D input systems
%
%   train_vecs: K1-by-M matrix, one column-vector for each of M training
%               segments
%
%   test_vecs: K2-by-N matrix, one column-vector for each of N test
%              segments
%
%   train_ndx: 1-by-T index where train_ndx(t) is the index into train_vecs
%              for trial t.
%
%   test_ndx: 1-by-T index where test_ndx(t) is the index into test_vecs
%             for trial t.
%
%   ddim: dimension of subspace for quality distance calculation,
%         where ddim <= min(K1,K2)

if nargin==0
    test_this();
    return;
end

% Check data dimensions
[K1,M] = size(train_vecs);
[K2,N] = size(test_vecs);
assert(ddim<min(K1,K2));
[D,T] = size(scores);
assert(T == length(train_ndx));
assert(T == length(test_ndx));
assert(max(train_ndx)<=M);
assert(max(test_ndx)<=N);

% Create building blocks
[linfusion,params1] = linear_fuser([],scores);
[train_quality,params2] = sigmoid_logdistance(params1.tail,train_vecs,ddim);
train_distributor = duplicator_fh(train_ndx,size(train_vecs,2));
train_quality = train_distributor(train_quality);
[test_quality,params3] = sigmoid_logdistance(params2.tail,test_vecs,ddim);
test_distributor = duplicator_fh(test_ndx,size(test_vecs,2));
test_quality = test_distributor(test_quality);
params.get_w0 = @(ssat) [params1.get_w0(); params2.get_w0(ssat); params3.get_w0(ssat)];
params.tail = params3.tail;

% Assemble building blocks

% combine train and test quality
quality = dottimes_of_functions([],train_quality,test_quality);

% modulate linear fusion with quality
fusion = dottimes_of_functions([],quality,linfusion);

if ~isempty(w)
    fusion = fusion(w);
end

end

function test_this()
D = 2;
N = 5;
T = 3;
Q = 4;
ndx = ceil(T.*rand(1,N));
scores = randn(D,N);
train = randn(Q,T);
test = randn(Q,T);
ddim = 2;
ssat = 0.999;
[fusion,params] = quality_fuser_v1([],scores,train,test,ndx,ndx,ddim);
w0 = params.get_w0(ssat);
test_MV2DF(fusion,w0);
quality_fuser_v1(w0,scores,train,test,ndx,ndx,ddim),
end
github
bsxfan/meta-embeddings-master
quality_fuser_v2.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/systems/aside/quality_fuser_v2.m
1,827
utf_8
702d7721d2e75e4164bd1fc9b7ba4c57
function [fusion,params] = quality_fuser_v2(w,scores,train_vecs,test_vecs,train_ndx,test_ndx,ddim)
%
% Inputs:
%
%   scores: the primary detection scores, for training
%           D-by-T matrix of T scores for D input systems
%
%   train_vecs: K1-by-M matrix, one column-vector for each of M training
%               segments
%
%   test_vecs: K2-by-N matrix, one column-vector for each of N test
%              segments
%
%   train_ndx: 1-by-T index where train_ndx(t) is the index into train_vecs
%              for trial t.
%
%   test_ndx: 1-by-T index where test_ndx(t) is the index into test_vecs
%             for trial t.
%
%   ddim: dimension of subspace for quality distance calculation,
%         where ddim <= min(K1,K2)

if nargin==0
    test_this();
    return;
end

% Check data dimensions
[K1,M] = size(train_vecs);
[K2,N] = size(test_vecs);
assert(ddim<min(K1,K2));
[D,T] = size(scores);
assert(T == length(train_ndx));
assert(T == length(test_ndx));
assert(max(train_ndx)<=M);
assert(max(test_ndx)<=N);

% Create building blocks
[linfusion,params1] = linear_fuser([],scores);
[quality,params2] = prod_sigmoid_logdist(params1.tail,train_vecs,test_vecs,train_ndx,test_ndx,ddim);
params.get_w0 = @(ssat) [params1.get_w0(); params2.get_w0(ssat)];
params.tail = params2.tail;

% Assemble building blocks

% modulate linear fusion with quality
fusion = dottimes_of_functions([],quality,linfusion);

if ~isempty(w)
    fusion = fusion(w);
end

end

function test_this()
D = 2;
N = 5;
T = 3;
Q = 4;
ndx = ceil(T.*rand(1,N));
scores = randn(D,N);
train = randn(Q,T);
test = randn(Q,T);
ddim = 2;
ssat = 0.99;
[fusion,params] = quality_fuser_v2([],scores,train,test,ndx,ndx,ddim);
w0 = params.get_w0(ssat);
test_MV2DF(fusion,w0);
quality_fuser_v2(w0,scores,train,test,ndx,ndx,ddim),
end
github
bsxfan/meta-embeddings-master
quality_fuser_v4.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/systems/aside/quality_fuser_v4.m
1,217
utf_8
d5315c3a78ba4e1277f9493c119d0cc8
function [fusion,params] = quality_fuser_v4(w,scores,quality_inputs) % % Inputs: % % scores: the primary detection scores, for training % D-by-T matrix of T scores for D input systems % % quality_input: K-by-T matrix of quality measures % % Output: % fusion: is numeric if w is numeric, or a handle to an MV2DF, representing: % % y= (alpha'*scores+beta) * sigmoid( gamma'*quality_inputs + delta) % if nargin==0 test_this(); return; end % Check data dimensions [D,T] = size(scores); [K,T2] = size(quality_inputs); assert(T==T2); % Create building blocks [linfusion,params1] = linear_fuser([],scores); [quality,params2] = fused_sigmoid(params1.tail,quality_inputs); params.get_w0 = @(ssat) [params1.get_w0(); params2.get_w0(ssat)]; params.tail = params2.tail; % Assemble building blocks % modulate linear fusion with quality fusion = dottimes_of_functions([],quality,linfusion); if ~isempty(w) fusion = fusion(w); end end function test_this() D = 4; T = 5; K = 3; scores = randn(D,T); Q = randn(K,T); ssat = 0.99; [fusion,params] = quality_fuser_v4([],scores,Q); w0 = params.get_w0(ssat); test_MV2DF(fusion,w0); w0(D+1)=1; quality_fuser_v4(w0,scores,Q), end
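The header comment above specifies the v4 fusion as y = (alpha'*scores + beta) .* sigmoid(gamma'*quality_inputs + delta). A minimal plain-MATLAB sketch of that formula on synthetic numbers, outside the MV2DF machinery (all names and values here are illustrative, not toolkit code):

% Direct evaluation of the documented v4 formula on made-up data.
D = 4; K = 3; T = 5;                     % systems, quality dims, trials
scores = randn(D,T);                     % primary detection scores
Q = randn(K,T);                          % quality inputs, one column per trial
alpha = rand(D,1); beta = 0.5;           % linear fusion weights (assumed)
gamma = rand(K,1); delta = 0.2;          % quality weights (assumed)
sig = @(x) 1./(1+exp(-x));
linfusion = alpha.'*scores + beta;       % 1-by-T calibrated linear fusion
quality   = sig(gamma.'*Q + delta);      % 1-by-T confidence factor in (0,1)
y = linfusion .* quality;                % quality-modulated fusion scores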
github
bsxfan/meta-embeddings-master
sigmoid_logdistance.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/quality_modules/sigmoid_logdistance.m
1,561
utf_8
b124d29ef74e835d894f8dd7de72c760
function [sld,params] = sigmoid_logdistance(w,input_data,ddim) % % Algorithm: sld = sigmoid( % log( % sum(bsxfun(@minus,M*input_data,c).^2,1) % )) % % % Inputs: % w: is vec([M,c]), where M is ddim-by-D and c is ddim-by-1 % Use w=[] to let output sld be an MV2DF function handle. % % input_data: D-by-T matrix % % ddim: the first dimension of the W matrix given as the first % parameter to run_sys % % Outputs: % sld: function handle (if w=[]), or numeric % params.get_w0(ssat): returns w0 for optimization initialization, % 0<ssat<1 is required average sigmoid output. % params.tail: is tail of parameter w, which is not consumed by this % function. if nargin==0 test_this(); return; end datadim = size(input_data,1); wsz = ddim*(datadim+1); [whead,wtail] = splitvec_fh(wsz,w); params.get_w0 = @(ssat) init_w0(ssat); params.tail = wtail; dist = square_distance_mv2df(whead,input_data,ddim); sld = one_over_one_plus_w_mv2df(dist); function w0 = init_w0(ssat) W0 = randn(ddim,datadim+1); W0(:,end) = 0; % centroid from which distances are computed d0 = (1-ssat)/ssat; d = square_distance_mv2df(W0(:),input_data,ddim); W0 = sqrt(d0/median(d))*W0; w0 = W0(:); end end function test_this() K = 5; N = 10; data = randn(N,K); ddim = 3; ssat = 0.99; [sys,params] = sigmoid_logdistance([],data,ddim); w0 = params.get_w0(ssat); test_MV2DF(sys,w0); dist = sigmoid_logdistance(w0,data,ddim), end
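For a fixed numeric w, the composition of square_distance_mv2df and one_over_one_plus_w_mv2df used above amounts to a squared distance to a centroid in a projected subspace, squashed into (0,1) by 1./(1+d). A small sketch with made-up sizes (not toolkit code):

D = 10; T = 20; ddim = 3;
X = randn(D,T);                          % input_data, one column per segment
M = randn(ddim,D); c = zeros(ddim,1);    % projection and centroid, w = vec([M,c])
d = sum(bsxfun(@minus,M*X,c).^2,1);      % 1-by-T squared distances
sld = 1./(1+d);                          % in (0,1); equals sigmoid(-log(d))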
github
bsxfan/meta-embeddings-master
QtoLLH.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/quality_modules/QtoLLH.m
612
utf_8
e0bc4e7d0bfd4082fc37fb474bc44c8c
function [LLH,w0] = QtoLLH(w,Q,n) % if nargin==0 test_this(); return; end if ~exist('Q','var') || isempty(Q) LLH = sprintf(['QtoLLH:',repmat(' %g',1,length(w))],w); return; end [m,k] = size(Q); wsz = m*n; if nargout>1, w0 = zeros(wsz,1); end LLH = linTrans(w,@(w)map_this(w),@(w)transmap_this(w)); function y = map_this(w) w = reshape(w,n,m); y = w*Q; end function w = transmap_this(y) y = reshape(y,n,k); w = y*Q.'; end end function test_this() Q = randn(2,10); [sys,w0] = QtoLLH([],Q,3); test_MV2DF(sys,w0); end
github
bsxfan/meta-embeddings-master
fused_sigmoid.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/quality_modules/fused_sigmoid.m
1,293
utf_8
1f35e45a3c945008307dd1222a281bb8
function [ps,params] = fused_sigmoid(w,input_data) % % Algorithm: ps = sigmoid( alpha'*input_data +beta) % % % Inputs: % w: is [alpha; beta], where alpha is D-by-1 and beta is scalar. % Use w=[] to let output ps be an MV2DF function handle. % If w is a function handle to an MV2DF then ps is the function handle % to the composition of w and this function. % % input_data: D-by-T matrix % % % Outputs: % ps: function handle (if w=[], or w is handle), or numeric T-by-1 % params.get_w0(ssat): returns w0 for optimization initialization, % 0<ssat<1 is required average sigmoid output. % params.tail: is tail of parameter w, which is not consumed by this % function. if nargin==0 test_this(); return; end [dim,n] = size(input_data); wsz = dim+1; [whead,wtail] = splitvec_fh(wsz,w); params.get_w0 = @(ssat) init_w0(ssat,dim); params.tail = wtail; y = fusion_mv2df(whead,input_data); ps = sigmoid_mv2df(y); function w0 = init_w0(ssat,dim) alpha = zeros(dim,1); beta = logit(ssat); w0 = [alpha;beta]; end end function test_this() K = 5; T = 10; data = randn(K,T); ssat = 0.99; [sys,params] = fused_sigmoid([],data); w0 = params.get_w0(ssat); test_MV2DF(sys,w0); ps = fused_sigmoid(w0,data), end
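A minimal numeric sketch of the documented mapping ps = sigmoid(alpha'*input_data + beta), computed directly rather than through fusion_mv2df and sigmoid_mv2df (sizes and values are illustrative):

D = 5; T = 10;
X = randn(D,T);                          % quality inputs, one column per trial
alpha = randn(D,1); beta = 0;            % w = [alpha; beta] (assumed values)
sig = @(x) 1./(1+exp(-x));
ps = sig(alpha.'*X + beta).';            % T-by-1, matching the documented output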
github
bsxfan/meta-embeddings-master
sigmoid_log_sumsqdist.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/quality_modules/sigmoid_log_sumsqdist.m
1,638
utf_8
0b9f81df4bc93fe52ac7dbaa98594160
function [sig,params] = sigmoid_log_sumsqdist(w,data1,data2,ndx1,ndx2,ddim) % % Similar to prod_sigmoid_logdist, but adds square distances from two sides % before doing sigmoid(log()). % if nargin==0 test_this(); return; end datadim = size(data1,1); assert(datadim==size(data2,1),'data1 and data2 must have same number of rows'); assert(length(ndx1)==length(ndx2)); assert(max(ndx1)<=size(data1,2)); assert(max(ndx2)<=size(data2,2)); wsz = ddim*(datadim+1); [whead,wtail] = splitvec_fh(wsz,w); params.get_w0 = @(ssat) init_w0(ssat); params.tail = wtail; sqd1 = square_distance_mv2df([],data1,ddim); %Don't put whead in here, sqd2 = square_distance_mv2df([],data2,ddim); %or here. Will cause whead to be called twice. % distribute over trials distrib = duplicator_fh(ndx1,size(data1,2)); sqd1 = distrib(sqd1); distrib = duplicator_fh(ndx2,size(data2,2)); sqd2 = distrib(sqd2); sumsq_dist = sum_of_functions([],[1,1],sqd1,sqd2); sig = one_over_one_plus_w_mv2df(sumsq_dist); sig = sig(whead); %Finally plug whead in here. function w0 = init_w0(ssat) W0 = randn(ddim,datadim+1); %subspace projector W0(:,end) = 0; % centroid from which distances are computed d0 = (1-ssat)/ssat; d = sumsq_dist(W0(:)); W0 = sqrt(d0/median(d))*W0; w0 = W0(:); end end function test_this() K = 5; N1 = 10; N2 = 2*N1; data1 = randn(K,N1); data2 = randn(K,N2); ndx1 = [1:N1,1:N1]; ndx2 = 1:N2; ddim = 3; ssat = 0.99; [sys,params] = sigmoid_log_sumsqdist([],data1,data2,ndx1,ndx2,ddim); w0 = params.get_w0(ssat); test_MV2DF(sys,w0); dist = sigmoid_log_sumsqdist(w0,data1,data2,ndx1,ndx2,ddim), end
github
bsxfan/meta-embeddings-master
prmtrzd_sig_log_dist.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/quality_modules/prmtrzd_sig_log_dist.m
1,767
utf_8
edf7387841000e5933bda732bca5b79b
function [ps,params] = prmtrzd_sig_log_dist(w,input_data,ddim) % % Algorithm: ps = sigmoid( % offs+scal*log( % sum(bsxfun(@minus,M*input_data,c).^2,1) % )) % % % Inputs: % w: is [ vec(M); c; scal; offs], where M is ddim-by-D; c is ddim-by-1; % and scal and offs are scalar. % Use w=[] to let output ps be an MV2DF function handle. % If w is a function handle to an MV2DF then ps is the function handle % to the composition of w and this function. % % input_data: D-by-T matrix % % ddim: the first dimension of the M matrix % % Outputs: % ps: function handle (if w=[], or w is handle), or numeric T-by-1 % params.get_w0(ssat): returns w0 for optimization initialization, % 0<ssat<1 is required average sigmoid output. % params.tail: is tail of parameter w, which is not consumed by this % function. if nargin==0 test_this(); return; end [datadim,n] = size(input_data); Mc_sz = ddim*(datadim+1); wsz = Mc_sz + 2; [whead,wtail] = splitvec_fh(wsz,w); params.get_w0 = @(ssat) init_w0(ssat); params.tail = wtail; [M_c,scal_offs] = splitvec_fh(Mc_sz,w); ld = log_distance_mv2df(M_c,input_data,ddim); tld = scale_and_translate(w,ld,scal_offs,1,n); ps = sigmoid_mv2df(tld); function w0 = init_w0(ssat) M = randn(ddim,datadim); c = zeros(ddim,1); scal = 1; offs = 0; w0 = [M(:);c;scal;offs]; x = ld(w0); y = logit(ssat); scal = y/median(x); w0(end-1) = scal; end end function test_this() K = 5; N = 10; data = randn(N,K); ddim = 3; ssat = 0.99; [sys,params] = prmtrzd_sig_log_dist([],data,ddim); w0 = params.get_w0(ssat); test_MV2DF(sys,w0); ps = prmtrzd_sig_log_dist(w0,data,ddim), end
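The mapping documented above is a sigmoid of an affinely rescaled log squared distance. A plain-MATLAB sketch for a fixed numeric parameter vector (names, sizes, and values are illustrative):

D = 10; T = 20; ddim = 3;
X = randn(D,T);
M = randn(ddim,D); c = zeros(ddim,1);      % subspace projection and centroid
scal = 1; offs = 0;                        % w = [vec(M); c; scal; offs]
sig = @(x) 1./(1+exp(-x));
ld = log(sum(bsxfun(@minus,M*X,c).^2,1));  % log squared distance per trial
ps = sig(offs + scal*ld);                  % 1-by-T outputs in (0,1)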
github
bsxfan/meta-embeddings-master
QQtoLLH.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/quality_modules/QQtoLLH.m
623
utf_8
73d37a5922aafaa7dd4dd6e5a2cfbe51
function [LLH,w0] = QQtoLLH(w,qleft,qright,n) % if nargin==0 test_this(); return; end qleft = [qleft;ones(1,size(qleft,2))]; qright = [qright;ones(1,size(qright,2))]; qdim = size(qleft,1); qdim2 = size(qright,1); assert(qdim==qdim2); q2 = qdim*(qdim+1)/2; wsz = n*q2; if nargout>1, w0 = zeros(wsz,1); end lh = cell(1,n); tail = w; for i=1:n [wi,tail] = splitvec_fh(q2,tail); lh{i} = AWB_fh(qleft',qright,tril_to_symm_fh(qdim,wi)); end LLH = interleave(w,lh); end function test_this() qleft = randn(3,3); qright = randn(3,2); [sys,w0] = QQtoLLH([],qleft,qright,2); test_MV2DF(sys,w0); end
github
bsxfan/meta-embeddings-master
QQtoP.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/quality_modules/QQtoP.m
771
utf_8
0a940f8a8a56510a32ad6a45accddc02
function [P,params] = QQtoP(w,qleft,qright,n) % if nargin==0 test_this(); return; end qleft = [qleft;ones(1,size(qleft,2))]; qright = [qright;ones(1,size(qright,2))]; [qdim,nleft] = size(qleft); [qdim2,nright] = size(qright); assert(qdim==qdim2); q2 = qdim*(qdim+1)/2; wsz = n*q2; [whead,wtail] = splitvec_fh(wsz); params.get_w0 = @() zeros(wsz,1); params.tail = wtail; lh = cell(1,n); for i=1:n [wi,whead] = splitvec_fh(q2,whead); lh{i} = AWB_fh(qleft',qright,tril_to_symm_fh(qdim,wi)); end P = exp_mv2df(logsoftmax_mv2df(interleave(w,lh),n)); %P = interleave(w,lh); end function test_this() qleft = randn(3,3); qright = randn(3,2); [sys,params] = QQtoP([],qleft,qright,2); w0 = params.get_w0(); test_MV2DF(sys,w0); P = sys(w0), end
github
bsxfan/meta-embeddings-master
prod_sigmoid_logdist.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/quality_modules/prod_sigmoid_logdist.m
2,506
utf_8
f31a256c5434fca4b6c0641d23a2ebc1
function [sig,params] = prod_sigmoid_logdist(w,data1,data2,ndx1,ndx2,ddim) % % Algorithm: sig = distribute(ndx1,sigmoid( % log( % sum(bsxfun(@minus,M*data_1,c).^2,1) % ))) % * % distribute(ndx2,sigmoid( % log( % sum(bsxfun(@minus,M*data_2,c).^2,1) % ))) % % % Inputs: % w: is vec([M,c]), where M is ddim-by-D and c is ddim-by-1 % Use w=[] to let output sld be an MV2DF function handle. % % data_1: D-by-T1 matrix % data_2: D-by-T2 matrix % ndx1,ndx2: indices of size 1 by T to distribute T1 and T2 segs over T % trials % % ddim: the first dimension of the M matrix % % Outputs: % sig: function handle (if w=[]), or numeric % params.get_w0(ssat): returns w0 for optimization initialization, % 0<ssat<1 is required average sigmoid output. % params.tail: is tail of parameter w, which is not consumed by this % function. if nargin==0 test_this(); return; end datadim = size(data1,1); assert(datadim==size(data2,1),'data1 and data2 must have same number of rows'); assert(length(ndx1)==length(ndx2)); assert(max(ndx1)<=size(data1,2)); assert(max(ndx2)<=size(data2,2)); wsz = ddim*(datadim+1); [whead,wtail] = splitvec_fh(wsz,w); params.get_w0 = @(ssat) init_w0(ssat); params.tail = wtail; sqd1 = square_distance_mv2df([],data1,ddim); %Don't put whead in here, sqd2 = square_distance_mv2df([],data2,ddim); %or here. Will cause whead to be called twice. sig1 = one_over_one_plus_w_mv2df(sqd1); sig2 = one_over_one_plus_w_mv2df(sqd2); % distribute over trials distrib = duplicator_fh(ndx1,size(data1,2)); sig1 = distrib(sig1); distrib = duplicator_fh(ndx2,size(data2,2)); sig2 = distrib(sig2); sigh = dottimes_of_functions([],sig1,sig2); sig = sigh(whead); function w0 = init_w0(ssat) W0 = randn(ddim,datadim+1); %subspace projector W0(:,end) = 0; % centroid from which distances are computed d0 = (1-ssat)/ssat; s = sigh(W0(:)); d = (1-s)./s; W0 = sqrt(d0/median(d))*W0; w0 = W0(:); end end function test_this() K = 5; N1 = 10; N2 = 2*N1; data1 = randn(K,N1); data2 = randn(K,N2); ndx1 = [1:N1,1:N1]; ndx2 = 1:N2; ddim = 3; ssat = 0.01; [sys,params] = prod_sigmoid_logdist([],data1,data2,ndx1,ndx2,ddim); w0 = params.get_w0(ssat); test_MV2DF(sys,w0); sig = prod_sigmoid_logdist(w0,data1,data2,ndx1,ndx2,ddim), end
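For a fixed numeric w, the function assembled above computes one 1./(1+d) quality term per side, distributes the per-segment terms over trials with ndx1 and ndx2, and multiplies them elementwise. A small synthetic sketch (not toolkit code):

D = 10; ddim = 3; N1 = 6; N2 = 8; T = 12;
data1 = randn(D,N1); data2 = randn(D,N2);
ndx1 = ceil(N1*rand(1,T)); ndx2 = ceil(N2*rand(1,T));  % segment index per trial
M = randn(ddim,D); c = zeros(ddim,1);                  % shared w = vec([M,c])
d1 = sum(bsxfun(@minus,M*data1,c).^2,1);               % 1-by-N1
d2 = sum(bsxfun(@minus,M*data2,c).^2,1);               % 1-by-N2
sig = (1./(1+d1(ndx1))) .* (1./(1+d2(ndx2)));          % 1-by-T quality factor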
github
bsxfan/meta-embeddings-master
outerprod_of_sigmoids.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/quality_modules/outerprod_of_sigmoids.m
1,033
utf_8
2577209cada6747a9615d0ce3b375b7f
function [Q,params] = outerprod_of_sigmoids(w,qleft,qright) % if nargin==0 test_this(); return; end [qdim,nleft] = size(qleft); [qdim2,nright] = size(qright); assert(qdim==qdim2); wsz = qdim+1; [whead,wtail] = splitvec_fh(wsz,w); params.get_w0 = @(ssat) init_w0(ssat); params.tail = wtail; % fleft = sigmoid_mv2df(fusion_mv2df([],qleft)); %Don't put whead in here, % fright = sigmoid_mv2df(fusion_mv2df([],qright)); %or here. Will cause whead to be called twice. fleft = fusion_mv2df([],qleft); %Don't put whead in here, fright = fusion_mv2df([],qright); %or here. Will cause whead to be called twice. Q = outerprod_of_functions(whead,fleft,fright,nleft,nright); function w0 = init_w0(ssat) w0 = zeros(wsz,1); offs = logit(ssat); w0(end) = offs; end end function test_this() qleft = randn(3,10); qright = randn(3,20); ssat = 0.99; [sys,params] = outerprod_of_sigmoids([],qleft,qright); w0 = params.get_w0(ssat); test_MV2DF(sys,w0); sig = outerprod_of_sigmoids(w0,qleft,qright), end
github
bsxfan/meta-embeddings-master
parallel_cal.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/quality_modules/parallel_cal.m
983
utf_8
252822b934d5469fc96448f80d2f3e90
function [calscores,w0] = parallel_cal(w,scores,wfuse) % if nargin==0 test_this(); return; end if ~exist('scores','var') || isempty(scores) calscores = sprintf(['parallel calibration:',repmat(' %g',1,length(w))],w); return; end [m,n] = size(scores); if nargout>1, w0 = init_w0(wfuse); end calscores = linTrans(w,@(w)map_this(w),@(w)transmap_this(w)); function w0 = init_w0(wfuse) assert(length(wfuse)-1==m); scal = wfuse(1:end-1); offs = wfuse(end); W = [scal*(m+1);((m+1)/m)*offs*ones(m,1)]; w0 = W(:); end function y = map_this(w) w = reshape(w,m,2); y = bsxfun(@times,scores,w(:,1)); y = bsxfun(@plus,y,w(:,2)); end function w = transmap_this(y) y = reshape(y,m,n); w = [sum(y.*scores,2),sum(y,2)]; end end function test_this() scores = randn(4,10); [sys,w0] = parallel_cal([],scores,(1:5)'); test_MV2DF(sys,w0); end
github
bsxfan/meta-embeddings-master
parallel_cal_augm.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/quality_modules/parallel_cal_augm.m
1,115
utf_8
f5a8bba6d164ab5577c8429ce5835305
function [calscores,params] = parallel_cal_augm(w,scores) % if nargin==0 test_this(); return; end if ~exist('scores','var') || isempty(scores) calscores = sprintf(['parallel calibration:',repmat(' %g',1,length(w))],w); return; end [m,n] = size(scores); scores = [scores;zeros(1,n)]; wsz = 2*m; [whead,wtail] = splitvec_fh(wsz,w); params.get_w0 = @(wfuse) init_w0(wfuse); params.tail = wtail; waugm = augmentmatrix_fh(m,0,whead); calscores = linTrans(waugm,@(w)map_this(w),@(w)transmap_this(w)); function w0 = init_w0(wfuse) scal = wfuse(1:end-1); offs = wfuse(end); W = [scal*(m+1);((m+1)/m)*offs*ones(m,1)]; w0 = W(:); end function y = map_this(w) w = reshape(w,m+1,2); y = bsxfun(@times,scores,w(:,1)); y = bsxfun(@plus,y,w(:,2)); end function w = transmap_this(y) y = reshape(y,m+1,n); w = [sum(y.*scores,2),sum(y,2)]; end end function test_this() scores = randn(4,10); [sys,params] = parallel_cal_augm([],scores); w0 = params.get_w0(); test_MV2DF(sys,w0); end
github
bsxfan/meta-embeddings-master
prod_of_prmtrzd_sigmoids.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/quality_modules/prod_of_prmtrzd_sigmoids.m
1,537
utf_8
6c18de0879f128ada38d483ace60b57f
function [ps,params] = prod_of_prmtrzd_sigmoids(w,input_data) % % Algorithm: ps = prod_i sigmoid( alpha_i*input_data(i,:) + beta_i) % % % Inputs: % w: is vec([alpha; beta]), where alpha and beta are 1-by-D. % Use w=[] to let output ps be an MV2DF function handle. % If w is a function handle to an MV2DF then ps is the function handle % to the composition of w and this function. % % input_data: D-by-T matrix % % % Outputs: % ps: function handle (if w=[], or w is handle), or numeric T-by-1 % params.get_w0(ssat): returns w0 for optimization initialization, % 0<ssat<1 is required average sigmoid output. % params.tail: is tail of parameter w, which is not consumed by this % function. if nargin==0 test_this(); return; end m = size(input_data,1); prms = cell(1,m); [ps,prms{1}] = prmtrzd_sigmoid([],input_data(1,:)); for i=2:m [ps2,prms{i}] = prmtrzd_sigmoid(prms{i-1}.tail,input_data(i,:)); ps = dottimes_of_functions([],ps,ps2); end if ~isempty(w) ps = ps(w); end params.get_w0 = @(ssat) init_w0(ssat,m); params.tail = prms{m}.tail; function w0 = init_w0(ssat,m) ssat = ssat^(1/m); w0 = zeros(m*2,1); at = 1; for j=1:m w0(at:at+1) = prms{j}.get_w0(ssat); at = at + 2; end end end function test_this() K = 3; T = 10; data = randn(K,T); ssat = 0.99; [sys,params] = prod_of_prmtrzd_sigmoids([],data); w0 = params.get_w0(ssat); test_MV2DF(sys,w0); ps = prod_of_prmtrzd_sigmoids(w0,data), end
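A plain-MATLAB sketch of the documented product of per-row parametrized sigmoids, with made-up parameters (not toolkit code):

D = 3; T = 10;
X = randn(D,T);                          % one quality measure per row
alpha = randn(D,1); beta = zeros(D,1);   % per-row scale and offset (assumed)
sig = @(x) 1./(1+exp(-x));
ps = prod(sig(bsxfun(@plus,bsxfun(@times,alpha,X),beta)),1);  % 1-by-T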
github
bsxfan/meta-embeddings-master
prmtrzd_sigmoid.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/quality_modules/prmtrzd_sigmoid.m
1,312
utf_8
616d28e7f84f188fcd7759c98a9c3c66
function [ps,params] = prmtrzd_sigmoid(w,input_data) % % Algorithm: ps = sigmoid( w0+w1'*input_data ), where % w = [w1;w0]; w0 is scalar; and w1 is vector % % % Inputs: % w = [w1;w0]; w0 is scalar; and w1 is vector. % Use w=[] to let output ps be an MV2DF function handle. % If w is a function handle to an MV2DF then ps is the function handle % to the composition of w and this function. % % input_data: D-by-T matrix % % % Outputs: % ps: function handle (if w=[], or w is handle), or numeric T-by-1 % params.get_w0(ssat): returns w0 for optimization initialization, % 0<ssat<1 is required average sigmoid output. % params.tail: is tail of parameter w, which is not consumed by this % function. if nargin==0 test_this(); return; end m = size(input_data,1); wsz = m+1; [whead,wtail] = splitvec_fh(wsz,w); params.get_w0 = @(ssat) init_w0(ssat); params.tail = wtail; y = fusion_mv2df(whead,input_data); ps = sigmoid_mv2df(y); function w0 = init_w0(ssat) w0 = zeros(wsz,1); offs = logit(ssat); w0(end) = offs; end end function test_this() D = 3; K = 5; data = randn(D,K); ssat = 0.99; [sys,params] = prmtrzd_sigmoid([],data); w0 = params.get_w0(ssat); test_MV2DF(sys,w0); ps = prmtrzd_sigmoid(w0,data), end
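A small numeric check of the get_w0 convention above: with the linear part set to zero and the offset set to logit(ssat), every output of the sigmoid starts at ssat. The logit is written out here because the toolkit's own helper is not shown in this file:

ssat = 0.99;
logit = @(p) log(p./(1-p));              % inverse of the sigmoid
sig   = @(x) 1./(1+exp(-x));
D = 3; T = 5; X = randn(D,T);
w0 = [zeros(D,1); logit(ssat)];          % [w1; w0] as documented above
ps0 = sig(w0(1:D).'*X + w0(end));        % every entry equals ssat = 0.99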
github
bsxfan/meta-embeddings-master
augmentmatrix_fh.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/mv2df_function_library/augmentmatrix_fh.m
826
utf_8
d2182cb06b78c3d43519f09b297ddad2
function fh = augmentmatrix_fh(m,value,w) % This is almost an MV2DF, but it does not return derivatives on numeric % input, w. % % Algorithm: y = [reshape(w,m,n);ones(1,n)](:) if nargin==0 test_this(); return; end function y = map_this(w) n = length(w)/m; y = [reshape(w,m,n);value*ones(1,n)]; end function y = linmap_this(w) n = length(w)/m; y = [reshape(w,m,n);zeros(1,n)]; end function w = transmap_this(y) y = reshape(y,m+1,[]); w = y(1:m,:); end map = @(w) map_this(w); linmap = @(w) linmap_this(w); transmap = @(y) transmap_this(y); fh = affineTrans([],map,linmap,transmap); if exist('w','var') && ~isempty(w) fh = fh(w); end end function test_this() m = 3; f = augmentmatrix_fh(m,1); test_MV2DF(f,randn(m*4,1)); end
github
bsxfan/meta-embeddings-master
bsx_col_plus_row.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/mv2df_function_library/bsx_col_plus_row.m
912
utf_8
59d5f6f7ea9d75509fbc6bf64b63b465
function fh = bsx_col_plus_row(m,n,w) % This is almost an MV2DF, but it does not return derivatives on numeric % input, w. % % Algorithm: col = w(1:m) % row = w(m+1:end) % y = bsxfun(@plus,col(:),row(:)'), % if nargin==0 test_this(); return; end function y = map_this(w) if isempty(m), m = length(w)-n; end col = w(1:m); row = w(m+1:end); y = bsxfun(@plus,col(:),row(:).'); end function w = transmap_this(y,sz) if isempty(m), m = sz-n; end y = reshape(y,m,[]); w=zeros(sz,1); w(1:m) = sum(y,2); w(m+1:end) = sum(y,1).'; end map = @(w) map_this(w); transmap = @(y,sz) transmap_this(y,sz); fh = linTrans_adaptive([],map,transmap); if exist('w','var') && ~isempty(w) fh = fh(w); end end function test_this() m = 2; n = 3; w = randn(m+n,1); f = bsx_col_plus_row(m,n); test_MV2DF(f,w); end
github
bsxfan/meta-embeddings-master
duplicator_fh.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/mv2df_function_library/duplicator_fh.m
805
utf_8
890c37f077bde2305a0f1c545b71c36a
function f = duplicator_fh(duplication_indices,xdim,w) % % This factory creates a function handle to an MV2DF, which represents the % function: % % y = x(duplication_indices) % if nargin==0 test_this(); return; end map = @(x) x(duplication_indices); %xdim = max(duplication_indices); ydim = length(duplication_indices); c = zeros(1,ydim); r = zeros(1,ydim); at = 1; for i=1:xdim ci = find(duplication_indices==i); n = length(ci); r(at:at+n-1) = i; c(at:at+n-1) = ci; at = at + n; end reverse = sparse(r,c,1,xdim,ydim); transmap = @(y) reverse*y; f = @(w) linTrans(w,map,transmap); if exist('w','var') && ~isempty(w) f = f(w); end end function test_this() dup = [ 1 3 1 3]; x = [ 1 2 3 4]; f = duplicator_fh(dup,length(x)); y = f(x), test_MV2DF(f,x); end
github
bsxfan/meta-embeddings-master
splitvec_fh.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/mv2df_function_library/splitvec_fh.m
1,343
utf_8
aff993bc1037dc1d6673762983fd5497
function [head,tail] = splitvec_fh(head_size,w) % % % If head_size <0 then tail_size = - head_size if nargin==0 test_this(); return; end tail_size = - head_size; function w = transmap_head(y,sz) w=zeros(sz,1); w(1:head_size)=y; end function w = transmap_tail(y,sz) w=zeros(sz,1); w(head_size+1:end)=y; end function w = transmap_head2(y,sz) w=zeros(sz,1); w(1:end-tail_size) = y; end function w = transmap_tail2(y,sz) w=zeros(sz,1); w(1+end-tail_size:end) = y; end if head_size>0 map_head = @(w) w(1:head_size); map_tail = @(w) w(head_size+1:end); head = linTrans_adaptive([],map_head,@(y,sz)transmap_head(y,sz)); tail = linTrans_adaptive([],map_tail,@(y,sz)transmap_tail(y,sz)); elseif head_size<0 map_head = @(w) w(1:end-tail_size); map_tail = @(w) w(1+end-tail_size:end); head = linTrans_adaptive([],map_head,@(y,sz)transmap_head2(y,sz)); tail = linTrans_adaptive([],map_tail,@(y,sz)transmap_tail2(y,sz)); else error('head size cannot be 0') end if exist('w','var') && ~isempty(w) head = head(w); tail = tail(w); end end function test_this() [head,tail] = splitvec_fh(2); fprintf('testing head:\n'); test_MV2DF(head,[1 2 3 4 5]); fprintf('\n\n\ntesting tail:\n'); test_MV2DF(tail,[1 2 3 4 5]); end
github
bsxfan/meta-embeddings-master
log_distance_mv2df.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/mv2df_function_library/log_distance_mv2df.m
1,968
utf_8
ab190182251a8ee9a8cce755c6615e99
function [y,deriv] = log_distance_mv2df(w,input_data,new_dim) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % % The function projects each column of input_data to a subspace and then % computes log distance from a centroid. The input_data is fixed, but % the projection and centroid parameters are variable. % % W = reshape(w); % y.' = log sum((W(:,1:end-1).'*input_data - W(:,end)).^2,1) % % W is the augmented matrix [M c] where M maps an input vector % to a lower dimensional space and c is the centroid in % the lower dimensional space. % % Parameters: % w: the vectorized version of the W matrix % input_data: is an K-by-T matrix of input vectors of length K, for % each of T trials. % new_dim: the dimension of vectors in the lower dimensional space. % if nargin==0 test_this(); return; end if isempty(w) [dim, num_trials] = size(input_data); map = @(w) map_this(w,input_data,dim,new_dim); transmap = @(w) transmap_this(w,input_data,num_trials,new_dim); delta = linTrans(w,map,transmap); y = logsumsquares_fh(new_dim,1,delta); return; end if isa(w,'function_handle') f = log_distance_mv2df([],input_data,new_dim); y = compose_mv(f,w,[]); return; end f = log_distance_mv2df([],input_data,new_dim); if nargout==1 y = f(w); else [y,deriv] = f(w); end function y = map_this(w,input_data,dim,new_dim) % V = [input_data; ones(1,num_trials)]; W = reshape(w,new_dim,dim+1); y = bsxfun(@minus,W(:,1:end-1)*input_data,W(:,end)); y = y(:); function dx = transmap_this(dy,input_data,num_trials,new_dim) dY = reshape(dy,new_dim,num_trials); % V = [input_data; ones(1,num_trials)]; % Vt = V.'; % dX = dY*Vt; dYt = dY.'; dYtSum = sum(dYt,1); dX = [input_data*dYt;-dYtSum].'; dx = dX(:); function test_this() K = 5; N = 10; P = 3; M = randn(P,N); c = randn(P,1); W = [M c]; w = W(:); input_data = randn(N,K); f = log_distance_mv2df([],input_data,P); test_MV2DF(f,w);
github
bsxfan/meta-embeddings-master
AWB_fh.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/mv2df_function_library/AWB_fh.m
675
utf_8
3ab5ec4ad82fe2f901f95abf30fb3193
function fh = AWB_fh(A,B,w) % This is almost an MV2DF, but it does not return derivatives on numeric % input, w. % % Algorithm: Y = A*reshape(w,..)*B if nargin==0 test_this(); return; end [m,n] = size(A); [r,s] = size(B); function y = map_this(w) w = reshape(w,n,r); y = A*w*B; end function w = transmap_this(y) y = reshape(y,m,s); w = A.'*y*B.'; end map = @(y) map_this(y); transmap = @(y) transmap_this(y); fh = linTrans([],map,transmap); if exist('w','var') && ~isempty(w) fh = fh(w); end end function test_this() A = randn(2,3); B = randn(4,5); f = AWB_fh(A,B); test_MV2DF(f,randn(3*4,1)); end
github
bsxfan/meta-embeddings-master
xoverxplusalpha.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/mv2df_function_library/xoverxplusalpha.m
792
utf_8
9fbd612d42a50cee70f2b05dce2bf16c
function [y,deriv] = xoverxplusalpha(w,x) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % % alpha --> x./(x+alpha) % if nargin==0 test_this(); return; end if isempty(w) y = @(w)xoverxplusalpha(w,x); return; end if isa(w,'function_handle') f = xoverxplusalpha([],x); y = compose_mv(f,w,[]); return; end x = x(:); assert(numel(w)==1); y = x./(x+w); deriv = @(Dy) deriv_this(Dy,x,w); end function [g,hess,linear] = deriv_this(Dy,x,w) g0 = -x./(x+w).^2; g = Dy.'*g0; linear = false; hess = @(Dw) hess_this(Dw,Dy,x,w,g0); end function [h,Jv] = hess_this(Dw,Dy,x,w,g0) h = 2*Dw * Dy.'*(x./(x+w).^3); if nargin>1 Jv = Dw*g0; end end function test_this() x = randn(1,100); w = randn(1); f = xoverxplusalpha([],x); test_MV2DF(f,w); end
github
bsxfan/meta-embeddings-master
tril_to_symm_fh.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/mv2df_function_library/tril_to_symm_fh.m
786
utf_8
9ea53a1f6c15720e67c1c446d7dfad43
function fh = tril_to_symm_fh(m,w) % This is almost an MV2DF, but it does not return derivatives on numeric % input, w. % % Algorithm: w is vector of sizem*(m+1)/2 % w -> m-by-m lower triangular matrix Y % Y -> Y + Y' if nargin==0 test_this(); return; end indx = tril(true(m)); function y = map_this(w) y = zeros(m); y(indx(:)) = w; y = y + y.'; end function w = transmap_this(y) y = reshape(y,m,m); y = y + y.'; w = y(indx(:)); end map = @(w) map_this(w); transmap = @(y) transmap_this(y); fh = linTrans([],map,transmap); if exist('w','var') && ~isempty(w) fh = fh(w); end end function test_this() m=3; n = m*(m+1)/2; f = tril_to_symm_fh(m); test_MV2DF(f,randn(n,1)); end
github
bsxfan/meta-embeddings-master
square_distance_mv2df.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/mv2df_function_library/square_distance_mv2df.m
1,835
utf_8
a5d544c6956f70a3c3afdec634a2c891
function [y,deriv] = square_distance_mv2df(w,input_data,new_dim) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % % The function computes the square distance of the vectors for each trial. % y.' = sum((W(:,1:end-1).'*input_data + W(:,end)).^2,1) % % W is the augmented matrix [M c] where M maps a score vector % to a lower dimensional space and c is an offset vector in % the lower dimensional space. % % Parameters: % w: the vectorized version of the W matrix % input_data: is an M-by-T matrix of input vectors of length M, for each of T % trials. % new_dim: the dimension of vectors in the lower dimensional space. % if nargin==0 test_this(); return; end if isempty(w) [dim, num_trials] = size(input_data); map = @(w) map_this(w,input_data,dim,new_dim); transmap = @(w) transmap_this(w,input_data,num_trials,new_dim); delta = linTrans(w,map,transmap); y = sums_of_squares(delta,new_dim); return; end if isa(w,'function_handle') f = square_distance_mv2df([],input_data,new_dim); y = compose_mv(f,w,[]); return; end f = square_distance_mv2df([],input_data,new_dim); if nargout==1 y = f(w); else [y,deriv] = f(w); end function y = map_this(w,input_data,dim,new_dim) % V = [input_data; ones(1,num_trials)]; W = reshape(w,new_dim,dim+1); y = bsxfun(@minus,W(:,1:end-1)*input_data,W(:,end)); y = y(:); function dx = transmap_this(dy,input_data,num_trials,new_dim) dY = reshape(dy,new_dim,num_trials); % V = [input_data; ones(1,num_trials)]; % Vt = V.'; % dX = dY*Vt; dYt = dY.'; dYtSum = sum(dYt,1); dX = [input_data*dYt;-dYtSum].'; dx = dX(:); function test_this() K = 5; N = 10; P = 3; M = randn(P,N); c = randn(P,1); W = [M c]; w = W(:); input_data = randn(N,K); f = square_distance_mv2df([],input_data,P); test_MV2DF(f,w);
github
bsxfan/meta-embeddings-master
addtotranspose_fh.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/mv2df_function_library/addtotranspose_fh.m
493
utf_8
c009e482a302e2825fb3f59940bcc79e
function fh = addtotranspose_fh(m,w) % This is almost an MV2DF, but it does not return derivatives on numeric % input, w. if nargin==0 test_this(); return; end function y = map_this(w) w = reshape(w,m,m); y = w+w.'; end map = @(y) map_this(y); transmap = @(y) map_this(y); fh = linTrans([],map,transmap); if exist('w','var') && ~isempty(w) fh = fh(w); end end function test_this() m=3; f = addtotranspose_fh(3); test_MV2DF(f,randn(m*m,1)); end
github
bsxfan/meta-embeddings-master
subvec_fh.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/mv2df_function_library/subvec_fh.m
544
utf_8
a8942d310965ca178a123eb3f4a78f21
function fh = subvec_fh(first,len,w) % This is almost an MV2DF, but it does not return derivatives on numeric % input, w. if nargin==0 test_this(); return; end map = @(w) w(first:first+len-1); function w = transmap_this(y,sz) w=zeros(sz,1); w(first:first+len-1)=y; end transmap = @(y,sz) transmap_this(y,sz); fh = linTrans_adaptive([],map,transmap); if exist('w','var') && ~isempty(w) fh = fh(w); end end function test_this() first = 2; len = 3; f = subvec_fh(first,len); test_MV2DF(f,randn(5,1)); end
github
bsxfan/meta-embeddings-master
linTrans_adaptive.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/applications/fusion2class/mv2df_function_library/templates/linTrans_adaptive.m
1,173
utf_8
66276c8cd337da71a4e14efc67112765
function [y,deriv] = linTrans_adaptive(w,map,transmap) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % % Applies linear transform y = map(w). It needs the transpose of map, % transmap for computing the gradient. map and transmap are function % handles. if nargin==0 test_this(); return; end if isempty(w) y = @(w)linTrans_adaptive(w,map,transmap); return; end if isa(w,'function_handle') outer = linTrans_adaptive([],map,transmap); y = compose_mv(outer,w,[]); return; end y = map(w); y = y(:); deriv = @(g2) deriv_this(g2,map,transmap,numel(w)); end function [g,hess,linear] = deriv_this(g2,map,transmap,wlen) g = transmap(g2,wlen); g = g(:); %linear = false; % use this to test linearity of map, if in doubt linear = true; hess = @(d) hess_this(map,d); end function [h,Jd] = hess_this(map,d) h = []; if nargout>1 Jd = map(d); Jd = Jd(:); end end function test_this() first = 2; len = 3; map = @(w) w(first:first+len-1); function w = transmap_test(y,sz) w=zeros(sz,1); w(first:first+len-1)=y; end transmap = @(y,sz) transmap_test(y,sz); f = linTrans_adaptive([],map,transmap); test_MV2DF(f,randn(5,1)); end
github
bsxfan/meta-embeddings-master
logsumexp_fh.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/vector/logsumexp_fh.m
1,287
utf_8
764511ba624a62ac12e572a26a5e7aa2
function f = logsumexp_fh(m,direction,w)
% This is a factory for a function handle to an MV2DF, which represents
% the vectorization of the logsumexp function. The whole mapping works like
% this, in MATLAB-style pseudocode:
%
%   F: R^(m*n) --> R^n, where y = F(x) is computed thus:
%
%   n = length(x)/m
%   If direction=1, X = reshape(x,m,n), or
%   if direction=2, X = reshape(x,n,m).
%   y = log(sum(exp(X),direction))
%
% Inputs:
%   m: the number of inputs to each individual logsumexp calculation.
%   direction: 1 sums down columns, or 2 sums across rows.
%   w: optional; if supplied, f is immediately evaluated at (or composed
%      with) w.
%
% Outputs:
%   f: a function handle to the MV2DF described above.
%
% see: MV2DF_API_DEFINITION.readme

if nargin==0
    test_this();
    return;
end

f = vectorized_function([],@(X)F0(X,direction),m,direction);

if exist('w','var') && ~isempty(w)
    f = f(w);
end

end

function [y,f1] = F0(X,dr)
M = max(X,[],dr);
y = log(sum(exp(bsxfun(@minus,X,M)),dr))+M;
f1 = @() F1(X,y,dr);
end

function [J,f2,linear] = F1(X,y,dr)
linear = false;
J = exp(bsxfun(@minus,X,y));
f2 = @(dX) F2(dX,J,dr);
end

function H = F2(dX,J,dr)
H = J.*bsxfun(@minus,dX,sum(dX.*J,dr));
end

function test_this()
m = 4;
n = 10;
f = logsumexp_fh(m,1);
X = randn(n,m);
test_MV2DF(f,X(:));
end
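F0 above evaluates logsumexp with the usual max-shift for numerical stability. The same computation in plain MATLAB, on values large enough that the naive form would overflow (sizes here are illustrative):

m = 4; n = 10;
X = 1000*randn(m,n);                     % large values to stress exp()
M = max(X,[],1);                         % column-wise maxima
y = log(sum(exp(bsxfun(@minus,X,M)),1)) + M;   % stable log(sum(exp(X),1))
% the naive log(sum(exp(X),1)) returns Inf for any column whose maximum
% exceeds roughly 709, the overflow threshold of double-precision exp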
github
bsxfan/meta-embeddings-master
one_over_one_plus_w_mv2df.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/vector/one_over_one_plus_w_mv2df.m
717
utf_8
d735233c52193c323d03cdb85d0948f5
function [y,deriv] = one_over_one_plus_w_mv2df(w) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % y = 1 ./ (1 + w) if nargin==0 test_this(); return; end if isempty(w) y = @(w)one_over_one_plus_w_mv2df(w); return; end if isa(w,'function_handle') outer = one_over_one_plus_w_mv2df([]); y = compose_mv(outer,w,[]); return; end w = w(:); y = 1 ./ (1 + w); deriv = @(dy) deriv_this(dy,y); function [g,hess,linear] = deriv_this(dy,y) linear = false; g = -dy.*(y.^2); hess = @(d) hess_this(d,dy,y); function [h,Jv] = hess_this(d,dy,y) h = 2*dy.*d.*(y.^3); if nargout>1 Jv = -d.*(y.^2); end function test_this() f = one_over_one_plus_w_mv2df([]); test_MV2DF(f,randn(3,1));
github
bsxfan/meta-embeddings-master
sigmoid_mv2df.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/vector/sigmoid_mv2df.m
758
utf_8
e0591c88d68032fcf2a300fe7f2e8df0
function [y,deriv] = sigmoid_mv2df(w) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % y = sigmoid(w) = 1./(1+exp(-w)), vectorized as MATLAB usually does. if nargin==0 test_this(); return; end if isempty(w) y = @(w)sigmoid_mv2df(w); return; end if isa(w,'function_handle') outer = sigmoid_mv2df([]); y = compose_mv(outer,w,[]); return; end w = w(:); y = sigmoid(w); y1 = sigmoid(-w); deriv = @(dy) deriv_this(dy,y,y1); function [g,hess,linear] = deriv_this(dy,y,y1) linear = false; g = dy.*y.*y1; hess = @(d) hess_this(d,dy,y,y1); function [h,Jv] = hess_this(d,dy,y,y1) h = dy.*d.*(y.*y1.^2 - y.^2.*y1); if nargout>1 Jv = d.*y.*y1; end function test_this() f = sigmoid_mv2df([]); test_MV2DF(f,randn(3,1));
github
bsxfan/meta-embeddings-master
neglogsigmoid_fh.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/vector/neglogsigmoid_fh.m
1,075
utf_8
dc180d133fc039197aa99a5e4186c6a7
function f = neglogsigmoid_fh(w)
% This is a factory for a function handle to an MV2DF, which represents
% the vectorization of the negative logsigmoid function. The mapping is, in
% MATLAB-style code:
%
%   y = -log(sigmoid(w)) = -log(1./(1+exp(-w))) = log(1+exp(-w))
%
% Inputs:
%   w: optional; if supplied, f is immediately evaluated at (or composed
%      with) w.
%
% Outputs:
%   f: a function handle to the MV2DF described above.
%
% see: MV2DF_API_DEFINITION.readme

if nargin==0
    test_this();
    return;
end

f = vectorized_function([],@(x)F0(x));

if exist('w','var') && ~isempty(w)
    f = f(w);
end

end

function [y,f1] = F0(x)
logp1 = -neglogsigmoid(x);
logp2 = -neglogsigmoid(-x);
y = -logp1;
f1 = @() F1(logp1,logp2);
end

function [J,f2,linear] = F1(logp1,logp2)
linear = false;
J = -exp(logp2);
f2 = @(dx) F2(dx,logp1,logp2);
end

function h = F2(dx,logp1,logp2)
h = dx.*exp(logp1+logp2);
end

function test_this()
n = 10;
f = neglogsigmoid_fh([]);
x = randn(n,1);
test_MV2DF(f,x);
end
github
bsxfan/meta-embeddings-master
logsumsquares_fh.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/vector/logsumsquares_fh.m
1,275
utf_8
c1e543f6680e7257b1f55ff61d967598
function f = logsumsquares_fh(m,direction,w)
% This is a factory for a function handle to an MV2DF, which represents
% the vectorization of the logsumsquares function. The whole mapping works like
% this, in MATLAB-style pseudocode:
%
%   F: R^(m*n) --> R^n, where y = F(x) is computed thus:
%
%   n = length(x)/m
%   If direction=1, X = reshape(x,m,n), or
%   if direction=2, X = reshape(x,n,m).
%   y = log(sum(X.^2,direction))
%
% Inputs:
%   m: the number of inputs to each individual logsumsquares calculation.
%   direction: 1 sums down columns, or 2 sums across rows.
%   w: optional; if supplied, f is immediately evaluated at (or composed
%      with) w.
%
% Outputs:
%   f: a function handle to the MV2DF described above.
%
% see: MV2DF_API_DEFINITION.readme

if nargin==0
    test_this();
    return;
end

f = vectorized_function([],@(X)F0(X,direction),m,direction);

if exist('w','var') && ~isempty(w)
    f = f(w);
end

end

function [y,f1] = F0(X,dr)
ssq = sum(X.^2,dr);
y = log(ssq);
f1 = @() F1(X,ssq,dr);
end

function [J,f2,linear] = F1(X,s,dr)
linear = false;
J = bsxfun(@times,X,2./s);
f2 = @(dX) F2(dX,X,s,dr);
end

function H = F2(dX,X,s,dr)
H = bsxfun(@times,dX,2./s) - bsxfun(@times,X,4*sum(X.*dX,dr)./(s.^2));
end

function test_this()
m = 4;
n = 10;
f = logsumsquares_fh(m,1);
X = randn(n,m);
test_MV2DF(f,X(:));
end
github
bsxfan/meta-embeddings-master
expneg_mv2df.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/vector/expneg_mv2df.m
675
utf_8
f12485f16e7f66d9deb530df461bdcdc
function [y,deriv] = expneg_mv2df(w) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % y = exp(-w), vectorized as MATLAB usually does. if nargin==0 test_this(); return; end if isempty(w) y = @(w)expneg_mv2df(w); return; end if isa(w,'function_handle') outer = expneg_mv2df([]); y = compose_mv(outer,w,[]); return; end w = w(:); y = exp(-w); deriv = @(dy) deriv_this(dy,y); function [g,hess,linear] = deriv_this(dy,y) linear = false; g = -dy.*y; hess = @(d) hess_this(d,dy,y); function [h,Jv] = hess_this(d,dy,y) h = dy.*y.*d; if nargout>1 Jv = -d.*y; end function test_this() f = expneg_mv2df([]); test_MV2DF(f,randn(3,1));
github
bsxfan/meta-embeddings-master
square_mv2df.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/vector/square_mv2df.m
634
utf_8
f7604570a85ea6be67d98ae414127642
function [y,deriv] = square_mv2df(w) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % y = w.^2 if nargin==0 test_this(); return; end if isempty(w) y = @(w)square_mv2df(w); return; end if isa(w,'function_handle') outer = square_mv2df([]); y = compose_mv(outer,w,[]); return; end w = w(:); y = w.^2; deriv = @(dy) deriv_this(dy,w); function [g,hess,linear] = deriv_this(dy,w) linear = false; g = 2*dy.*w; hess = @(d) hess_this(d,dy,w); function [h,Jv] = hess_this(d,dy,w) h = 2*dy.*d; if nargout>1 Jv = 2*w.*d; end function test_this() f = square_mv2df([]); test_MV2DF(f,randn(3,1));
github
bsxfan/meta-embeddings-master
logsigmoid_fh.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/vector/logsigmoid_fh.m
1,068
utf_8
65bf6e2f03af50449d9492d02f7e3c98
function f = logsigmoid_fh(w)
% This is a factory for a function handle to an MV2DF, which represents
% the vectorization of the logsigmoid function. The mapping is, in
% MATLAB-style code:
%
%   y = log(sigmoid(w)) = log(1./(1+exp(-w))) = -log(1+exp(-w))
%
% Inputs:
%   w: optional; if supplied, f is immediately evaluated at (or composed
%      with) w.
%
% Outputs:
%   f: a function handle to the MV2DF described above.
%
% see: MV2DF_API_DEFINITION.readme

if nargin==0
    test_this();
    return;
end

f = vectorized_function([],@(x)F0(x));

if exist('w','var') && ~isempty(w)
    f = f(w);
end

end

function [y,f1] = F0(x)
logp1 = -neglogsigmoid(x);
logp2 = -neglogsigmoid(-x);
y = logp1;
f1 = @() F1(logp1,logp2);
end

function [J,f2,linear] = F1(logp1,logp2)
linear = false;
J = exp(logp2);
f2 = @(dx) F2(dx,logp1,logp2);
end

function h = F2(dx,logp1,logp2)
h = -dx.*exp(logp1+logp2);
end

function test_this()
n = 10;
f = logsigmoid_fh([]);
x = randn(n,1);
test_MV2DF(f,x);
end
github
bsxfan/meta-embeddings-master
exp_mv2df.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/vector/exp_mv2df.m
659
utf_8
410b48565ed23cbda996866e44dfb2fa
function [y,deriv] = exp_mv2df(w) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % y = exp(w), vectorized as MATLAB usually does. if nargin==0 test_this(); return; end if isempty(w) y = @(w)exp_mv2df(w); return; end if isa(w,'function_handle') outer = exp_mv2df([]); y = compose_mv(outer,w,[]); return; end w = w(:); y = exp(w); deriv = @(dy) deriv_this(dy,y); function [g,hess,linear] = deriv_this(dy,y) linear = false; g = dy.*y; hess = @(d) hess_this(d,dy,y); function [h,Jv] = hess_this(d,dy,y) h = dy.*y.*d; if nargout>1 Jv = d.*y; end function test_this() f = exp_mv2df([]); test_MV2DF(f,randn(3,1));
github
bsxfan/meta-embeddings-master
vectorized_function.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/vector/templates/vectorized_function.m
4,600
utf_8
9c5431b821aa6587c3849945d31dd1fd
function [y,deriv] = vectorized_function(w,f,m,direction) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % % This template vectorizes the given function F: R^m -> R as follows: % k = length(w)/m; % If direction=1, X = reshape(w,m,k), y(j) = F(X(:,j)), or % if direction=2, X = reshape(w,k,m), y(i) = F(X(i,:)), % so that length(y) = k. % % Input parameters: % w: As with every MV2DF, w can be [], a vector, or a function handle to % another MV2DF. % f: is a function handle to an m-file that represents the function % F: R^m -> R, as well as its first and second derivatives. % % m: The input dimension to F. % (optional, default m = 1) % % direction: is used as explained above to determine whether columns, % or rows of X are processed by F. % (optional, default direction = 2) % % Function f works as follows: % (Note that f, f1 and f2 have to know the required direction, it is % not passed to them.) % [y,f1] = f(X), where X and y are as defined above. % % Function f1 works as follows: % [J,f2] = f1(), where size(J) = size(X). % Column/row i of J is the gradient of y(i) w.r.t. % column/row i of W. % f2 is a function handle to 2nd order derivatives. % If 2nd order derivatives are 0, then f2 should be []. % % Function f2 works as follows: % H = f2(dX), where size(dX) = size(X). % If direction=1, H(:,j) = H_i * dX(:,j), or % if direction=2, H(i,:) = dX(i,:)* H_i, where % H_i is Hessian of y(i), w.r.t. colum/row i of X. % % if nargin==0 test_this(); return; end if ~exist('m','var') m = 1; direction = 2; end if isempty(w) y = @(w)vectorized_function(w,f,m,direction); return; end if isa(w,'function_handle') outer = vectorized_function([],f,m,direction); y = compose_mv(outer,w,[]); return; end if direction==1 W = reshape(w,m,[]); elseif direction==2 W = reshape(w,[],m); else error('illegal direction %i',direction); end if nargout==1 y = f(W); else [y,f1] = f(W); deriv = @(dy) deriv_this(dy,f1,direction); end y = y(:); end function [g,hess,linear] = deriv_this(dy,f1,direction) if direction==1 dy = dy(:).'; else dy = dy(:); end if nargout==1 J = f1(); g = reshape(bsxfun(@times,J,dy),[],1); else [J,f2] = f1(); linear = isempty(f2); g = reshape(bsxfun(@times,J,dy),[],1); hess = @(d) hess_this(d,f2,J,dy,direction); end end function [h,Jv] = hess_this(dx,f2,J,dy,direction) dX = reshape(dx,size(J)); if isempty(f2) h = []; else h = reshape(bsxfun(@times,dy,f2(dX)),[],1); end if nargout>1 Jv = sum(dX.*J,direction); Jv = Jv(:); end end %%%%%%%%%%%%%%%%%%%% Example function: z = x^2 + y^3 %%%%%%%%%%%%%%%%%%%% % example function: z = x^2 + y^3 function [z,f1] = x2y3(X,direction) if direction==1 x = X(1,:); y = X(2,:); else x = X(:,1); y = X(:,2); end z = x.^2+y.^3; f1 = @() f1_x2y3(x,y,direction); end % example function 1st derivative: z = x^2 + y^2 function [J,f2] = f1_x2y3(x,y,direction) if direction==1 J = [2*x;3*y.^2]; else J = [2*x,3*y.^2]; end f2 = @(dxy) f2_x2y3(dxy,y,direction); end % example function 2nd derivative: z = x^2 + y^2 function H = f2_x2y3(dxy,y,direction) if direction==1 H = dxy.*[2*ones(size(y));6*y]; else H = dxy.*[2*ones(size(y)),6*y]; end end %%%%%%%%%%%%%%%%%%%% Example function: z = x*y^2 %%%%%%%%%%%%%%%%%%%% % example function: z = x*y^2 function [z,f1] = xy2(X,direction) if direction==1 x = X(1,:); y = X(2,:); else x = X(:,1); y = X(:,2); end y2 = y.^2; z = x.*+y2; f1 = @() f1_xy2(x,y,y2,direction); end % example function 1st derivative: z = x*y^2 function [J,f2] = f1_xy2(x,y,y2,direction) if direction==1 J = [y2;2*x.*y]; else J = [y2,2*x.*y]; end f2 = @(dxy) 
f2_xy2(dxy,x,y,direction); end % example function 2nd derivative: z = x*y^2 function H = f2_xy2(dxy,x,y,direction) if direction==1 dx = dxy(1,:); dy = dxy(2,:); H = [2*y.*dy;2*y.*dx+2*x.*dy]; else dx = dxy(:,1); dy = dxy(:,2); H = [2*y.*dy,2*y.*dx+2*x.*dy]; end end function test_this() k = 5; m = 2; dr = 1; fprintf('Testing x^2+y^2 in direction %i:\n\n',dr); f = vectorized_function([],@(X)x2y3(X,dr),2,dr); test_MV2DF(f,randn(k*m,1)); dr = 2; fprintf('\n\n\n\nTesting x*y^2 in direction %i:\n\n',dr); f = vectorized_function([],@(X)xy2(X,dr),2,dr); test_MV2DF(f,randn(k*m,1)); end
github
bsxfan/meta-embeddings-master
logdet_chol.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/scalar/logdet_chol.m
1,185
utf_8
706e5c1e5b5b660da50408bd221522a0
function [y,deriv] = logdet_chol(w) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % y = log(det(W)), where W is positive definite and W = reshape(w,...) if nargin==0 test_this(); return; end if isempty(w) y = @(w)logdet_chol(w); return; end if isa(w,'function_handle') outer = logdet_chol([]); y = compose_mv(outer,w,[]); return; end dim = sqrt(length(w)); W = reshape(w,dim,dim); if nargout>1 %[inv_map,bi_inv_map,logdet,iW] = invchol2(W); [inv_map,bi_inv_map,logdet,iW] = invchol_or_lu(W); y = logdet; deriv = @(dy) deriv_this(dy,bi_inv_map,iW); else %[inv_map,bi_inv_map,logdet] = invchol2(W); [inv_map,bi_inv_map,logdet] = invchol_or_lu(W); y = logdet; end function [g,hess,linear] = deriv_this(dy,bi_inv_map,iW) G = iW.'; grad = G(:); g = dy*grad; linear = false; hess = @(d) hess_this(grad,bi_inv_map,dy,d); function [h,Jd] = hess_this(grad,bi_inv_map,dy,d) dim = sqrt(length(d)); D = reshape(d,dim,dim); H = - dy*bi_inv_map(D).'; h = H(:); if nargout>1 Jd = grad.'*d(:); end function test_this() m = 3; n = 10; w = []; A = UtU(w,n,m); f = logdet_chol(A); w = randn(m*n,1); test_MV2DF(f,w,true);
github
bsxfan/meta-embeddings-master
sumsquares_penalty.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/scalar/sumsquares_penalty.m
916
utf_8
8f40fb9f94c7424808c89e165ec9960c
function [y,deriv] = sumsquares_penalty(w,lambda) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % See code for details. if nargin==0 test_this(); return; end if isempty(w) y = @(w)sumsquares_penalty(w,lambda); return; end if isa(w,'function_handle') outer = sumsquares_penalty([],lambda); y = compose_mv(outer,w,[]); return; end w = w(:); if isscalar(lambda) lambda = lambda*ones(size(w)); else lambda = lambda(:); end y = 0.5*w.'*(lambda.*w); deriv = @(dy) deriv_this(dy,lambda,lambda.*w); function [g,hess,linear] = deriv_this(dy,lambda,lambda_w) linear = false; g = dy*lambda_w; hess = @(d) hess_this(d,dy,lambda,lambda_w); function [h,Jv] = hess_this(d,dy,lambda,lambda_w) h = dy*lambda.*d; if nargout>1 Jv = d(:).'*lambda_w; end function test_this() lambda = randn(10,1); f = sumsquares_penalty([],lambda); test_MV2DF(f,randn(size(lambda)));
github
bsxfan/meta-embeddings-master
wmlr_obj.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/scalar/wmlr_obj.m
2,299
utf_8
f450d5fdd89f4854a123b7d7947d32c3
function [y,deriv] = wmlr_obj(w,X,T,weights,logprior); % This is a SCAL2DF. See SCAL2DF_API_DEFINITION.readme. % Weighted multiclass linear logistic regression objective function. % w is vectorized D-by-K parameter matrix W (to be optimized) % X is D-by-N data matrix, for N trials % T is K-by-N, 0/1 class label matrix, with exactly one 1 per column. % weights is N-vector of objective function weights, one per trial. % logprior is logarithm of prior, % % The K-by-N log-likelihood matrix is % bsxfun(@plus,W'*X,logprior(:)); if nargin==0 test_this(); return; end if isempty(w) y = @(w)wmlr_obj(w,X,T,weights,logprior); return; end if isa(w,'function_handle') outer = wmlr_obj([],X,T,weights,logprior); y = compose_mv(outer,w,[]); return; end w = w(:); [K,N] = size(T); [dim,N2] = size(X); if N ~=N2 error('sizes of X and T incompatible'); end W = reshape(w,dim,K); % dim*K % make W double so that it works if X is sparse scores = double(W.')*X; % K*N scores = bsxfun(@plus,scores,logprior(:)); lsm = logsoftmax(scores); % K*N y = -sum(lsm.*T)*weights(:); deriv = @(dy) deriv_this(dy,lsm,X,T,weights); function [g,hess,linear] = deriv_this(dy,lsm,X,T,weights) sigma = exp(lsm); %posterior % K*N g0 = gradient(sigma,X,T,weights); g = g0*dy; hess = @(d) hess_this(d,dy,g0,sigma,X,weights); linear = false; function g = gradient(sigma,X,T,weights) E = sigma-T; %K*N G = X*double(bsxfun(@times,weights(:),E.')); %dim*K g = G(:); function [h,Jv] = hess_this(d,dy,g,sigma,X,weights) K = size(sigma,1); dim = length(d)/K; D = reshape(d,dim,K); P = double(D.')*X; % K*N sigmaP = sigma.*P; ssP = sum(sigmaP,1); % 1*N sssP = bsxfun(@times,sigma,ssP); %K*N h = X*double(bsxfun(@times,weights(:),(sigmaP-sssP).')); % dim*K h = dy*h(:); if nargout>1 Jv = d(:).'*g; end if nargin==0 test_this(); return; end function test_this() K = 3; N = 100; dim = 2; randn('state',0); means = randn(dim,K)*10; %signal X0 = randn(dim,K*N); % noise classf = zeros(1,K*N); ii = 1:N; T = zeros(K,N*K); for k=1:K X0(:,ii) = bsxfun(@plus,means(:,k),X0(:,ii)); classf(ii) = k; T(k,ii) = 1; ii = ii+N; end N = K*N; X = [X0;ones(1,N)]; weights = rand(1,N); obj = wmlr_obj([],X,T,weights,2); test_MV2DF(obj,randn((dim+1)*K,1));
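A plain-MATLAB sketch of the documented objective, with logsoftmax written out via a max-shifted logsumexp and synthetic data standing in for real trials (all values are illustrative):

D = 2; K = 3; N = 50;
X = randn(D,N); W = randn(D,K); logprior = zeros(K,1);
labels = ceil(K*rand(1,N));              % class label per trial
T = full(sparse(labels,1:N,1,K,N));      % K-by-N one-hot label matrix
weights = ones(1,N)/N;
S = bsxfun(@plus,W.'*X,logprior(:));     % K-by-N log-likelihoods
M = max(S,[],1);
lsm = bsxfun(@minus,S,log(sum(exp(bsxfun(@minus,S,M)),1))+M);  % log softmax
y = -sum(lsm.*T,1)*weights(:);           % weighted multiclass cross-entropy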
github
bsxfan/meta-embeddings-master
boost_obj.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/scalar/boost_obj.m
1,556
utf_8
eaa722fa0cd7b1b492401c4e6adf807b
function [y,deriv] = boost_obj(w,T,weights,logit_prior) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % % Weighted binary classifier cross-entropy objective, based on the 'boosting' % proper scoring rule. This rule places more emphasis on extreme scores % than the logarithmic scoring rule. % % Differentiable inputs: % w: is vector of N detection scores (in log-likelihood-ratio format) % % Fixed parameters: % T: is vector of N labels: 1 for target and -1 for non-target. % weights: is N-vector of objective function weights, one per trial. % logit_prior: is logit(prior), this controls the region of interest if nargin==0 test_this(); return; end if isempty(w) y = @(w)boost_obj(w,T,weights,logit_prior); return; end if isa(w,'function_handle') outer = boost_obj([],T,weights,logit_prior); y = compose_mv(outer,w,[]); return; end w = w(:); scores = w.'; arg = bsxfun(@plus,scores,logit_prior).*T; wobj = exp(-arg/2).*weights; % 1*N y = sum(wobj); if nargout>1 deriv = @(dy) deriv_this(dy,wobj(:),T); end function [g,hess,linear] = deriv_this(dy,wobj,T) g0 = -0.5*wobj.*T(:); g = dy*g0; linear = false; hess = @(d) hessianprod(d,dy,g0,wobj); function [h,Jv] = hessianprod(d,dy,g0,wobj) h = dy*(0.25*wobj(:).*d(:)); if nargout>1 Jv = d.'*g0; end function test_this() N = 30; T = [ones(1,N/3),-ones(1,N/3),zeros(1,N/3)]; scores = randn(1,N); weights = [rand(1,2*N/3),zeros(1,N/3)]; f = @(w) boost_obj(w,T,weights,-2.23); test_MV2DF(f,scores(:));
github
bsxfan/meta-embeddings-master
neg_gaussll_taylor.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/scalar/neg_gaussll_taylor.m
1,332
utf_8
4efafbe09f47ca5947e223ca80f063c6
function [y,deriv] = neg_gaussll_taylor(w,x) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % This function represents the negative of the part of log N(x|0,W) that is % dependent on W = reshape(w,...), where w is variable and x is given. % % y = 0.5*x'*inv(W)*x + 0.5*log(det(W)), where W is positive definite and W = reshape(w,...) if nargin==0 test_this(); return; end if isempty(w) y = @(w)neg_gaussll_taylor(w,x); return; end if isa(w,'function_handle') outer = neg_gaussll_taylor([],x); y = compose_mv(outer,w,[]); return; end dim = length(x); W = reshape(w,dim,dim); [inv_map,logdet] = invchol_taylor(W); z = inv_map(x); y = 0.5*x'*z + 0.5*logdet; deriv = @(dy) deriv_this(dy,z,inv_map); end function [g,hess,linear] = deriv_this(dy,z,inv_map) G1 = z*z.'; G2 = inv_map(eye(length(z))); grad = 0.5*(G2(:)-G1(:)); g = dy*grad; linear = false; hess = @(d) hess_this(grad,z,inv_map,dy,d); end function [h,Jd] = hess_this(grad,z,inv_map,dy,d) dim = sqrt(length(d)); D = reshape(d,dim,dim); H1 = inv_map(D*z)*z' + z*inv_map(D'*z)'; H2 = inv_map(inv_map(D)'); h = 0.5*dy*(H1(:)-H2(:)); if nargout>1 Jd = grad.'*d(:); end end function test_this() m = 3; n = 10; w = []; A = UtU(w,n,m); %A is m-by-m x = randn(m,1); f = neg_gaussll_taylor(A,x); w = randn(m*n,1); test_MV2DF(f,w,true); end
github
bsxfan/meta-embeddings-master
brier_obj.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/scalar/brier_obj.m
1,722
utf_8
f68fae1776aa1a970b6f329e4c0d1027
function [y,deriv] = brier_obj(w,T,weights,logit_prior) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % % Weighted binary classifier cross-entropy objective, based on the 'Brier' % quadratic proper scoring rule. This rule places less emphasis on extreme scores % than the logarithmic scoring rule. % % Differentiable inputs: % w: is vector of N detection scores (in log-likelihood-ratio format) % % Fixed parameters: % T: is vector of N labels: 1 for target and -1 for non-target. % weights: is N-vector of objective function weights, one per trial. % logit_prior: is logit(prior), this controls the region of interest if nargin==0 test_this(); return; end if isempty(w) y = @(w)brier_obj(w,T,weights,logit_prior); return; end if isa(w,'function_handle') outer = brier_obj([],T,weights,logit_prior); y = compose_mv(outer,w,[]); return; end w = w(:); scores = w.'; arg = bsxfun(@plus,scores,logit_prior).*T; logp2 = -neglogsigmoid(-arg); wobj = 0.5*exp(2*logp2).*weights; % 1*N y = sum(wobj); if nargout>1 logp1 = -neglogsigmoid(arg); deriv = @(dy) deriv_this(dy,weights(:),T(:),logp1(:),logp2(:)); end function [g,hess,linear] = deriv_this(dy,weights,T,logp1,logp2) g0 = -exp(logp1+2*logp2).*weights.*T; g = dy*g0; linear = false; hess = @(d) hessianprod(d,dy,g0,weights,logp1,logp2); function [h,Jv] = hessianprod(d,dy,g0,weights,logp1,logp2) ddx = -exp(logp1+2*logp2); h = dy*(ddx.*(1-3*exp(logp1))).*weights.*d(:); if nargout>1 Jv = d.'*g0; end function test_this() N = 30; T = [ones(1,N/3),-ones(1,N/3),zeros(1,N/3)]; scores = randn(1,N); weights = [rand(1,2*N/3),zeros(1,N/3)]; f = @(w) brier_obj(w,T,weights,-2.23); test_MV2DF(f,scores(:));
github
bsxfan/meta-embeddings-master
gauss_ll.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/scalar/gauss_ll.m
1,461
utf_8
76707fbe20f1dae43305e2542e9644ce
function [y,deriv] = gauss_ll(w,x) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % This function represents the part of log N(x|0,W) that is dependent on % W = reshape(w,...), where w is variable and x is given. % % y = -0.5*x'*inv(W)*x - 0.5*log(det(W)), where W is positive definite and W = reshape(w,...) if nargin==0 test_this(); return; end if isempty(w) y = @(w)gauss_ll(w,x); return; end if isa(w,'function_handle') outer = gauss_ll([],x); y = compose_mv(outer,w,[]); return; end dim = length(x); W = reshape(w,dim,dim); if nargout>1 [inv_map,bi_inv_map,logdet,iW] = invchol_or_lu(W); z = inv_map(x); y = -0.5*x'*z - 0.5*logdet; deriv = @(dy) deriv_this(dy,z,inv_map,bi_inv_map,iW); else [inv_map,bi_inv_map,logdet] = invchol_or_lu(W); z = inv_map(x); y = -0.5*x'*z - 0.5*logdet; end function [g,hess,linear] = deriv_this(dy,z,inv_map,bi_inv_map,iW) G1 = z*z.'; G2 = iW.'; grad = 0.5*(G1(:)-G2(:)); g = dy*grad; linear = false; hess = @(d) hess_this(grad,z,inv_map,bi_inv_map,dy,d); function [h,Jd] = hess_this(grad,z,inv_map,bi_inv_map,dy,d) dim = sqrt(length(d)); D = reshape(d,dim,dim); H1 = inv_map(D*z)*z.' + z*inv_map(D.'*z).'; H2 = bi_inv_map(D).'; h = -0.5*dy*(H1(:)-H2(:)); if nargout>1 Jd = grad.'*d(:); end function test_this() m = 3; n = 10; w = []; A = UtU(w,n,m); %A is m-by-m x = randn(m,1); f = gauss_ll(A,x); w = randn(m*n,1); test_MV2DF(f,w,true);
github
bsxfan/meta-embeddings-master
cllr_obj.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/scalar/cllr_obj.m
1,611
utf_8
374952d66aa4641a000a48cc12baebad
function [y,deriv] = cllr_obj(w,T,weights,logit_prior) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % % Weighted binary classifier cross-entropy objective, based on logarithmic % cost function. % % Differentiable inputs: % w: is vector of N detection scores (in log-likelihood-ratio format) % % Fixed parameters: % T: is vector of N labels: 1 for target and -1 for non-target. % weights: is N-vector of objective function weights, one per trial. % logit_prior: is logit(prior), this controls the region of interest if nargin==0 test_this(); return; end if isempty(w) y = @(w)cllr_obj(w,T,weights,logit_prior); return; end if isa(w,'function_handle') outer = cllr_obj([],T,weights,logit_prior); y = compose_mv(outer,w,[]); return; end w = w(:); scores = w.'; arg = bsxfun(@plus,scores,logit_prior).*T; neglogp1 = neglogsigmoid(arg); % 1*N p1 = p(tar) y = neglogp1*weights(:); if nargout>1 neglogp2 = neglogsigmoid(-arg); % 1*N p2 = 1-p1 = p(non) deriv = @(dy) deriv_this(dy,-neglogp1(:),-neglogp2(:),T(:),weights(:)); end function [g,hess,linear] = deriv_this(dy,logp1,logp2,T,weights) g0 = -exp(logp2).*weights.*T; g = dy*g0; linear = false; hess = @(d) hessianprod(d,dy,g0,logp1,logp2,weights); function [h,Jv] = hessianprod(d,dy,g0,logp1,logp2,weights) h = dy*(exp(logp1+logp2).*weights(:).*d(:)); if nargout>1 Jv = d.'*g0; end function test_this() N = 30; T = [ones(1,N/3),-ones(1,N/3),zeros(1,N/3)]; W = randn(1,N); weights = [rand(1,2*N/3),zeros(1,N/3)]; f = @(w) cllr_obj(w,T,weights,-2.23); test_MV2DF(f,W(:));
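Added illustration of using cllr_obj on its own to score a set of log-likelihood-ratios. The per-trial weights below follow the prior/(count*log 2) convention used by train_system later in this listing, so the returned value can be read as a normalized binary cross-entropy (Cllr); that normalization choice is an assumption of this example, not something cllr_obj enforces.

tar = randn(1,50) + 2;                      % synthetic target llr scores
non = randn(1,50) - 2;                      % synthetic non-target llr scores
llrs = [tar,non];
T = [ones(1,50),-ones(1,50)];               % 1 = target, -1 = non-target
prior = 0.5;
weights = zeros(size(T));
weights(T>0) = prior/(sum(T>0)*log(2));
weights(T<0) = (1-prior)/(sum(T<0)*log(2));
f = cllr_obj([],T,weights,log(prior/(1-prior)));
cllr = f(llrs(:));                          % weighted cross-entropy, in bits
fprintf('Cllr = %g\n', cllr);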
github
bsxfan/meta-embeddings-master
mce_obj.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/scalar/mce_obj.m
1,711
utf_8
93cfa59b8a57d279ebbdb02376bd696c
function [y,deriv] = mce_obj(w,T,weights,logprior) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % % Weighted multiclass cross-entropy objective. % w is vectorized K-by-N score matrix W (to be optimized) % T is K-by-N, 0/1 class label matrix, with exactly one 1 per column. % weights is N-vector of objective function weights, one per trial. % logprior is logarithm of prior, if nargin==0 test_this(); return; end if isempty(w) y = @(w)mce_obj(w,T,weights,logprior); return; end if isa(w,'function_handle') outer = mce_obj([],T,weights,logprior); y = compose_mv(outer,w,[]); return; end w = w(:); [K,N] = size(T); scores = reshape(w,K,N); scores = bsxfun(@plus,scores,logprior(:)); lsm = logsoftmax(scores); % K*N y = -sum(lsm.*T)*weights(:); deriv = @(dy) deriv_this(dy,lsm,T,weights); function [g,hess,linear] = deriv_this(dy,lsm,T,weights) sigma = exp(lsm); %posterior % K*N g0 = gradient(sigma,T,weights); g = dy*g0; linear = false; hess = @(d) hessianprod(d,dy,g0,sigma,weights); function g = gradient(sigma,T,weights) E = sigma-T; %K*N G = bsxfun(@times,E,weights(:).'); %dim*K g = G(:); function [h,Jv] = hessianprod(d,dy,g0,sigma,weights) K = size(sigma,1); dim = length(d)/K; P = reshape(d,K,dim); sigmaP = sigma.*P; ssP = sum(sigmaP,1); % 1*N sssP = bsxfun(@times,sigma,ssP); %K*N h = bsxfun(@times,(sigmaP-sssP),weights(:).'); % dim*K h = dy*h(:); if nargout>1 Jv = d.'*g0; end function test_this() K = 3; N = 30; %T = [repmat([1;0;0],1,10),repmat([0;1;0],1,10),repmat([0;0;1],1,10)]; T = rand(K,N); T = bsxfun(@times,T,1./sum(T,1)); W = randn(K,N); weights = rand(1,N); f = @(w) mce_obj(w,T,weights,-1); test_MV2DF(f,W(:));
github
bsxfan/meta-embeddings-master
sum_ai_f_of_w_i.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/scalar/templates/sum_ai_f_of_w_i.m
1,367
utf_8
af9137c86c4b6c7456dbd1688c9ba0bb
function [y,deriv] = sum_ai_f_of_w_i(w,a,f,b) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % % Does y = sum_i a_i f(w_i) + b, where f is non-linear. % %Notes: % % f is a function handle, with behaviour as demonstrated in the test code % of this function. % % b is optional, defaults to 0 if omitted if nargin==0 test_this(); return; end if ~exist('b','var') b = 0; end if isempty(w) y = @(w)sum_ai_f_of_w_i(w,a,f,b); return; end if isa(w,'function_handle') outer = sum_ai_f_of_w_i([],a,f,b); y = compose_mv(outer,w,[]); return; end ntot = length(a); nz = find(a~=0); a = a(nz); if nargin==1 y = f(w(nz)); else [y,dfdw,f2] = f(w(nz)); deriv = @(Dy) deriv_this(Dy,dfdw.*a,f2,a,nz,ntot); end y = y(:); y = a.'*y + b; function [g,hess,linear] = deriv_this(Dy,g0,f2,a,nz,ntot) g = zeros(ntot,1); g(nz) = Dy*g0(:); hess = @(d) hess_this(d,g0,f2,Dy,a,nz,ntot); linear = false; function [h,Jd] = hess_this(d,g0,f2,Dy,a,nz,ntot) d = d(nz); hnz = f2(); hnz = hnz(:).*d(:); h = zeros(ntot,1); h(nz) = Dy*(hnz.*a); if nargout>1 Jd = g0.'*d(:); end function [y,ddx,f2] = test_f(x) y = log(x); if nargout>1 ddx = 1./x; f2 = @() -1./(x.^2); end function test_this() n = 10; a = randn(n,1); a = bsxfun(@max,a,0); b = 5; f = sum_ai_f_of_w_i([],a,@(x)test_f(x),b); w = 1+rand(n,1); test_MV2DF(f,w);
github
bsxfan/meta-embeddings-master
KtimesW.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/KtimesW.m
726
utf_8
d53d40345ce7668d43a1efa9eb621335
function [y,deriv] = KtimesW(w,K) % This is an MV2DF . See MV2DF_API_DEFINITION.readme. % % % if nargin==0 test_this(); return; end if isempty(w) map = @(w) map_this(w,K); transmap = @(y) transmap_this(y,K); y = linTrans(w,map,transmap); return; end if isa(w,'function_handle') f = KtimesW([],K); y = compose_mv(f,w,[]); return; end f = KtimesW([],K); if nargout==1 y = f(w); else [y,deriv] = f(w); end function y = map_this(w,K) [m,n] = size(K); y = K*reshape(w,n,[]); y = y(:); function w = transmap_this(y,K) [m,n] = size(K); w = K.'*reshape(y,m,[]); function test_this() m = 3; n = 4; K = randn(m,n); r = 2; W = randn(n,r); f = KtimesW([],K); test_MV2DF(f,W(:));
github
bsxfan/meta-embeddings-master
scaleRows.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/scaleRows.m
798
utf_8
c848225d200f35d733b8bb76c2495127
function [y,deriv] = scaleRows(w,scales) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % % w --> bsxfun(@times,reshape(w,m,[]),scales(:)) % % where m = length(scales); % % Note: this is a symmetric linear transform. if nargin==0 test_this(); return; end if isempty(w) map = @(w)map_this(w,scales); y = linTrans(w,map,map); return; end if isa(w,'function_handle') f = scaleRows([],scales); y = compose_mv(f,w,[]); return; end f = scaleRows([],scales); if nargout==1 y = f(w); else [y,deriv] = f(w); end function w = map_this(w,scales) n = length(scales); w = reshape(w,[],n); w = bsxfun(@times,w,scales(:)'); function test_this() K = 5; N = 10; M = randn(K,N); scales = randn(1,N); f = scaleRows([],scales); test_MV2DF(f,M(:));
github
bsxfan/meta-embeddings-master
sumcolumns_fh.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/sumcolumns_fh.m
602
utf_8
7a2cd01c3b7076cda20fa6a96cae0069
function fh = sumcolumns_fh(m,w) % This is almost an MV2DF, but it does not return derivatives on numeric % input, w. % % w -> W = reshape(w,m,[]) -> sum(W,1)' if nargin==0 test_this(); return; end map = @(w) map_this(w,m); transmap = @(y) transmap_this(y,m); fh = linTrans([],map,transmap); if exist('w','var') && ~isempty(w) fh = fh(w); end end function w = transmap_this(y,m) w = repmat(y(:).',m,1); end function s = map_this(w,m) W = reshape(w,m,[]); s = sum(W,1); end function test_this() m = 3; n = 4; f = sumcolumns_fh(m); W = randn(m,n); test_MV2DF(f,W(:)); end
github
bsxfan/meta-embeddings-master
columnJofN_fh.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/columnJofN_fh.m
634
utf_8
23448c0cc436ac53b95d5e4ec48c7b35
function fh = columnJofN_fh(j,n,w) % This is almost an MV2DF, but it does not return derivatives on numeric % input, w. % % w -> W = reshape(w,[],n) -> W(:,j) if nargin==0 test_this(); return; end map = @(w) map_this(w,j,n); transmap = @(y) transmap_this(y,j,n); fh = linTrans([],map,transmap); if exist('w','var') && ~isempty(w) fh = fh(w); end end function w = transmap_this(y,j,n) W = zeros(length(y),n); W(:,j) = y; w = W(:); end function col = map_this(w,j,n) W = reshape(w,[],n); col = W(:,j); end function test_this() m = 3; n = 4; f = columnJofN_fh(2,4); W = randn(m,n); test_MV2DF(f,W(:)); end
github
bsxfan/meta-embeddings-master
scaleColumns.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/scaleColumns.m
811
utf_8
cacb0b80cb3f3871595674e741382d26
function [y,deriv] = scaleColumns(w,scales) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % % w --> bsxfun(@times,reshape(w,[],n),scales(:)') % % where n = length(scales); % % Note: this is a symmetric linear transform. if nargin==0 test_this(); return; end if isempty(w) map = @(w)map_this(w,scales); y = linTrans(w,map,map); return; end if isa(w,'function_handle') f = scaleColumns([],scales); y = compose_mv(f,w,[]); return; end f = scaleColumns([],scales); if nargout==1 y = f(w); else [y,deriv] = f(w); end function w = map_this(w,scales) n = length(scales); w = reshape(w,[],n); w = bsxfun(@times,w,scales(:)'); function test_this() K = 5; N = 10; M = randn(K,N); scales = randn(1,N); f = scaleColumns([],scales); test_MV2DF(f,M(:));
github
bsxfan/meta-embeddings-master
subvec.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/subvec.m
733
utf_8
ed189df10ecad63eca1130710c559631
function [y,deriv] = subvec(w,size,first,length) % This is an MV2DF . See MV2DF_API_DEFINITION.readme. % % w --> w(first:first+length-1) % if nargin==0 test_this(); return; end last = first+length-1; if isempty(w) map = @(w) w(first:last); transmap = @(w) transmap_this(w,size,first,last); y = linTrans(w,map,transmap); return; end if isa(w,'function_handle') f = subvec([],size,first,length); y = compose_mv(f,w,[]); return; end f = subvec([],size,first,length); if nargout==1 y = f(w); else [y,deriv] = f(w); end function g = transmap_this(w,size,first,last) g = zeros(size,1); g(first:last) = w; function test_this() f = subvec([],10,2,4); test_MV2DF(f,randn(10,1));
github
bsxfan/meta-embeddings-master
identity_trans.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/identity_trans.m
495
utf_8
aec19df7ff1e1fa5079b22973d9122fc
function [y,deriv] = identity_trans(w) % This is an MV2DF . See MV2DF_API_DEFINITION.readme. % % w --> w % if nargin==0 test_this(); return; end if isempty(w) map = @(w) w; y = linTrans(w,map,map); return; end if isa(w,'function_handle') f = identity_trans([]); y = compose_mv(f,w,[]); return; end f = identity_trans([]); if nargout==1 y = f(w); else [y,deriv] = f(w); end function test_this() f = identity_trans([]); test_MV2DF(f,randn(5,1));
github
bsxfan/meta-embeddings-master
WtimesK.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/WtimesK.m
726
utf_8
20d6a4715d3fb8e2c51fc17f1a45e865
function [y,deriv] = WtimesK(w,K) % This is an MV2DF . See MV2DF_API_DEFINITION.readme. % % % if nargin==0 test_this(); return; end if isempty(w) map = @(w) map_this(w,K); transmap = @(y) transmap_this(y,K); y = linTrans(w,map,transmap); return; end if isa(w,'function_handle') f = WtimesK([],K); y = compose_mv(f,w,[]); return; end f = WtimesK([],K); if nargout==1 y = f(w); else [y,deriv] = f(w); end function y = map_this(w,K) [m,n] = size(K); y = reshape(w,[],m)*K; y = y(:); function w = transmap_this(y,K) [m,n] = size(K); w = reshape(y,[],n)*K.'; function test_this() m = 3; n = 4; K = randn(m,n); r = 2; W = randn(r,m); f = WtimesK([],K); test_MV2DF(f,W(:));
github
bsxfan/meta-embeddings-master
transpose_mv2df.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/transpose_mv2df.m
700
utf_8
58016f72134e4ccf6256f2ea1f952a43
function [y,deriv] = transpose_mv2df(w,M,N) % This is an MV2DF . See MV2DF_API_DEFINITION.readme. % % vec(A) --> vec(A'), % % where A is M by N % % Note: this is an orthogonal linear transform. if nargin==0 test_this(); return; end if isempty(w) map = @(w) reshape(reshape(w,M,N).',[],1); transmap = @(w) reshape(reshape(w,N,M).',[],1); y = linTrans(w,map,transmap); return; end if isa(w,'function_handle') f = transpose_mv2df([],M,N); y = compose_mv(f,w,[]); return; end f = transpose_mv2df([],M,N); if nargout==1 y = f(w); else [y,deriv] = f(w); end function test_this() M = 4; N = 5; f = transpose_mv2df([],M,N); test_MV2DF(f,randn(M*N,1));
github
bsxfan/meta-embeddings-master
fusion_mv2df.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/fusion_mv2df.m
1,182
utf_8
df0b186cde5dcc42aea6490f13d6d479
function [y,deriv] = fusion_mv2df(w,scores) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % % The function is a 'score fusion' computed thus: % y.' = w(1:end-1).'*scores + w(end) % % Here w is the vector of fusion weights, one weight per system and % an offset. % % Parameters: % scores: is an M-by-T matrix of scores from M systems, for each of T % trials. % % Note (even though the fusion is affine from input scores to output % scores) this MV2DF is a linear transform from w to y. if nargin==0 test_this(); return; end if isempty(w) map = @(w) map_this(w,scores); transmap = @(w) transmap_this(w,scores); y = linTrans(w,map,transmap); return; end if isa(w,'function_handle') f = fusion_mv2df([],scores); y = compose_mv(f,w,[]); return; end f = fusion_mv2df([],scores); if nargout==1 y = f(w); else [y,deriv] = f(w); end function y = map_this(w,scores) y = w(1:end-1).'*scores + w(end); y = y(:); function y = transmap_this(x,scores) y = [scores*x;sum(x)]; function test_this() K = 5; N = 10; w = randn(N+1,1); scores = randn(N,K); f = fusion_mv2df([],scores); test_MV2DF(f,w);
github
bsxfan/meta-embeddings-master
addSigmaI.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/addSigmaI.m
771
utf_8
78b1a5b40a699c613a11ce64085abe6e
function [y,deriv] = addSigmaI(w) % This is an MV2DF . See MV2DF_API_DEFINITION.readme. % % % if nargin==0 test_this(); return; end if isempty(w) map = @(w) map_this(w); transmap = @(w) transmap_this(w); y = linTrans(w,map,transmap); return; end if isa(w,'function_handle') f = addSigmaI([]); y = compose_mv(f,w,[]); return; end f = addSigmaI([]); if nargout==1 y = f(w); else [y,deriv] = f(w); end function y = map_this(w) w = w(:); y = w(1:end-1); sigma = w(end); dim = sqrt(length(y)); ii = 1:dim+1:dim*dim; y(ii) = w(ii)+sigma; function w = transmap_this(y) dim = sqrt(length(y)); ii = 1:dim+1:dim*dim; w = [y;sum(y(ii))]; function test_this() dim = 5; f = addSigmaI([]); test_MV2DF(f,randn(dim*dim+1,1));
github
bsxfan/meta-embeddings-master
addOffset.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/addOffset.m
1,057
utf_8
38390e8a3f92c5a6b760571e3ba340e3
function [y,deriv] = addOffset(w,K,N) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % % w = [vec(A);b] --> vec(bsxfun(@plus,A,b)) % % This function retrieves a K by N matrix as well as a K-vector from w, % adds the K-vector to every column of the matrix % and outputs the vectorized result. % Note this is a linear transform. if nargin==0 test_this(); return; end if isempty(w) map = @(w) map_this(w,K,N); transmap = @(w) transmap_this(w,K,N); y = linTrans(w,map,transmap); return; end if isa(w,'function_handle') f = addOffset([],K,N); y = compose_mv(f,w,[]); return; end f = addOffset([],K,N); if nargout==1 y = f(w); else [y,deriv] = f(w); end function y = map_this(w,K,N) y = w(1:K*N); y = reshape(y,K,N); offs = w((K*N+1):end); y = bsxfun(@plus,y,offs(:)); y = y(:); function y = transmap_this(x,K,N) M = reshape(x,K,N); y = [x(1:K*N);sum(M,2)]; function test_this() K = 5; N = 10; M = randn(K,N); offs = randn(K,1); w = [M(:);offs]; f = addOffset([],K,N); test_MV2DF(f,w);
github
bsxfan/meta-embeddings-master
const_mv2df.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/templates/const_mv2df.m
856
utf_8
541e86c2041370727a8705935c4d575e
function [y,deriv] = const_mv2df(w,const) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % % y = const(:); % % This wraps the given constant into an MV2DF. The output, y, is % independent of input w. The derivatives are sparse zero vectors of the % appropriate size. if nargin==0 test_this(); return; end if isempty(w) y = @(w)const_mv2df(w,const); return; end if isa(w,'function_handle') outer = const_mv2df([],const); y = compose_mv(outer,w,[]); return; end w = w(:); y = const(:); deriv = @(g2) deriv_this(length(w),length(y)); function [g,hess,linear] = deriv_this(wsz,ysz) g = sparse(wsz,1); linear = true; hess = @(d) hess_this(ysz); function [h,Jd] = hess_this(ysz) h = []; if nargout>1 Jd = sparse(ysz,1); end function test_this() A = randn(4,5); f = const_mv2df([],A); test_MV2DF(f,randn(5,1));
github
bsxfan/meta-embeddings-master
linTrans.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/templates/linTrans.m
1,012
utf_8
5c26cd329441fa971c05127c464dfae5
function [y,deriv] = linTrans(w,map,transmap) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % % Applies linear transform y = map(w). It needs the transpose of map, % transmap for computing the gradient. map and transmap are function % handles. if nargin==0 test_this(); return; end if isempty(w) y = @(w)linTrans(w,map,transmap); return; end if isa(w,'function_handle') outer = linTrans([],map,transmap); y = compose_mv(outer,w,[]); return; end y = map(w); y = y(:); deriv = @(g2) deriv_this(g2,map,transmap); function [g,hess,linear] = deriv_this(g2,map,transmap) g = transmap(g2); g = g(:); %linear = false; % use this to test linearity of map, if in doubt linear = true; hess = @(d) hess_this(map,d); function [h,Jd] = hess_this(map,d) h = []; if nargout>1 Jd = map(d); Jd = Jd(:); end function test_this() A = randn(4,5); map = @(w) A*w; transmap = @(y) (y.'*A).'; % faster than A'*y, if A is big f = linTrans([],map,transmap); test_MV2DF(f,randn(5,1));
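Added sketch of the typical pattern for wrapping an arbitrary linear map as an MV2DF and composing it with another function from this library (sums_of_squares and compose_mv are defined elsewhere in this listing and in the toolkit); the composition machinery chains the derivatives automatically.

A = randn(3,5);
map      = @(w) A*w;
transmap = @(y) A.'*y;
f = linTrans([],map,transmap);          % y = A*w as an MV2DF
g = sums_of_squares(f,3);               % composed function: ||A*w||^2
w = randn(5,1);
[y,deriv] = g(w);
grad = deriv(1);                        % should equal 2*A.'*(A*w)
fprintf('grad err = %g\n', max(abs(grad - 2*A.'*(A*w))));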
github
bsxfan/meta-embeddings-master
affineTrans.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/linear/templates/affineTrans.m
1,378
utf_8
f1c4abd92c1dca63db5b0ccf3915a631
function [y,deriv] = affineTrans(w,affineMap,linMap,transMap) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % % Applies affine transform y = affineMap(w). It also needs % linMap, the linear part of the mapping, as well as transMap, the % transpose of linMap. All of affineMap, linMap and transMap are function % handles. % % Note: linMap(x) = J*x where J is the Jacobian of affineMap; and % transMap(y) = J'y. if nargin==0 test_this(); return; end if isempty(w) y = @(w)affineTrans(w,affineMap,linMap,transMap); return; end if isa(w,'function_handle') outer = affineTrans([],affineMap,linMap,transMap); y = compose_mv(outer,w,[]); return; end y = affineMap(w); y = y(:); deriv = @(g2) deriv_this(g2,linMap,transMap); function [g,hess,linear] = deriv_this(g2,linMap,transMap) g = transMap(g2); g = g(:); %linear = false; % use this to test linearity of affineMap, if in doubt linear = true; hess = @(d) hess_this(linMap,d); function [h,Jd] = hess_this(linMap,d) %h=zeros(size(d)); % use this to test linearity of affineMap, if in doubt h = []; if nargout>1 Jd = linMap(d); Jd = Jd(:); end function test_this() A = randn(4,5); k = randn(4,1); affineMap = @(w) A*w+k; linMap = @(w) A*w; transMap = @(y) (y.'*A).'; % faster than A'*y, if A is big f = affineTrans([],affineMap,linMap,transMap); test_MV2DF(f,randn(5,1));
github
bsxfan/meta-embeddings-master
logsoftmax_trunc_mv2df.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/logsoftmax_trunc_mv2df.m
1,380
utf_8
7933852f24348cedbc4c8750142e51de
function [y,deriv] = logsoftmax_trunc_mv2df(w,m) % This is a MV2DF. See MV2DF_API_DEFINITION.readme. % % Does: % (i) Reshapes w to m-by-n. % (ii) effectively (not physically) append a bottom row of zeros % (iii) Computes logsoftmax of each of n columns. % (iv) Omits last row (effectively) if nargin==0 test_this(); return; end if isempty(w) y = @(w)logsoftmax_trunc_mv2df(w,m); return; end if isa(w,'function_handle') outer = logsoftmax_trunc_mv2df([],m); y = compose_mv(outer,w,[]); return; end w = reshape(w,m,[]); y = logsoftmax_trunc(w); if nargout>1 deriv = @(Dy) deriv_this(Dy,exp(y)); end y = y(:); function [g,hess,linear] = deriv_this(Dy,smax) [m,n] = size(smax); Dy = reshape(Dy,m,n); sumDy = sum(Dy,1); g = Dy - bsxfun(@times,smax,sumDy); g = g(:); linear = false; hess = @(v) hess_this(v,sumDy,smax); function [h,Jv] = hess_this(V,sumDy,smax) [m,n] = size(smax); V = reshape(V,m,n); Vsmax = V.*smax; sumVsmax = sum(Vsmax,1); h = bsxfun(@times,smax,sumVsmax) - Vsmax; h = bsxfun(@times,h,sumDy); h = h(:); if nargout>1 Jv = bsxfun(@minus,V,sumVsmax); Jv = Jv(:); end function test_this() m = 10; n = 3; %A = randn(m); %map = @(x) reshape(A*reshape(x,m,[]),[],1); %transmap = @(y) reshape(A'*reshape(y,m,[]),[],1); %f = linTrans([],map,transmap); f = logsoftmax_trunc_mv2df([],m); test_MV2DF(f,randn(m*n,1));
github
bsxfan/meta-embeddings-master
mm_special.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/mm_special.m
1,465
utf_8
735b9c605bad33588197fcc0c0d59eb5
function [prod,deriv] = mm_special(w,extractA,extractB) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % % [vec(A);vec(B)] --> vec(A*B) % % where % A is extractA(w) % B is extractB(w) if nargin==0 test_this(); return; end if isempty(w) prod = @(w)mm_special(w,extractA,extractB); return; end if isa(w,'function_handle') outer = mm_special([],extractA,extractB); prod = compose_mv(outer,w,[]); return; end w = w(:); A = extractA(w); [m,k] = size(A); B = extractB(w); [k2,n] = size(B); assert(k==k2,'inner matrix dimensions must agree'); M = A*B; prod = M(:); deriv = @(g2) deriv_this(g2); function [g,hess,linear] = deriv_this(g2) g = vJ_this(g2,A,B); linear = false; hess = @(w) hess_this(g2,w); end function [h,Jv] = hess_this(g2,w) h = vJ_this(g2,extractA(w),extractB(w)); if nargout>=2 Jv = Jv_this(w); end end function prod = Jv_this(w) Aw = extractA(w); Bw = extractB(w); M = Aw*B + A*Bw; prod = M(:); end function w = vJ_this(prod,A,B) M = reshape(prod,m,n); Bp = A.'*M; Ap = M*B.'; w = [Ap(:);Bp(:)]; end end function A = extractA_this(w,m,k) A = w(1:m*k); A = reshape(A,m,k); end function B = extractB_this(w,m,k,n) B = w(m*k+(1:k*n)); B = reshape(B,k,n); end function test_this() m = 4; k = 5; n = 6; A = randn(m,k); B = randn(k,n); w = [A(:);B(:)]; extractA = @(w) extractA_this(w,m,k); extractB = @(w) extractB_this(w,m,k,n); f = mm_special([],extractA,extractB); test_MV2DF(f,w); end
github
bsxfan/meta-embeddings-master
sums_of_squares.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/sums_of_squares.m
898
utf_8
1fa8d45eea9355807d8ef47606407b36
function [y,deriv] = sums_of_squares(w,m) % This is a MV2DF. See MV2DF_API_DEFINITION.readme. % Does: % (i) Reshapes w to m-by-n. % (ii) Computes sum of squares of each of n columns. % (iii) Transposes to output n-vector. if nargin==0 test_this(); return; end if isempty(w) y = @(w)sums_of_squares(w,m); return; end if isa(w,'function_handle') outer = sums_of_squares([],m); y = compose_mv(outer,w,[]); return; end M = reshape(w,m,[]); y = sum(M.^2,1); y = y(:); deriv = @(g2) deriv_this(g2,M); function [g,hess,linear] = deriv_this(g2,M) g = 2*bsxfun(@times,M,g2.'); g = g(:); linear = false; hess = @(d) hess_this(d,g2,M); function [h,Jv] = hess_this(d,g2,M) h = deriv_this(g2,reshape(d,size(M))); if nargout>1 Jv = 2*sum(reshape(d,size(M)).*M,1); Jv = Jv(:); end function test_this() f = sums_of_squares([],10); test_MV2DF(f,randn(10*4,1));
github
bsxfan/meta-embeddings-master
gemm.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/gemm.m
1,283
utf_8
b9245303ab8248f450ad033cde69bf29
function [prod,deriv] = gemm(w,m,k,n) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % % [vec(A);vec(B)] --> vec(A*B) % % where % A is m-by-k % B is k-by-n if nargin==0 test_this(); return; end if isempty(w) prod = @(w)gemm(w,m,k,n); return; end if isa(w,'function_handle') outer = gemm([],m,k,n); prod = compose_mv(outer,w,[]); return; end w = w(:); A = extractA(w,m,k); B = extractB(w,m,k,n); M = A*B; prod = M(:); deriv = @(g2) deriv_this(g2,A,B,m,k,n); function [g,hess,linear] = deriv_this(g2,A,B,m,k,n) g = vJ_this(g2,A,B,m,n); linear = false; hess = @(w) hess_this(m,k,n,g2,A,B,w); function [h,Jv] = hess_this(m,k,n,g2,A,B,w) h = vJ_this(g2,... extractA(w,m,k),... extractB(w,m,k,n),... m,n); if nargout>=2 Jv = Jv_this(w,A,B,m,k,n); end function prod = Jv_this(w,A,B,m,k,n) Aw = extractA(w,m,k); Bw = extractB(w,m,k,n); M = Aw*B + A*Bw; prod = M(:); function w = vJ_this(prod,A,B,m,n) M = reshape(prod,m,n); Bp = A.'*M; Ap = M*B.'; w = [Ap(:);Bp(:)]; function A = extractA(w,m,k) A = w(1:m*k); A = reshape(A,m,k); function B = extractB(w,m,k,n) B = w(m*k+(1:k*n)); B = reshape(B,k,n); function test_this() A = randn(4,5); B = randn(5,4); w = [A(:);B(:)]; f = gemm([],4,5,4); test_MV2DF(f,w);
github
bsxfan/meta-embeddings-master
XtKX.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/XtKX.m
849
utf_8
0298041dbd9ce1171c7cf66e0edb8a09
function [y,deriv] = XtKX(w,K) %This is an MV2DF. % % vec(X) --> vec(X'KX) % if nargin==0 test_this(); return; end m = size(K,1); if isempty(w) y = @(w) XtKX(w,K); return; end if isa(w,'function_handle') outer = XtKX([],K); y = compose_mv(outer,w,[]); return; end X = reshape(w,m,[]); n = size(X,2); y = X.'*K*X; y = y(:); deriv = @(dy) deriv_this(dy,K,X,n); function [g,hess,linear] = deriv_this(DY,K,X,n) linear = false; DY = reshape(DY,n,n).'; g = DY.'*X.'*K.' + DY*X.'*K; g = g.'; g = g(:); hess = @(dx) hess_this(dx,K,X,DY); function [h,Jv] = hess_this(DX,K,X,DY) m = size(K,1); DX = reshape(DX,m,[]); h = K*DX*DY + K.'*DX*DY.'; h = h(:); if nargout<2 return; end Jv = DX.'*K*X + X.'*K*DX; Jv = Jv(:); function test_this() K = randn(4); X = randn(4,3); f = XtKX([],K); test_MV2DF(f,X(:));
github
bsxfan/meta-embeddings-master
UtU.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/UtU.m
945
utf_8
086256cb24f7c7b69d69614ceff1519b
function [prod,deriv] = UtU(w,m,n) % This is a MV2DF. See MV2DF_API_DEFINITION.readme. % U = reshape(w,m,n), M = U'*U, prod = M(:). if nargin==0 test_this(); return; end if isempty(w) prod = @(w)UtU(w,m,n); return; end if isa(w,'function_handle') outer = UtU([],m,n); prod = compose_mv(outer,w,[]); return; end w = w(:); U = reshape(w,m,n); M = U.'*U; prod = M(:); deriv = @(g2) deriv_this(g2,U,m,n); function [g,hess,linear] = deriv_this(g2,U,m,n) g = vJ_this(g2,U,n); linear = false; hess = @(w) hess_this(w,g2,U,m,n); function [h,Jv] = hess_this(w,g2,U,m,n) h = vJ_this(g2,reshape(w,m,n),n); if nargout>=2 Jv = Jv_this(w,U,m,n); end function dy = Jv_this(dw,U,m,n) dU = reshape(dw,m,n); dM = U.'*dU; dM = dM+dM.'; dy = dM(:); function w = vJ_this(dy,U,n) dY = reshape(dy,n,n); dU = U*(dY+dY.'); w = dU(:); function test_this() m = 5; n = 3; f = UtU([],m,n); U = randn(m,n); test_MV2DF(f,U(:));
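Added sketch mirroring the test code used throughout this library: UtU maps vec(U) to vec(U'*U), and the result can be piped into another MV2DF such as logdet_chol (listed earlier).

m = 4; n = 8;
A = UtU([],n,m);                        % vec(U) -> vec(U'*U), with U of size n-by-m
f = logdet_chol(A);                     % composed: vec(U) -> log(det(U'*U))
U = randn(n,m);
y = f(U(:));
fprintf('err = %g\n', abs(y - log(det(U.'*U))));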
github
bsxfan/meta-embeddings-master
bsxtimes.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/bsxtimes.m
1,144
utf_8
599b65f85120f5dec9d1a62d06393c35
function [y,deriv] = bsxtimes(w,m,n) % This is an MV2DF % % w = [vec(A); vec(b) ] --> vec(bsxfun(@times,A,b)), % % where A is an m-by-n matrix and % b is a 1-by-n row. % if nargin==0 test_this(); return; end if isempty(w) y = @(w) bsxtimes(w,m,n); return; end if isa(w,'function_handle') f = bsxtimes([],m,n); y = compose_mv(f,w,[]); return; end [A,b] = extract(w,m,n); y = bsxfun(@times,A,b); y = y(:); deriv = @(Dy) deriv_this(Dy,A,b); function [g,hess,linear] = deriv_this(Dy,A,b) g = gradient(Dy,A,b); linear = false; hess = @(v) hess_this(v,Dy,A,b); function [h,Jv] = hess_this(v,Dy,A,b) [m,n] = size(A); [vA,vb] = extract(v,m,n); h = gradient(Dy,vA,vb); if nargout>1 Jv = bsxfun(@times,vA,b); Jv = Jv + bsxfun(@times,A,vb); Jv = Jv(:); end function [A,b] = extract(w,m,n) A = reshape(w(1:m*n),m,n); b = w(m*n+1:end).'; function g = gradient(Dy,A,b) Dy = reshape(Dy,size(A)); gA = bsxfun(@times,Dy,b); gb = sum(Dy.*A,1); g = [gA(:);gb(:)]; function test_this() m = 5; n = 10; A = randn(m,n); b = randn(1,n); w = [A(:);b(:)]; f = bsxtimes([],m,n); test_MV2DF(f,w);
github
bsxfan/meta-embeddings-master
calibrateScores.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/calibrateScores.m
1,095
utf_8
36a554ff63a06324896dbea86ca33308
function [y,deriv] = calibrateScores(w,m,n) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % % [vec(A);scal;offs] --> vec(bsxfun(@plus,scal*A,b)) % % This function retrieves from w: % (i) an m-by-n matrix, 'scores' % (ii) a scalar 'scal', and % (iii) an m-vector, 'offset' % % Then it scales the scores and adds the offset vector to every column. if nargin==0 test_this(); return; end if isempty(w) scoreSz = m*n; wSz = scoreSz+m+1; at = 1; scores = subvec(w,wSz,at,scoreSz); at = at + scoreSz; scal = subvec(w,wSz,at,1); at = at + 1; offs = subvec(w,wSz,at,m); scores = gemm(stack(w,scores,scal),scoreSz,1,1); scores = addOffset(stack(w,scores,offs),m,n); y = scores; return; end if isa(w,'function_handle') f = calibrateScores([],m,n); y = compose_mv(f,w,[]); return; end f = calibrateScores([],m,n); [y,deriv] = f(w); function test_this() m = 5; n = 10; scores = randn(m,n); offs = randn(m,1); scal = 3; f = calibrateScores([],m,n); test_MV2DF(f,[scores(:);scal;offs]);
github
bsxfan/meta-embeddings-master
solve_AXeqB.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/solve_AXeqB.m
1,054
utf_8
cff7830e92caa23fabdd038a4e53750d
function [y,deriv] = solve_AXeqB(w,m) % This is an MV2DF. % % [A(:);B(:)] --> inv(A) * B % if nargin==0 test_this(); return; end if isempty(w) y = @(w)solve_AXeqB(w,m); return; end if isa(w,'function_handle') outer = solve_AXeqB([],m); y = compose_mv(outer,w,[]); return; end [A,B,n] = extract(w,m); y = A\B; deriv = @(dy) deriv_this(dy,m,n,A,A.',y); y = y(:); function [g,hess,linear] = deriv_this(dy,m,n,A,At,X) DXt = reshape(dy,m,n); DBt = At\DXt; DAt = -DBt*X.'; g = [DAt(:);DBt(:)]; linear = false; hess = @(dw) hess_this(dw,m,A,At,X,DBt); function [h,Jv] = hess_this(dw,m,A,At,X,DBt) [dA,dB] = extract(dw,m); D_DBt = -(At\dA.')*DBt; DX = A\(dB-dA*X); D_DAt = -(D_DBt*X.'+DBt*DX.'); h = [D_DAt(:);D_DBt(:)]; if nargout>1 Jv = A\(dB-dA*X); Jv = Jv(:); end function [A,B,n] = extract(w,m) mm = m^2; A = w(1:mm); A = reshape(A,m,m); B = w(mm+1:end); B = reshape(B,m,[]); n = size(B,2); function test_this() A = randn(5); B = randn(5,1); f = solve_AXeqB([],5); test_MV2DF(f,[A(:);B(:)]);
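Added numeric sanity check (the derivative machinery is already exercised by the built-in test_this): the forward value of solve_AXeqB is simply A\B.

m = 4; n = 2;
A = randn(m); B = randn(m,n);
f = solve_AXeqB([],m);
X = reshape(f([A(:);B(:)]),m,n);
fprintf('solve err = %g\n', max(max(abs(X - A\B))));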
github
bsxfan/meta-embeddings-master
logsoftmax_mv2df.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/logsoftmax_mv2df.m
1,248
utf_8
1c29a9da21772e72c800bb7be4025fe6
function [y,deriv] = logsoftmax_mv2df(w,m) % This is a MV2DF. See MV2DF_API_DEFINITION.readme. % % Does: % (i) Reshapes w to m-by-n. % (ii) Computes logsoftmax of each of n columns. if nargin==0 test_this(); return; end if isempty(w) y = @(w)logsoftmax_mv2df(w,m); return; end if isa(w,'function_handle') outer = logsoftmax_mv2df([],m); y = compose_mv(outer,w,[]); return; end w = reshape(w,m,[]); y = logsoftmax(w); if nargout>1 deriv = @(Dy) deriv_this(Dy,exp(y)); end y = y(:); function [g,hess,linear] = deriv_this(Dy,smax) [m,n] = size(smax); Dy = reshape(Dy,m,n); sumDy = sum(Dy,1); g = Dy - bsxfun(@times,smax,sumDy); g = g(:); linear = false; hess = @(v) hess_this(v,sumDy,smax); function [h,Jv] = hess_this(V,sumDy,smax) [m,n] = size(smax); V = reshape(V,m,n); Vsmax = V.*smax; sumVsmax = sum(Vsmax,1); h = bsxfun(@times,smax,sumVsmax) - Vsmax; h = bsxfun(@times,h,sumDy); h = h(:); if nargout>1 Jv = bsxfun(@minus,V,sumVsmax); Jv = Jv(:); end function test_this() m = 10; n = 3; %A = randn(m); %map = @(x) reshape(A*reshape(x,m,[]),[],1); %transmap = @(y) reshape(A'*reshape(y,m,[]),[],1); %f = linTrans([],map,transmap); f = logsoftmax_mv2df([],m); test_MV2DF(f,randn(m*n,1));
github
bsxfan/meta-embeddings-master
sqdist.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/sqdist.m
1,170
utf_8
bfa9c639ce948848c20501fec93af59c
function [y,deriv] = sqdist(w,dim) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % % If W = reshape(w,dim,n), then Y = vec of symmetric n-by-n matrix of % 1/2 squared Euclidean distances between all columns of W. if nargin==0 test_this(); return; end if isempty(w) y = @(w)sqdist(w,dim); return; end if isa(w,'function_handle') outer = sqdist([],dim); y = compose_mv(outer,w,[]); return; end X = reshape(w,dim,[]); N = size(X,2); XX = 0.5*sum(X.^2,1); y = bsxfun(@minus,XX.',X.'*X); y = bsxfun(@plus,y,XX); y = y(:); deriv = @(dy) deriv_this(dy,X,N); function [G,hess,linear] = deriv_this(DY,X,N) DY = reshape(DY,N,N); sumDY = sum(DY,1)+sum(DY,2).'; DYDY = DY+DY.'; G = bsxfun(@times,X,sumDY)-X*DYDY; G = G(:); linear = false; hess = @(d) hess_this(d,DYDY,sumDY,X); function [H,Jv] = hess_this(D,DYDY,sumDY,X) D = reshape(D,size(X)); H = bsxfun(@times,D,sumDY)-D*DYDY; H = H(:); if nargout>=2 DtX = D.'*X; xd = sum(X.*D,1); Jv = bsxfun(@minus,xd,DtX + DtX.'); Jv = bsxfun(@plus,Jv,xd.'); Jv = Jv(:); end function test_this() dim = 4; X = randn(dim,5); w = X(:); f = sqdist([],dim); test_MV2DF(f,w);
github
bsxfan/meta-embeddings-master
dottimes.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/dottimes.m
884
utf_8
7e8e3dedc670c1f93364db61f3d2b41d
function [y,deriv] = dottimes(w) % This is an MV2DF % % [a; b ] --> a.*b % % where length(a) == length(b) % if nargin==0 test_this(); return; end if isempty(w) y = @(w) dottimes(w); return; end if isa(w,'function_handle') f = dottimes([]); y = compose_mv(f,w,[]); return; end w = w(:); [a,b] = extract(w); y = a.*b; deriv = @(Dy) deriv_this(Dy,a,b); function [g,hess,linear] = deriv_this(Dy,a,b) g = gradient(Dy,a,b); linear = false; hess = @(v) hess_this(v,Dy,a,b); function [h,Jv] = hess_this(v,Dy,a,b) [va,vb] = extract(v); h = gradient(Dy,va,vb); if nargout>1 Jv = va.*b + a.*vb; end function [a,b] = extract(w) h = length(w)/2; a = w(1:h); b = w(h+1:end); function g = gradient(Dy,a,b) g = [Dy.*b;Dy.*a]; function test_this() n = 10; a = randn(1,n); b = randn(1,n); w = [a(:);b(:)]; f = dottimes([]); test_MV2DF(f,w);
github
bsxfan/meta-embeddings-master
solveChol_AXeqB.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/multivariate/solveChol_AXeqB.m
1,391
utf_8
f2ded36f846a5e9904fd8299ba4a5ed1
function [y,deriv] = solveChol_AXeqB(w,m) % This is an MV2DF. % % [A(:);B(:)] --> inv(A) * B % % We assume A is positive definite and we solve using Cholesky if nargin==0 test_this(); return; end if isempty(w) y = @(w)solveChol_AXeqB(w,m); return; end if isa(w,'function_handle') outer = solveChol_AXeqB([],m); y = compose_mv(outer,w,[]); return; end [A,B,n] = extract(w,m); if isreal(A) R = chol(A); solve = @(B) R\(R.'\B); else %complex solve = @(B) A\B; end y = solve(B); deriv = @(dy) deriv_this(dy,m,n,solve,y); y = y(:); function [g,hess,linear] = deriv_this(dy,m,n,solve,X) DXt = reshape(dy,m,n); DBt = solve(DXt); DAt = -DBt*X.'; g = [DAt(:);DBt(:)]; linear = false; hess = @(dw) hess_this(dw,m,solve,X,DBt); function [h,Jv] = hess_this(dw,m,solve,X,DBt) [dA,dB] = extract(dw,m); D_DBt = -solve(dA.'*DBt); DX = solve(dB-dA*X); D_DAt = -(D_DBt*X.'+DBt*DX.'); h = [D_DAt(:);D_DBt(:)]; if nargout>1 Jv = solve(dB-dA*X); Jv = Jv(:); end function [A,B,n] = extract(w,m) mm = m^2; A = w(1:mm); A = reshape(A,m,m); B = w(mm+1:end); B = reshape(B,m,[]); n = size(B,2); function test_this() m = 3; n = 10; k = 8; Usz = m*n; Bsz = m*k; Wsz = Usz+Bsz; w = []; U = subvec(w,Wsz,1,Usz); B = subvec(w,Wsz,Usz+1,Bsz); A = UtU(U,n,m); AB = stack(w,A,B); f = solveChol_AXeqB(AB,m); w = randn(Wsz,1); test_MV2DF(f,w,true);
github
bsxfan/meta-embeddings-master
test_MV2DF.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/test/test_MV2DF.m
2,104
utf_8
1f7eea1823322c4c0741c86792fc73c4
function test_MV2DF(f,x0,do_cstep) %id_in = identity_trans([]); %id_out = identity_trans([]); %f = f(id_in); %f = id_out(f); x0 = x0(:); if ~exist('do_cstep','var') do_cstep = 1; end if do_cstep Jc = cstepJacobian(f,x0); end Jr = rstepJacobian(f,x0); [y0,deriv] = f(x0); m = length(y0); n = length(x0); J2 = zeros(size(Jr)); for i=1:m; y = zeros(m,1); y(i) = 1; J2(i,:) = deriv(y)'; end if do_cstep c_err = max(max(abs(Jc-J2))); else c_err = nan; end r_err = max(max(abs(Jr-J2))); fprintf('test gradient : cstep err = %g, rstep err = %g\n',c_err,r_err); g2 = randn(m,1); [dummy,hess,linear] = deriv(g2); if true %~linear rHess = @(dx) rstep_approxHess(dx,g2,f,x0); if do_cstep cHess = @(dx) cstep_approxHess(dx,g2,f,x0); else cHess = @(dx) nan(size(dx)); end end J1 = zeros(size(Jr)); if true %~linear H1 = zeros(n,n); H2 = zeros(n,n); Hr = zeros(n,n); Hc = zeros(n,n); end for j=1:n; x = zeros(n,1); x(j) = 1; [h1,jx] = hess(x); h2 = hess(x); J1(:,j) = jx; if ~linear H1(:,j) = h1; H2(:,j) = h2; end Hr(:,j) = rHess(x); Hc(:,j) = cHess(x); end if do_cstep c_err = max(max(abs(Jc-J1))); else c_err = nan; end r_err = max(max(abs(Jr-J1))); fprintf('test Jacobian : cstep err = %g, rstep err = %g\n',c_err,r_err); fprintf('test Jacobian-gradient'': %g\n',max(max(abs(J1-J2)))); if false %linear fprintf('function claims to be linear, not testing Hessians\n'); return; end r_err = max(max(abs(H1-Hr))); c_err = max(max(abs(H1-Hc))); rc_err = max(max(abs(Hr-Hc))); fprintf('test Hess prod: cstep err = %g, rstep err = %g, cstep-rstep = %g\n',c_err,r_err,rc_err); fprintf('test H1-H2: %g\n',max(max(abs(H1-H2)))); function x = rstep_approxHess(dx,dy,f,x0) alpha = sqrt(eps); x2 = x0 + alpha*dx; [dummy,deriv2] = f(x2); x1 = x0 - alpha*dx; [dummy,deriv1] = f(x1); g2 = deriv2(dy); g1 = deriv1(dy); x = (g2-g1)/(2*alpha); function p = cstep_approxHess(dx,dy,f,x0) x = x0 + 1e-20i*dx; [dummy,deriv] = f(x); g = deriv(dy); p = 1e20*imag(g);
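For reference, an added sketch of how this harness is typically invoked on a new MV2DF: pass a function handle and a test point of the right length; the optional third argument switches the complex-step checks on or off (they are on by default and require the function to be complex-step safe).

f = sums_of_squares([],5);              % any MV2DF handle will do
x0 = randn(5*3,1);                      % test point of matching length
test_MV2DF(f,x0);                       % real-step and complex-step checks
test_MV2DF(f,x0,false);                 % real-step checks only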
github
bsxfan/meta-embeddings-master
tracer.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/test/tracer.m
1,081
utf_8
5e8d7ea9aefc9d1c1cc8161546bd9483
function [w,deriv] = tracer(w,vstring,gstring,jstring) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % % Identity transform for debugging function compositions: w is passed % through unchanged, while vstring, gstring and jstring (all optional) are % printed when the value, the gradient and a Jacobian product are computed, % respectively. if nargin==0 test_this(); return; end if nargin<2 vstring=[]; end if nargin<3 gstring=[]; end if nargin<4 jstring=[]; end if isempty(w) w = @(x)tracer(x,vstring,gstring,jstring); return; end if isa(w,'function_handle') outer = tracer([],vstring,gstring,jstring); w = compose_mv(outer,w,[]); return; end if ~isempty(vstring) fprintf('%s\n',vstring); end deriv = @(g2) deriv_this(g2,gstring,jstring); function [g,hess,linear] = deriv_this(g,gstring,jstring) if ~isempty(gstring) fprintf('%s\n',gstring); end linear = true; hess = @(d) hess_this(d,jstring); function [h,Jd] = hess_this(Jd,jstring) h = []; if nargout>1 if ~isempty(jstring) fprintf('%s\n',jstring); end end function test_this() f = tracer([],'V','G','J'); test_MV2DF(f,randn(5,1));
github
bsxfan/meta-embeddings-master
test_MV2DF_noHess.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_library/test/test_MV2DF_noHess.m
1,125
utf_8
2af48174c2441c011dffbf316b93612d
function test_MV2DF_noHess(f,x0) %id_in = identity_trans([]); %id_out = identity_trans([]); %f = f(id_in); %f = id_out(f); x0 = x0(:); Jc = cstepJacobian(f,x0); Jr = rstepJacobian(f,x0); [y0,deriv] = f(x0); m = length(y0); n = length(x0); J2 = zeros(size(Jr)); for i=1:m; y = zeros(m,1); y(i) = 1; J2(i,:) = deriv(y)'; end c_err = max(max(abs(Jc-J2))); r_err = max(max(abs(Jr-J2))); fprintf('test gradient : cstep err = %g, rstep err = %g\n',c_err,r_err); g2 = randn(m,1); rHess = @(dx) rstep_approxHess(dx,g2,f,x0); cHess = @(dx) cstep_approxHess(dx,g2,f,x0); Hr = zeros(n,n); Hc = zeros(n,n); for j=1:n; x = zeros(n,1); x(j) = 1; Hr(:,j) = rHess(x); Hc(:,j) = cHess(x); end rc_err = max(max(abs(Hr-Hc))); fprintf('test Hess prod: cstep-rstep = %g\n',rc_err); function x = rstep_approxHess(dx,dy,f,x0) alpha = sqrt(eps); x2 = x0 + alpha*dx; [dummy,deriv2] = f(x2); x1 = x0 - alpha*dx; [dummy,deriv1] = f(x1); g2 = deriv2(dy); g1 = deriv1(dy); x = (g2-g1)/(2*alpha); function p = cstep_approxHess(dx,dy,f,x0) x = x0 + 1e-20i*dx; [dummy,deriv] = f(x); g = deriv(dy); p = 1e20*imag(g);
github
bsxfan/meta-embeddings-master
inv_lu2.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/utils/inv_lu2.m
1,044
utf_8
8fa16d13c5b1ad8e681b3f2ba0f9b2c9
function [inv_map,bi_inv_map,logdet,invA] = inv_lu2(A) % INV_LU2 % Does an LU decomposition on A and returns logdet, inverse and % two function handles that respectively map X to A\X and A\X/A. % if nargin==0 test_this(); return; end [L,T,p] = lu(A,'vector'); P = sparse(p,1:length(p),1); % P*A = L*T % L is lower triangular, with unit diagonal and unit determinant % T is upper triangular, det(T) = prod(diag(T)), may have negative values on diagonal % P is a permutation matrix: P' = inv(P) and det(P) is +1 or -1 % inv_map = @(X) T\(L\(P*X)); % inv(A)*X*inv(A) bi_inv_map = @(X) ((inv_map(X)/T)/L)*P; if nargout>2 logdet = sum(log(diag(T)))-log(det(P)); if nargout>3 invA = T\(L\P); end end function test_this() dim = 3; A = randn(dim)+sqrt(-1)*randn(dim); [inv_map,bi_inv_map,logdet,iA] = inv_lu2(A); [logdet,log(det(A))] X = randn(dim,3); Y1 = A\X, Y2 = inv_map(X) Z1 = (A\X)/A Z2 = bi_inv_map(X) iA, inv(A)
github
bsxfan/meta-embeddings-master
invchol2.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/utils/invchol2.m
968
utf_8
936256e3c3a28ed65ad0c15d9fbb04cd
function [inv_map,bi_inv_map,logdet,invA] = invchol2(A) % INVCHOL2 % Does a Cholesky decomposition on A and returns logdet, inverse and % two function handles that respectively map X to A\X and A\X/A. % if nargin==0 test_this(); return; end if isreal(A) R = chol(A); %R'*R = A inv_map = @(X) R\(R'\X); % inv(A)*X*inv(A) bi_inv_map = @(X) (inv_map(X)/R)/(R'); if nargout>2 logdet = 2*sum(log(diag(R))); if nargout>3 invA = inv_map(eye(size(A))); end end else inv_map = @(X) A\X; % inv(A)*X*inv(A) bi_inv_map = @(X) (A\X)/A; if nargout>2 logdet = log(det(A)); if nargout>3 invA = inv_map(eye(size(A))); end end end function test_this() dim = 3; r = randn(dim,2*dim); A = r*r'; [inv_map,bi_inv_map,logdet,iA] = invchol2(A); [logdet,log(det(A))] X = randn(dim,3); Y1 = A\X, Y2 = inv_map(X) Z1 = (A\X)/A Z2 = bi_inv_map(X) iA, inv(A)
github
bsxfan/meta-embeddings-master
invchol_or_lu.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/utils/invchol_or_lu.m
1,418
utf_8
468d08dd52dd54bb77a471a5e2d0a856
function [inv_map,bi_inv_map,logdet,invA] = invchol_or_lu(A) % INVCHOL_OR_LU % Does a Cholesky decomposition on A (or an LU decomposition if A is complex) % and returns logdet, inverse and two function handles that respectively % map X to A\X and A\X/A. % if nargin==0 test_this(); return; end if isreal(A) R = chol(A); %R'*R = A inv_map = @(X) R\(R'\X); % inv(A)*X*inv(A) bi_inv_map = @(X) (inv_map(X)/R)/(R'); if nargout>2 logdet = 2*sum(log(diag(R))); if nargout>3 invA = inv_map(eye(size(A))); end end else [L,T,p] = lu(A,'vector'); P = sparse(p,1:length(p),1); % P*A = L*T % L is lower triangular, with unit diagonal and unit determinant % T is upper triangular, det(T) = prod(diag(T)), may have negative values on diagonal % P is a permutation matrix: P' = inv(P) and det(P) is +1 or -1 % inv_map = @(X) T\(L\(P*X)); % inv(A)*X*inv(A) bi_inv_map = @(X) ((inv_map(X)/T)/L)*P; if nargout>2 logdet = sum(log(diag(T)))-log(det(P)); if nargout>3 invA = T\(L\P); end end end function test_this() dim = 3; A = randn(dim)+sqrt(-1)*randn(dim); [inv_map,bi_inv_map,logdet,iA] = invchol_or_lu(A); [logdet,log(det(A))] X = randn(dim,3); Y1 = A\X, Y2 = inv_map(X) Z1 = (A\X)/A Z2 = bi_inv_map(X) iA, inv(A)
github
bsxfan/meta-embeddings-master
invchol_taylor.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/utils/invchol_taylor.m
1,241
utf_8
0f52f57c84dc1ae326e32c031541c496
function [inv_map,logdet] = invchol_taylor(A) % Does a Cholesky decomposition on A and returns: % inv_map: a function handle to solve for X in AX = B % logdet (of A) % % This code is designed to work correctly if A has a small complex % perturbation, such as used in complex step differentiation, even though % the complex A is not positive definite. if nargin==0 test_this(); return; end if isreal(A) R = chol(A); %R'*R = A inv_map = @(X) R\(R'\X); if nargout>1 logdet = 2*sum(log(diag(R))); end if nargout>2 invA = inv_map(eye(size(A))); end else R = chol(real(A)); rmap = @(X) R\(R'\X); P = rmap(imag(A)); inv_map = @(X) inv_map_complex(X,rmap,P); if nargout>1 logdet = 2*sum(log(diag(R))) + i*trace(P); end end end function Y = inv_map_complex(X,rmap,P) Z = rmap(X); Y = Z - i*P*Z; end function test_this() dim = 20; R = randn(dim,dim+1); C = R*R'; C = C + 1.0e-20i*randn(dim); [map,logdet] = invchol_taylor(C); x = randn(dim,1); maps = imag([map(x),C\x]), logdets = imag([logdet,log(det(C))]) maps = real([map(x),C\x]), logdets = real([logdet,log(det(C))]) end
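Added sketch of why the Taylor-style solver exists: when a positive definite C gets a tiny imaginary perturbation (as in complex-step differentiation), the imaginary part of the returned logdet recovers the directional derivative trace(C\D). The sizes and step below are illustrative.

dim = 6;
R = randn(dim,dim+3);
C = R*R.';                              % positive definite
D = randn(dim); D = D + D.';            % symmetric perturbation direction
h = 1e-20;
[inv_map,logdet] = invchol_taylor(C + 1i*h*D);
cstep = imag(logdet)/h;                 % complex-step directional derivative
exact = trace(C\D);                     % analytic derivative of log det(C) in direction D
fprintf('cstep err = %g\n', abs(cstep - exact));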
github
bsxfan/meta-embeddings-master
train_system.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/discrim_training/train_system.m
4,969
utf_8
69b1726b853595599d4a79414b256c8b
function [w,mce,divergence,w_pen,c_pen,optimizerState,converged] = train_system(classf,system,penalizer,W0,lambda,confusion,maxiters,maxCG,prior,optimizerState) % % Supervised training of a regularized K-class linear logistic % regression. Allows regularization via weight penalties and via % label confusion probabilities. % % % Inputs: % classf: 1-by-N row of class labels, in the range 1..K % % system: MV2DF function handle that maps parameters to score matrix. % Note: The system input data is already wrapped in this handle. % % penalizer: MV2DF function handle that maps parameters to a positive regularization penalty. % % % W0: initial parameters. This is NOT optional. % % % confusion: a scalar or a matrix of label confusion probabilities % -- if this is a K-by-K matrix, then % entry_ij denotes P(label_j | class i) % % -- if scalar: confusion = q, then % P(label_i | class_i) = 1-q, and % P(label_j | class_i) = q/(K-1) % % maxiters: the maximum number of Newton Trust Region optimization % iterations to perform. Note, the user can make maxiters % small, examine the solution and then continue training: % -- see W0 and optimizerState. % % % prior: a prior probability distribution over the K classes to % modify the optimization operating point. % optional: % omit or use [] % default is prior = ones(1,K)/K % % optimizerState: In this implementation, it is the trust region radius. % optional: % omit or use [] % If not supplied when resuming iteration, % this may cost some extra iterations. % Resume further iteration thus: % [W1,...,optimizerState] = train_..._logregr(...); % ... examine solution W1 ... % [W2,...,optimizerState] = train_..._logregr(...,W1,...,optimizerState); % % % % % % Outputs: % W: the solution. % mce: normalized multiclass cross-entropy of the solution. % The range is 0 (good) to 1 (useless). % % optimizerState: see above, can be used to resume iteration. % if nargin==0 test_this(); return; end classf = classf(:)'; K = max(classf); N = length(classf); if ~exist('maxCG','var') || isempty(maxCG) maxCG = 100; end if ~exist('optimizerState','var') optimizerState=[]; end if ~exist('prior','var') || isempty(prior) prior = ones(K,1)/K; else prior = prior(:); end weights = zeros(1,N); for k = 1:K fk = find(classf==k); count = length(fk); weights(fk) = prior(k)/(count*log(2)); end if ~exist('confusion','var') || isempty(confusion) confusion = 0; end if isscalar(confusion) q = confusion; confusion = (1-q)*eye(K) + (q/(K-1))*(ones(K)-eye(K)); end post = bsxfun(@times,confusion,prior(:)); post = bsxfun(@times,post,1./sum(post,1)); logpost = post; nz = logpost>0; logpost(nz) = log(post(nz)); confusion_entropy = -mean(sum(post.*logpost,1),2); prior_entropy = -log(prior)'*prior; c_pen = confusion_entropy/prior_entropy; fprintf('normalized confusion entropy = %g\n',c_pen); T = zeros(K,N); for i=1:N T(:,i) = post(:,classf(i)); end w=[]; obj1 = mce_obj(system,T,weights,log(prior)); obj2 = penalizer(w); obj = sum_of_functions(w,[1,lambda],obj1,obj2); w0 = W0(:); [w,y,optimizerState,converged] = trustregion_newton_cg(obj,w0,maxiters,maxCG,optimizerState,[],1); w_pen = lambda*obj2(w)/prior_entropy; mce = y/prior_entropy-w_pen; divergence = mce-c_pen; fprintf('mce = %g, divergence = %g, conf entr = %g, weight pen = %g\n',mce,divergence,c_pen,w_pen); function y = score_map(W,X) [dim,N] = size(X); W = reshape(W,[],dim+1); offs = W(:,end); W(:,end)=[]; y = bsxfun(@plus,W*X,offs); y = y(:); function W = score_transmap(y,X) [dim,N] = size(X); y = reshape(y,[],N).'; W = [X*y;sum(y)]; W = W.'; W = W(:); function test_this() K = 3; N = 100; dim = 2; % ----------------synthesize data ------------------- randn('state',0); means = randn(dim,K)*10; %signal X = randn(dim,K*N); % noise classf = zeros(1,K*N); ii = 1:N; for k=1:K X(:,ii) = bsxfun(@plus,means(:,k),X(:,ii)); classf(ii) = k; ii = ii+N; end N = K*N; % ---------------- define system ------------------- w=[]; map = @(W) score_map(W,X); transmap = @(Y) score_transmap(Y,X); system = linTrans(w,map,transmap); penalizer = sumsquares_penalty(w,1); % ------------- train it ------------------------------ confusion = 0.01; lambda = 0.01; W0 = zeros(K,dim+1); W = train_system(classf,system,penalizer,W0,lambda,confusion,20); % ------------ plot log posterior on training data -------------------- scores = score_system(W,system,K); scores = logsoftmax(scores); subplot(1,2,1);plot(scores'); scores = score_system(W,system,K,true); scores = [scores;zeros(1,N)]; scores = logsoftmax(scores); subplot(1,2,2);plot(scores');
github
bsxfan/meta-embeddings-master
sum_of_functions.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_combination/sum_of_functions.m
1,094
utf_8
af1885792c3ce587c098ffc61a10cc06
function [y,deriv] = sum_of_functions(w,weights,f,g)
% This is an MV2DF (see MV2DF_API_DEFINITION.readme) which
% represents the new function, s(w), obtained by summing the
% weighted outputs of the given functions:
%   s(w) = sum_i weights(i)*functions{i}(w)
%
% Usage examples:
%
%   s = @(w) sum_of_functions(w,[1,-1],f,g)
%
% Here f,g are function handles to MV2DF's.

if nargin==0
    test_this();
    return;
end

weights = weights(:);

if isempty(w)
    s = stack(w,f,g,true);
    n = length(weights);
    map = @(s) reshape(s,[],n)*weights;
    transmap = @(y) reshape(y(:)*weights.',[],1);
    y = linTrans(s,map,transmap);
    return;
end

if isa(w,'function_handle')
    f = sum_of_functions([],weights,f,g);
    y = compose_mv(f,w,[]);
    return;
end

f = sum_of_functions([],weights,f,g);
if nargout==1
    y = f(w);
else
    [y,deriv] = f(w);
end

function test_this()
A = randn(4,4);
B = randn(4,4);
w = [];
f = gemm(w,4,4,4);
g = transpose_mv2df(f,4,4);
%f = A*B;
%g = B'*A';
s = sum_of_functions(w,[-1,1],f,g);
%s = stack(w,f,g);
test_MV2DF(s,[A(:);B(:)]);
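As a quick illustration of the weighted-sum combinator, reusing the gemm and transpose_mv2df handles from test_this above (the weights chosen here are arbitrary):

w = [];
f = gemm(w,4,4,4);                     % vec(A*B) for w = [A(:);B(:)]
g = transpose_mv2df(f,4,4);            % vec((A*B).')
s = sum_of_functions(w,[1,-1],f,g);    % s(w) = f(w) - g(w)
A = randn(4); B = randn(4);
y = s([A(:);B(:)]);                    % vec(A*B) - vec((A*B).')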
github
bsxfan/meta-embeddings-master
scale_function.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_combination/scale_function.m
856
utf_8
fae26a24cbea0fcc7ae35cf1642b18e4
function [y,deriv] = scale_function(w,scale,f)
% This is an MV2DF (see MV2DF_API_DEFINITION.readme) which
% represents the new function,
%
%   g(w) = scale(w)*f(w),
%
% where scale is scalar-valued and f is matrix-valued.
%
% Here scale and f are function handles to MV2DF's.

if nargin==0
    test_this();
    return;
end

if isempty(w)
    s = stack(w,f,scale);
    y = mm_special(s,@(w)reshape(w(1:end-1),[],1),@(w)w(end));
    return;
end

if isa(w,'function_handle')
    f = scale_function([],scale,f);
    y = compose_mv(f,w,[]);
    return;
end

f = scale_function([],scale,f);
if nargout==1
    y = f(w);
else
    [y,deriv] = f(w);
end

function test_this()
m = 5;
n = 10;
data = randn(m,n);
scal = 3;
w = [data(:);scal];
g = subvec([],m*n+1,1,m*n);
scal = subvec([],m*n+1,m*n+1,1);
f = scale_function([],scal,g);
test_MV2DF(f,w);
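A small usage sketch following test_this above: the last entry of w acts as a scale on the first m*n entries (the sizes are arbitrary).

m = 5; n = 10;
data = subvec([],m*n+1,1,m*n);         % first m*n entries of w
scal = subvec([],m*n+1,m*n+1,1);       % last entry of w
f = scale_function([],scal,data);
w = [randn(m*n,1); 3];
Y = reshape(f(w),m,n);                 % equals 3*reshape(w(1:m*n),m,n)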
github
bsxfan/meta-embeddings-master
outerprod_of_functions.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_combination/outerprod_of_functions.m
1,085
utf_8
731782f761b675bb6d3567ddb560c950
function [y,deriv] = outerprod_of_functions(w,f,g,m,n)
% This is an MV2DF (see MV2DF_API_DEFINITION.readme) which
% represents the new function,
%
%   g(w) = f(w)g(w)'
%
% where f(w) and g(w) are column vectors of sizes m and n respectively.
%
% Here f,g are function handles to MV2DF's.

if nargin==0
    test_this();
    return;
end

if ~exist('n','var'), n=[]; end

    function A = extractA(w)
        if isempty(m), m = length(w)-n; end
        A = w(1:m);
        A = A(:);
    end

    function B = extractB(w)
        if isempty(m), m = length(w)-n; end
        B = w(1+m:end);
        B = B(:).';
    end

if isempty(w)
    s = stack(w,f,g);
    y = mm_special(s,@(w)extractA(w),@(w)extractB(w));
    return;
end

if isa(w,'function_handle')
    f = outerprod_of_functions([],f,g,m,n);
    y = compose_mv(f,w,[]);
    return;
end

f = outerprod_of_functions([],f,g,m,n);
if nargout==1
    y = f(w);
else
    [y,deriv] = f(w);
end

end

function test_this()
m = 5;
n = 3;
w = randn(m+n,1);
f = subvec([],m+n,1,m);
g = subvec([],m+n,m+1,n);
h = outerprod_of_functions([],f,g,m,n);
test_MV2DF(h,w);
end
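For example, a rank-one matrix can be built from two blocks of the same parameter vector, mirroring test_this above (sizes arbitrary):

m = 5; n = 3;
a = subvec([],m+n,1,m);                % first m entries of w
b = subvec([],m+n,m+1,n);              % last n entries of w
h = outerprod_of_functions([],a,b,m,n);
w = randn(m+n,1);
F = reshape(h(w),m,n);                 % equals w(1:m)*w(m+1:end).'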
github
bsxfan/meta-embeddings-master
interleave.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_combination/interleave.m
2,028
utf_8
0cdd5849311559d9813888914f7530cd
function [y,deriv] = interleave(w,functions) % interleave is an MV2DF (see MV2DF_API_DEFINITION.readme) which % represents the new function, s(w), obtained by interleaving the outputs of % f() and g() thus: % % S(w) = [f(w)';g(w)']; % s(w) = S(:); if nargin==0 test_this(); return; end if isempty(w) y = @(w)interleave(w,functions); return; end if isa(w,'function_handle') outer = interleave([],functions); y = compose_mv(outer,w,[]); return; end % if ~isa(f,'function_handle') % f = const_mv2df([],f); % end % if ~isa(g,'function_handle') % g = const_mv2df([],g); % end w = w(:); m = length(functions); k = length(w); if nargout==1 y1 = functions{1}(w); n = length(y1); y = zeros(m,n); y(1,:) = y1; for i=2:m y(i,:) = functions{i}(w); end y = y(:); return; end deriv = cell(1,m); [y1,deriv{1}] = functions{1}(w); n = length(y1); y = zeros(m,n); y(1,:) = y1; for i=2:m [y(i,:),deriv{i}] = functions{i}(w); end y = y(:); deriv = @(g2) deriv_this(g2,deriv,m,n,k); function [g,hess,linear] = deriv_this(y,deriv,m,n,k) y = reshape(y,m,n); if nargout==1 g = deriv{1}(y(1,:).'); for i=2:m g = g+ deriv{i}(y(i,:).'); end return; end hess = cell(1,m); lin = false(1,m); [g,hess{1},lin(1)] = deriv{1}(y(1,:).'); linear = lin(1); for i=2:m [gi,hess{i},lin(i)] = deriv{i}(y(i,:).'); g = g + gi; linear = linear && lin(i); end hess = @(d) hess_this(d,hess,lin,m,n,k); function [h,Jv] = hess_this(d,hess,lin,m,n,k) if all(lin) h = []; else h = zeros(k,1); end if nargout>1 Jv = zeros(m,n); for i=1:m [hi,Jv(i,:)] = hess{i}(d); if ~lin(i) h = h + hi; end end Jv = Jv(:); else for i=1:m hi = hess{i}(d); if ~lin(i) h = h + hi; end end end function test_this() w = []; f = exp_mv2df(w); g = square_mv2df(w); h = identity_trans(w); s = interleave(w,{f,g,h}); w = randn(5,1); test_MV2DF(s,w);
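A usage sketch based on test_this above: three element-wise MV2DFs of the same parameter vector are interleaved row-wise.

f = exp_mv2df([]);                     % exp(w)
g = square_mv2df([]);                  % w.^2
h = identity_trans([]);                % w
s = interleave([],{f,g,h});
w = randn(5,1);
S = reshape(s(w),3,5);                 % rows: exp(w).', (w.^2).', w.'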
github
bsxfan/meta-embeddings-master
shift_function.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_combination/shift_function.m
896
utf_8
82abefaa89d6e02403f0b543a3c69a0b
function [y,deriv] = shift_function(w,shift,f)
% This is an MV2DF (see MV2DF_API_DEFINITION.readme) which
% represents the new function,
%
%   g(w) = shift(w)+f(w),
%
% where shift is scalar-valued and f is matrix-valued.
%
% Here shift and f are function handles to MV2DF's.

if nargin==0
    test_this();
    return;
end

if isempty(w)
    s = stack(w,shift,f);
    map = @(s) s(2:end)+s(1);
    transmap = @(y) [sum(y);y];
    y = linTrans(s,map,transmap);
    return;
end

if isa(w,'function_handle')
    f = shift_function([],shift,f);
    y = compose_mv(f,w,[]);
    return;
end

f = shift_function([],shift,f);
if nargout==1
    y = f(w);
else
    [y,deriv] = f(w);
end

function test_this()
m = 5;
n = 10;
data = randn(m,n);
shift = 3;
w = [data(:);shift];
g = subvec([],m*n+1,1,m*n);
shift = subvec([],m*n+1,m*n+1,1);
f = shift_function([],shift,g);
test_MV2DF(f,w);
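Combined with scale_function above, this gives an affine map; the parameter layout w = [scores(:); a; b] in this sketch is hypothetical.

n = 10;
scores = subvec([],n+2,1,n);
a = subvec([],n+2,n+1,1);
b = subvec([],n+2,n+2,1);
affine = shift_function([],b,scale_function([],a,scores));
y = affine([randn(n,1); 2; -1]);       % 2*scores - 1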
github
bsxfan/meta-embeddings-master
dotprod_of_functions.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_combination/dotprod_of_functions.m
952
utf_8
2999899143500736ecbc06d5afc09df0
function [y,deriv] = dotprod_of_functions(w,f,g)
% This is an MV2DF (see MV2DF_API_DEFINITION.readme) which
% represents the new function,
%
%   g(w) = f(w)'g(w)
%
% where f(w) and g(w) are column vectors of the same size.
%
% Here f,g are function handles to MV2DF's.

if nargin==0
    test_this();
    return;
end

    function A = extractA(w)
        A = w(1:length(w)/2);
        A = A(:).';
    end

    function B = extractB(w)
        B = w(1+length(w)/2:end);
        B = B(:);
    end

if isempty(w)
    s = stack(w,f,g);
    y = mm_special(s,@(w)extractA(w),@(w)extractB(w));
    return;
end

if isa(w,'function_handle')
    f = dotprod_of_functions([],f,g);
    y = compose_mv(f,w,[]);
    return;
end

f = dotprod_of_functions([],f,g);
if nargout==1
    y = f(w);
else
    [y,deriv] = f(w);
end

end

function test_this()
m = 5;
w = randn(2*m,1);
f = subvec([],2*m,1,m);
g = subvec([],2*m,m+1,m);
h = dotprod_of_functions([],f,g);
test_MV2DF(h,w);
end
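One simple use is a squared norm, taking the dot product of an MV2DF with itself (identity_trans is the identity MV2DF used elsewhere in these tests):

f = identity_trans([]);                % f(w) = w
sqnorm = dotprod_of_functions([],f,f);
w = randn(5,1);
v = sqnorm(w);                         % equals w.'*w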
github
bsxfan/meta-embeddings-master
dottimes_of_functions.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_combination/dottimes_of_functions.m
641
utf_8
45195e5dddb789f3431e2f84495b06c7
function [y,deriv] = dottimes_of_functions(w,A,B)
% This is an MV2DF (see MV2DF_API_DEFINITION.readme)
%
%   w --> A(w) .* B(w)
%
% Here A and B are function handles to MV2DF's.

if nargin==0
    test_this();
    return;
end

if isempty(w)
    s = stack(w,A,B);
    y = dottimes(s);
    return;
end

if isa(w,'function_handle')
    f = dottimes_of_functions([],A,B);
    y = compose_mv(f,w,[]);
    return;
end

f = dottimes_of_functions([],A,B);
[y,deriv] = f(w);

function test_this()
M = 4;
X = [];
Xt = transpose_mv2df(X,M,M);
A = UtU(X,M,M);
B = UtU(Xt,M,M);
f = dottimes_of_functions(X,A,B);
test_MV2DF(f,randn(16,1));
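A minimal element-wise example, assuming the element-wise MV2DFs exp_mv2df and square_mv2df used in the interleave tests above:

A = exp_mv2df([]);                     % exp(w)
B = square_mv2df([]);                  % w.^2
f = dottimes_of_functions([],A,B);
w = randn(5,1);
y = f(w);                              % exp(w).*(w.^2)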
github
bsxfan/meta-embeddings-master
replace_hessian.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_combination/replace_hessian.m
1,399
utf_8
1e948d50795df5102876278fd5022da8
function [y,deriv] = replace_hessian(w,f,cstep) % This is an MV2DF. See MV2DF_API_DEFINITION.readme. % if nargin==0 test_this(); return; end if isempty(w) y = @(w)replace_hessian(w,f,cstep); return; end if isa(w,'function_handle') outer = replace_hessian([],f,cstep); y = compose_mv(outer,w,[]); return; end if nargout==1 y = f(w); else [y,derivf] = f(w); deriv = @(dy) deriv_this(dy,derivf,f,w,cstep); end end function [g,hess,linear] = deriv_this(dy,derivf,f,w,cstep) g = derivf(dy); if nargout>1 linear = false; hess = @(dx) hess_this(dx,dy,f,w,cstep); end end function [h,Jv] = hess_this(dx,dy,f,w,cstep) if cstep h = cstep_approxHess(dx,dy,f,w); else h = rstep_approxHess(dx,dy,f,w); end if nargout>1 error('replace_hessian cannot compute Jv'); %Jv = zeros(size(dy)); end end function x = rstep_approxHess(dx,dy,f,x0) alpha = sqrt(eps); x2 = x0 + alpha*dx; [dummy,deriv2] = f(x2); x1 = x0 - alpha*dx; [dummy,deriv1] = f(x1); g2 = deriv2(dy); g1 = deriv1(dy); x = (g2-g1)/(2*alpha); end function p = cstep_approxHess(dx,dy,f,x0) x = x0 + 1e-20i*dx; [dummy,deriv] = f(x); g = deriv(dy); p = 1e20*imag(g); end function test_this() A = randn(5); B = randn(5,1); map = @(w) 5*w; h = linTrans([],map,map); f = solve_AXeqB([],5); g = replace_hessian([],f,true); g = g(h); w = [A(:);B(:)]; test_MV2DF(g,w); end
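The cstep branch above relies on the complex-step derivative trick; here is a standalone scalar illustration of that trick (the function f below is hypothetical and not part of the toolkit):

f  = @(x) x.^3 + sin(x);
x  = 0.7; dx = 1.0;
Jv_cstep = 1e20*imag(f(x + 1e-20i*dx));    % directional derivative via complex step
Jv_exact = (3*x.^2 + cos(x))*dx;           % analytic value, for comparison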
github
bsxfan/meta-embeddings-master
product_of_functions.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_combination/product_of_functions.m
745
utf_8
ae86bc0a8429bacd6044704a6a8a0e06
function [y,deriv] = product_of_functions(w,A,B,m,k,n)
% This is an MV2DF (see MV2DF_API_DEFINITION.readme)
%
%   w --> vec ( reshape(A(w),m,k) * reshape(B(w),k,n) )
%
% Here A and B are function handles to MV2DF's.

if nargin==0
    test_this();
    return;
end

if isempty(w)
    s = stack(w,A,B);
    y = gemm(s,m,k,n);
    return;
end

if isa(w,'function_handle')
    f = product_of_functions([],A,B,m,k,n);
    y = compose_mv(f,w,[]);
    return;
end

f = product_of_functions([],A,B,m,k,n);
[y,deriv] = f(w);

function test_this()
M = 4;
N = 4;
X = [];
Xt = transpose_mv2df(X,M,N);
A = UtU(X,M,N);
B = UtU(Xt,N,M);
%A = A(randn(16,1));
%B = B(randn(16,1));
f = product_of_functions(X,A,B,4,4,4);
test_MV2DF(f,randn(16,1));
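For example, the product of two matrices packed into one parameter vector w = [A(:); B(:)] (the sizes are arbitrary):

m = 4; k = 5; n = 3;
A = subvec([],m*k+k*n,1,m*k);
B = subvec([],m*k+k*n,m*k+1,k*n);
f = product_of_functions([],A,B,m,k,n);
w = randn(m*k+k*n,1);
P = reshape(f(w),m,n);       % reshape(w(1:m*k),m,k)*reshape(w(m*k+1:end),k,n)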
github
bsxfan/meta-embeddings-master
stack.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_combination/stack.m
3,136
utf_8
cbfe5ccd3255b5021692c3eb13e1798f
function [y,deriv] = stack(w,f,g,eqlen) % STACK is an MV2DF (see MV2DF_API_DEFINITION.readme) which % represents the new function, s(w), obtained by stacking the outputs of % f() and g() thus: % s(w) = [f(w);g(w)] if nargin==0 test_this(); return; end if ~exist('eqlen','var') eqlen = false; end if isempty(w) y = @(w)stack(w,f,g,eqlen); return; end if isa(w,'function_handle') outer = stack([],f,g,eqlen); y = compose_mv(outer,w,[]); return; end % if ~isa(f,'function_handle') % f = const_mv2df([],f); % end % if ~isa(g,'function_handle') % g = const_mv2df([],g); % end w = w(:); if nargout==1 y1 = f(w); y2 = g(w); n1 = length(y1); n2 = length(y2); if eqlen, assert(n1==n2,'length(f(w))=%i must equal length(g(w))=%i.',n1,n2); end y = [y1;y2]; return; end [y1,deriv1] = f(w); [y2,deriv2] = g(w); y = [y1;y2]; n1 = length(y1); n2 = length(y2); if eqlen, assert(n1==n2,'length(f(w))=%i must equal length(g(w))=%i.',n1,n2); end deriv = @(g2) deriv_this(g2,deriv1,deriv2,n1); function [g,hess,linear] = deriv_this(y,deriv1,deriv2,n1) if nargout==1 g1 = deriv1(y(1:n1)); g2 = deriv2(y(n1+1:end)); g = g1 + g2; return; end [g1,hess1,lin1] = deriv1(y(1:n1)); [g2,hess2,lin2] = deriv2(y(n1+1:end)); g = g1+g2; linear = lin1 && lin2; hess = @(d) hess_this(d,hess1,hess2,lin1,lin2); function [h,Jv] = hess_this(d,hess1,hess2,lin1,lin2) if nargout>1 [h1,Jv1] = hess1(d); [h2,Jv2] = hess2(d); Jv = [Jv1;Jv2]; else [h1] = hess1(d); [h2] = hess2(d); end if lin1 && lin2 h = []; elseif ~lin1 && ~lin2 h = h1 + h2; elseif lin2 h = h1; else h = h2; end function test_this() fprintf('-------------- Test 1 ------------------------\n'); fprintf('Stack [f(w);g(w)]: f() is non-linear and g() is non-linear:\n'); A = randn(4,5); B = randn(5,4); w = []; f = gemm(w,4,5,4); g = gemm(subvec(w,40,1,20),2,5,2); s = stack(w,f,g); w = [A(:);B(:)]; test_MV2DF(s,w); fprintf('--------------------------------------\n\n'); fprintf('-------------- Test 2 ------------------------\n'); fprintf('Stack [f(w);g(w)]: f() is linear and g() is non-linear:\n'); A = randn(4,5); B = randn(5,4); w = [A(:);B(:)]; T = randn(40); f = @(w) linTrans(w,@(x)T*x,@(y)T.'*y); g = @(w) gemm(w,4,5,4); s = @(w) stack(w,f,g); test_MV2DF(s,w); fprintf('--------------------------------------\n\n'); fprintf('-------------- Test 3 ------------------------\n'); fprintf('Stack [f(w);g(w)]: f() is non-linear and g() is linear:\n'); A = randn(4,5); B = randn(5,4); w = [A(:);B(:)]; T = randn(40); f = @(w) linTrans(w,@(x)T*x,@(y)T.'*y); g = @(w) gemm(w,4,5,4); s = @(w) stack(w,g,f); test_MV2DF(s,w); fprintf('--------------------------------------\n\n'); fprintf('-------------- Test 4 ------------------------\n'); fprintf('Stack [f(w);g(w)]: f() is linear and g() is linear:\n'); w = randn(10,1); T1 = randn(11,10); f = @(w) linTrans(w,@(x)T1*x,@(y)T1.'*y); T2 = randn(12,10); g = @(w) linTrans(w,@(x)T2*x,@(y)T2.'*y); s = @(w) stack(w,g,f); test_MV2DF(s,w); fprintf('--------------------------------------\n\n');
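A usage sketch in the style of Test 2 above, stacking a linear map on top of a non-linear matrix product:

A = randn(4,5); B = randn(5,4);
w = [A(:);B(:)];
T = randn(40);
f = @(w) linTrans(w,@(x)T*x,@(y)T.'*y);    % linear MV2DF
g = @(w) gemm(w,4,5,4);                    % non-linear MV2DF: vec(A*B)
s = @(w) stack(w,f,g);
y = s(w);                                  % [T*w; vec(A*B)]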
github
bsxfan/meta-embeddings-master
scale_and_translate.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_combination/scale_and_translate.m
1,341
utf_8
121b1cd2e23a3d7111f310db8e3b6a05
function [y,deriv] = scale_and_translate(w,vectors,params,m,n)
% This is an MV2DF (see MV2DF_API_DEFINITION.readme) which
% represents the new function obtained by scaling and translating the
% column vectors of the output matrix of the function vectors(w). The
% scaling and translation parameters, params(w), are also a function of w.
%
% The output, y, is calculated as follows:
%
%   V = reshape(vectors(w),m,n);
%   [scal;offs] = params(w); where scal is scalar and offs is m-by-1
%   y = bsxfun(@plus,scal*V,offs);
%   y = y(:);
%
% Usage example:
%
%   s = @(w) scale_and_translate(w,vectors,params,m,n)
%
% Here vectors and params are function handles to MV2DF's.

if nargin==0
    test_this();
    return;
end

if isempty(w)
    s = stack(w,vectors,params);
    y = calibrateScores(s,m,n);
    return;
end

if isa(w,'function_handle')
    f = scale_and_translate([],vectors,params,m,n);
    y = compose_mv(f,w,[]);
    return;
end

f = scale_and_translate([],vectors,params,m,n);
if nargout==1
    y = f(w);
else
    [y,deriv] = f(w);
end

function test_this()
m = 5;
n = 10;
data = randn(m,n);
offs = randn(m,1);
scal = 3;
w = [data(:);scal;offs];
vectors = subvec([],m*n+m+1,1,m*n);    %vectors = randn(size(data));
params = subvec([],m*n+m+1,m*n+1,m+1); %params = [scal;offs];
f = scale_and_translate([],vectors,params,m,n);
test_MV2DF(f,w);
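Following test_this above, a block of scores and its calibration parameters can live in one parameter vector w = [scores(:); scal; offs]:

m = 5; n = 10;
vectors = subvec([],m*n+m+1,1,m*n);        % raw scores
params  = subvec([],m*n+m+1,m*n+1,m+1);    % [scal; offs]
f = scale_and_translate([],vectors,params,m,n);
w = [randn(m*n,1); 3; randn(m,1)];
Y = reshape(f(w),m,n);                     % bsxfun(@plus,3*V,offs)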
github
bsxfan/meta-embeddings-master
compose_mv.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/utility_funcs/Optimization_Toolkit/MV2DF/function_combination/compose_mv.m
2,958
utf_8
108f7eb78b4ff77e907d369fc9ae14db
function [y,deriv] = compose_mv(outer,inner,x) % COMPOSE_MV is an MV2DF (see MV2DF_API_DEFINITION.readme) which represents % the combination of two functions. If 'outer' is an MV2DF for a function % g() and 'inner' for a function f(), then this MV2DF represents g(f(x)). %feature scopedaccelenablement off if nargin==0 test_this(); return; end if isempty(x) y = @(w)compose_mv(outer,inner,w); return; end if isa(x,'function_handle') fh = compose_mv(outer,inner,[]); % fh =@(x) outer(inner(x)) y = compose_mv(fh,x,[]); % y =@(w) outer(inner(x(w))) return; end % if ~isa(outer,'function_handle') % outer = const_mv2df([],outer); % end % if ~isa(inner,'function_handle') % inner = const_mv2df([],inner); % end if nargout==1 y = outer(inner(x)); return; end [y1,deriv1] = inner(x); [y,deriv2] = outer(y1); deriv = @(g3) deriv_this(deriv1,deriv2,g3); function [g,hess,linear] = deriv_this(deriv1,deriv2,g3) if nargout==1 g = deriv1(deriv2(g3)); return; end [g2,hess2,lin2] = deriv2(g3); [g,hess1,lin1] = deriv1(g2); hess =@(d) hess_this(deriv1,hess1,hess2,lin1,lin2,d); linear = lin1 && lin2; function [h,Jv] = hess_this(deriv1,hess1,hess2,lin1,lin2,d) if nargout==1 if ~lin2 [h1,Jv1] = hess1(d); h2 = hess2(Jv1); h2 = deriv1(h2); elseif ~lin1 h1 = hess1(d); end else [h1,Jv1] = hess1(d); [h2,Jv] = hess2(Jv1); if ~lin2 h2 = deriv1(h2); end end if lin1 && lin2 h=[]; elseif (~lin1) && (~lin2) h = h1+h2; elseif lin1 h = h2; else % if lin2 h = h1; end function test_this() fprintf('-------------- Test 1 ------------------------\n'); fprintf('Composition g(f(w)): f() is non-linear and g() is non-linear:\n'); A = randn(4,5); B = randn(5,4); w = [A(:);B(:)]; f = @(w) gemm(w,4,5,4); g1 = gemm(f,2,4,2); test_MV2DF(g1,w); fprintf('--------------------------------------\n\n'); fprintf('-------------- Test 2 ------------------------\n'); fprintf('Composition g(f(w)): f() is linear and g() is non-linear:\n'); A = randn(4,5); B = randn(5,4); w = [A(:);B(:)]; T = randn(40); f = @(w) linTrans(w,@(x)T*x,@(y)T.'*y); g2 = gemm(f,4,5,4); test_MV2DF(g2,w); fprintf('--------------------------------------\n\n'); fprintf('-------------- Test 3 ------------------------\n'); fprintf('Composition g(f(w)): f() is non-linear and g() is linear:\n'); A = randn(4,5); B = randn(5,4); w = [A(:);B(:)]; f = @(w) gemm(w,4,5,4); T = randn(16); g3 = linTrans(f,@(x)T*x,@(y)T.'*y); test_MV2DF(g3,w); fprintf('--------------------------------------\n\n'); fprintf('-------------- Test 4 ------------------------\n'); fprintf('Composition g(f(w)): f() is linear and g() is linear:\n'); w = randn(10,1); T1 = randn(11,10); f = @(w) linTrans(w,@(x)T1*x,@(y)T1.'*y); T2 = randn(5,11); g4 = linTrans(f,@(x)T2*x,@(y)T2.'*y); test_MV2DF(g4,w); fprintf('--------------------------------------\n\n');
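A composition sketch matching Test 1 above: an outer 2-by-4-by-2 matrix product applied to the output of an inner 4-by-5-by-4 one.

A = randn(4,5); B = randn(5,4);
w = [A(:);B(:)];
f  = @(w) gemm(w,4,5,4);       % inner: vec(A*B), length 16
g1 = gemm(f,2,4,2);            % outer gemm composed with f
y  = g1(w);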
github
bsxfan/meta-embeddings-master
pav_calibration.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/calibration/pav_calibration.m
2,716
utf_8
2a9298835be5d7757fb4d660a5a2d7b3
function [pav_trans,score_bounds,llr_bounds] = pav_calibration(tar,non,small_val) % Creates a calibration transformation function using the PAV algorithm. % Inputs: % tar: A vector of target scores. % non: A vector of non-target scores. % small_val: An offset to make the transformation function % invertible. small_val is subtracted from the left-hand side % of the bin and added to the right-hand side (and the bin % height is linear between its left and right ends). % Outputs: % pav_trans: The transformation function. It takes in scores and % outputs (calibrated) log-likelihood ratios. % score_bounds: The left and right ends of the line segments % that make up the transformation. % llr_bounds: The lower and upper ends of the line segments that % make up the transformation. if nargin==0 test_this() return else assert(nargin==3) assert(size(tar,1)==1) assert(size(non,1)==1) assert(length(tar)>0) assert(length(non)>0) end largeval = 1e6; scores = [-largeval tar non largeval]; Pideal = [ones(1,length(tar)+1),zeros(1,length(non)+1)]; [scores,perturb] = sort(scores); Pideal = Pideal(perturb); [Popt,width,height] = pavx(Pideal); data_prior = (length(tar)+1)/length(Pideal); llr = logit(Popt) - logit(data_prior); bnd_ndx = make_bnd_ndx(width); score_bounds = scores(bnd_ndx); llr_bounds = llr(bnd_ndx); llr_bounds(1:2:end) = llr_bounds(1:2:end) - small_val; llr_bounds(2:2:end) = llr_bounds(2:2:end) + small_val; pav_trans = @(s) pav_transform(s,score_bounds,llr_bounds); end function scr_out = pav_transform(scr_in,score_bounds,llr_bounds) scr_out = zeros(1,length(scr_in)); for ii=1:length(scr_in) x = scr_in(ii); [x1,x2,v1,v2] = get_line_segment_vals(x,score_bounds,llr_bounds); scr_out(ii) = (v2 - v1) * (x - x1) / (x2 - x1) + v1; end end function bnd_ndx = make_bnd_ndx(width) len = length(width)*2; c = cumsum(width); bnd_ndx = zeros(1,len); bnd_ndx(1:2:len) = [1 c(1:end-1)+1]; bnd_ndx(2:2:len) = c; end function [x1,x2,v1,v2] = get_line_segment_vals(x,score_bounds,llr_bounds) p = find(x>=score_bounds,1,'last'); x1 = score_bounds(p); x2 = score_bounds(p+1); v1 = llr_bounds(p); v2 = llr_bounds(p+1); end function test_this() ntar = 10; nnon = 12; tar = 2*randn(1,ntar)+2; non = 2*randn(1,nnon)-2; tarnon = [tar non]; scores = [-inf tarnon inf]; Pideal = [ones(1,length(tar)+1),zeros(1,length(non)+1)]; [scores,perturb] = sort(scores); Pideal = Pideal(perturb); [Popt,width,height] = pavx(Pideal); data_prior = (length(tar)+1)/length(Pideal); llr = logit(Popt) - logit(data_prior); [dummy,pinv] = sort(perturb); tmp = llr(pinv); llr = tmp(2:end-1) pav_trans = pav_calibration(tar,non,0); pav_trans(tarnon) end
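A typical use, on hypothetical synthetic scores: fit the PAV mapping on a development set and apply it to new scores.

tar = 2*randn(1,1000)+2;               % hypothetical target scores
non = 2*randn(1,1500)-2;               % hypothetical non-target scores
pav_trans = pav_calibration(tar,non,1e-6);
llrs = pav_trans([-3 0 3]);            % monotone, piecewise-linear calibrated LLRs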
github
bsxfan/meta-embeddings-master
align_with_ndx.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/classes/@Scores/align_with_ndx.m
2,628
utf_8
5899b5e5bd43dea8280d84cea8fdf0ec
function aligned_scr = align_with_ndx(scr,ndx) % The ordering in the output Scores object corresponds to ndx, so % aligning several Scores objects with the same ndx will result in % them being comparable with each other. % Inputs: % scr: a Scores object % ndx: a Key or Ndx object % Outputs: % aligned_scr: scr resized to size of 'ndx' and reordered % according to the ordering of modelset and segset in 'ndx'. if nargin==1 test_this(); return end assert(nargin==2) assert(isa(scr,'Scores')) assert(isa(ndx,'Key')||isa(ndx,'Ndx')) assert(scr.validate()) assert(ndx.validate()) aligned_scr = Scores(); aligned_scr.modelset = ndx.modelset; aligned_scr.segset = ndx.segset; m = length(ndx.modelset); n = length(ndx.segset); [hasmodel,rindx] = ismember(ndx.modelset,scr.modelset); rindx = rindx(hasmodel); [hasseg,cindx] = ismember(ndx.segset,scr.segset); cindx = cindx(hasseg); aligned_scr.scoremat = zeros(m,n); aligned_scr.scoremat(hasmodel,hasseg) = double(scr.scoremat(rindx,cindx)); aligned_scr.scoremask = false(m,n); aligned_scr.scoremask(hasmodel,hasseg) = scr.scoremask(rindx,cindx); assert(sum(aligned_scr.scoremask(:)) <= sum(hasmodel)*sum(hasseg)); if isa(ndx,'Ndx') aligned_scr.scoremask = aligned_scr.scoremask & ndx.trialmask; else aligned_scr.scoremask = aligned_scr.scoremask & (ndx.tar | ndx.non); end if sum(hasmodel)<m log_warning('models reduced from %i to %i\n',m,sum(hasmodel)); end if sum(hasseg)<n log_warning('testsegs reduced from %i to %i\n',n,sum(hasseg)); end if isa(ndx,'Key') %supervised tar = ndx.tar & aligned_scr.scoremask; non = ndx.non & aligned_scr.scoremask; missing = sum(ndx.tar(:)) - sum(tar(:)); if missing > 0 log_warning('%i of %i targets missing.\n',missing,sum(ndx.tar(:))); end missing = sum(ndx.non(:)) - sum(non(:)); if missing > 0 log_warning('%i of %i non-targets missing.\n',missing,sum(ndx.non(:))); end else mask = ndx.trialmask & aligned_scr.scoremask; missing = sum(ndx.trialmask(:)) - sum(mask(:)); if missing > 0 log_warning('%i of %i trials missing\n',missing,sum(ndx.trialmask(:))); end end assert(all(isfinite(aligned_scr.scoremat(aligned_scr.scoremask(:))))) assert(aligned_scr.validate()) end function test_this() key = Key(); key.modelset = {'1','2','3'}; key.segset = {'a','b','c'}; key.tar = logical(eye(3)); key.non = ~key.tar; scr = Scores(); scr.scoremat = [1 2 3; 4 5 6; 7 8 9]; scr.scoremask = true(3); scr.modelset = {'3','2','1'}; scr.segset = {'c','b','a'}; scores = scr.scoremat, scr = scr.align_with_ndx(key); scores = scr.scoremat, end
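A usage sketch based on test_this above: the score matrix is reordered to follow the key's model and segment ordering.

key = Key();
key.modelset = {'1','2','3'};
key.segset   = {'a','b','c'};
key.tar = logical(eye(3));
key.non = ~key.tar;
scr = Scores();
scr.modelset = {'3','2','1'};
scr.segset   = {'c','b','a'};
scr.scoremat  = [1 2 3; 4 5 6; 7 8 9];
scr.scoremask = true(3);
aligned = scr.align_with_ndx(key);     % rows/columns now follow the key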
github
bsxfan/meta-embeddings-master
filter.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/classes/@Scores/filter.m
2,622
utf_8
ebaa2297b42e23384ffa07c12bdcc005
function outscr = filter(inscr,modlist,seglist,keep) % Removes some of the information in a Scores object. Useful for % creating a gender specific score set from a pooled gender score % set. Depending on the value of 'keep', the two input lists % indicate the models and test segments (and their associated % scores) to retain or discard. % Inputs: % inscr: A Scores object. % modlist: A cell array of strings which will be compared with % the modelset of 'inscr'. % seglist: A cell array of strings which will be compared with % the segset of 'inscr'. % keep: A boolean indicating whether modlist and seglist are the % models to keep or discard. % Outputs: % outscr: A filtered version of 'inscr'. if nargin == 0 test_this(); return end assert(nargin==4) assert(isa(inscr,'Scores')) assert(iscell(modlist)) assert(iscell(seglist)) assert(inscr.validate()) if keep keepmods = modlist; keepsegs = seglist; else keepmods = setdiff(inscr.modelset,modlist); keepsegs = setdiff(inscr.segset,seglist); end keepmodidx = ismember(inscr.modelset,keepmods); keepsegidx = ismember(inscr.segset,keepsegs); outscr = Scores(); outscr.modelset = inscr.modelset(keepmodidx); outscr.segset = inscr.segset(keepsegidx); outscr.scoremat = inscr.scoremat(keepmodidx,keepsegidx); outscr.scoremask = inscr.scoremask(keepmodidx,keepsegidx); assert(outscr.validate()) if length(inscr.modelset) > length(outscr.modelset) log_info('Number of models reduced from %d to %d.\n',length(inscr.modelset),length(outscr.modelset)); end if length(inscr.segset) > length(outscr.segset) log_info('Number of test segments reduced from %d to %d.\n',length(inscr.segset),length(outscr.segset)); end end function test_this() scr = Scores(); scr.modelset = {'aaa','bbb','ccc','ddd'}; scr.segset = {'11','22','33','44','55'}; scr.scoremat = [1,2,3,4,5;6,7,8,9,10;11,12,13,14,15;16,17,18,19,20]; scr.scoremask = true(4,5); fprintf('scr.modelset\n'); disp(scr.modelset) fprintf('scr.segset\n'); disp(scr.segset) fprintf('scr.scoremat\n'); disp(scr.scoremat) modlist = {'bbb','ddd'} seglist = {'11','55'} keep = true out = Scores.filter(scr,modlist,seglist,keep); fprintf('out.modelset\n'); disp(out.modelset) fprintf('out.segset\n'); disp(out.segset) fprintf('out.scoremat\n'); disp(out.scoremat) fprintf('out.scoremask\n'); disp(out.scoremask) keep = false out = Scores.filter(scr,modlist,seglist,keep); fprintf('out.modelset\n'); disp(out.modelset) fprintf('out.segset\n'); disp(out.segset) fprintf('out.scoremat\n'); disp(out.scoremat) fprintf('out.scoremask\n'); disp(out.scoremask) end
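For example, pulling a two-model, two-segment subset out of a pooled Scores object (the names and scores here are hypothetical):

scr = Scores();
scr.modelset = {'aaa','bbb','ccc','ddd'};
scr.segset   = {'11','22','33','44','55'};
scr.scoremat  = randn(4,5);
scr.scoremask = true(4,5);
sub  = Scores.filter(scr,{'bbb','ddd'},{'11','55'},true);    % keep these
rest = Scores.filter(scr,{'bbb','ddd'},{'11','55'},false);   % or discard them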
github
bsxfan/meta-embeddings-master
filter.m
.m
meta-embeddings-master/code/snapshot_for_anya/matlab/bosaris_toolkit/classes/@Key/filter.m
3,047
utf_8
9274e13ab0bf80ca9a90fd6f46da8ff0
function outkey = filter(inkey,modlist,seglist,keep) % Removes some of the information in a key. Useful for creating a % gender specific key from a pooled gender key. Depending on the % value of 'keep', the two input lists indicate the strings to % retain or the strings to discard. % Inputs: % inkey: A Key object. % modlist: A cell array of strings which will be compared with % the modelset of 'inkey'. % seglist: A cell array of strings which will be compared with % the segset of 'inkey'. % keep: A boolean indicating whether modlist and seglist are the % models to keep or discard. % Outputs: % outkey: A filtered version of 'inkey'. if nargin == 0 test_this(); return else assert(nargin==4) end assert(isa(inkey,'Key')) assert(iscell(modlist)) assert(iscell(seglist)) assert(inkey.validate()) if keep keepmods = modlist; keepsegs = seglist; else keepmods = setdiff(inkey.modelset,modlist); keepsegs = setdiff(inkey.segset,seglist); end keepmodidx = ismember(inkey.modelset,keepmods); keepsegidx = ismember(inkey.segset,keepsegs); outkey = Key(); outkey.modelset = inkey.modelset(keepmodidx); outkey.segset = inkey.segset(keepsegidx); outkey.tar = inkey.tar(keepmodidx,keepsegidx); outkey.non = inkey.non(keepmodidx,keepsegidx); assert(outkey.validate()) if length(inkey.modelset) > length(outkey.modelset) log_info('Number of models reduced from %d to %d.\n',length(inkey.modelset),length(outkey.modelset)); end if length(inkey.segset) > length(outkey.segset) log_info('Number of test segments reduced from %d to %d.\n',length(inkey.segset),length(outkey.segset)); end end function test_this() key = Key(); key.modelset = {'aaa','bbb','ccc','ddd'}; key.segset = {'11','22','33','44','55'}; key.tar = logical([1,0,0,1,0;0,1,0,1,1;0,0,0,1,0;1,1,0,0,0]); key.non = logical([0,1,0,0,0;1,0,0,0,0;1,1,1,0,0;0,0,1,1,1]); fprintf('key.modelset\n'); disp(key.modelset) fprintf('key.segset\n'); disp(key.segset) fprintf('key.tar\n'); disp(key.tar) fprintf('key.non\n'); disp(key.non) modlist = {'bbb','ddd'} seglist = {'11','55'} keep = true out = Key.filter(key,modlist,seglist,keep); fprintf('out.modelset\n'); disp(out.modelset) fprintf('out.segset\n'); disp(out.segset) fprintf('out.tar\n'); disp(out.tar) fprintf('out.non\n'); disp(out.non) keep = false out = Key.filter(key,modlist,seglist,keep); fprintf('out.modelset\n'); disp(out.modelset) fprintf('out.segset\n'); disp(out.segset) fprintf('out.tar\n'); disp(out.tar) fprintf('out.non\n'); disp(out.non) modlist = {'bbb','ddd','eee'} seglist = {'11','66','77','55'} keep = true out = Key.filter(key,modlist,seglist,keep); fprintf('out.modelset\n'); disp(out.modelset) fprintf('out.segset\n'); disp(out.segset) fprintf('out.tar\n'); disp(out.tar) fprintf('out.non\n'); disp(out.non) keep = false out = Key.filter(key,modlist,seglist,keep); fprintf('out.modelset\n'); disp(out.modelset) fprintf('out.segset\n'); disp(out.segset) fprintf('out.tar\n'); disp(out.tar) fprintf('out.non\n'); disp(out.non) end
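And the matching operation on a Key, e.g. to carve a subset key out of a pooled one (the key contents are hypothetical):

key = Key();
key.modelset = {'aaa','bbb','ccc','ddd'};
key.segset   = {'11','22','33','44','55'};
key.tar = logical([1,0,0,1,0;0,1,0,1,1;0,0,0,1,0;1,1,0,0,0]);
key.non = logical([0,1,0,0,0;1,0,0,0,0;1,1,1,0,0;0,0,1,1,1]);
subkey = Key.filter(key,{'bbb','ddd'},key.segset,true);      % keep all segments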