plateform (stringclasses, 1 value) | repo_name (stringlengths, 13-113) | name (stringlengths, 3-74) | ext (stringclasses, 1 value) | path (stringlengths, 12-229) | size (int64, 23-843k) | source_encoding (stringclasses, 9 values) | md5 (stringlengths, 32) | text (stringlengths, 23-843k) |
---|---|---|---|---|---|---|---|---|
github | akileshbadrinaaraayanan/IITH-master | model_train.m | .m | IITH-master/Sem6/CS5190_Soft_Computing/cs13b1042_final_code/model_train.m | 15,023 | utf_8 | d917316085f0feb53840b2af50c42680 |
function model_train(varargin)
% Copyright (C) 2015 Tsung-Yu Lin, Aruni RoyChowdhury, Subhransu Maji.
% All rights reserved.
%
% This file is part of the BCNN and is made available under
% the terms of the BSD license (see the COPYING file).
[opts, imdb] = model_setup(varargin{:}) ;
% -------------------------------------------------------------------------
% Train encoders and compute codes
% -------------------------------------------------------------------------
if ~exist(opts.resultPath)
psi = {} ;
for i = 1:numel(opts.encoders)
if exist(opts.encoders{i}.codePath)
load(opts.encoders{i}.codePath, 'code', 'area') ;
else
if exist(opts.encoders{i}.path)
encoder = load(opts.encoders{i}.path) ;
if isfield(encoder, 'net')
if opts.useGpu
encoder.net = vl_simplenn_move(encoder.net, 'gpu') ;
encoder.net.useGpu = true ;
else
encoder.net = vl_simplenn_move(encoder.net, 'cpu') ;
encoder.net.useGpu = false ;
end
end
if isfield(encoder, 'neta')
if opts.useGpu
encoder.neta = vl_simplenn_move(encoder.neta, 'gpu') ;
encoder.netb = vl_simplenn_move(encoder.netb, 'gpu') ;
encoder.neta.useGpu = true ;
encoder.netb.useGpu = true ;
else
encoder.neta = vl_simplenn_move(encoder.neta, 'cpu') ;
encoder.netb = vl_simplenn_move(encoder.netb, 'cpu') ;
encoder.neta.useGpu = false ;
encoder.netb.useGpu = false ;
end
end
else
opts.encoders{i}.opts = horzcat(opts.encoders{i}.opts);
train = find(ismember(imdb.images.set, [1 2])) ;
train = vl_colsubset(train, 1000, 'uniform') ;
encoder = encoder_train_from_images(...
imdb, imdb.images.id(train), ...
opts.encoders{i}.opts{:}, ...
'useGpu', opts.useGpu) ;
encoder_save(encoder, opts.encoders{i}.path) ;
end
code = encoder_extract_for_images(encoder, imdb, imdb.images.id, 'dataAugmentation', opts.dataAugmentation) ;
savefast(opts.encoders{i}.codePath, 'code') ;
end
psi{i} = code ;
clear code ;
end
psi = cat(1, psi{:}) ;
end
% -------------------------------------------------------------------------
% Train and test
% -------------------------------------------------------------------------
if exist(opts.resultPath)
info = load(opts.resultPath) ;
else
info = traintest(opts, imdb, psi) ;
save(opts.resultPath, '-struct', 'info') ;
vl_printsize(1) ;
[a,b,c] = fileparts(opts.resultPath) ;
print('-dpdf', fullfile(a, [b '.pdf'])) ;
end
str = {} ;
str{end+1} = sprintf('data: %s', opts.expDir) ;
str{end+1} = sprintf(' setup: %10s', opts.suffix) ;
str{end+1} = sprintf(' mAP: %.1f', info.test.map*100) ;
if isfield(info.test, 'acc')
str{end+1} = sprintf(' acc: %6.1f ', info.test.acc*100);
end
if isfield(info.test, 'im_acc')
str{end+1} = sprintf(' acc w/o normalization: %6.1f ', info.test.im_acc*100);
end
str{end+1} = sprintf('\n') ;
str = cat(2, str{:}) ;
fprintf('%s', str) ;
[a,b,c] = fileparts(opts.resultPath) ;
txtPath = fullfile(a, [b '.txt']) ;
f=fopen(txtPath, 'w') ;
fprintf(f, '%s', str) ;
fclose(f) ;
% -------------------------------------------------------------------------
function info = traintest(opts, imdb, psi)
% -------------------------------------------------------------------------
% Train using verification or not
verificationTask = isfield(imdb, 'pairs');
switch opts.dataAugmentation
case 'none'
ts =1 ;
case 'f2'
ts = 2;
otherwise
error('data augmentation type not supported')
end
if verificationTask,
train = ismember(imdb.pairs.set, [1 2]) ;
test = ismember(imdb.pairs.set, 3) ;
else % classification task
multiLabel = (size(imdb.images.label,1) > 1) ; % e.g. PASCAL VOC cls
train = ismember(imdb.images.set, [1 2]) ;
train = repmat(train, ts, 1);
train = train(:)';
test = ismember(imdb.images.set, 3) ;
test = repmat(test, ts, 1);
test = test(:)';
info.classes = find(imdb.meta.inUse) ;
% Train classifiers
C = 1 ;
w = {} ;
b = {} ;
for c=1:numel(info.classes)
fprintf('\n-------------------------------------- ');
fprintf('OVA-classifier: class: %d\n', c) ;
if ~multiLabel
y = 2*(imdb.images.label == info.classes(c)) - 1 ;
else
y = imdb.images.label(c,:) ;
end
y_test = y(test(1:ts:end));
y = repmat(y, ts, 1);
y = y(:)';
np = sum(y(train) > 0) ;
nn = sum(y(train) < 0) ;
n = np + nn ;
[w{c},b{c}] = vl_svmtrain(psi(:,train & y ~= 0), y(train & y ~= 0), 1/(n* C), ...
'epsilon', 0.001, 'verbose', 'biasMultiplier', 1, ...
'maxNumIterations', n * 200) ;
pred = w{c}'*psi + b{c} ;
% try cheap calibration
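% (affine rescaling so that, on the training set, the median score of the
% negatives maps to 0 and the median score of the positives maps to 1)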
mp = median(pred(train & y > 0)) ;
mn = median(pred(train & y < 0)) ;
b{c} = (b{c} - mn) / (mp - mn) ;
w{c} = w{c} / (mp - mn) ;
pred = w{c}'*psi + b{c} ;
scores{c} = pred ;
pred_test = reshape(pred(test), ts, []);
pred_test = mean(pred_test, 1);
[~,~,i]= vl_pr(y(train), pred(train)) ; ap(c) = i.ap ; ap11(c) = i.ap_interp_11 ;
[~,~,i]= vl_pr(y_test, pred_test) ; tap(c) = i.ap ; tap11(c) = i.ap_interp_11 ;
[~,~,i]= vl_pr(y(train), pred(train), 'normalizeprior', 0.01) ; nap(c) = i.ap ;
[~,~,i]= vl_pr(y_test, pred_test, 'normalizeprior', 0.01) ; tnap(c) = i.ap ;
end
% Book keeping
info.w = cat(2,w{:}) ;
info.b = cat(2,b{:}) ;
info.scores = cat(1, scores{:}) ;
info.train.ap = ap ;
info.train.ap11 = ap11 ;
info.train.nap = nap ;
info.train.map = mean(ap) ;
info.train.map11 = mean(ap11) ;
info.train.mnap = mean(nap) ;
info.test.ap = tap ;
info.test.ap11 = tap11 ;
info.test.nap = tnap ;
info.test.map = mean(tap) ;
info.test.map11 = mean(tap11) ;
info.test.mnap = mean(tnap) ;
clear ap nap tap tnap scores ;
fprintf('mAP train: %.1f, test: %.1f\n', ...
mean(info.train.ap)*100, ...
mean(info.test.ap)*100);
% Compute predictions, confusion and accuracy
[~,preds] = max(info.scores,[],1) ;
info.testScores = reshape(info.scores(:,test), size(info.scores,1), ts, []);
info.testScores = reshape(mean(info.testScores, 2), size(info.testScores,1), []);
[~,pred_test] = max(info.testScores, [], 1);
[~,gts] = ismember(imdb.images.label, info.classes) ;
gts_test = gts(test(1:ts:end));
gts = repmat(gts, ts, 1);
gts = gts(:)';
[info.train.confusion, info.train.acc] = compute_confusion(numel(info.classes), gts(train), preds(train)) ;
[info.test.confusion, info.test.acc] = compute_confusion(numel(info.classes), gts_test, pred_test) ;
[~, info.train.im_acc] = compute_confusion(numel(info.classes), gts(train), preds(train), ones(size(gts(train))), true) ;
[~, info.test.im_acc] = compute_confusion(numel(info.classes), gts_test, pred_test, ones(size(gts_test)), true) ;
% [info.test.confusion, info.test.acc] = compute_confusion(numel(info.classes), gts(test), preds(test)) ;
end
% -------------------------------------------------------------------------
function code = encoder_extract_for_images(encoder, imdb, imageIds, varargin)
% -------------------------------------------------------------------------
opts.batchSize = 64 ;
opts.maxNumLocalDescriptorsReturned = 500 ;
opts.concatenateCode = true;
opts.dataAugmentation = 'none';
opts = vl_argparse(opts, varargin) ;
[~,imageSel] = ismember(imageIds, imdb.images.id) ;
imageIds = unique(imdb.images.id(imageSel)) ;
n = numel(imageIds) ;
% prepare batches
n = ceil(numel(imageIds)/opts.batchSize) ;
batches = mat2cell(1:numel(imageIds), 1, [opts.batchSize * ones(1, n-1), numel(imageIds) - opts.batchSize*(n-1)]) ;
batchResults = cell(1, numel(batches)) ;
% just use as many workers as are already available
numWorkers = matlabpool('size') ;
%parfor (b = 1:numel(batches), numWorkers)
for b = numel(batches):-1:1
batchResults{b} = get_batch_results(imdb, imageIds, batches{b}, ...
encoder, opts.maxNumLocalDescriptorsReturned, opts.dataAugmentation) ;
end
switch opts.dataAugmentation
case 'none'
ts = 1;
case 'f2'
ts = 2;
otherwise
error('data augmentation type not supported')
end
code = cell(1, numel(imageIds)*ts) ;
for b = 1:numel(batches)
m = numel(batches{b});
for j = 1:m
k = batches{b}(j) ;
for aa=1:ts
code{(k-1)*ts+aa} = batchResults{b}.code{(j-1)*ts+aa};
end
end
end
if opts.concatenateCode
code = cat(2, code{:}) ;
end
% code is either:
% - a cell array, each cell containing an array of local features for a
% segment
% - an array of FV descriptors, one per segment
% -------------------------------------------------------------------------
function result = get_batch_results(imdb, imageIds, batch, encoder, maxn, dataAugmentation)
% -------------------------------------------------------------------------
m = numel(batch) ;
im = cell(1, m) ;
task = getCurrentTask() ;
if ~isempty(task), tid = task.ID ; else tid = 1 ; end
switch dataAugmentation
case 'none'
tfs = [0 ; 0 ; 0 ];
case 'f2'
tfs = [...
0 0 ;
0 0 ;
0 1];
otherwise
error('data augmentation type not supported')
end
ts = size(tfs,2);
im = cell(1, m*ts);
for i = 1:m
fprintf('Task: %03d: encoder: extract features: image %d of %d\n', tid, batch(i), numel(imageIds)) ;
for j=1:ts
idx = (i-1)*ts+j;
im{idx} = imread(fullfile(imdb.imageDir, imdb.images.name{imdb.images.id == imageIds(batch(i))}));
if size(im{idx}, 3) == 1, im{idx} = repmat(im{idx}, [1 1 3]); end; %grayscale image
tf = tfs(:,j) ;
if tf(3), sx = fliplr(1:size(im{idx}, 2)) ;
im{idx} = im{idx}(:,sx,:);
end
end
end
if ~isfield(encoder, 'numSpatialSubdivisions')
encoder.numSpatialSubdivisions = 1 ;
end
switch encoder.type
case 'rcnn'
code_ = get_rcnn_features(encoder.net, ...
im, ...
'regionBorder', encoder.regionBorder) ;
case 'dcnn'
gmm = [] ;
if isfield(encoder, 'covariances'), gmm = encoder ; end
code_ = get_dcnn_features(encoder.net, ...
im, ...
'encoder', gmm, ...
'numSpatialSubdivisions', encoder.numSpatialSubdivisions, ...
'maxNumLocalDescriptorsReturned', maxn) ;
case 'dsift'
gmm = [] ;
if isfield(encoder, 'covariances'), gmm = encoder ; end
code_ = get_dcnn_features([], im, ...
'useSIFT', true, ...
'encoder', gmm, ...
'numSpatialSubdivisions', encoder.numSpatialSubdivisions, ...
'maxNumLocalDescriptorsReturned', maxn) ;
case 'bcnn'
code_ = get_bcnn_features(encoder.neta, encoder.netb,...
im, ...
'regionBorder', encoder.regionBorder, ...
'normalization', encoder.normalization);
end
result.code = code_ ;
% -------------------------------------------------------------------------
function encoder = encoder_train_from_images(imdb, imageIds, varargin)
% -------------------------------------------------------------------------
opts.type = 'rcnn' ;
opts.model = '' ;
opts.modela = '';
opts.modelb = '';
opts.layer = 0 ;
opts.layera = 0 ;
opts.layerb = 0 ;
opts.useGpu = false ;
opts.regionBorder = 0.05 ;
opts.numPcaDimensions = +inf ;
opts.numSamplesPerWord = 1000 ;
opts.whitening = false ;
opts.whiteningRegul = 0 ;
opts.renormalize = false ;
opts.numWords = 64 ;
opts.numSpatialSubdivisions = 1 ;
opts.normalization = 'sqrt_L2';
opts = vl_argparse(opts, varargin) ;
encoder.type = opts.type ;
encoder.regionBorder = opts.regionBorder ;
switch opts.type
case {'dcnn', 'dsift'}
encoder.numWords = opts.numWords ;
encoder.renormalize = opts.renormalize ;
encoder.numSpatialSubdivisions = opts.numSpatialSubdivisions ;
end
switch opts.type
case {'rcnn', 'dcnn'}
encoder.net = load(opts.model) ;
encoder.net.layers = encoder.net.layers(1:opts.layer) ;
if opts.useGpu
encoder.net = vl_simplenn_move(encoder.net, 'gpu') ;
encoder.net.useGpu = true ;
else
encoder.net = vl_simplenn_move(encoder.net, 'cpu') ;
encoder.net.useGpu = false ;
end
case 'bcnn'
encoder.normalization = opts.normalization;
encoder.neta = load(opts.modela);
encoder.neta.layers = encoder.neta.layers(1:opts.layera);
encoder.netb = load(opts.modelb);
encoder.netb.layers = encoder.netb.layers(1:opts.layerb);
if opts.useGpu,
encoder.neta = vl_simplenn_move(encoder.neta, 'gpu');
encoder.netb = vl_simplenn_move(encoder.netb, 'gpu');
encoder.neta.useGpu = true;
encoder.netb.useGpu = true;
else
encoder.neta = vl_simplenn_move(encoder.neta, 'cpu');
encoder.netb = vl_simplenn_move(encoder.netb, 'cpu');
encoder.neta.useGpu = false;
encoder.netb.useGpu = false;
end
end
switch opts.type
case {'rcnn', 'bcnn'}
return ;
end
% Step 0: sample descriptors
fprintf('%s: getting local descriptors to train GMM\n', mfilename) ;
code = encoder_extract_for_images(encoder, imdb, imageIds, 'concatenateCode', false) ;
descrs = cell(1, numel(code)) ;
numImages = numel(code);
numDescrsPerImage = floor(encoder.numWords * opts.numSamplesPerWord / numImages);
for i=1:numel(code)
descrs{i} = vl_colsubset(code{i}, numDescrsPerImage) ;
end
descrs = cat(2, descrs{:}) ;
fprintf('%s: obtained %d local descriptors to train GMM\n', ...
mfilename, size(descrs,2)) ;
% Step 1 (optional): learn PCA projection
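% (eigendecomposition of the centered descriptor covariance; when whitening
% is enabled the projection also rescales each principal direction by
% 1/sqrt(eigenvalue), with a small regularizer added to the eigenvalues)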
if opts.numPcaDimensions < inf || opts.whitening
fprintf('%s: learning PCA rotation/projection\n', mfilename) ;
encoder.projectionCenter = mean(descrs,2) ;
x = bsxfun(@minus, descrs, encoder.projectionCenter) ;
X = x*x' / size(x,2) ;
[V,D] = eig(X) ;
d = diag(D) ;
[d,perm] = sort(d,'descend') ;
d = d + opts.whiteningRegul * max(d) ;
m = min(opts.numPcaDimensions, size(descrs,1)) ;
V = V(:,perm) ;
if opts.whitening
encoder.projection = diag(1./sqrt(d(1:m))) * V(:,1:m)' ;
else
encoder.projection = V(:,1:m)' ;
end
clear X V D d ;
else
encoder.projection = 1 ;
encoder.projectionCenter = 0 ;
end
descrs = encoder.projection * bsxfun(@minus, descrs, encoder.projectionCenter) ;
if encoder.renormalize
descrs = bsxfun(@times, descrs, 1./max(1e-12, sqrt(sum(descrs.^2)))) ;
end
% Step 2: train GMM
v = var(descrs')' ;
[encoder.means, encoder.covariances, encoder.priors] = ...
vl_gmm(descrs, opts.numWords, 'verbose', ...
'Initialization', 'kmeans', ...
'CovarianceBound', double(max(v)*0.0001), ...
'NumRepetitions', 1) ;
github | akileshbadrinaaraayanan/IITH-master | bcnn_train.m | .m | IITH-master/Sem6/CS5190_Soft_Computing/cs13b1042_final_code/bcnn_train.m | 20,624 | utf_8 | 08aba3ab9bfbaab945389480c52b8ac7 |
function [bcnn_net, info] = bcnn_train(bcnn_net, getBatch, imdb, varargin)
% BCNN_TRAIN trains an asymmetric BCNN
% BCNN_TRAIN() is an example learner implementing stochastic gradient
% descent with momentum to train an asymmetric BCNN for image classification.
% It can be used with different datasets by providing a suitable
% getBatch function.
% INPUT
% bcnn_net: a bcnn networks structure
% getBatch: function to read a batch of images
% imdb: imdb structure of a dataset
% OUTPUT
% bcnn_net: the asymmetric BCNN network after fine-tuning
% info: log of training and validation
% An asymmetric BCNN network BCNN_NET consists of three parts:
% neta: Network A to extract features
% netb: Network B to extract features
% netc: consists of normalization layers and softmax layer to obtain the
% classification error and loss based on bcnn features combined from neta
% and netb
% Copyright (C) 2015 Tsung-Yu Lin, Aruni RoyChowdhury, Subhransu Maji.
% All rights reserved.
%
% This file is part of the BCNN and is made available under
% the terms of the BSD license (see the COPYING file).
% This function is modified from CNN_TRAIN of MatConvNet
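%
% A minimal, hypothetical usage sketch (the .mat file names, the init_netc
% helper and the option values below are illustrative assumptions, not part
% of this code; the option names match the defaults defined further down):
%
%   bcnn_net.neta = load('neta.mat') ;   % feature network A (assumed file)
%   bcnn_net.netb = load('netb.mat') ;   % feature network B (assumed file)
%   bcnn_net.netc = init_netc() ;        % hypothetical builder for netc
%   [bcnn_net, info] = bcnn_train(bcnn_net, @getBatch, imdb, ...
%       'numEpochs', 50, 'batchSize', 32, 'useGpu', true, ...
%       'learningRate', 0.001, 'expDir', fullfile('data','bcnn-exp')) ;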
% basic setting
opts.train = [] ;
opts.val = [] ;
opts.numEpochs = 300 ;
opts.batchSize = 256 ;
opts.useGpu = false ;
opts.learningRate = 0.0001 ;
opts.continue = false ;
opts.expDir = fullfile('data','exp') ;
opts.conserveMemory = false ;
opts.sync = true ;
opts.prefetch = false ;
opts.weightDecay = 0.0005 ;
opts.momentum = 0.3 ;
opts.errorType = 'multiclass' ;
opts.plotDiagnostics = false ;
opts.layera = 14;
opts.layerb = 14;
opts.regionBorder = 0.05;
opts.dataAugmentation = {'none', 'none', 'none'};
opts.scale = 2;
opts = vl_argparse(opts, varargin) ;
if ~exist(opts.expDir, 'dir'), mkdir(opts.expDir) ; end
if isempty(opts.train), opts.train = find(imdb.images.set==1) ; end
if isempty(opts.val), opts.val = find(imdb.images.set==2) ; end
if isnan(opts.train), opts.train = [] ; end
% -------------------------------------------------------------------------
% Network initialization
% -------------------------------------------------------------------------
% set hyperparameters of network A
neta = bcnn_net.neta;
for i=1:numel(neta.layers)
if ~strcmp(neta.layers{i}.type,'conv'), continue; end
neta.layers{i}.filtersMomentum = zeros(size(neta.layers{i}.filters), ...
class(neta.layers{i}.filters)) ;
neta.layers{i}.biasesMomentum = zeros(size(neta.layers{i}.biases), ...
class(neta.layers{i}.biases)) ;
if ~isfield(neta.layers{i}, 'filtersLearningRate')
neta.layers{i}.filtersLearningRate = 1 ;
end
if ~isfield(neta.layers{i}, 'biasesLearningRate')
neta.layers{i}.biasesLearningRate = 1 ;
end
if ~isfield(neta.layers{i}, 'filtersWeightDecay')
neta.layers{i}.filtersWeightDecay = 1 ;
end
if ~isfield(neta.layers{i}, 'biasesWeightDecay')
neta.layers{i}.biasesWeightDecay = 1 ;
end
end
% set hyperparameters of network B
netb = bcnn_net.netb;
for i=1:numel(netb.layers)
if ~strcmp(netb.layers{i}.type,'conv'), continue; end
netb.layers{i}.filtersMomentum = zeros(size(netb.layers{i}.filters), ...
class(netb.layers{i}.filters)) ;
netb.layers{i}.biasesMomentum = zeros(size(netb.layers{i}.biases), ...
class(netb.layers{i}.biases)) ;
if ~isfield(netb.layers{i}, 'filtersLearningRate')
netb.layers{i}.filtersLearningRate = 1 ;
end
if ~isfield(netb.layers{i}, 'biasesLearningRate')
netb.layers{i}.biasesLearningRate = 1 ;
end
if ~isfield(netb.layers{i}, 'filtersWeightDecay')
netb.layers{i}.filtersWeightDecay = 1 ;
end
if ~isfield(netb.layers{i}, 'biasesWeightDecay')
netb.layers{i}.biasesWeightDecay = 1 ;
end
end
% set hyperparameters of network C
netc = bcnn_net.netc;
for i=1:numel(netc.layers)
if ~strcmp(netc.layers{i}.type,'conv'), continue; end
netc.layers{i}.filtersMomentum = zeros(size(netc.layers{i}.filters), ...
class(netc.layers{i}.filters)) ;
netc.layers{i}.biasesMomentum = zeros(size(netc.layers{i}.biases), ...
class(netc.layers{i}.biases)) ;
if ~isfield(netc.layers{i}, 'filtersLearningRate')
netc.layers{i}.filtersLearningRate = 1 ;
end
if ~isfield(netc.layers{i}, 'biasesLearningRate')
netc.layers{i}.biasesLearningRate = 1 ;
end
if ~isfield(netc.layers{i}, 'filtersWeightDecay')
netc.layers{i}.filtersWeightDecay = 1 ;
end
if ~isfield(netc.layers{i}, 'biasesWeightDecay')
netc.layers{i}.biasesWeightDecay = 1 ;
end
end
% -------------------------------------------------------------------------
% Move network to GPU or CPU
% -------------------------------------------------------------------------
if opts.useGpu
neta.useGpu = true;
neta = vl_simplenn_move(neta, 'gpu') ;
for i=1:numel(neta.layers)
if ~strcmp(neta.layers{i}.type,'conv'), continue; end
neta.layers{i}.filtersMomentum = gpuArray(neta.layers{i}.filtersMomentum) ;
neta.layers{i}.biasesMomentum = gpuArray(neta.layers{i}.biasesMomentum) ;
end
netb.useGpu = true;
netb = vl_simplenn_move(netb, 'gpu') ;
for i=1:numel(netb.layers)
if ~strcmp(netb.layers{i}.type,'conv'), continue; end
netb.layers{i}.filtersMomentum = gpuArray(netb.layers{i}.filtersMomentum) ;
netb.layers{i}.biasesMomentum = gpuArray(netb.layers{i}.biasesMomentum) ;
end
netc.useGpu = true;
netc = vl_simplenn_move(netc, 'gpu') ;
for i=1:numel(netc.layers)
if ~strcmp(netc.layers{i}.type,'conv'), continue; end
netc.layers{i}.filtersMomentum = gpuArray(netc.layers{i}.filtersMomentum) ;
netc.layers{i}.biasesMomentum = gpuArray(netc.layers{i}.biasesMomentum) ;
end
else
neta.useGpu = false;
neta = vl_simplenn_move(neta, 'cpu') ;
for i=1:numel(neta.layers)
if ~strcmp(neta.layers{i}.type,'conv'), continue; end
neta.layers{i}.filtersMomentum = gather(neta.layers{i}.filtersMomentum) ;
neta.layers{i}.biasesMomentum = gather(neta.layers{i}.biasesMomentum) ;
end
netb.useGpu = false;
netb = vl_simplenn_move(netb, 'cpu') ;
for i=1:numel(netb.layers)
if ~strcmp(netb.layers{i}.type,'conv'), continue; end
netb.layers{i}.filtersMomentum = gather(netb.layers{i}.filtersMomentum) ;
netb.layers{i}.biasesMomentum = gather(netb.layers{i}.biasesMomentum) ;
end
netc.useGpu = false;
netc = vl_simplenn_move(netc, 'cpu') ;
for i=1:numel(netc.layers)
if ~strcmp(netc.layers{i}.type,'conv'), continue; end
netc.layers{i}.filtersMomentum = gather(netc.layers{i}.filtersMomentum) ;
netc.layers{i}.biasesMomentum = gather(netc.layers{i}.biasesMomentum) ;
end
end
% -------------------------------------------------------------------------
% Train and validate
% -------------------------------------------------------------------------
if opts.useGpu
one = gpuArray(single(1)) ;
else
one = single(1) ;
end
info.train.objective = [] ;
info.train.error = [] ;
info.train.topFiveError = [] ;
info.train.speed = [] ;
info.val.objective = [] ;
info.val.error = [] ;
info.val.topFiveError = [] ;
info.val.speed = [] ;
lr = 0 ;
resa = [] ;
resb = [] ;
resc = [] ;
for epoch=1:opts.numEpochs
prevLr = lr ;
lr = opts.learningRate(min(epoch, numel(opts.learningRate))) ;
% fast-forward to where we stopped
modelPath = @(ep) fullfile(opts.expDir, sprintf('net-epoch-%d.mat', ep));
modelFigPath = fullfile(opts.expDir, 'net-train.pdf') ;
if opts.continue
if exist(modelPath(epoch),'file'), continue ; end
if epoch > 1
fprintf('resuming by loading epoch %d\n', epoch-1) ;
load(modelPath(epoch-1), 'neta', 'netb', 'netc', 'info') ;
end
end
train = opts.train(randperm(numel(opts.train))) ;
val = opts.val ;
info.train.objective(end+1) = 0 ;
info.train.error(end+1) = 0 ;
info.train.topFiveError(end+1) = 0 ;
info.train.speed(end+1) = 0 ;
info.val.objective(end+1) = 0 ;
info.val.error(end+1) = 0 ;
info.val.topFiveError(end+1) = 0 ;
info.val.speed(end+1) = 0 ;
% reset momentum if needed
if prevLr ~= lr
fprintf('learning rate changed (%f --> %f): resetting momentum\n', prevLr, lr) ;
for l=1:numel(neta.layers)
if ~strcmp(neta.layers{l}.type, 'conv'), continue ; end
neta.layers{l}.filtersMomentum = 0 * neta.layers{l}.filtersMomentum ;
neta.layers{l}.biasesMomentum = 0 * neta.layers{l}.biasesMomentum ;
end
for l=1:numel(netb.layers)
if ~strcmp(netb.layers{l}.type, 'conv'), continue ; end
netb.layers{l}.filtersMomentum = 0 * netb.layers{l}.filtersMomentum ;
netb.layers{l}.biasesMomentum = 0 * netb.layers{l}.biasesMomentum ;
end
for l=1:numel(netc.layers)
if ~strcmp(netc.layers{l}.type, 'conv'), continue ; end
netc.layers{l}.filtersMomentum = 0 * netc.layers{l}.filtersMomentum ;
netc.layers{l}.biasesMomentum = 0 * netc.layers{l}.biasesMomentum ;
end
end
for t=1:opts.batchSize:numel(train)
% get next image batch and labels
batch = train(t:min(t+opts.batchSize-1, numel(train))) ;
batch_time = tic ;
fprintf('training: epoch %02d: processing batch %3d of %3d ...', epoch, ...
fix((t-1)/opts.batchSize)+1, ceil(numel(train)/opts.batchSize)) ;
[im, labels] = getBatch(imdb, batch, opts.dataAugmentation{1}, opts.scale) ;
if opts.prefetch
nextBatch = train(t+opts.batchSize:min(t+2*opts.batchSize-1, numel(train))) ;
getBatch(imdb, nextBatch, opts.dataAugmentation{1}, opts.scale) ;
end
if(exist('dA', 'var'))
clear dA dB dEdpsi psi_n ima imb resa resb
wait(gpuDevice);
resc = [];
end
% do forward passes on neta and netb to get bilinear CNN features
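% (each column of psi is one image's bilinear descriptor: the outer product
% of the feature maps of network A and network B, pooled over locations)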
[psi, resa, resb] = bcnn_asym_forward(neta, netb, im, ...
'regionBorder', opts.regionBorder, ...
'normalization', 'none', 'networkconservmemory', false);
if opts.useGpu
A = gather(resa(end).x);
B = gather(resb(end).x);
else
A = resa(end).x;
B = resb(end).x;
end
psi = cat(2, psi{:});
psi = reshape(psi, [1,1,size(psi,1),size(psi,2)]);
if opts.useGpu
psi = gpuArray(psi) ;
end
netc.layers{end}.class = labels ;
% do forward and backward passes on netc after bilinear pool
resc = vl_bilinearnn(netc, psi, 1, resc, ...
'conserveMemory', false, ...
'sync', opts.sync) ;
% compute the derivative with respect to the outputs of network A and network B
dEdpsi = reshape(squeeze(resc(1).dzdx), size(A,3), size(B,3), size(A,4));
[dA, dB] = arrayfun(@(x) compute_deriv_resp_AB(dEdpsi(:,:,x), A(:,:,:,x), B(:,:,:,x), opts.useGpu), 1:size(dEdpsi, 3), 'UniformOutput', false);
dA = cat(4, dA{:});
dB = cat(4, dB{:});
% backprop through network A
resa = vl_bilinearnn(neta, [], dA, resa, ...
'conserveMemory', opts.conserveMemory, ...
'sync', opts.sync, 'doforward', false) ;
% backprop through network B
resb = vl_bilinearnn(netb, [], dB, resb, ...
'conserveMemory', opts.conserveMemory, ...
'sync', opts.sync, 'doforward', false) ;
% gradient step on network A
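% (SGD with momentum and weight decay, per conv layer and up to the
%  per-layer rate/decay multipliers:
%    m <- mu*m - lr*wd*w - (lr/batchSize)*dE/dw ,  then  w <- w + m)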
for l=1:numel(neta.layers)
if ~strcmp(neta.layers{l}.type, 'conv'), continue ; end
neta.layers{l}.filtersMomentum = ...
opts.momentum * neta.layers{l}.filtersMomentum ...
- (lr * neta.layers{l}.filtersLearningRate) * ...
(opts.weightDecay * neta.layers{l}.filtersWeightDecay) * neta.layers{l}.filters ...
- (lr * neta.layers{l}.filtersLearningRate) / numel(batch) * resa(l).dzdw{1} ;
neta.layers{l}.biasesMomentum = ...
opts.momentum * neta.layers{l}.biasesMomentum ...
- (lr * neta.layers{l}.biasesLearningRate) * ....
(opts.weightDecay * neta.layers{l}.biasesWeightDecay) * neta.layers{l}.biases ...
- (lr * neta.layers{l}.biasesLearningRate) / numel(batch) * resa(l).dzdw{2} ;
neta.layers{l}.filters = neta.layers{l}.filters + neta.layers{l}.filtersMomentum ;
neta.layers{l}.biases = neta.layers{l}.biases + neta.layers{l}.biasesMomentum ;
end
% gradient step on network B
for l=1:numel(netb.layers)
if ~strcmp(netb.layers{l}.type, 'conv'), continue ; end
netb.layers{l}.filtersMomentum = ...
opts.momentum * netb.layers{l}.filtersMomentum ...
- (lr * netb.layers{l}.filtersLearningRate) * ...
(opts.weightDecay * netb.layers{l}.filtersWeightDecay) * netb.layers{l}.filters ...
- (lr * netb.layers{l}.filtersLearningRate) / numel(batch) * resb(l).dzdw{1} ;
netb.layers{l}.biasesMomentum = ...
opts.momentum * netb.layers{l}.biasesMomentum ...
- (lr * netb.layers{l}.biasesLearningRate) * ....
(opts.weightDecay * netb.layers{l}.biasesWeightDecay) * netb.layers{l}.biases ...
- (lr * netb.layers{l}.biasesLearningRate) / numel(batch) * resb(l).dzdw{2} ;
netb.layers{l}.filters = netb.layers{l}.filters + netb.layers{l}.filtersMomentum ;
netb.layers{l}.biases = netb.layers{l}.biases + netb.layers{l}.biasesMomentum ;
end
% gradient step on network C
for l=1:numel(netc.layers)
if ~strcmp(netc.layers{l}.type, 'conv'), continue ; end
netc.layers{l}.filtersMomentum = ...
opts.momentum * netc.layers{l}.filtersMomentum ...
- (lr * netc.layers{l}.filtersLearningRate) * ...
(opts.weightDecay * netc.layers{l}.filtersWeightDecay) * netc.layers{l}.filters ...
- (lr * netc.layers{l}.filtersLearningRate) / numel(batch) * resc(l).dzdw{1} ;
netc.layers{l}.biasesMomentum = ...
opts.momentum * netc.layers{l}.biasesMomentum ...
- (lr * netc.layers{l}.biasesLearningRate) * ....
(opts.weightDecay * netc.layers{l}.biasesWeightDecay) * netc.layers{l}.biases ...
- (lr * netc.layers{l}.biasesLearningRate) / numel(batch) * resc(l).dzdw{2} ;
netc.layers{l}.filters = netc.layers{l}.filters + netc.layers{l}.filtersMomentum ;
netc.layers{l}.biases = netc.layers{l}.biases + netc.layers{l}.biasesMomentum ;
end
% print information
batch_time = toc(batch_time) ;
speed = numel(batch)/batch_time ;
info.train = updateError(opts, info.train, netc, resc, batch_time) ;
fprintf(' %.2f s (%.1f images/s)', batch_time, speed) ;
n = t + numel(batch) - 1 ;
fprintf(' err %.1f err5 %.1f', ...
info.train.error(end)/n*100, info.train.topFiveError(end)/n*100) ;
fprintf('\n') ;
% debug info
if opts.plotDiagnostics
figure(2) ; vl_simplenn_diagnose(netc, resc) ; drawnow ;
end
end % next batch
% evaluation on validation set
for t=1:opts.batchSize:numel(val)
batch_time = tic ;
batch = val(t:min(t+opts.batchSize-1, numel(val))) ;
fprintf('validation: epoch %02d: processing batch %3d of %3d ...', epoch, ...
fix((t-1)/opts.batchSize)+1, ceil(numel(val)/opts.batchSize)) ;
[im, labels] = getBatch(imdb, batch, opts.dataAugmentation{2}, opts.scale) ;
if opts.prefetch
nextBatch = val(t+opts.batchSize:min(t+2*opts.batchSize-1, numel(val))) ;
getBatch(imdb, nextBatch, opts.dataAugmentation{2}, opts.scale) ;
end
if(exist('psi_n', 'var'))
clear psi_n ima imb resa resb resc
wait(gpuDevice);
resc = [];
end
% do forward pass on neta and netb to get bilinear CNN features
[psi, ~, ~] = bcnn_asym_forward(neta, netb, im, ...
'regionBorder', opts.regionBorder, 'normalization', 'none');
psi = cat(2, psi{:});
psi = reshape(psi, [1,1,size(psi,1),size(psi,2)]);
if opts.useGpu
psi = gpuArray(psi) ;
end
netc.layers{end}.class = labels ;
% do forward pass on netc after bilinear pool
resc = vl_bilinearnn(netc, psi, [], resc, ...
'conserveMemory', opts.conserveMemory, ...
'sync', opts.sync) ;
% print information
batch_time = toc(batch_time) ;
speed = numel(batch)/batch_time ;
info.val = updateError(opts, info.val, netc, resc, batch_time) ;
fprintf(' %.2f s (%.1f images/s)', batch_time, speed) ;
n = t + numel(batch) - 1 ;
fprintf(' err %.1f err5 %.1f', ...
info.val.error(end)/n*100, info.val.topFiveError(end)/n*100) ;
fprintf('\n') ;
end
% save
info.train.objective(end) = info.train.objective(end) / numel(train) ;
info.train.error(end) = info.train.error(end) / numel(train) ;
info.train.topFiveError(end) = info.train.topFiveError(end) / numel(train) ;
info.train.speed(end) = numel(train) / info.train.speed(end) ;
info.val.objective(end) = info.val.objective(end) / numel(val) ;
info.val.error(end) = info.val.error(end) / numel(val) ;
info.val.topFiveError(end) = info.val.topFiveError(end) / numel(val) ;
info.val.speed(end) = numel(val) / info.val.speed(end) ;
save(modelPath(epoch), 'neta', 'netb', 'netc', 'info', '-v7.3') ;
figure(1) ; clf ;
subplot(1,2,1) ;
semilogy(1:epoch, info.train.objective, 'k') ; hold on ;
semilogy(1:epoch, info.val.objective, 'b') ;
xlabel('training epoch') ; ylabel('energy') ;
grid on ;
h=legend('train', 'val') ;
set(h,'color','none');
title('objective') ;
subplot(1,2,2) ;
switch opts.errorType
case 'multiclass'
plot(1:epoch, info.train.error, 'k') ; hold on ;
plot(1:epoch, info.train.topFiveError, 'k--') ;
plot(1:epoch, info.val.error, 'b') ;
plot(1:epoch, info.val.topFiveError, 'b--') ;
h=legend('train','train-5','val','val-5') ;
case 'binary'
plot(1:epoch, info.train.error, 'k') ; hold on ;
plot(1:epoch, info.val.error, 'b') ;
h=legend('train','val') ;
end
grid on ;
xlabel('training epoch') ; ylabel('error') ;
set(h,'color','none') ;
title('error') ;
drawnow ;
print(1, modelFigPath, '-dpdf') ;
end
bcnn_net.neta = neta;
bcnn_net.netb = netb;
bcnn_net.netc = netc;
% -------------------------------------------------------------------------
function info = updateError(opts, info, net, res, speed)
% -------------------------------------------------------------------------
predictions = gather(res(end-1).x) ;
sz = size(predictions) ;
n = prod(sz(1:2)) ;
labels = net.layers{end}.class ;
info.objective(end) = info.objective(end) + sum(double(gather(res(end).x))) ;
info.speed(end) = info.speed(end) + speed ;
switch opts.errorType
case 'multiclass'
[~,predictions] = sort(predictions, 3, 'descend') ;
error = ~bsxfun(@eq, predictions, reshape(labels, 1, 1, 1, [])) ;
info.error(end) = info.error(end) +....
sum(sum(sum(error(:,:,1,:))))/n ;
info.topFiveError(end) = info.topFiveError(end) + ...
sum(sum(sum(min(error(:,:,1:5,:),[],3))))/n ;
case 'binary'
error = bsxfun(@times, predictions, labels) < 0 ;
info.error(end) = info.error(end) + sum(error(:))/n ;
end
function Ar = array_resize(A, w, h)
numChannels = size(A, 3);
indw = round(linspace(1,size(A,2),w));
indh = round(linspace(1,size(A,1),h));
Ar = zeros(w*h, numChannels, 'single');
for i = 1:numChannels,
Ai = A(indh,indw,i);
Ar(:,i) = Ai(:);
end
function [dA, dB] = compute_deriv_resp_AB(dEdpsi, A, B, useGpu)
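% Given bilinear features psi = A'*B, with A and B reshaped to
% (h*w) x channels matrices on a common spatial grid, the chain rule gives
%   dE/dA = B * (dE/dpsi)'   and   dE/dB = A * (dE/dpsi),
% which is what the matrix products below compute after the larger feature
% map has been downsampled to match the smaller one.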
w1 = size(A,2) ;
h1 = size(A,1) ;
w2 = size(B,2) ;
h2 = size(B,1) ;
%% make sure A and B have same aspect ratio
assert(w1/h1==w2/h2, 'Only feature maps with the same aspect ratio are supported')
if w1*h1 <= w2*h2,
%downsample B
B = array_resize(B, w1, h1);
A = reshape(A, [w1*h1 size(A,3)]);
else
%downsample A
A = array_resize(A, w2, h2);
B = reshape(B, [w2*h2 size(B,3)]);
end
dA = B*dEdpsi';
dB = A*dEdpsi;
if w1*h1 <= w2*h2,
%B is downsampled, upsample dB back to original size
dB = reshape(dB, h1, w1, size(dB,2));
tempdB = dB;
if(useGpu)
dB = gpuArray(zeros(h2, w2, size(B,2), 'single'));
else
dB = zeros(h2, w2, size(B,2), 'single');
end
indw = round(linspace(1,w2,w1));
indh = round(linspace(1,h2,h1));
dB(indh, indw, :) = tempdB;
dA = reshape(dA, h1, w1, size(dA,2));
else
%A is downsampled, upsample dA back to original size
dA = reshape(dA, h2, w2, size(dA,2));
tempdA = dA;
if(useGpu)
dA = gpuArray(zeros(h1, w1, size(A,2), 'single'));
else
dA = zeros(h1, w1, size(A,2), 'single');
end
indw = round(linspace(1,w1,w2));
indh = round(linspace(1,h1,h2));
dA(indh, indw, :) = tempdA;
dB = reshape(dB, h2, w2, size(dB, 2));
end
github | akileshbadrinaaraayanan/IITH-master | imdb_get_batch_bcnn.m | .m | IITH-master/Sem6/CS5190_Soft_Computing/cs13b1042_final_code/imdb_get_batch_bcnn.m | 3,947 | utf_8 | 66ff062002437a8ac04ba2fd4e8f8468 |
function imo = imdb_get_batch_bcnn(images, varargin)
% imdb_get_batch_bcnn Load, preprocess, and pack images for BCNN evaluation
% For an asymmetric model, the function preprocesses the images twice,
% once for each of the two networks.
% OUTPUT
% imo: a cell array where each element is a cell array of images.
% For a symmetric BCNN model, numel(imo) will be 1 and imo{1} will be a
% cell array of images.
% For an asymmetric BCNN, numel(imo) will be 2: imo{1} is a cell array containing the preprocessed images for network A
% and, similarly, imo{2} is a cell array containing the preprocessed images for network B.
%
% Copyright (C) 2015 Tsung-Yu Lin, Aruni RoyChowdhury, Subhransu Maji.
% All rights reserved.
%
% This file is part of the BCNN and is made available under
% the terms of the BSD license (see the COPYING file).
%
% This file modified from CNN_IMAGENET_GET_BATCH of MatConvNet
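%
% A minimal, hypothetical usage sketch (the image list and the per-network
% options struct array netOpts are illustrative; netOpts(i) would typically
% carry fields such as imageSize, border and averageImage for network i):
%
%   images = {'im1.jpg', 'im2.jpg'} ;            % assumed file names
%   imo = imdb_get_batch_bcnn(images, netOpts, ...
%                             'numThreads', 4, 'augmentation', 'f2') ;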
for i=1:numel(varargin{1})
opts(i).imageSize = [227, 227] ;
opts(i).border = [0, 0] ;
opts(i).averageImage = [] ;
opts(i).augmentation = 'none' ;
opts(i).interpolation = 'bilinear' ;
opts(i).numAugments = 1 ;
opts(i).numThreads = 0 ;
opts(i).prefetch = false ;
opts(i).keepAspect = false;
opts(i).scale = 1;
opts(i) = vl_argparse(opts(i), {varargin{1}(i),varargin{2:end}});
%opts(i) = vl_argparse(opts(i), varargin(2:end));
if(i==1)
switch opts(i).augmentation
case 'none'
tfs = [.5 ; .5 ; 0 ];
case 'f2'
tfs = [...
0.5 0.5 ;
0.5 0.5 ;
0 1];
end
[~,augmentations] = sort(rand(size(tfs,2), numel(images)), 1) ;
end
imo{i} = get_batch_fun(images, opts(i), augmentations, tfs);
end
function imo = get_batch_fun(images, opts, augmentations, tfs)
opts.imageSize(1:2) = round(opts.imageSize(1:2).*opts.scale);
if(opts.scale ~= 1)
opts.averageImage = mean(mean(opts.averageImage, 1),2);
end
% fetch is true if images is a list of filenames (instead of
% a cell array of images)
% fetch = numel(images) > 1 && ischar(images{1}) ;
fetch = ischar(images{1}) ;
% prefetch is used to load images in a separate thread
prefetch = fetch & opts.prefetch ;
im = cell(1, numel(images)) ;
if opts.numThreads > 0
if prefetch
vl_imreadjpeg(images, 'numThreads', opts.numThreads, 'prefetch') ;
imo = [] ;
return ;
end
if fetch
im = vl_imreadjpeg(images,'numThreads', opts.numThreads) ;
end
end
if ~fetch
im = images ;
end
imo = cell(1, numel(images)*opts.numAugments) ;
si=1;
for i=1:numel(images)
% acquire image
if isempty(im{i})
imt = imread(images{i}) ;
imt = single(imt) ; % faster than im2single (and multiplies by 255)
else
imt = im{i} ;
end
if size(imt,3) == 1
imt = cat(3, imt, imt, imt) ;
end
% resize
if opts.keepAspect
w = size(imt,2) ;
h = size(imt,1) ;
factor = [(opts.imageSize(1)+opts.border(1))/h ...
(opts.imageSize(2)+opts.border(2))/w];
factor = max(factor) ;
if any(abs(factor - 1) > 0.0001)
imt = imresize(imt, ...
'scale', factor, ...
'method', opts.interpolation) ;
end
w = size(imt,2) ;
h = size(imt,1) ;
imt = imcrop(imt, [fix((w-opts.imageSize(1))/2)+1, fix((h-opts.imageSize(2))/2)+1, opts.imageSize(1)-1, opts.imageSize(2)-1]);
else
imt = imresize(imt, ...
opts.imageSize(1:2), ...
'method', opts.interpolation) ;
end
% crop & flip
w = size(imt,2) ;
h = size(imt,1) ;
for ai = 1:opts.numAugments
t = augmentations(ai,i) ;
tf = tfs(:,t) ;
sx = 1:w;
if tf(3), sx = fliplr(sx) ; end
imo{si} = imt(:,sx,:);
si = si + 1 ;
end
end
if ~isempty(opts.averageImage)
for i=1:numel(imo)
imo{i} = bsxfun(@minus, imo{i}, opts.averageImage) ;
end
end
github | econdaryl/GSSA-master | GSSA.m | .m | GSSA-master/MATLAB/GSSA MATLAB Code in Progress/GSSA.m | 12,736 | utf_8 | ccdceb3ab22c08fcd51d3d23fc692ecf |
% GSSA function
% version 1.3 written by Kerk L. Phillips 2/11/2012
% Implements a version of Judd, Maliar & Maliar's (Quantitative Economics,
% 2011) Generalized Stochastic Simulation Algorithm with jump variables.
% This function takes three inputs:
% 1) a guess for the steady state values of the endogenous state variables
%    & jump variables, XYbarguess
% 2) an initial guess for the parameter matrix, beta
% 3) a string specifying the model's name, modelname
% It outputs the parameters for an approximation of the state transition
% function, X(t+1) = f(X(t),Z(t)), and the jump variable function,
% Y(t) = g(X(t),Z(t)),
% where X is a vector of nx endogenous state variables
% Y is a vector of ny jump variables
% and Z is a vector of nz exogenous state variables
% It requires the following subroutines written by Kenneth L. Judd,
% Lilia Maliar and Serguei Maliar, which are available as a zip file from
% Lilia & Serguei Maliar's webpage at:
% http://www.stanford.edu/~maliars/Files/Codes.html
% 1. "Num_Stab_Approx.m" implements the numerically stable LS and LAD
% approximation methods
% 2. "Ord_Polynomial_N.m" constructs the sets of basis functions for ordinary
% polynomials of the degrees from one to five, for
% the N-country model
% 3. "Monomials_1.m" constructs integration nodes and weights for an N-
% dimensional monomial (non-product) integration rule
% with 2N nodes
% 4. "Monomials_2.m" constructs integration nodes and weights for an N-
% dimensional monomial (non-product) integration rule
% with 2N^2+1 nodes
% 5. "GH_Quadrature.m" constructs integration nodes and weights for the
% Gauss-Hermite rules with the number of nodes in
% each dimension ranging from one to ten
% It also requires one model-specific external function:
% It should be named "modelname"_dyn.m and should take as inputs:
% X(t+2), X(t+1), X(t), Y(t+1), Y(t), Z(t+1) & Z(t)
% It should output a column vector with nx+ny elements containing the values
% of the model's behavioral equations (many of which will be Euler equations)
% written in such a way that each equation evaluates to zero. This
% function is used to find the steady state and in the GSSA algorithm
% itself.
% The output is:
% 1) a vector of approximation coefficients, out.
% 2) a vector of steady state values for X & Y, XYbarout.
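%
% A minimal, hypothetical usage sketch ('mymodel' and all numerical values
% are illustrative; the globals nx, ny, nz, NN, SigZ and numerSS must be
% set by the calling model script, and mymodel_dyn.m (plus mymodel_trunc.m,
% used only when truncation is enabled) must be on the path; the function
% pads or truncates betaguess to match the polynomial basis):
%
%   global nx ny nz NN SigZ numerSS
%   nx = 1; ny = 0; nz = 1; NN = 0.95; SigZ = 0.01; numerSS = 1;
%   XYbarguess = 1;                    % guess for the steady state of X
%   betaguess  = zeros(1, nx+ny);      % initial coefficient guess
%   [beta, XYbar] = GSSA(XYbarguess, betaguess, 'mymodel');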
function [out,XYbarout] = GSSA(XYbarguess,beta,modelname)
% data values
global X1 Z
% options flags
global fittype numerSS quadtype dotrunc PF
% parameters
global nx ny nz nobs XYbar NN SigZ dyneqns trunceqns D
global J epsi_nodes weight_nodes
% create the name of the dynamic behavioral equations function
dyneqns = str2func([modelname '_dyn']);
trunceqns = str2func([modelname '_trunc']);
% set numerical parameters (I include this in the model script)
% nx = 1; %number of endogenous state variables
% ny = 0; %number of jump variables
% nz = 1; %number of exogenous state variables
%numerSS = 1; %set to 1 if XYbargues is a guess, 0 if it is exact SS.
nobs = 1000; % number of observations in the simulation sample
T = nobs;
kdamp = 0.05; % Damping parameter for (fixed-point) iteration on
% the coefficients of the capital policy functions
maxwhile = 1000; % maximum iterations for approximations
usePQ = 0; % 1 to use a linear approximation to get initial guess
% for beta
dotrunc = 0; % 1 to truncate data outside constraints, 0 otherwise
fittype = 1; % functional form for polynomial approximations
% 1) linear
% 2) log-linear
RM = 6; % regression (approximation) method, RM=1,...,8:
% 1=OLS, 2=LS-SVD, 3=LAD-PP, 4=LAD-DP,
% 5=RLS-Tikhonov, 6=RLS-TSVD, 7=RLAD-PP, 8=RLAD-DP;
penalty = 7; % a parameter determining the value of the regularization
% parameter for a regularization methods, RM=5,6,7,8;
D = 5; % order of polynomial approximation (1,2,3,4 or 5)
PF = 1; % polynomial family
% 1) ordinary
% 2) Hermite
quadtype = 4; % type of quadrature used
% 1) "Monomials_1.m"
% constructs integration nodes and weights for an N-
% dimensional monomial (non-product) integration rule
% with 2N nodes
% 2) "Monomials_2.m"
% constructs integration nodes and weights for an N-
% dimensional monomial (non-product) integration rule
% with 2N^2+1 nodes
% 3)"GH_Quadrature.m"
% constructs integration nodes and weights for the
% Gauss-Hermite rules with the number of nodes in
% each dimension ranging from one to ten
% 4)"GSSA_nodes.m"
% does rectangular arbitrage for a large number of nodes
% used only for univariate shock case.
Qn = 10; % the number of nodes in each dimension; 1<=Qn<=10
% for "GH_Quadrature.m" only
% calculate nodes and weights for quadrature
% Inputs: "nz" is the number of random variables; N>=1;
% "SigZ" is the variance-covariance matrix; N-by-N
% Outputs: "n_nodes" is the total number of integration nodes; 2*N;
% "epsi_nodes" are the integration nodes; n_nodes-by-N;
% "weight_nodes" are the integration weights; n_nodes-by-1
if quadtype == 1
[J,epsi_nodes,weight_nodes] = Monomials_1(nz,SigZ);
elseif quadtype == 2
[J,epsi_nodes,weight_nodes] = Monomials_2(nz,SigZ);
elseif quadtype == 3
% Inputs: "Qn" is the number of nodes in each dimension; 1<=Qn<=10;
[J,epsi_nodes,weight_nodes] = GH_Quadrature(Qn,nz,SigZ);
elseif quadtype == 4
J = 20;
[epsi_nodes, weight_nodes] = GSSA_nodes(J,quadtype);
end
JJJ = J
% find steady state numerically; skip if you already have exact values
if numerSS == 1
options = optimset('Display','iter');
XYbar = fsolve(@GSSA_ss,XYbarguess,options)
else
XYbar = XYbarguess;
end
Xbar = XYbar(1:nx)
Ybar = XYbar(nx+1:nx+ny);
Zbar = zeros(nz,1);
% generate a random history of Z's to be used throughout
Z = zeros(nobs,nz);
eps = randn(nobs,nz)*SigZ;
for t=2:nobs
Z(t,:) = Z(t-1,:)*NN + eps(t,:);
end
if usePQ == 1
% get an initial estimate of beta by simulating about the steady state
% using Uhlig's solution method.
in = [Xbar; Xbar; Xbar; Ybar; Ybar; Zbar; Zbar];
[PP,QQ,UU] = GSSA_PQU(in);
% generate X & Y data given Z's above
Xtilde = zeros(T,nx);
X = ones(T,nx);
Ytilde = zeros(T,ny);
X(1,nx) = Xbar;
PP'
QQ'
Z(t-1,:)
Xtilde(t,:)
for t=2:T
Xtilde(t,:) = Xtilde(t-1,:)*PP' + Z(t-1,:)*QQ';
X(t,:) = Xbar.*exp(Xtilde(t,:));
end
Xtildefinal = Xtilde(T,:)*PP' + Z(T,:)*QQ';
Xfinal = Xbar.*exp(Xtildefinal);
% estimate beta using this data
Xrhs = X;
Ylhs = [X(2:T,:); Xfinal];
if fittype == 2
Xrhs = log(Xrhs);
Ylhs = log(Ylhs);
end
% add the Z series
Xrhs = [Xrhs Z];
% construct basis functions
XZbasis = Ord_Polynomial_N(Xrhs,D);
% run regressions to fit data
beta = Num_Stab_Approx(XZbasis,Ylhs,RM,penalty,1);
beta = real(beta)
end
% construct valid beta matrix from betaguess
% find size of beta guess
[rbeta, cbeta] = size(beta);
% find number of terms in basis functions
[rbasis, ~] = size(Ord_Polynomial_N(ones(1,nx+nz),D)')
if rbeta > rbasis
disp('rbeta > rbasis truncating betaguess')
beta = beta(1:rbasis,:);
[rbeta, cbeta] = size(beta);
end
if cbeta > nx+ny
disp('cbeta > cbasis truncating betaguess')
beta = beta(:,1:nx+ny);
[rbeta, cbeta] = size(beta);
end
if rbeta < rbasis
disp('rbeta < rbasis adding extra zero rows to betaguess')
beta = [beta; zeros(rbasis-rbeta,cbeta)];
[rbeta, cbeta] = size(beta);
end
if cbeta < nx+ny
disp('cbeta < cbasis adding extra zero columns to betaguess')
beta = [beta zeros(rbeta,nx+ny-cbeta)];
end
% find constants that yield theoretical SS
beta(1,:)
eye(nx)
beta(2:nx+1,1:nx)
beta(1,:) = XYbar(1,1:nx)*(eye(nx)-beta(2:nx+1,1:nx))
% start simulations at SS
X1 = XYbar(1:nx);
% set intial value of old coefficients
betaold = beta;
% begin iterations
dif_1d = 1;
icount = 0;
[icount dif_1d 1]
while dif_1d > 1e-4*kdamp;
% update iteration count
icount = icount + 1;
% stop if too many iterations have passed
if icount > maxwhile
break
end
% find convex combination of old and new coefficients
beta = kdamp*beta + (1-kdamp)*betaold;
betaold = beta;
% find time series for XYap using approximate function
% initialize series
XYap = zeros(T,nx+ny);
% find SS implied by current beta
if fittype == 2
Xbar = exp(beta(1,1:nx)*(eye(nx)-beta(2:1+nx,1:nx))^(-1));
else
Xbar = beta(1,1:nx)*(eye(nx)-beta(2:1+nx,1:nx))^(-1);
end
% % use theoretical SS vslues
% Xbar = XYbar(:,1:nx);
if ny > 0
if fittype == 2
Ybar = exp(ones(1,ny)*beta(1,1+nx:nx+ny) + log(Xbar)*beta(2:1+nx,1+nx:nx+ny));
else
Ybar = ones(1,ny)*beta(1,1+nx:nx+ny) + Xbar*beta(2:1+nx,1+nx:nx+ny);
end
else
Ybar = [];
end
X1 = [Xbar Ybar];
% find Xp & Y using approximate Xp & Y functions
XYap(1,:) = GSSA_XYfunc(X1(1:nx),Z(1,:),beta);
for t=2:T
XYap(t,:) = GSSA_XYfunc(XYap(t-1,1:nx),Z(t,:),beta);
end
% generate XYex using the behavioral equations
% Judd, Maliar & Maliar call this y(t)
[XYex,~] = GSSA_genex(XYap,beta);
% % watch the data converge using plots
% for i=1:nx+ny
% figure;
% subplot(nx+ny,1,i)
% plot([XYap(:,i) XYex(:,i)])
% end
[XYap(1:10,:) XYex(1:10,:)]
[XYap(T-10:T,:) XYex(T-10:T,:)]
% find new coefficient values
% generate basis functions for Xap & Z
% get the X portion of XYap
Xap = [X1(1:nx); XYap(1:T-1,1:nx)];
if fittype == 2
Xap = log(Xap);
XYex = log(XYex);
end
% add the Z series
XZap = [Xap Z];
% construct basis functions
XZbasis = Ord_Polynomial_N(XZap,D);
% run regressions to fit data
% Inputs: "X" is a matrix of dependent variables in a regression, T-by-n,
% where n corresponds to the total number of coefficients in the
% original regression (i.e. with unnormalized data);
% "Y" is a matrix of independent variables, T-by-N;
% "RM" is the regression (approximation) method, RM=1,...,8:
% 1=OLS, 2=LS-SVD, 3=LAD-PP, 4=LAD-DP,
% 5=RLS-Tikhonov, 6=RLS-TSVD, 7=RLAD-PP, 8=RLAD-DP;
% "penalty" is a parameter determining the value of the regulari-
% zation parameter for a regularization methods, RM=5,6,7,8;
% "normalize" is the option of normalizing the data,
% 0=unnormalized data, 1=normalized data
beta = Num_Stab_Approx(XZbasis,XYex,RM,penalty,1);
beta = real(beta)
% evaluate convergence criteria
if icount == 1
dif_1d = 1;
dif_beta = abs(1 - mean(mean(betaold./beta)));
else
dif_1d =abs(1 - mean(mean(XYapold./XYap)));
dif_beta =abs(1 - mean(mean(betaold./beta)));
if isnan(dif_1d)
dif_1d = 0;
disp('There were problems with NaN for the convergence metric')
end
if isinf(dif_1d)
dif_1d = 0;
disp('There were problems with inf for the convergence metric')
end
end
% replace old k values
XYapold = XYap;
% report results of iteration
[icount dif_1d dif_beta]
end
out = beta;
XYbarout = XYbar;
end
%%
function out = GSSA_ss(XYbar)
% This function finds the steady state using numerical methods
% parameters
global nx ny nz dyneqns
Xbar = XYbar(1:nx);
Ybar = XYbar(nx+1:nx+ny);
out = dyneqns([Xbar'; Xbar'; Xbar'; Ybar'; Ybar'; zeros(nz,1); zeros(nz,1)]);
end
github | econdaryl/GSSA-master | GSSA.m | .m | GSSA-master/MATLAB/GSSA MATLAB ver 1.2 (4 Feb 2012)/GSSA.m | 13,295 | utf_8 | 659817883a76880af2cfcebd95a488ed |
% GSSA function
% version 1.1 written by Kerk L. Phillips 2/5/2012
% Implements a version of Judd, Maliar & Maliar's (Quantitative Economics,
% 2011) Generalized Stochastic Simulation Algorithm with jump variables.
% This function takes three inputs:
% 1) a guess for the steady state values of the endogenous state variables
%    & jump variables, XYbarguess
% 2) 3 initial guesses for the parameter matrices, AA, BB & CC
% 3) a string specifying the model's name, "modelname"
% It outputs the parameters for an approximation of the state transition
% function, X(t+1) = f(X(t),Z(t)), and the jump variable function,
% Y(t) = g(X(t),Z(t)),
% where X is a vector of nx endogenous state variables
% Y is a vector of ny jump variables
% and Z is a vector of nz exogenous state variables
% This script requires two external functions:
% 1) One should be named "modelname"_dyn.m and should take as inputs:
% X(t+2), X(t+1), X(t), Y(t+1), Y(t), Z(t+1) & Z(t)
% It should output a column vector with nx+ny elements containing the values
% of the model's behavioral equations (many of which will be Euler equations)
% written in such a way that each equation evaluates to zero. This
% function is used to find the steady state and in the GSSA algorithm
% itself.
% 2) A second function is named GSSA_fittype.m and is used to reshape the
% vector of approximate function coefficients into matrices suitable for
% simulating data. (In this version these matrices are AA, BB, and CC.)
% The output is:
% 1) a vector of approximation coefficients.
% 2) a vector of steady state values for X & Y
% 3) average Euler equation errors over a series of Monte Carlos.
% The GSSA_fittype.m function can be used to recover their matrix
% versions.
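%
% A minimal, hypothetical usage sketch (all names and values are
% illustrative; the globals nx, ny, nz, NN, SigZ, numerSS and fittype must
% be set by the calling model script, and mymodel_dyn.m plus GSSA_fittype.m
% must be on the path):
%
%   global nx ny nz NN SigZ numerSS fittype
%   nx = 1; ny = 0; nz = 1; NN = 0.95; SigZ = 0.01; numerSS = 1; fittype = 1;
%   AAguess = zeros(1, nx+ny); BBguess = .1*ones(nx+nz, nx+ny); CCguess = [];
%   [XYpars, XYbar, errors] = GSSA(1, AAguess, BBguess, CCguess, 'mymodel');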
function [out,XYbarout,errors] = GSSA(XYbarguess,AAguess,BBguess,CCguess,modelname)
% data values
global X1 Z
% options flags
global fittype regconstant regtype numerSS quadtype
% parameters
global nx ny nz nc npar betar nobs XYpars XYbar NN SigZ dyneqns
global nodes weights
global AA BB CC beta
% create the name of the dynamic behavioral equations function
dyneqns = str2func([modelname '_dyn']);
% set numerical parameters (I include this in the model script)
% nx = 1; %number of endogenous state variables
% ny = 0; %number of jump variables
% nz = 1; %number of exogenous state variables
%numerSS = 1; %set to 1 if XYbargues is a guess, 0 if it is exact SS.
nobs = 20; %number of observations in the simulation sample
ccrit = .001; %convergence criterion for approximations
conv = .25; %convexifier parameter for approximations
maxwhile = 500; %maximum iterations for approximations
nmc = 1; %number of Monte Carlos for Euler error check
% fittype = 1; %functional form for approximations
% %0 is linear (AVOID)
% %1 is log-linear
% %2 is quadratic (AVOID)
% %3 is log-quadratic
regconstant = 0; %choose 1 to include a constant in the regression fitting
regtype = 1; %type of regression, 1 is LAD, 0 is OLS.
quadtype = 0; %type of quadrature used
%0 is rectangular
J = 20; %number of nodes for quadrature
nc = (nx+nz)*(nx+nz+1)/2;
npar = (nx*nx+nx*nz+nx+ny*nx+ny*nz+ny);
if fittype==2 || fittype==3
npar = npar + nc;
end
if regconstant == 0;
npar = npar - nx - ny;
end
npar
% calculate nodes and weights for quadrature
[nodes, weights] = GSSA_nodes(J,quadtype);
% set initial guess for coefficients
if regconstant == 1
AA = [];
else
if size(AAguess) ~= [1,nx+ny]
AA = AAguess;
else
AA = zeros(1,nx+ny);
end
end
if size(BBguess) ~= [nx+nz,nx+ny]
BB = .1*ones(nx+nz,nx+ny);
else
BB = BBguess;
end
if fittype==2 || fittype==3
if size(CCguess) ~= [nc,nx+ny]
CC = zeros(nc,nx+ny);
else
CC = CCguess;
end
else
CC = [];
end
beta = [AA; BB; CC];
[betar, betac] = size(beta);
nbeta = betar*betac
XYpars = reshape(beta,1,nbeta);
% find steady state numerically; skip if you already have exact values
if numerSS == 1
options = optimset('Display','iter');
XYbar = fsolve(@GSSA_ss,XYbarguess,options);
else
XYbar = XYbarguess;
end
% start at SS
X1 = XYbar(1:nx);
% generate a random history of Z's to be used throughout
Z = zeros(nobs,nz);
eps = randn(nobs,nz)*SigZ;
for t=2:nobs
Z(t,:) = Z(t-1,:)*NN + eps(t,:);
end
% set intial value of old coefficients
XYparsold = XYpars;
% begin iterations
converge = 1;
icount = 0;
[icount converge XYpars]
while converge > ccrit
% update iteration count
icount = icount + 1;
% stop if too many iterations have passed
if icount > maxwhile
break
end
% find convex combination of old and new coefficients
XYpars = conv*XYpars + (1-conv)*XYparsold;
XYparsold = XYpars;
% find time series for XYap using approximate function
XYap = GSSA_genap();
% generate XYex using the behavioral equations
% Judd, Mailar & Mailar call this y(t)
[XYex,~] = GSSA_genex();
% find new coefficient values;
XYpars = GSSA_fit(XYex,XYap,XYparsold);
beta = reshape(XYpars,betar,nx+ny);
[AA,BB,CC] = GSSA_fittype(beta);
% evaluate convergence criteria
if icount == 1
converge = 1;
else
converge = sum(sum(abs((XYap-XYapold)./XYap)),2)/(nobs*(nx+ny));
if isnan(converge)
converge = 0;
disp('There are problems with NaN for the convergence metric')
end
if isinf(converge)
converge = 0;
disp('There are problems with inf for the convergence metric')
end
end
% replace old k values
XYapold = XYap;
% report results of iteration
%[icount,converge]
[icount converge XYpars]
end
out = XYpars;
XYbarout = XYbar;
% run Monte Carlos to get average Euler errors
errors = 0;
for m=1:nmc
% create a new Z series
Z = zeros(nobs,nz);
eps = randn(nobs,nz)*SigZ;
for t=2:nobs
Z(t,:) = Z(t-1,:)*NN + eps(t,:);
end
% generate data & calculate the errors, add this to running average
[~,temp] = GSSA_genex();
errors = errors*(m-1)/m + temp/m;
m
end
end
%%
function XY = GSSA_genap()
% This function generates approximate values of Xp using the approximation
% equations in GSSA_XYfunc.m
% data values
global Z X1
% parameters
global nx ny
T = size(Z,1);
% initialize series
XY = zeros(T,nx+ny);
% find Xp & Y using approximate Xp & Y functions
XY(1,:) = GSSA_XYfunc(X1,Z(1,:));
for t=2:T
XY(t,:) = GSSA_XYfunc(XY(t-1,1:nx),Z(t,:));
end
end
%%
function [XYex,eulerr] = GSSA_genex()
% This function generates values of Xp using the behavioral equations
% in "name"_dyn.n. This is y(t) from Judd, Mailar & Mailar.
% data values
global Z X1
% options flags
global quadtype
% parameters
global nx ny nz NN nobs dyneqns nodes weights
T = nobs;
[~,J] =size(nodes);
% initialize series
XYex = zeros(T,nx+ny);
% find Xp & Y using approximate Xp & Y functions
XY = GSSA_genap();
Xp = XY(:,1:nx);
Y = XY(:,nx+1:nx+ny);
eulert = zeros(T,1);
% find X
X = [X1; Xp(1:T-1,:)];
% find EZp & EXpp using law of motion and approximate X & Y functions
if quadtype == 0 && nz < 2
for t=1:T
EZpj = zeros(J,nz);
EXYj = zeros(J,nx+ny);
EXppj = zeros(J,nx);
if ny > 0
EYpj = zeros(J,ny);
end
EGFj = zeros(J,nx+ny);
XYexj = zeros(J,nx+ny);
% integrate over discrete intervals
for j=1:J
% find Ezp using law of motion
EZpj(j,:) = Z(t,:)*NN + nodes(j);
% find EXpp & EYp using approximate functions
EXYj(j,:) = GSSA_XYfunc(Xp(t,:),EZpj(j,:));
EXppj(j,:) = EXYj(j,1:nx);
if ny > 0
EYpj(j,:) = EXYj(j,nx+1:nx+ny);
end
% find expected G & F values using nonlinear behavioral
% equations since GSSA-dyn evaluates to zero at the fixed point
% and it needs to evaluate to one, add ones to each element.
if ny > 0
EGFj(j,:) = dyneqns([EXppj(j,:)';Xp(t,:)';X(t,:)';...
EYpj(j,:)';Y(t,:)';EZpj(j,:)';Z(t,:)]')' + 1;
else
EGFj(j,:) = dyneqns([EXppj(j,:)';Xp(t,:)';X(t,:)';...
EZpj(j,:)';Z(t,:)]')' + 1;
end
% find Judd, Maliar & Maliar's y
XYexj(j,:) = EGFj(j,:).*[Y(t,:) Xp(t,:)];
end
% sum over J
XYex(t,:) = weights*XYexj;
eulert(t,:) = weights*(EGFj-1)*ones(nx+ny,1)/(nx+ny);
end
end
eulerr = ones(1,T)*eulert/T;
end
%%
function [Xp,Y] = GSSA_XYfunc(X,Z)
% This is the approximation function that generates Xp and Y from the
% current state. Inputs and outputs are row vectors.
% Currently it allows for log-linear OLS or log-linear LAD forms.
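% For example, in the log-linear case (fittype == 1, no constant) the
% fitted map implemented below is
%   [Xp Y] = XYbar .* exp( [log(X./Xbar)  Z] * beta ).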
% parameters
global nx ny XYbar
global beta
% options flags
global fittype regconstant
Xbar = XYbar(1:nx);
% notation assumes data in column vectors, so individual observations are
% row vectors.
% put appropriate linear & quadratic terms in indep list, we are removing
% the mean from the X's.
if fittype == 0 %linear
indep = [(X-Xbar) Z];
elseif fittype == 1 %log-linear
indep = [log(X./Xbar) Z];
elseif fittype == 2 %quadratic
sqterms = GSSA_sym2vec([(X-Xbar) Z]'*[(X-Xbar) Z]);
indep = [(X-Xbar) Z sqterms];
elseif fittype == 3 %log quadratic
sqterms = GSSA_sym2vec([log(X./Xbar) Z]'*[log(X./Xbar) Z]);
indep = [log(X./Xbar) Z sqterms];
end
% add constants if needed
if regconstant == 1;
indep = [ones(1,nx+ny) indep];
end
% create dependent variable
dep = indep*beta;
% convert to Xp's & Y's
if fittype==0 || fittype==2
XYp = XYbar + dep;
elseif fittype==1 || fittype==3
XYp = XYbar.*exp(dep);
end
% separate Xp's and Y's
Xp = XYp(1:nx);
if ny > 0
Y = XYp(nx+1:nx+ny);
else
Y = [];
end
end
%%
function out = GSSA_ss(XYbar)
% This function finds the steady state using numerical methods
% parameters
global nx ny nz dyneqns
Xbar = XYbar(1:nx);
Ybar = XYbar(nx+1:nx+ny);
out = dyneqns([Xbar'; Xbar'; Xbar'; Ybar'; Ybar'; zeros(nz,1); zeros(nz,1)]);
end
%%
function XYparout = GSSA_fit(XYex,XYap,XYparguess)
% This function fits XYex (Judd, Maliar & Maliar's y(t+1)) to XYap (the X
% series from the approximation equations).
% data values
global X1 Z
% paramters
global nx nc XYbar
% options flags
global fittype regtype regconstant
% data to pass to GSSA_ADcalc function
global LADY LADX
[T,~] = size(XYex);
% independent variables
% get current period values, take deviations from SS, and add Z
Xbar = XYbar(1:nx);
Xap = [X1; XYap(1:T-1,1:nx)];
if fittype==0 || fittype==2 %levels
Xap = Xap - repmat(Xbar,T,1);
elseif fittype==1 || fittype==3 %logarithms
Xap = log(Xap) - repmat(log(Xbar),T,1);
end
if regconstant == 1
LADX = [ones(T,1) Xap Z];
else
LADX = [Xap Z];
end
% calculate and concatenate squared terms if needed
if fittype==2 || fittype==3 %quadratic
sqterms = zeros(T,nc);
for t=1:T
sqterms(t,:) = GSSA_sym2vec([Xap(t,:) Z(t,:)]'*[Xap(t,:) Z(t,:)]);
end
LADX = [LADX sqterms];
end
% dependent variables
% take deviations
if fittype==0 || fittype==2 %levels
LADY = XYex - repmat(XYbar,T,1);
elseif fittype==1 || fittype==3 %logarithms
LADY = log(XYex) - repmat(log(XYbar),T,1);
end
% choose estimation method
if regtype == 1 %linear regression with LAD
options = optimset('Display','on','MaxFunEvals',1000000,...
'MaxIter',10000);
XYparout = fminsearch(@GSSA_ADcalc,XYparguess,options);
elseif regtype == 0 %linear regression with OLS
    XYparout = LADX\LADY;
end
end
%%
function AD = GSSA_ADcalc(XYpars)
% Does LAD estimation
% data for AD estimation
global LADY LADX
% get number of regressors and regressands
[~,nX] = size(LADX);
[~,nY] = size(LADY);
beta = reshape(XYpars,nX,nY);
AD = sum(sum(abs(LADY - LADX*beta)),2);
end
%%
function [nodes, weights] = GSSA_nodes(J,quadtype)
% does quadrature on expectations
% parameters
global nz SigZ
% calculate nodes and weights for rectangular quadrature used for taking
% expectations of behavioral eqns. This setup uses equally spaced
% probabilities (weights)
if quadtype == 0 && nz < 2
weights = ones(1,J)/J;
cumprob = .5/J:1/J:1-.5/J;
nodes = norminv(cumprob,0,SigZ);
end
% if isGpuAvailable == 1
% nodes = gpuArry(nodes);
% end
% check if the nodes look like a cumulative normal
% expnodes = weights*nodes'
% figure;
% plot(nodes)
end
%%
function A = GSSA_sym2vec(B)
% B is a symmetric matrix
% A is a row vectorization of its lower-triangular portion (equivalent to
% the upper triangle since B is symmetric)
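% Example (hypothetical 2-by-2 case): for B = [a b; b d] the loop returns
% A = [a b d], i.e. the n*(n+1)/2 unique second-order terms that the
% quadratic fittypes append to the regressor list.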
A = [];
for k = 1:size(B,1)
A = [A B(k,1:k)];
end
end
|
github
|
econdaryl/GSSA-master
|
GSSA_genex.m
|
.m
|
GSSA-master/MATLAB/GSSA MATLAB ver 1.4 (5 Nov 2012)/GSSA_genex.m
| 3,651 |
utf_8
|
e0d07d7cbae613aac65b2279399c5620
|
% GSSA package
% version 1.4 written by Kerk L. Phillips 11/5/2012
% Implements a version of Judd, Mailar & Mailar's (Quantitative Economics,
% 2011) Generalized Stochastic Simulation Algorithm with jump variables.
function [XYex,eulerr] = GSSA_genex(XY,Z,beta,J,epsi_nodes,weight_nodes,...
GSSAparams,modelparams)
global dyneqns
% This function generates values of Xp using the behavioral equations
% in "name"_dyn.m. This is y(t) from Judd, Mailar & Mailar.
%
% This function takes 8 inputs:
% 1) XY, a t-by-(nx+ny) matrix of values generated using the fitted
% functions
% 2) Z, a t-by-nz matrix of exogenous variables
% 3) beta, a matrix of polynomial coefficients for the fitted functions
% 4) J, the number of nodes used in the quadrature
% 5) epsi_nodes, a vector of epsilon values
% 6) weight_nodes, a vector of probability weights
% 7) GSSAparams, the vector of parameter values from the GSSA function
% 8) modelparams, a vector of model specific parameter values passed to the
% model dynamic function named dyneqns
%
% The output is:
% 1) XYex, the matrix of expected values for X and Y
% 2) eulerr, the average value of the Euler error over the sample
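%
% Example call (hypothetical values, assuming a single-shock model where
% GSSAparams is laid out as in GSSA.m and the model's _dyn file is on the path):
%   J = 20;
%   [epsi_nodes, weight_nodes] = GSSA_nodes(J, nz, SigZ);
%   [XYex, eulerr] = GSSA_genex(XYap, Z, beta, J, epsi_nodes, weight_nodes, ...
%                               GSSAparams, modelparams);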
% read in GSSA parameters
nx = GSSAparams(1);
ny = GSSAparams(2);
nz = GSSAparams(3);
NN = GSSAparams(17);
X1 = GSSAparams(19);
[T,~] = size(XY);
% initialize series
XYex = zeros(T,nx+ny);
% find X, Xp & Y using approximate parts of XY
Xp = XY(:,1:nx);
X = [X1(1:nx); Xp(1:T-1,:)];
Y = XY(:,nx+1:nx+ny);
eulert = zeros(T,1);
% find EZp & EXpp using law of motion and approximate X & Y functions
for t=1:T
EZpj = zeros(J,nz);
EXYj = zeros(J,nx+ny);
EXppj = zeros(J,nx);
if ny > 0
EYpj = zeros(J,ny);
end
EGFj = zeros(J,nx+ny);
XYexj = zeros(J,nx+ny);
% integrate over discrete intervals
for j=1:J
% find Ezp using law of motion
EZpj(j,:) = Z(t,:)*NN + epsi_nodes(j);
% find EXpp & EYp using approximate functions
EXYj(j,:) = GSSA_XYfunc(Xp(t,:),EZpj(j,:),beta,GSSAparams);
EXppj(j,:) = EXYj(j,1:nx);
if ny > 0
EYpj(j,:) = EXYj(j,nx+1:nx+ny);
end
        % find expected G & F values using the nonlinear behavioral
        % equations; the dynamic equations evaluate to zero at the fixed
        % point, but here they need to evaluate to one, so add one to each element.
if ny > 0
EGFj(j,:) = dyneqns([EXppj(j,:)';Xp(t,:)';X(t,:)';...
EYpj(j,:)';Y(t,:)';EZpj(j,:)';Z(t,:)]', ...
modelparams)' + 1;
% % additive
% EGFj(j,:) = dyneqns([EXppj(j,:)';Xp(t,:)';X(t,:)';...
% EYpj(j,:)';Y(t,:)';EZpj(j,:)';Z(t,:)]')';
else
EGFj(j,:) = dyneqns([EXppj(j,:)';Xp(t,:)';X(t,:)';...
EZpj(j,:)';Z(t,:)]', modelparams)' + 1;
% % additive
% EGFj(j,:) = dyneqns([EXppj(j,:)';Xp(t,:)';X(t,:)';...
% EZpj(j,:)';Z(t,:)]')';
end
% find Judd, Mailar & Mailar's y
XYexj(j,:) = EGFj(j,:).*[Y(t,:) Xp(t,:)];
% % additive
% XYexj(j,:) = EGFj(j,:)+[Y(t,:) Xp(t,:)];
end
% sum over J
XYex(t,:) = weight_nodes'*XYexj;
eulert(t,:) = weight_nodes'*(EGFj-1)*ones(nx+ny,1)/(nx+ny);
end
eulerr = ones(1,T)*eulert/T;
end
|
github
|
econdaryl/GSSA-master
|
GSSA_nodes.m
|
.m
|
GSSA-master/MATLAB/GSSA MATLAB ver 1.4 (5 Nov 2012)/GSSA_nodes.m
| 1,095 |
utf_8
|
c559001097911876e4daa2eefbab70f6
|
% GSSA package
% version 1.4 written by Kerk L. Phillips 11/5/2012
% Implements a version of Judd, Mailar & Mailar's (Quantitative Economics,
% 2011) Generalized Stochastic Simulation Algorithm with jump variables.
function [nodes, weights] = GSSA_nodes(J,nz,SigZ)
% This function calculates nodes and weights for rectangular quadrature
% used for taking expectations of behavioral eqns. This setup uses equally
% spaced probabilities (weights). This function is only suitable for the
% case of a single exogenous shock.
%
% This function takes 3 inputs:
% 1) J, the number of nodes used in the quadrature
% 2) nz, the integer number of elements in Z
% 3) SigZ, the variance of the shocks to Z
%
% The output is:
% 1) nodes, a vector of epsilon values
% 2) weights, a vector of probability weights
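%
% Example (hypothetical single shock with SigZ = 0.02; note that norminv
% treats SigZ as a standard deviation here):
%   [nodes, weights] = GSSA_nodes(20, 1, 0.02);
%   % nodes is 1-by-20 and weights is 20-by-1 with all elements equal to 1/20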
if nz < 2
weights = ones(J,1)/J;
cumprob = .5/J:1/J:1-.5/J;
nodes = norminv(cumprob,0,SigZ);
else
disp('too many elements in Z')
end
% check if the nodes look like a cumulative normal
% expnodes = weights*nodes'
% figure;
% plot(nodes)
end
|
github
|
econdaryl/GSSA-master
|
GSSA.m
|
.m
|
GSSA-master/MATLAB/GSSA MATLAB ver 1.4 (5 Nov 2012)/GSSA.m
| 13,889 |
utf_8
|
d75f59a5fb83f59f079bb42862bdf5fb
|
% GSSA package
% version 1.4 written by Kerk L. Phillips 11/5/2012
% Implements a version of Judd, Mailar & Mailar's (Quantitative Economics,
% 2011) Generalized Stochastic Simulation Algorithm with jump variables.
function [beta,XYbar,eulerr] = GSSA(XYbarguess, beta, modelname,...
GSSAparams, modelparams)
global nx ny nz dyneqns trunceqns
% This function takes five inputs:
% 1) XYbarguess
% a guess for the steady state values of the endogenous state variables &
% jump variables. The function outputs the parameters for an
% approximation of the state transition function, X(t+1) = f(X(t),Z(t)),
% and the jump variable function Y(t) = g(X(t),Z(t)),
% where X is a vector of nx endogenous state variables,
% Y is a vector of ny jump variables,
% and Z is a vector of nz exogenous state variables
% 2) beta
% an initial guess for the parameter matrix
% 3) modelname
% a string specifying the model's name
% 4) GSSAparams
% a vector of GSSA parameters
% 5) modelparams
% a vector of model specific parameters that is passed to the
% appropriate "modelname"_dyn and "modelname"_trunc files.
%
% The output is:
% 1) beta, a vector of approximation coefficients.
% 2) XYbar, a vector of steady state values for X & Y.
% 3) eulerr
%
% The GSSA parameters to be set are:
% nx number of endogenous state variables
% ny number of jump variables
% nz number of exogenous state variables
% numerSS set to 1 if XYbarguess is a guess, 0 if it is exact SS.
% T number of observations in the simulation sample
% kdamp Damping parameter for (fixed-point) iteration on the coefficients
% of the capital policy functions
% maxwhile maximum iterations for approximations
% usePQ 1 to use a linear approximation to get initial guess for beta
% dotrunc 1 to truncate data outside constraints, 0 otherwise
% fittype functional form for polynomial approximations
% 1) linear
% 2) log-linear
% RM regression (approximation) method, RM=1,...,8:
% 1=OLS, 2=LS-SVD, 3=LAD-PP, 4=LAD-DP,
% 5=RLS-Tikhonov, 6=RLS-TSVD, 7=RLAD-PP, 8=RLAD-DP;
% penalty a parameter determining the value of the regularization
% parameter for a regularization methods, RM=5,6,7,8;
% D order of polynomial approximation (1,2,3,4 or 5)
% PF polynomial family
% 1) ordinary
% 2) Hermite
% quadtype type of quadrature used
% 1) "Monomials_1.m" - constructs integration nodes and weights
% for an N-dimensional monomial (non-product) integration rule
% with 2N nodes
% 2) "Monomials_2.m" - constructs integration nodes and weights
% for an N-dimensional monomial (non-product) integration rule
% with 2N^2+1 nodes
% 3) "GH_Quadrature.m" - constructs integration nodes and weights
% for the Gauss-Hermite rules with the number of nodes in each
% dimension ranging from one to ten
% 4) "GSSA_nodes.m" - does rectangular quadrature for a large
% number of nodes; used only for the univariate shock case.
% Qn the number of nodes in each dimension; 1<=Qn<=10 for
% "GH_Quadrature.m" only
% SigZ nz-by-nz matrix of variances/covariances for the
% exogenous state variables.
% X1 nx-by-1 vector of starting values
% setting this to -999 uses the steady state as starting values
%
% It requires the following subroutines written by Kenneth L. Judd,
% Lilia Maliar and Serguei Maliar, which are available as a zip file from
% Lilia & Serguei Maliar's webpage at:
% http://www.stanford.edu/~maliars/Files/Codes.html
% 1. "Num_Stab_Approx.m" implements the numerically stable LS and LAD
% approximation methods
% 2. "Ord_Polynomial_N.m" constructs the sets of basis functions for ordinary
% polynomials of the degrees from one to five, for
% the N-country model
% 3. "Monomials_1.m" constructs integration nodes and weights for an N-
% dimensional monomial (non-product) integration rule
% with 2N nodes
% 4. "Monomials_2.m" constructs integration nodes and weights for an N-
% dimensional monomial (non-product) integration rule
% with 2N^2+1 nodes
% 5. "GH_Quadrature.m" constructs integration nodes and weights for the
% Gauss-Hermite rules with the number of nodes in
% each dimension ranging from one to ten
%
% It also requires one model-specific external function:
% It should be named "modelname"_dyn.m and should take as inputs:
% X(t+2), X(t+1), X(t), Y(t+1), Y(t), Z(t+1) & Z(t)
% It should output a with nx+ny elements column vector with the values of
% the model's behavioral equations (many of which will be Euler equations)
% written in such a way that the equation evaluates to zero. This
% function is used to find the steady state and in the GSSA algorithm
% itself.
% An optional file named "modelname"_trunc.m takes the XY vector in a given
% period and truncates the variables to any upper and lower bounds set in
% that function.
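%
% Example driver (hypothetical model file 'mymodel_dyn.m'; the ordering of
% GSSAparams follows the list above and assumes nx = ny = nz = 1 so that
% NN, SigZ and X1 are scalars):
%   GSSAparams = [1; 1; 1; 1; 10000; 0.05; 1000; 0; 0; 1; 6; 7; 3; 1; 4; ...
%                 10; 0.9; 0.02; -999];
%   [beta, XYbar, eulerr] = GSSA(XYbarguess, betaguess, 'mymodel', ...
%                                GSSAparams, modelparams);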
% read in GSSA parameters
nx = GSSAparams(1);
ny = GSSAparams(2);
nz = GSSAparams(3);
numerSS = GSSAparams(4);
T = GSSAparams(5);
kdamp = GSSAparams(6);
maxwhile = GSSAparams(7);
usePQ = GSSAparams(8);
dotrunc = GSSAparams(9);
fittype = GSSAparams(10);
RM = GSSAparams(11);
penalty = GSSAparams(12);
D = GSSAparams(13);
PF = GSSAparams(14);
quadtype = GSSAparams(15);
Qn = GSSAparams(16);
NN = GSSAparams(17);
SigZ = GSSAparams(18);
X1 = GSSAparams(19);
% create the name of the dynamic behavioral equations function
% fundamental dynamic equations that define the model
dyneqns = str2func([modelname '_dyn']);
% restrictions on values of variables during simulation
trunceqns = str2func([modelname '_trunc']);
% calculate nodes and weights for quadrature
% Inputs: "nz" is the number of random variables; N>=1;
% "SigZ" is the variance-covariance matrix; N-by-N
% Outputs: "n_nodes" is the total number of integration nodes; 2*N;
% "epsi_nodes" are the integration nodes; n_nodes-by-N;
% "weight_nodes" are the integration weights; n_nodes-by-1
if quadtype == 1
[J,epsi_nodes,weight_nodes] = Monomials_1(nz,SigZ);
elseif quadtype == 2
[J,epsi_nodes,weight_nodes] = Monomials_2(nz,SigZ);
elseif quadtype == 3
% Inputs: "Qn" is the number of nodes in each dimension; 1<=Qn<=10;
[J,epsi_nodes,weight_nodes] = GH_Quadrature(Qn,nz,SigZ);
elseif quadtype == 4
J = 20;
[epsi_nodes, weight_nodes] = GSSA_nodes(J,nz,SigZ);
end
% find steady state numerically; skip if you already have exact values
if numerSS == 1
options = optimset('Display','iter');
XYbar = fsolve(@GSSA_ss,XYbarguess,options);
else
XYbar = XYbarguess;
end
Xbar = XYbar(1:nx);
Ybar = XYbar(nx+1:nx+ny);
Zbar = zeros(nz,1);
% generate a random history of Z's to be used throughout
Z = zeros(T,nz);
eps = randn(T,nz)*SigZ;
for t=2:T
Z(t,:) = Z(t-1,:)*NN + eps(t,:);
end
if usePQ == 1
% get an initial estimate of beta by simulating about the steady state
% using Uhlig's solution method.
in = [Xbar; Xbar; Xbar; Ybar; Ybar; Zbar; Zbar];
[PP,QQ,UU,RR,SS,VV] = GSSA_PQU(in,Zbar,GSSAparams,modelparams);
% generate X & Y data given Z's above
Xtilde = zeros(T,nx);
X = ones(T,nx);
if ny> 0
Ytilde = zeros(T,ny);
Y = ones(T,ny);
end
X(1,:) = Xbar;
if ny > 0
Y(1,:) = Ybar;
end
for t=2:T
Xtilde(t,:) = UU + Xtilde(t-1,:)*PP' + Z(t-1,:)*QQ';
X(t,:) = Xbar.*exp(Xtilde(t,:));
if ny > 0
Ytilde(t,:) = VV + Xtilde(t-1,:)*RR' + Z(t-1,:)*SS';
Y(t,:) = Ybar.*exp(Ytilde(t,:));
end
end
Xtildefinal = UU + Xtilde(T,:)*PP' + Z(T,:)*QQ';
Xfinal = Xbar.*exp(Xtildefinal);
if ny > 0
Ytildefinal = VV + Ytilde(T,:)*RR' + Z(T,:)*SS';
Yfinal = Ybar.*exp(Ytildefinal);
end
% estimate beta using this data
if ny > 0
Xrhs = [X Y];
XYlhs = [X(2:T,:) Y(2:T,:); Xfinal Yfinal];
else
Xrhs = X;
XYlhs = [X(2:T,:); Xfinal];
end
if fittype == 2
Xrhs = log(Xrhs);
XYlhs = log(XYlhs);
end
% add the Z series
Xrhs = [Xrhs Z];
% construct basis functions
XZbasis = Ord_Polynomial_N(Xrhs,D);
% run regressions to fit data
beta = Num_Stab_Approx(XZbasis,XYlhs,RM,penalty,1);
beta = real(beta);
end
% construct valid beta matrix from betaguess
% find size of beta guess
[rbeta, cbeta] = size(beta);
% find number of terms in basis functions
[rbasis, ~] = size(Ord_Polynomial_N(ones(1,nx+nz),D)');
if rbeta > rbasis
disp('rbeta > rbasis truncating betaguess')
beta = beta(1:rbasis,:);
[rbeta, cbeta] = size(beta);
end
if cbeta > nx+ny
disp('cbeta > cbasis truncating betaguess')
beta = beta(:,1:nx+ny);
[rbeta, cbeta] = size(beta);
end
if rbeta < rbasis
disp('rbeta < rbasis adding extra zero rows to betaguess')
beta = [beta; zeros(rbasis-rbeta,cbeta)];
[rbeta, cbeta] = size(beta);
end
if cbeta < nx+ny
disp('cbeta < cbasis adding extra zero columns to betaguess')
beta = [beta zeros(rbeta,nx+ny-cbeta)];
end
% find constants that yield theoretical SS
beta(1,:) = XYbar(1,1:nx)*(eye(nx)-beta(2:nx+1,1:nx));
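% (With Z = 0, the fitted first-order part of the law of motion implies
%  Xbar = beta(1,1:nx) + Xbar*beta(2:nx+1,1:nx); solving for the constant
%  gives beta(1,1:nx) = Xbar*(I - beta(2:nx+1,1:nx)), which pins the
%  intercepts to the theoretical steady state.)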
% set starting values to SS if flag is on.
if X1 == -999
X1 = XYbar(1:nx);
end
% set intial value of old coefficients
betaold = beta;
% begin iterations
dif_1d = 1;
icount = 0;
[icount dif_1d 1]
while dif_1d > 1e-4*kdamp
    % update iteration count
icount = icount + 1;
% stop if too many iterations have passed
if icount > maxwhile
break
end
% find convex combination of old and new coefficients
beta = kdamp*beta + (1-kdamp)*betaold;
betaold = beta;
% find time series for XYap using approximate function
% initialize series
XYap = zeros(T,nx+ny);
% find SS implied by current beta
if fittype == 2
Xbar = exp(beta(1,1:nx)*(eye(nx)-beta(2:1+nx,1:nx))^(-1));
else
Xbar = beta(1,1:nx)*(eye(nx)-beta(2:1+nx,1:nx))^(-1);
end
    % % use theoretical SS values
% Xbar = XYbar(:,1:nx);
if ny > 0
if fittype == 2
Ybar = exp(ones(1,ny)*beta(1,1+nx:nx+ny) + log(Xbar)*beta(2:1+nx,1+nx:nx+ny));
else
Ybar = ones(1,ny)*beta(1,1+nx:nx+ny) + Xbar*beta(2:1+nx,1+nx:nx+ny);
end
else
Ybar = [];
end
X1 = [Xbar Ybar];
% find Xp & Y using approximate Xp & Y functions
XYap(1,:) = GSSA_XYfunc(X1(1:nx),Z(1,:),beta,GSSAparams);
for t=2:T
XYap(t,:) = GSSA_XYfunc(XYap(t-1,1:nx),Z(t,:),beta,GSSAparams);
end
% generate XYex using the behavioral equations
% Judd, Mailar & Mailar call this y(t)
[XYex,~] = GSSA_genex(XYap,Z,beta,J,epsi_nodes,weight_nodes,...
GSSAparams,modelparams);
% % watch the data converge using plots
% for i=1:nx+ny
% figure;
% subplot(nx+ny,1,i)
% plot([XYap(:,i) XYex(:,i)])
% end
% [XYap(1:10,:) XYex(1:10,:)]
% [XYap(T-10:T,:) XYex(T-10:T,:)]
% find new coefficient values
% generate basis functions for Xap & Z
% get the X portion of XYap
Xap = [X1(1:nx); XYap(1:T-1,1:nx)];
if fittype == 2
Xap = log(Xap);
XYex = log(XYex);
end
% add the Z series
XZap = [Xap Z];
% construct basis functions
XZbasis = Ord_Polynomial_N(XZap,D);
% run regressions to fit data
% Inputs: "X" is a matrix of dependent variables in a regression, T-by-n,
% where n corresponds to the total number of coefficients in the
% original regression (i.e. with unnormalized data);
% "Y" is a matrix of independent variables, T-by-N;
% "RM" is the regression (approximation) method, RM=1,...,8:
% 1=OLS, 2=LS-SVD, 3=LAD-PP, 4=LAD-DP,
% 5=RLS-Tikhonov, 6=RLS-TSVD, 7=RLAD-PP, 8=RLAD-DP;
% "penalty" is a parameter determining the value of the regulari-
% zation parameter for a regularization methods, RM=5,6,7,8;
% "normalize" is the option of normalizing the data,
% 0=unnormalized data, 1=normalized data
beta = Num_Stab_Approx(XZbasis,XYex,RM,penalty,1);
beta = real(beta);
    % evaluate convergence criteria
if icount == 1
dif_1d = 1;
dif_beta = abs(1 - mean(mean(betaold./beta)));
else
dif_1d =abs(1 - mean(mean(XYapold./XYap)));
dif_beta =abs(1 - mean(mean(betaold./beta)));
if isnan(dif_1d)
dif_1d = 0;
disp('There were problems with NaN for the convergence metric')
end
if isinf(dif_1d)
dif_1d = 0;
disp('There were problems with inf for the convergence metric')
end
end
% replace old k values
XYapold = XYap;
% report results of iteration
[icount dif_1d dif_beta]
end
[~,eulerr] = GSSA_genex(XYap,Z,beta,J,epsi_nodes,weight_nodes,...
GSSAparams,modelparams);
end
|
github
|
econdaryl/GSSA-master
|
GSSA_ss.m
|
.m
|
GSSA-master/MATLAB/GSSA MATLAB ver 1.4 (5 Nov 2012)/GSSA_ss.m
| 759 |
utf_8
|
14e62369785d64c04a7385ac10dac2d8
|
% GSSA package
% version 1.4 written by Kerk L. Phillips 11/5/2012
% Implements a version of Judd, Mailar & Mailar's (Quantitative Economics,
% 2011) Generalized Stochastic Simulation Algorithm with jump variables.
function out = GSSA_ss(XYbar)
global nx ny nz dyneqns
% GSSA.m uses this function along with the fsolve command to find the
% steady state using numerical methods.
%
% This function takes as an input:
% XYbar, a vector of steady state values for X and Y
%
% The output is:
% out, deviations of the model characterizing equations, which should be
% zero at the steady state
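%
% Example (hypothetical, mirroring the call made inside GSSA.m):
%   options = optimset('Display','iter');
%   XYbar = fsolve(@GSSA_ss, XYbarguess, options);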
Xbar = XYbar(1:nx);
Ybar = XYbar(nx+1:nx+ny);
out = dyneqns([Xbar'; Xbar'; Xbar'; Ybar'; Ybar'; ...
zeros(nz,1); zeros(nz,1)]);
end
|
github
|
econdaryl/GSSA-master
|
GSSA_PQU.m
|
.m
|
GSSA-master/MATLAB/GSSA MATLAB ver 1.4 (5 Nov 2012)/GSSA_PQU.m
| 1,810 |
utf_8
|
7a44121e3a77dec00f67532aacb8a189
|
% GSSA package
% version 1.4 written by Kerk L. Phillips 11/5/2012
% Implements a version of Judd, Mailar & Mailar's (Quantitative Economics,
% 2011) Generalized Stochastic Simulation Algorithm with jump variables.
function [PP,QQ,UU,RR,SS,VV] = GSSA_PQU(theta0, Zbar, GSSAparams,...
modelparam)
global dyneqns
% This function solves for the linear coefficient matrices of the policy and
% jump variable functions
%
% This function takes 4 inputs:
% 1) theta0, the vector [X(t+2),X(t+1),X(t),Y(t+1),Y(t),Z(t+1),Z(t)]
% 2) Zbar, steady state value for Z
% 3) GSSAparams, the vector of parameter values from the GSSA function
% 4) modelparams, a vector of model specific parameter values passed to the
% model dynamic function named dyneqns
%
% The output is the coefficient matrices for the following approximate policy
% and jump variable functions.
% X(t+1) = UU + X(t)*PP + Z(t)*QQ
% Y(t)   = VV + X(t)*RR + Z(t)*SS
%
% It requires the following subroutines written by by Kerk L. Phillips
% incorporating code written by Harald Uhlig.
% 1) LinApp_Deriv.m - takes numerical derivatives
% 2) LinApp_Solve.m - solves for the linear coefficients
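%
% Example (hypothetical, linearizing about the theoretical steady state):
%   theta0 = [Xbar; Xbar; Xbar; Ybar; Ybar; Zbar; Zbar];
%   [PP,QQ,UU,RR,SS,VV] = GSSA_PQU(theta0, Zbar, GSSAparams, modelparams);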
% read in GSSA parameters
nx = GSSAparams(1);
ny = GSSAparams(2);
nz = GSSAparams(3);
fittype = GSSAparams(10);
NN = GSSAparams(17);
if fittype == 2
logX = 1;
else
logX = 0;
end
modelparam
theta0
[nx,ny,nz]
logX
% take numerical derivatives of the dynamic equations about theta0
[AA, BB, CC, DD, FF, GG, HH, JJ, KK, LL, MM, WW, TT] = ...
LinApp_Deriv(dyneqns,modelparam,theta0,nx,ny,nz,logX);
Z0 = theta0(3*nx+2*ny+1,:)-Zbar;
% find coefficients
[PP, QQ, UU, RR, SS, VV] = ...
LinApp_Solve(AA, BB, CC, DD, FF, GG, HH, JJ, KK, LL, MM, WW, TT, NN, Z0);
|
github
|
econdaryl/GSSA-master
|
GSSA_XYfunc.m
|
.m
|
GSSA-master/MATLAB/GSSA MATLAB ver 1.4 (5 Nov 2012)/GSSA_XYfunc.m
| 1,819 |
utf_8
|
524db1e17195d9b5ca0c86b72fb4b511
|
% GSSA package
% version 1.4 written by Kerk L. Phillips 11/5/2012
% Implements a version of Judd, Mailar & Mailar's (Quantitative Economics,
% 2011) Generalized Stochastic Simulation Algorithm with jump variables.
function XYp = GSSA_XYfunc(X,Z,beta,GSSAparams)
global trunceqns
% This is the approximation function that generates Xp and Y from the
% current state.  Inputs and outputs are row vectors.
% It evaluates an ordinary polynomial of order D in (X,Z), in levels or in
% logs depending on fittype.
%
% This function takes 4 inputs:
% 1) X, the vector of current period endogenous state variables
% 2) Z, the vector of current period exogenous state variables
% 3) beta, the vector polynomial coefficients
% 4) GSSAparams, the vector of parameter values from the GSSA function
%
% The output is:
% XYp, a vector of the next period endogenous state variables and the
% current period jump variables
%
% It requires the following subroutine written by Kenneth L. Judd,
% Lilia Maliar and Serguei Maliar, which is available as a zip file from
% Lilia & Serguei Maliar's webpage at:
% http://www.stanford.edu/~maliars/Files/Codes.html
% "Ord_Polynomial_N.m" constructs the sets of basis functions for ordinary
% polynomials of the degrees from one to five, for
% the N-country model
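%
% Example (hypothetical scalar state and shock, nx = ny = 1):
%   XYp = GSSA_XYfunc(3.2, 0.01, beta, GSSAparams);
%   Xp  = XYp(1:nx);          % next-period endogenous state
%   Y   = XYp(nx+1:nx+ny);    % current-period jump variable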
% read in GSSA parameters
dotrunc = GSSAparams(9);
fittype = GSSAparams(10);
D = GSSAparams(13);
if fittype == 2 %log-linear
XZ = [log(X) Z];
else
XZ = [X Z];
end
% create dependent variable
% using basis functions of XZ (includes constants) * beta
XYp = Ord_Polynomial_N(XZ,D)*beta;
% convert if needed
if fittype == 2
XYp = exp(XYp);
end
% truncate if needed depending on model
if dotrunc == 1
XYp = trunceqns(XYp);
end
end
|
github
|
econdaryl/GSSA-master
|
GSSAold.m
|
.m
|
GSSA-master/MATLAB/Old Uused MATLAB code/GSSAold.m
| 14,505 |
utf_8
|
cf655438b9ec319d635806843013b005
|
% GSSA function
% version 1.3 written by Kerk L. Phillips 2/11/2012
% Implements a version of Judd, Mailar & Mailar's (Quantitative Economics,
% 2011) Generalized Stochastic Simulation Algorithm with jump variables.
% It requires the following subroutines written by Kenneth L. Judd,
% Lilia Maliar and Serguei Maliar, which are available as a zip file from
% Lilia & Serguei Maliar's webpage at:
% http://www.stanford.edu/~maliars/Files/Codes.html
% This function takes three inputs:
% 1) a guess for the steady state values of the endogenous state variables &
% jump variables, XYbarguess. It outputs the parameters for an
% approximation of the state transition function, X(t+1) = f(X(t),Z(t)), and
% the jump variable function Y(t) = g(X(t),Z(t)),
% where X is a vector of nx endogenous state variables,
% Y is a vector of ny jump variables,
% and Z is a vector of nz exogenous state variables
% 2) an initial guess for the parameter matrix, beta
% 3) a string specifying the model's name, modelname
% This script requires one external function:
% It should be named "modelname"_dyn.m and should take as inputs:
% X(t+2), X(t+1), X(t), Y(t+1), Y(t), Z(t+1) & Z(t)
% It should output a with nx+ny elements column vector with the values of
% the model's behavioral equations (many of which will be Euler equations)
% written in such a way that the equation evaluates to zero. This
% function is used to find the steady state and in the GSSA algorithm
% itself.
% The output is:
% 1) a vector of approximation coefficients, out.
% 2) a vector of steady state values for X & Y, XYbarout.
% 3) average Euler equation errors over a series of Monte Carlos, errors
function [out,XYbarout,errors] = GSSA(XYbarguess,beta,modelname)
% data values
global X1 Z
% options flags
global fittype regtype numerSS quadtype
% parameters
global nx ny nz nc npar betar nbeta nobs XYpars XYbar NN SigZ dyneqns
global nodes weights
global AA BB CC beta
% model
global model
% create the name of the dynamic behavioral equations function
dyneqns = str2func([modelname '_dyn']);
% set numerical parameters (I include this in the model script)
% nx = 1; %number of endogenous state variables
% ny = 0; %number of jump variables
% nz = 1; %number of exogenous state variables
%numerSS = 1; %set to 1 if XYbargues is a guess, 0 if it is exact SS.
model = modelname;
nobs = 1000; %number of observations in the simulation sample
ccrit = 10^-10; %convergence criterion for approximations
conv = .025; %convexifier parameter for approximations (weight on new)
maxwhile = 1000; %maximum iterations for approximations
nmc = 10; %number of Monte Carlos for Euler error check
fittype = 2; %functional form for approximations
%0 is linear
%1 is log-linear
%2 is quadratic
%3 is log-quadratic
regtype = 3; %type of regression
%0 is OLS
%1 is LAD
%2 is SVD
%3 is truncated SVD
quadtype = 0; %type of quadrature used
%0 is rectangular
J = 20; %number of nodes for quadrature
na = 1;
nb = (nx+nz);
nc = (nx+nz)*(nx+nz+1)/2;
npar = na+nb;
if fittype==2 || fittype==3
npar = npar + nc;
end
npar = (nx+ny)*npar
% calculate nodes and weights for quadrature
[nodes, weights] = GSSA_nodes(J,quadtype);
% find steady state numerically; skip if you already have exact values
if numerSS == 1
options = optimset('Display','iter');
XYbar = fsolve(@GSSA_ss,XYbarguess,options)
else
XYbar = XYbarguess;
end
% set initial guess for coefficients
AA = .1*ones(1,nx+ny);
if sum(abs(size(BBguess)-[nx+nz,nx+ny]),2) ~= 0
BB = zeros(nx+nz,nx+ny);
else
BB = BBguess;
end
if fittype==2 || fittype==3
if sum(abs(size(CCguess)-[nc,nx+ny]),2) ~= 0
CC = zeros(nc,nx+ny);
else
CC = CCguess;
end
else
CC = [];
end
beta = [AA; BB; CC]
[betar, betac] = size(beta);
nbeta = betar*betac
XYpars = reshape(beta,1,nbeta);
% start at SS
X1 = XYbar(1:nx);
% generate a random history of Z's to be used throughout
Z = zeros(nobs,nz);
eps = randn(nobs,nz)*SigZ;
for t=2:nobs
Z(t,:) = Z(t-1,:)*NN + eps(t,:);
end
% set intial value of old coefficients
XYparsold = XYpars;
betaold = beta;
% begin iterations
converge = 1;
icount = 0;
[icount converge XYpars]
while converge > ccrit
    % update iteration count
icount = icount + 1;
% stop if too many iterations have passed
if icount > maxwhile
break
end
% find convex combination of old and new coefficients
XYpars = conv*XYpars + (1-conv)*XYparsold;
beta = conv*beta + (1-conv)*betaold;
XYparsold = XYpars;
betaold = beta;
% find time series for XYap using approximate function
XYap = GSSA_genap();
% generate XYex using the behavioral equations
% Judd, Mailar & Mailar call this y(t)
[XYex,~] = GSSA_genex();
% for i=1:nx+ny
% figure;
% subplot(nx+ny,1,i)
% plot([XYap(:,i) XYex(:,i)])
% end
% [XYap(1:10,:) XYex(1:10,:)]
% find new coefficient values;
beta = GSSA_fit(XYex,XYap);
XYpars = reshape(beta,1,nbeta);
[AA,BB,CC] = GSSA_fittype(beta);
    % evaluate convergence criteria
if icount == 1
converge = 1;
else
converge = sum(sum(abs((XYap-XYapold)./XYap)),2)/(nobs*(nx+ny));
if isnan(converge)
converge = .5;
disp('There are problems with NaN for the convergence metric')
end
if isinf(converge)
converge = .5;
disp('There are problems with inf for the convergence metric')
end
end
% replace old k values
XYapold = XYap;
% report results of iteration
%[icount,converge]
[icount converge XYpars]
beta
end
out = XYpars;
XYbarout = XYbar;
% run Monte Carlos to get average Euler errors
errors = 0;
for m=1:nmc
% create a new Z series
Z = zeros(nobs,nz);
eps = randn(nobs,nz)*SigZ;
for t=2:nobs
Z(t,:) = Z(t-1,:)*NN + eps(t,:);
end
% generate data & calcuate the errors, add this to running average
[~,temp] = GSSA_genex();
errors = errors*(m-1)/m + temp/m;
m
end
end
%%
function XY = GSSA_genap()
% This function generates approximate values of Xp using the approximation
% equations in GSSA_XYfunc.m
% data values
global Z X1
% parameters
global nx ny beta
% model
global model
T = size(Z,1);
% initialize series
XY = zeros(T,nx+ny);
% find SS implied by current beta
Xbar = beta(1,1:nx)*(eye(nx)-beta(2:1+nx,1:nx))^(-1);
if ny > 0
Ybar = ones(1,ny)*beta(1,1+nx:nx+ny) + Xbar*beta(2:1+nx,1+nx:nx+ny);
else
Ybar = [];
end
X1 = [Xbar Ybar];
% find Xp & Y using approximate Xp & Y functions
XY(1,:) = GSSA_XYfunc(X1,Z(1,:));
for t=2:T
XY(t,:) = GSSA_XYfunc(XY(t-1,1:nx),Z(t,:));
% model specific truncations
% if model == 'test2'
% % h is between zero and one
% if XY(t,2) > 1-10^-10
% XY(t,2) = 1-10^-10;
% elseif XY(t,2) < 10^-10
% XY(t,2) = 10^-10;
% end
% % k is positive
% if XY(t,1) < 10^-10
% XY(t,1) = 10^-10;
% end
% end
end
end
%%
function [XYex,eulerr] = GSSA_genex()
% This function generates values of Xp using the behavioral equations
% in "name"_dyn.m. This is y(t) from Judd, Mailar & Mailar.
% data values
global Z X1
% options flags
global quadtype
% parameters
global nx ny nz NN nobs dyneqns nodes weights
T = nobs;
[~,J] =size(nodes);
% initialize series
XYex = zeros(T,nx+ny);
% find Xp & Y using approximate Xp & Y functions
XY = GSSA_genap();
Xp = XY(:,1:nx);
Y = XY(:,nx+1:nx+ny);
eulert = zeros(T,1);
% find X
X = [X1; Xp(1:T-1,:)];
% find EZp & EXpp using law of motion and approximate X & Y functions
if quadtype == 0 && nz < 2
for t=1:T
EZpj = zeros(J,nz);
EXYj = zeros(J,nx+ny);
EXppj = zeros(J,nx);
if ny > 0
EYpj = zeros(J,ny);
end
EGFj = zeros(J,nx+ny);
XYexj = zeros(J,nx+ny);
% integrate over discrete intervals
for j=1:J
% find Ezp using law of motion
EZpj(j,:) = Z(t,:)*NN + nodes(j);
% find EXpp & EYp using approximate functions
EXYj(j,:) = GSSA_XYfunc(Xp(t,:),EZpj(j,:));
EXppj(j,:) = EXYj(j,1:nx);
if ny > 0
EYpj(j,:) = EXYj(j,nx+1:nx+ny);
end
            % find expected G & F values using the nonlinear behavioral
            % equations; the dynamic equations evaluate to zero at the fixed
            % point, but here they need to evaluate to one, so add one to each element.
if ny > 0
EGFj(j,:) = dyneqns([EXppj(j,:)';Xp(t,:)';X(t,:)';...
EYpj(j,:)';Y(t,:)';EZpj(j,:)';Z(t,:)]')' + 1;
else
EGFj(j,:) = dyneqns([EXppj(j,:)';Xp(t,:)';X(t,:)';...
EZpj(j,:)';Z(t,:)]')' + 1;
end
% find Judd, Mailar & Mailar's y
XYexj(j,:) = EGFj(j,:).*[Y(t,:) Xp(t,:)];
end
% sum over J
XYex(t,:) = weights*XYexj;
eulert(t,:) = weights*(EGFj-1)*ones(nx+ny,1)/(nx+ny);
end
end
eulerr = ones(1,T)*eulert/T;
end
%%
function XYp = GSSA_XYfunc(X,Z)
% This is the approximation function that generates Xp and Y from the
% current state.  Inputs and outputs are row vectors.
% It allows linear, log-linear, quadratic, or log-quadratic forms.
% parameters
global nx nz
global beta
% options flags
global fittype
% Notation assumes data are stored in columns, so an individual observation
% is a row vector.
% Put the appropriate linear & quadratic terms in the indep list.
if fittype == 0 %linear
indep = [X Z];
elseif fittype == 1 %log-linear
indep = [log(X) Z];
elseif fittype == 2 %quadratic
sqterms = GSSA_sym2vec([X Z]'*[X Z]);
indep = [X Z sqterms];
elseif fittype == 3 %log quadratic
sqterms = GSSA_sym2vec([log(X) Z]'*[log(X) Z]);
indep = [log(X) Z sqterms];
end
indep = [1 indep];
% create dependent variable
dep = indep*beta;
% convert to Xp's & Y's
if fittype==0 || fittype==2
XYp = dep;
elseif fittype==1 || fittype==3
XYp = exp(dep);
end
end
%%
function out = GSSA_ss(XYbar)
% This function finds the steady state using numerical methods
% parameters
global nx ny nz dyneqns
Xbar = XYbar(1:nx);
Ybar = XYbar(nx+1:nx+ny);
out = dyneqns([Xbar'; Xbar'; Xbar'; Ybar'; Ybar'; zeros(nz,1); zeros(nz,1)]);
end
%%
function betaout = GSSA_fit(XYex,XYap)
% This function fits XYex (Judd, Mailar & Mailar's y(t+1)) to XYap (the XY
% series from the approximation equations).
% data values
global X1 Z
% parameters
global nx nz nc nbeta npar XYbar beta
% options flags
global fittype regtype
% data to pass to GSSA_ADcalc function
global Yfit Xfit
[T,~] = size(XYex);
% parameter vector
XYparguess = reshape(beta,1,nbeta);
% normalize variables
% independent Xfit
Xfit = [X1; XYap(1:T-1,1:nx)];
if fittype==1 || fittype==3
Xfit = log(Xfit);
end
Xfit = [Xfit Z];
% calculate and concatenate squared terms if needed
if fittype==2 || fittype==3 %quadratic
sqterms = zeros(T,nc);
for t=1:T
sqterms(t,:) = GSSA_sym2vec(Xfit(t,:)'*Xfit(t,:));
end
Xfit = [Xfit sqterms];
end
Xmu = mean(Xfit);
Xsig = std(Xfit);
Xfit = (Xfit - repmat(Xmu,T,1))./repmat(Xsig,T,1);
% dependent Yfit
Yfit = XYex;
if fittype==1 || fittype==3
Yfit = log(Yfit);
end
Ymu = mean(Yfit);
Ysig = std(Yfit);
Yfit = (Yfit - repmat(Ymu,T,1))./repmat(Ysig,T,1);
% % plot data
% figure;
% scatter(Yfit(:,1),Xfit(:,1),5,'filled')
% choose estimation method and find new parameters
if regtype == 0 %linear regression with OLS
    betaout = Xfit\Yfit;
elseif regtype == 1 %linear regression with LAD
options = optimset('Display','on','MaxFunEvals',1000000,...
'MaxIter',10000);
    XYparout = fminsearch(@GSSA_ADcalc,XYparguess,options);
    % recover the coefficient matrix dimensions from the regression data
    betaout = reshape(XYparout,size(Xfit,2),size(Yfit,2));
elseif regtype == 2 %SVD estimate
[U,S,V] = svd(Xfit,0);
betaout = V*S^(-1)*U'*Yfit;
elseif regtype == 3 %truncated SVD estimate
% set truncation criterion
kappa = 10^14;
% do SVD decomposition
[U,S,V] = svd(Xfit,0);
% find ill-conditioned components and remove
sumS = sum(S);
sumS = sumS(1)./sumS;
[~,cols] = size(S);
r = 1;
i = 1;
while i<cols+1
if sumS(i)<kappa
r = i;
else
break
end
i = i+1;
end
S = S(1:r,1:r);
U = U(:,1:r);
V = V(:,1:r);
% get estimate
betaout = V*S^(-1)*U'*Yfit;
end
% adjust for normalization of means and variances
[~,ncol] = size(Xsig);
betaout = (ones(1,ncol)./Xsig)'*Ysig.*betaout;
beta0 = Ymu - Xmu*betaout;
betaout = [beta0; betaout];
end
%%
function AD = GSSA_ADcalc(XYpars)
% Does LAD estimation
% data for AD estimation
global Yfit Xfit
% get number of regressors and regressands
[~,nX] = size(Xfit);
[~,nY] = size(Yfit);
beta = reshape(XYpars,nX,nY);
AD = sum(sum(abs(Yfit - Xfit*beta)),2);
end
%%
function [nodes, weights] = GSSA_nodes(J,quadtype)
% does quadrature on expectations
% parameters
global nz SigZ
% calculate nodes and weights for rectangular quadrature used for taking
% expectations of behavioral eqns. This setup uses equally spaced
% probabilities (weights)
if quadtype == 0 && nz < 2
weights = ones(1,J)/J;
cumprob = .5/J:1/J:1-.5/J;
nodes = norminv(cumprob,0,SigZ);
end
% if isGpuAvailable == 1
% nodes = gpuArry(nodes);
% end
% check if the nodes look like a cumulative normal
% expnodes = weights*nodes'
% figure;
% plot(nodes)
end
%%
function A = GSSA_sym2vec(B)
% B is a symmetric matrix
% A is a row vectorization of its lower-triangular portion (equivalent to
% the upper triangle since B is symmetric)
A = [];
for k = 1:size(B,1)
A = [A B(k,1:k)];
end
end
|
github
|
econdaryl/GSSA-master
|
GSSA.m
|
.m
|
GSSA-master/MATLAB/GSSA MATLAB ver 1.3 (11 Feb 2012)/GSSA.m
| 12,744 |
utf_8
|
99e91f3ba158ad62e5c16e92dbccbb39
|
% GSSA function
% version 1.3 written by Kerk L. Phillips 2/11/2012
% Implements a version of Judd, Mailar & Mailar's (Quantitative Economics,
% 2011) Generalized Stochastic Simulation Algorithm with jump variables.
% This function takes three inputs:
% 1) a guess for the steady state values of the endogenous state variables &
% jump variables, XYbarguess. It outputs the parameters for an
% approximation of the state transition function, X(t+1) = f(X(t),Z(t)), and
% the jump variable function Y(t) = g(X(t),Z(t)),
% where X is a vector of nx endogenous state variables,
% Y is a vector of ny jump variables,
% and Z is a vector of nz exogenous state variables
% 2) an initial guess for the parameter matrix, beta
% 3) a string specifying the model's name, modelname
% It requires the following subroutines written by Kenneth L. Judd,
% Lilia Maliar and Serguei Maliar, which are available as a zip file from
% Lilia & Serguei Maliar's webpage at:
% http://www.stanford.edu/~maliars/Files/Codes.html
% 1. "Num_Stab_Approx.m" implements the numerically stable LS and LAD
% approximation methods
% 2. "Ord_Polynomial_N.m" constructs the sets of basis functions for ordinary
% polynomials of the degrees from one to five, for
% the N-country model
% 3. "Monomials_1.m" constructs integration nodes and weights for an N-
% dimensional monomial (non-product) integration rule
% with 2N nodes
% 4. "Monomials_2.m" constructs integration nodes and weights for an N-
% dimensional monomial (non-product) integration rule
% with 2N^2+1 nodes
% 5. "GH_Quadrature.m" constructs integration nodes and weights for the
% Gauss-Hermite rules with the number of nodes in
% each dimension ranging from one to ten
% It also requires one model-specific external function:
% It should be named "modelname"_dyn.m and should take as inputs:
% X(t+2), X(t+1), X(t), Y(t+1), Y(t), Z(t+1) & Z(t)
% It should output a with nx+ny elements column vector with the values of
% the model's behavioral equations (many of which will be Euler equations)
% written in such a way that the equation evaluates to zero. This
% function is used to find the steady state and in the GSSA algorithm
% itself.
% The output is:
% 1) a vector of approximation coefficients, out.
% 2) a vector of steady state values for X & Y, XYbarout.
function [out,XYbarout] = GSSA(XYbarguess,beta,modelname)
% data values
global X1 Z
% options flags
global fittype numerSS quadtype dotrunc PF
% parameters
global nx ny nz nobs XYbar NN SigZ dyneqns trunceqns D
global J epsi_nodes weight_nodes
% create the name of the dynamic behavioral equations function
dyneqns = str2func([modelname '_dyn']);
trunceqns = str2func([modelname '_trunc']);
% set numerical parameters (I include this in the model script)
% nx = 1; %number of endogenous state variables
% ny = 0; %number of jump variables
% nz = 1; %number of exogenous state variables
%numerSS = 1; %set to 1 if XYbargues is a guess, 0 if it is exact SS.
nobs = 10000; % number of observations in the simulation sample
T = nobs;
kdamp = 0.05; % Damping parameter for (fixed-point) iteration on
% the coefficients of the capital policy functions
maxwhile = 1000; % maximum iterations for approximations
usePQ = 0; % 1 to use a linear approximation to get initial guess
% for beta
dotrunc = 0; % 1 to truncate data outside constraints, 0 otherwise
fittype = 1; % functional form for polynomial approximations
% 1) linear
% 2) log-linear
RM = 6; % regression (approximation) method, RM=1,...,8:
% 1=OLS, 2=LS-SVD, 3=LAD-PP, 4=LAD-DP,
% 5=RLS-Tikhonov, 6=RLS-TSVD, 7=RLAD-PP, 8=RLAD-DP;
penalty = 7; % a parameter determining the value of the regularization
% parameter for a regularization methods, RM=5,6,7,8;
D = 5; % order of polynomial approximation (1,2,3,4 or 5)
PF = 2; % polynomial family
% 1) ordinary
% 2) Hermite
quadtype = 4; % type of quadrature used
% 1) "Monomials_1.m"
% constructs integration nodes and weights for an N-
% dimensional monomial (non-product) integration rule
% with 2N nodes
% 2) "Monomials_2.m"
% constructs integration nodes and weights for an N-
% dimensional monomial (non-product) integration rule
% with 2N^2+1 nodes
% 3)"GH_Quadrature.m"
% constructs integration nodes and weights for the
% Gauss-Hermite rules with the number of nodes in
% each dimension ranging from one to ten
% 4)"GSSA_nodes.m"
% does rectangular quadrature for a large number of nodes
% used only for univariate shock case.
Qn = 10; % the number of nodes in each dimension; 1<=Qn<=10
% for "GH_Quadrature.m" only
% calculate nodes and weights for quadrature
% Inputs: "nz" is the number of random variables; N>=1;
% "SigZ" is the variance-covariance matrix; N-by-N
% Outputs: "n_nodes" is the total number of integration nodes; 2*N;
% "epsi_nodes" are the integration nodes; n_nodes-by-N;
% "weight_nodes" are the integration weights; n_nodes-by-1
if quadtype == 1
[J,epsi_nodes,weight_nodes] = Monomials_1(nz,SigZ);
elseif quadtype == 2
[J,epsi_nodes,weight_nodes] = Monomials_2(nz,SigZ);
elseif quadtype == 3
% Inputs: "Qn" is the number of nodes in each dimension; 1<=Qn<=10;
[J,epsi_nodes,weight_nodes] = GH_Quadrature(Qn,nz,SigZ);
elseif quadtype == 4
J = 20;
[epsi_nodes, weight_nodes] = GSSA_nodes(J,quadtype);
end
JJJ = J
% find steady state numerically; skip if you already have exact values
if numerSS == 1
options = optimset('Display','iter');
XYbar = fsolve(@GSSA_ss,XYbarguess,options)
else
XYbar = XYbarguess;
end
Xbar = XYbar(1:nx)
Ybar = XYbar(nx+1:nx+ny);
Zbar = zeros(nz,1);
% generate a random history of Z's to be used throughout
Z = zeros(nobs,nz);
eps = randn(nobs,nz)*SigZ;
for t=2:nobs
Z(t,:) = Z(t-1,:)*NN + eps(t,:);
end
if usePQ == 1
% get an initial estimate of beta by simulating about the steady state
% using Uhlig's solution method.
in = [Xbar; Xbar; Xbar; Ybar; Ybar; Zbar; Zbar];
[PP,QQ,UU] = GSSA_PQU(in);
% generate X & Y data given Z's above
Xtilde = zeros(T,nx);
X = ones(T,nx);
Ytilde = zeros(T,ny);
    X(1,:) = Xbar;
    PP'
    QQ'
    % Z(t-1,:)     % t is not defined until the loop below
    % Xtilde(t,:)
for t=2:T
Xtilde(t,:) = Xtilde(t-1,:)*PP' + Z(t-1,:)*QQ';
X(t,:) = Xbar.*exp(Xtilde(t,:));
end
Xtildefinal = Xtilde(T,:)*PP' + Z(T,:)*QQ';
Xfinal = Xbar.*exp(Xtildefinal);
% estimate beta using this data
Xrhs = X;
Ylhs = [X(2:T,:); Xfinal];
if fittype == 2
Xrhs = log(Xrhs);
Ylhs = log(Ylhs);
end
% add the Z series
Xrhs = [Xrhs Z];
% construct basis functions
XZbasis = Ord_Polynomial_N(Xrhs,D);
% run regressions to fit data
beta = Num_Stab_Approx(XZbasis,Ylhs,RM,penalty,1);
beta = real(beta);
end
% construct valid beta matrix from betaguess
% find size of beta guess
[rbeta, cbeta] = size(beta);
% find number of terms in basis functions
[rbasis, ~] = size(Ord_Polynomial_N(ones(1,nx+nz),D)')
if rbeta > rbasis
disp('rbeta > rbasis truncating betaguess')
beta = beta(1:rbasis,:);
[rbeta, cbeta] = size(beta);
end
if cbeta > nx+ny
disp('cbeta > cbasis truncating betaguess')
beta = beta(:,1:nx+ny);
[rbeta, cbeta] = size(beta);
end
if rbeta < rbasis
disp('rbeta < rbasis adding extra zero rows to betaguess')
beta = [beta; zeros(rbasis-rbeta,cbeta)];
[rbeta, cbeta] = size(beta);
end
if cbeta < nx+ny
disp('cbeta < cbasis adding extra zero columns to betaguess')
beta = [beta zeros(rbeta,nx+ny-cbeta)];
end
% find constants that yield theoretical SS
beta(1,:)
eye(nx)
beta(2:nx+1,1:nx)
beta(1,:) = XYbar(1,1:nx)*(eye(nx)-beta(2:nx+1,1:nx))
% start simulations at SS
X1 = XYbar(1:nx);
% set intial value of old coefficients
betaold = beta;
% begin iterations
dif_1d = 1;
icount = 0;
[icount dif_1d 1]
while dif_1d > 1e-4*kdamp;
    % update iteration count
icount = icount + 1;
% stop if too many iterations have passed
if icount > maxwhile
break
end
% find convex combination of old and new coefficients
beta = kdamp*beta + (1-kdamp)*betaold;
betaold = beta;
% find time series for XYap using approximate function
% initialize series
XYap = zeros(T,nx+ny);
% find SS implied by current beta
if fittype == 2
Xbar = exp(beta(1,1:nx)*(eye(nx)-beta(2:1+nx,1:nx))^(-1));
else
Xbar = beta(1,1:nx)*(eye(nx)-beta(2:1+nx,1:nx))^(-1);
end
    % % use theoretical SS values
% Xbar = XYbar(:,1:nx);
if ny > 0
if fittype == 2
Ybar = exp(ones(1,ny)*beta(1,1+nx:nx+ny) + log(Xbar)*beta(2:1+nx,1+nx:nx+ny));
else
Ybar = ones(1,ny)*beta(1,1+nx:nx+ny) + Xbar*beta(2:1+nx,1+nx:nx+ny);
end
else
Ybar = [];
end
X1 = [Xbar Ybar];
% find Xp & Y using approximate Xp & Y functions
XYap(1,:) = GSSA_XYfunc(X1(1:nx),Z(1,:),beta);
for t=2:T
XYap(t,:) = GSSA_XYfunc(XYap(t-1,1:nx),Z(t,:),beta);
end
% generate XYex using the behavioral equations
% Judd, Mailar & Mailar call this y(t)
[XYex,~] = GSSA_genex(XYap,beta);
% % watch the data converge using plots
% for i=1:nx+ny
% figure;
% subplot(nx+ny,1,i)
% plot([XYap(:,i) XYex(:,i)])
% end
% [XYap(1:10,:) XYex(1:10,:)]
% [XYap(T-10:T,:) XYex(T-10:T,:)]
% find new coefficient values
% generate basis functions for Xap & Z
% get the X portion of XYap
Xap = [X1(1:nx); XYap(1:T-1,1:nx)];
if fittype == 2
Xap = log(Xap);
XYex = log(XYex);
end
% add the Z series
XZap = [Xap Z];
% construct basis functions
XZbasis = Ord_Polynomial_N(XZap,D);
% run regressions to fit data
% Inputs: "X" is a matrix of dependent variables in a regression, T-by-n,
% where n corresponds to the total number of coefficients in the
% original regression (i.e. with unnormalized data);
% "Y" is a matrix of independent variables, T-by-N;
% "RM" is the regression (approximation) method, RM=1,...,8:
% 1=OLS, 2=LS-SVD, 3=LAD-PP, 4=LAD-DP,
% 5=RLS-Tikhonov, 6=RLS-TSVD, 7=RLAD-PP, 8=RLAD-DP;
% "penalty" is a parameter determining the value of the regulari-
% zation parameter for a regularization methods, RM=5,6,7,8;
% "normalize" is the option of normalizing the data,
% 0=unnormalized data, 1=normalized data
beta = Num_Stab_Approx(XZbasis,XYex,RM,penalty,1);
beta = real(beta);
    % evaluate convergence criteria
if icount == 1
dif_1d = 1;
dif_beta = abs(1 - mean(mean(betaold./beta)));
else
dif_1d =abs(1 - mean(mean(XYapold./XYap)));
dif_beta =abs(1 - mean(mean(betaold./beta)));
if isnan(dif_1d)
dif_1d = 0;
disp('There were problems with NaN for the convergence metric')
end
if isinf(dif_1d)
dif_1d = 0;
disp('There were problems with inf for the convergence metric')
end
end
% replace old k values
XYapold = XYap;
% report results of iteration
[icount dif_1d dif_beta]
end
out = beta;
XYbarout = XYbar;
end
%%
function out = GSSA_ss(XYbar)
% This function finds the steady state using numerical methods
% parameters
global nx ny nz dyneqns
Xbar = XYbar(1:nx);
Ybar = XYbar(nx+1:nx+ny);
out = dyneqns([Xbar'; Xbar'; Xbar'; Ybar'; Ybar'; zeros(nz,1); zeros(nz,1)]);
end
|
github
|
txzhao/QbH-Demo-master
|
PostProcess.m
|
.m
|
QbH-Demo-master/scripts/PostProcess.m
| 2,757 |
utf_8
|
5bbe9042e6910e393dbd430f645da70e
|
% semitone = PostProcess(frIseq, verbose)
%
% method to post-process features obtained from "GetMusicFeatures.m"
%
% Input: frIseq - feature matrix (3*T) from "GetMusicFeatures.m"
% verbose - plot flag (boolean)
%
% Output: semitone - post-processed feature vector (1*T)
%
% Usage:
% This function works for post-processing features extracted from
% "GetMusicFeatures.m". Specifically, pitch information is first filtered
% based on correlation coefficient (r) and intensity (I), and then
% converted into semitones using the relation - semitone =
% 12*log2(p/base_p) + 1. The newly-derived feature semitone is continuous,
%    and ranges roughly over (0, 13). Silent and noisy regions are filled
%    with random values between 0 and 0.5.
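%
% Example (hypothetical, assuming a mono recording y sampled at fs Hz):
%   frIseq   = GetMusicFeatures(y, fs, 0.03);
%   semitone = PostProcess(frIseq, true);   % true also plots the thresholds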
function semitone = PostProcess(frIseq, verbose)
% post-process features
p = log(frIseq(1, :));
r = frIseq(2, :);
I = log(frIseq(3, :));
% p = p - min(p);
% p = p/max(p);
% I = I - min(I);
% I = I/max(I);
% detect noisy and silent region
p_thresh_pos = mean(p) + std(p);
p_thresh_neg = mean(p) - std(p);
r_thresh = mean(r);
I_thresh = mean(I);
noise = (p < p_thresh_neg) | (p > p_thresh_pos) | ((I < I_thresh) & (r < r_thresh));
% convert pitch information into semitone (continuous)
base_p = min(p(find(noise == 0)));
semitone = 12*log2(p/base_p) + 1;
% return random values around 0 for noise and pause
semitone(find(noise == 1)) = 0.5*rand(size(find(noise == 1)));
% % partial results output
% % print out threshold selection
% disp('---------- thresholds information ----------');
% disp(['upper pitch threshold: ' num2str(p_thresh_pos)]);
% disp(['lower pitch threshold: ' num2str(p_thresh_neg)]);
% disp(['correlation threshold: ' num2str(r_thresh)]);
% disp(['intensity threshold: ' num2str(I_thresh)]);
% fprintf('\r');
% output plots and thresholds
if verbose == true
figure;
subplot(3,1,1)
plot(p);
hold on;
pline_pos = refline(0, p_thresh_pos);
pline_neg = refline(0, p_thresh_neg);
pline_pos.Color = 'r'; pline_pos.LineStyle = '--';
pline_neg.Color = 'k'; pline_neg.LineStyle = '--';
hold off; grid on;
title('Information on pitch track');
xlabel('Number of windows'); ylabel('Logarithmic pitch');
subplot(3,1,2)
plot(r)
hold on;
rline = refline(0, r_thresh);
rline.Color = 'r'; rline.LineStyle = '--';
hold off; grid on;
title('Information on correlation-coefficient track');
xlabel('number of windows'); ylabel('Correlation coefficient');
subplot(3,1,3)
plot(I)
hold on;
Iline = refline(0, I_thresh);
Iline.Color = 'r'; Iline.LineStyle = '--';
hold off; grid on;
title('Information on intensity track');
xlabel('number of windows'); ylabel('Logarithmic intensity');
end
|
github
|
txzhao/QbH-Demo-master
|
demo.m
|
.m
|
QbH-Demo-master/scripts/demo.m
| 5,990 |
utf_8
|
a780e19736f3a0d1938b99f769bbcd44
|
function varargout = demo(varargin)
% DEMO MATLAB code for demo.fig
% DEMO, by itself, creates a new DEMO or raises the existing
% singleton*.
%
% H = DEMO returns the handle to a new DEMO or the handle to
% the existing singleton*.
%
% DEMO('CALLBACK',hObject,eventData,handles,...) calls the local
% function named CALLBACK in DEMO.M with the given input arguments.
%
% DEMO('Property','Value',...) creates a new DEMO or raises the
% existing singleton*. Starting from the left, property value pairs are
% applied to the GUI before demo_OpeningFcn gets called. An
% unrecognized property name or invalid value makes property application
% stop. All inputs are passed to demo_OpeningFcn via varargin.
%
% *See GUI Options on GUIDE's Tools menu. Choose "GUI allows only one
% instance to run (singleton)".
%
% See also: GUIDE, GUIDATA, GUIHANDLES
% Edit the above text to modify the response to help demo
% Last Modified by GUIDE v2.5 31-Oct-2017 11:25:50
% Begin initialization code - DO NOT EDIT
gui_Singleton = 1;
gui_State = struct('gui_Name', mfilename, ...
'gui_Singleton', gui_Singleton, ...
'gui_OpeningFcn', @demo_OpeningFcn, ...
'gui_OutputFcn', @demo_OutputFcn, ...
'gui_LayoutFcn', [] , ...
'gui_Callback', []);
if nargin && ischar(varargin{1})
gui_State.gui_Callback = str2func(varargin{1});
end
if nargout
[varargout{1:nargout}] = gui_mainfcn(gui_State, varargin{:});
else
gui_mainfcn(gui_State, varargin{:});
end
% End initialization code - DO NOT EDIT
% --- Executes just before demo is made visible.
function demo_OpeningFcn(hObject, eventdata, handles, varargin)
% This function has no output args, see OutputFcn.
% hObject handle to figure
% eventdata reserved - to be defined in a future version of MATLAB
% handles structure with handles and user data (see GUIDATA)
% varargin command line arguments to demo (see VARARGIN)
addpath(genpath('../db'));
addpath(genpath('GetMusicFeatures'));
addpath(genpath('trained_hmm'));
% Choose default command line output for demo
handles.output = hObject;
% Update handles structure
guidata(hObject, handles);
% UIWAIT makes demo wait for user response (see UIRESUME)
% uiwait(handles.figure1);
% --- Outputs from this function are returned to the command line.
function varargout = demo_OutputFcn(hObject, eventdata, handles)
% varargout cell array for returning output args (see VARARGOUT);
% hObject handle to figure
% eventdata reserved - to be defined in a future version of MATLAB
% handles structure with handles and user data (see GUIDATA)
% Get default command line output from handles structure
varargout{1} = handles.output;
% --- Executes on button press in load_button.
function load_button_Callback(hObject, eventdata, handles)
% hObject handle to load_button (see GCBO)
% eventdata reserved - to be defined in a future version of MATLAB
% handles structure with handles and user data (see GUIDATA)
[Filename, PathName, ~] = uigetfile('*.wav');
[handles.st, handles.Y, handles.FS] = load_sound([PathName Filename]);
msgbox('Load recording finished!');
guidata(hObject, handles);
% --- Executes on button press in start_button.
function start_button_Callback(hObject, eventdata, handles)
% hObject handle to start_button (see GCBO)
% eventdata reserved - to be defined in a future version of MATLAB
% handles structure with handles and user data (see GUIDATA)
dict = {'across', 'enjoysilen', 'inthemood', 'letitbe', 'lovemetend', 'morethwor', 'obladi', 'strangers', 'sweethome', 'wishyouw'};
load('trained_hmm/hmms.mat');
lP = logprob(hmms, handles.st);
bar(lP); xlabel('melody id'); ylabel('log-probability');
[~, idx] = max(lP);
handles.text_classification.String = dict{idx};
guidata(hObject, handles);
% --- Executes on button press in play_button.
function play_button_Callback(hObject, eventdata, handles)
% hObject handle to play_button (see GCBO)
% eventdata reserved - to be defined in a future version of MATLAB
% handles structure with handles and user data (see GUIDATA)
sound(handles.Y, handles.FS);
msgbox('Play recording finished!');
% --- Executes on button press in record_new.
function record_new_Callback(hObject, eventdata, handles)
% hObject handle to record_new (see GCBO)
% eventdata reserved - to be defined in a future version of MATLAB
% handles structure with handles and user data (see GUIDATA)
FS = 22050; winlen = 0.03;
new_record = audiorecorder(FS, 16, 1);
recordblocking(new_record, handles.record_len);
newRecord = getaudiodata(new_record);
frIseq = GetMusicFeatures(newRecord, FS, winlen);
handles.st = PostProcess(frIseq, false);
handles.Y = newRecord; handles.FS = FS;
msgbox('Record new finished!');
guidata(hObject, handles);
function edit1edit_length_Callback(hObject, eventdata, handles)
% hObject handle to edit1edit_length (see GCBO)
% eventdata reserved - to be defined in a future version of MATLAB
% handles structure with handles and user data (see GUIDATA)
% Hints: get(hObject,'String') returns contents of edit1edit_length as text
% str2double(get(hObject,'String')) returns contents of edit1edit_length as a double
get(hObject,'String');
handles.record_len = str2double(get(hObject,'String'));
guidata(hObject, handles);
% --- Executes during object creation, after setting all properties.
function edit1edit_length_CreateFcn(hObject, eventdata, handles)
% hObject handle to edit1edit_length (see GCBO)
% eventdata reserved - to be defined in a future version of MATLAB
% handles empty - handles not created until after all CreateFcns called
% Hint: edit controls usually have a white background on Windows.
% See ISPC and COMPUTER.
if ispc && isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor'))
set(hObject,'BackgroundColor','white');
end
|
github
|
txzhao/QbH-Demo-master
|
MusicFromFeatures.m
|
.m
|
QbH-Demo-master/scripts/GetMusicFeatures/MusicFromFeatures.m
| 4,007 |
utf_8
|
2c1020edef2861ddca3f5764fdf734a4
|
%[signal] = MusicFromFeatures(feats,fs)
%or
%[signal] = MusicFromFeatures(feats,fs,winlength)
%or
%[signal] = MusicFromFeatures(feats,fs,winlength,noiseopt)
%
%Method to synthesize a signal from melody recognition features
%
%Usage:
%This function can generate signals from melody recognition features data like
%those given by GetMusicFeatures. Because GetMusicFeatures discards a lot of
%information, the signal cannot be recovered exactly. Instead it is assumed
%that the signal is a sinewave of varying frequency and intensity, possibly with
%some added white noise.
%
%Input:
%feats= Matrix containing melody features. Each column corresponds to a
% frame. If there are three rows, the first row is pitch (in Hz), the
% second row is the correlation coefficient between pitch periods,
% while the third gives per-sample intensity. If there are two rows,
% these should be frequency and intensity; rho is assumed to be one.
%fs= Sampling frequency of synthesized signal in Hz.
%winlength= Length of the analysis window in seconds (default 0.03).
% This should be the same as used for GetMusicFeatures previously.
%noiseopt= How to handle noise. Can be either positive, negative, or zero. If
% negative, we assume no noise, and that all energy is due to the
% sinusoidal component (equivalent to rho = 1). Otherwise, any
% occurrences of rho < 1 is assumed to be due to white noise of
% varying amplitude decreasing the correlation between samples, and
% energy is allocated to the white noise component as well. However,
% noise is only included in the output signal if noisopt is greater
% than zero. The default is positive, meaning that noise is included.
%
%Output:
%signal= Resynthesized melody waveform based on a sine + noise model.
%
%Gustav Eje Henter 2011-10-25 tested
%Gustav Eje Henter 2012-10-09 tested
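%
%Example (hypothetical round trip through the feature extractor):
%  frIseq  = GetMusicFeatures(y, fs, 0.03);
%  resynth = MusicFromFeatures(frIseq, fs, 0.03);
%  soundsc(resynth, fs);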
function [signal] = MusicFromFeatures(feats,fs,winlength,noiseopt)
[nF T] = size(feats);
if (nF == 2),
rhos = ones(1,T); % Assume no noise
elseif (nF == 3),
rhos = feats(2,:);
else
error('Illegal number of features per time frame!');
end
f = feats(1,:);
Is = feats(end,:);
if (nargin < 2) || isempty(fs),
    fs = 44100; % Default sampling frequency
end
if (nargin < 3) || isempty(winlength),
winlength = 0.030; % Default 30 ms
else
winlength = abs(winlength(1)); % Make winlength a real scalar
end
if (nargin < 4) || isempty(noiseopt),
noiseopt = true;
elseif (noiseopt < 0),
noiseopt = false;
rhos = ones(1,T); % Assume no noise
else
noiseopt = (noiseopt > 0);
end
% Sanitize features
f = max(f,0);
Is = max(Is,0);
rhos = min(max(rhos,0),1); % Negative rhos do not make sense under our model
% Compute signal component amplitudes from features, according to the options
sineamp = Is.*sqrt(2*rhos);
if noiseopt,
noiseamp = Is.*sqrt(1 - rhos); % Give noise appropriate, nonzero amplitude
else
noiseamp = zeros(size(sineamp));
end
% Compute frame and sample times in seconds
tframe = (1:T)*(winlength/2);
ts = (0:1:ceil((T+1)*(winlength/2)*fs))/fs;
uselog = true; % Upsample amplitudes on logarithmic scale
% Upsample signal parameters from frame-by-frame to sample-by-sample
f = upsampler(tframe,f,ts,true); % Upsample frequency in logarithmic domain
sineamp = upsampler(tframe,sineamp,ts,uselog);
noiseamp = upsampler(tframe,noiseamp,ts,uselog);
phis = 2*pi*cumsum(f)/fs; % Convert frequency to instantaneous phase
signal = sineamp.*sin(phis) + noiseamp.*randn(size(ts)); % Synthesize output
function yup = upsampler(tdwn,ydwn,tup,uselog)
% Interpolation method
%method = 'linear';
method = 'spline';
if (numel(unique(ydwn)) == 1),
yup = ydwn(1)*ones(size(tup));
elseif uselog,
okpts = isfinite(log(ydwn)); % Remove infinite points
yup = exp(interp1(tdwn(okpts),log(ydwn(okpts)),tup,method,'extrap'));
else
yup = max(interp1(tdwn,ydwn,tup,method,'extrap'),0); % Min value is 0
end
| github | txzhao/QbH-Demo-master | GetMusicFeatures.m | .m | QbH-Demo-master/scripts/GetMusicFeatures/GetMusicFeatures.m | 4,634 | utf_8 | 1f5bc70d10aa7285068637abcdda7521 |
%[frIsequence] = GetMusicFeatures(signal,fs)
%or
%[frIsequence] = GetMusicFeatures(signal,fs,winlength)
%
%Method to calculate features for melody recognition
%
%Usage:
%First load a sound file using wavread or similar, then use this function
%to extract pitch and energy contours of the melody in the sound. This
%information can be used to compute a sequence of feature values or
%vectors for melody recognition. Note that the pitch estimation is
%unreliable (typically giving very high values) in silent segments, and
%may not work at all for polyphonic sounds.
%
%Input:
%signal= Vector containing sampled signal values (must be mono).
%fs= Sampling frequency of signal in Hz.
%winlength= Length of the analysis window in seconds (default 0.03).
% Square ("boxcar") analysis windows with 50% overlap are used.
%
%Output:
%frIsequence= Matrix containing pitch, correlation, and intensity estimates
% for use in creating features for melody recognition. Each column
% represents one frame in the analysis. Elements in the first
% row are pitch estimates in Hz (80--1100 Hz), the second row
% estimates the correlation coefficient (rho) between adjacent
% pitch periods, while the third row contains corresponding
% estimates of per-sample intensity.
%
%References:
%This method is based on a pitch estimator provided by Obada Alhaj Moussa.
%
%Gustav Eje Henter 2010-09-15 tested
%Gustav Eje Henter 2011-10-25 tested
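%
%Example (illustrative sketch; the file name is a placeholder):
%   [x, fs] = audioread('query.wav');        % a hummed or sung query
%   frIs = GetMusicFeatures(x, fs, 0.03);    % 3 x T matrix of frame features
%   pitch = frIs(1,:);                       % pitch track in Hz
%   intensity = frIs(3,:);                   % per-sample intensity per frame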
function [frIsequence] = GetMusicFeatures(signal,fs,winlength)
% Wikipedia: "human voices are roughly in the range of 80 Hz to 1100 Hz"
minpitch = 80;
maxpitch = 1100;
signal = real(double(signal)); % Make sure the signal is a real double
sigsize = size(signal);
if (min(sigsize) > 2) || (length(sigsize) > 2),
error('Multichannel signals are not supported. Use only mono sounds!');
end
if (sigsize(1) == 2),
signal = (signal(1,:)' + signal(2,:)')/2;
elseif (sigsize(2) == 2),
signal = (signal(:,1) + signal(:,2))/2;
end
signal = signal - mean(signal); % Remove DC, which can disturb intensities
if fs <= 0
fs = 44100; % Replace illegal fs-values with a standard sampling freq.
end
% Compute the pitch periods in samples for the human voice range
minlag = round(fs/maxpitch);
maxlag = round(fs/minpitch);
if (nargin > 2) && ~isempty(winlength),
winlength = abs(winlength(1)); % Make winlength a real scalar
else
winlength = 0.030; % Default 30 ms
end
winlength = round(winlength*fs); % Convert to number of samples
winlength = max(winlength+mod(winlength,2),...
2*minlag); % Make windows sufficiently long and an even sample number
winstep = winlength/2;
nsteps = floor(length(signal)/winstep) - 1;
if (nsteps < 1)
error(['Signal too short. Use at least ' int2str(winlength)...
' samples!']);
end
frIsequence = zeros(3,nsteps); % Initialize output variable to correct size
for n = 0:(nsteps-1),
% Cut out a segment of the signal starting at sample offset n*winstep
window = signal(n*winstep+(1:winlength));
% Estimate the pitch (sampling frequency/pitch period), between-period
% correlation coefficient, and intensity
[pprd,maxcorr] = yin_pitch(window,minlag,maxlag);
frIsequence(:,n+1) = [fs/pprd;maxcorr;norm(window/sqrt(numel(window)))];
end
% Below is the pitch period estimation sub-routine.
% The estimate is based on the autocorrelation function.
function [pprd,maxcorr] = yin_pitch(signal,minlag,maxlag)
N = length(signal);
%minlag = 40;
%maxlag = 200;
dif = zeros(maxlag - minlag + 1, 1);
for idx = minlag : maxlag
seg1 = signal(idx + 1 : N);
seg2 = signal(1 : N - idx);
% Estimate the average squared difference ("dif") at lag idx
dif(idx - minlag + 1) = sum((seg1 - seg2).^2) / (N - idx);
end
thresh = (max(dif) - min(dif)) * 0.1 + min(dif);
% Locate the first minimum of dif, which is the first maximum of the
% correlation; the corresponding lag is the pitch period.
idx = minlag;
while idx <= maxlag
if dif(idx - minlag + 1) <= thresh
pprd = idx;
break;
end
idx = idx + 1;
end
% Allow the procedure to find the first minimum to roll over small "bumps"
% in the autocorrelation function that are below the 10% threshold.
while idx <= maxlag
if dif(idx - minlag + 1) >= thresh
break;
end
if dif(idx - minlag + 1) < dif(pprd - minlag + 1)
pprd = idx;
end
idx = idx + 1;
end
%difmin = dif(pprd - minlag + 1);
seg1 = signal(pprd + 1 : N);
seg2 = signal(1 : N - pprd);
maxcorr = corr(reshape(seg1,numel(seg1),1),reshape(seg2,numel(seg1),1));
| github | txzhao/QbH-Demo-master | adaptStart.m | .m | QbH-Demo-master/scripts/@GaussMixD/adaptStart.m | 674 | utf_8 | f2cba1509325e96dad87f697502ad32c |
%aState=adaptStart(pD)
%starts GaussMixD object adaptation to observed data,
%by initializing accumulator data structure for sufficient statistics,
%to be used in subsequent calls to method adaptAccum and adaptSet.
%
%Input:
%pD= GaussMixD object or array of GaussMixD objects
%
%Result:
%aState= data structure to be used by methods adaptAccum and adaptSet.
%
%Theory is discussed in method adaptSet
%
%Arne Leijon 2004-11-18 tested
function aState=adaptStart(pD)
for i=1:prod(size(pD))%one storage set for each object in the array
aState(i).Gaussians=adaptStart(pD(i).Gaussians);%to adapt sub-object
aState(i).MixWeight=zeros(size(pD(i).MixWeight));%
end;
| github | txzhao/QbH-Demo-master | adaptAccum.m | .m | QbH-Demo-master/scripts/@GaussMixD/adaptAccum.m | 3,676 | utf_8 | 9f083fcf31a490e01dbd7f56edad7c54 |
%aState=adaptAccum(pD,aState,obsData,obsWeight)
%method to adapt array of GaussMixD objects to observed data,
%by accumulating sufficient statistics from the data,
%for later updating of the object by method adaptSet.
%
%Usage:
%First obtain the storage data structure aState from method adaptStart.
%Then, adaptAccum can be called several times with different observation data subsets.
%The aState data structure must be saved externally between calls to adaptAccum.
%Finally, the GaussMixD object(s) are updated by method adaptSet.
%
%Input:
%pD= a GaussMixD object or multidim array of GaussMixD objects
%aState= accumulated adaptation state preserved from previous calls,
% first obtained from method adaptStart
%obsData= matrix with observed column vectors,
% each assumed to be drawn from one of the GaussMixD objects
%obsWeight= (optional) matrix with weight factors, one column for each vector in obsData,
% and one row for each object in the GaussMixD array
% size(obsWeight)== [length(pD(:)), size(obsData,2)]
% obsWeight(i,t)= prop to P( GaussMixD(t)=i | obsData)
% obsWeight must have consistent values for all calls.
%
%Result:
%aState= accumulated adaptation data, incl. this observation data set.
%
%Arne Leijon 2005-02-14 tested
% 2006-04-12 fixed bug for case with only one mix component
% 2009-10-11 fixed bug with component sub-probabilities
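%
%Example (illustrative sketch of one training pass; gmm is assumed to be a
%GaussMixD object already initialized with init, and trainData1/trainData2
%are placeholder feature matrices):
% aS = adaptStart(gmm);                  % zeroed accumulators
% aS = adaptAccum(gmm, aS, trainData1);  % accumulate first data subset
% aS = adaptAccum(gmm, aS, trainData2);  % ...and a second subset
% gmm = adaptSet(gmm, aS);               % update the GaussMixD object(s)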
function aState=adaptAccum(pD,aState,obsData,obsWeight)
nData=size(obsData,2);%number of given vector samples
nObj=numel(pD);%n of GaussMixD objects in array
if nargin<4%no external obsWeight given
if nObj==1
obsWeight=ones(nObj,nData);%use all data with equal weight
else
obsWeight=prob(pD,obsData);%assign weight to each GaussMixD object
obsWeight=obsWeight./repmat(sum(obsWeight),nObj,1);
%obsWeight(i,t)= P(mixS(t)= i | obsData)= P(GaussMixD(i)-> X(t))
end;
end;
for i=1:nObj%for all GaussMixD objects
%***find sub-Object probabilities for each mixed GaussD
%can be done instead by the sub-Object itself ??????
%NO, because sub-Object also needs our obsWeight
nSubObj=length(pD(i).Gaussians);
if nSubObj==1%no need for extra computation
aState(i).Gaussians=adaptAccum(pD(i).Gaussians,aState(i).Gaussians,obsData,obsWeight(i,:));
aState(i).MixWeight=aState(i).MixWeight+sum(obsWeight(i,:),2);
else
subProb=prob(pD(i).Gaussians,obsData);%saved from previous call instead???
%subProb(j,t)=P(X(t)=obsData(:,t) | subS(t)=j & mixS(t)=i )
%***** should include previous MixWeight here??? 2009-10-08
subProb=diag(pD(i).MixWeight)*subProb;%fix Arne Leijon, 2009-10-11
%subProb(j,t)=P(X(t)=obsData(:,t) & subS(t)=j | mixS(t)=i )
%**** testGMM3 actually works much better without previous MixWeight!
%**** with corrected version it usually gets stuck in local maximum,
%**** but this is probably because the true MixWeights were equal,
%**** so it was better to ignore the estimated MixWeight in this case.
denom=max(realmin,sum(subProb,1));%2005-02-14: avoid division by zero in next statement
subProb=subProb./repmat(denom,nSubObj,1);%normalize to conditional prob.s
%subProb(j,t)=P(subS(t)=j| X(t)=obsData(j,t) & mixS(t)=i )
subProb=subProb.*repmat(obsWeight(i,:),nSubObj,1);%scale by externally given weights
%subProb(j,t)=P(mixS(t)=i & subS(t)=j| X(1:T)=obsData(:,1:T) )
aState(i).Gaussians=adaptAccum(pD(i).Gaussians,aState(i).Gaussians,obsData,subProb);
aState(i).MixWeight=aState(i).MixWeight+sum(subProb,2);
end;
end;
| github | txzhao/QbH-Demo-master | init.m | .m | QbH-Demo-master/scripts/@GaussMixD/init.m | 1,921 | utf_8 | 5b033aa7d8bc6f8214b3a795ed735a11 |
%pD=init(pD,x);
%initializes a GaussMixD object or array of such objects
%to conform with a set of given observed vectors.
%The agreement is very crude, and should be refined by training,
%using methods adaptStart, adaptAccum, and adaptSet.
%
%Input:
%pD= a single GaussMixD object or array of such objects
%x= matrix with observed vectors stored columnwise
%
%Result:
%pD= initialized GaussMixD object or multidim GaussMixD array
% size(pD)== same as input
%
%Method:
%For a single GaussMixD object: let its Gaussians sub-object do it.
%For a GaussMixD array: First init a GaussD array, then split each cluster.
%This initialization is crude, and should be refined by training.
%
%Arne Leijon 2006-04-21 tested
% 2011-05-26 minor cleanup
function pD=init(pD,x)
nObj=numel(pD);
if nObj>0.5*size(x,2)
error('Too few data vectors');end;%***reduce nObj instead???
if nObj==1
pD.Gaussians=init(pD.Gaussians,x);%let Gaussians do it
nGaussians=length(pD.Gaussians);
pD.MixWeight=ones(nGaussians,1)./nGaussians;%equal mixweights
else
%make a single Gaussians array, and then split each GaussD
g=init(repmat(GaussD,nObj,1),x);%single GaussD at each cluster
[~,bestG]=max(prob(g,x));%assign each data point to nearest GaussD
for i=1:nObj
[pD(i).Gaussians,iOK]=init(pD(i).Gaussians,x(:,bestG==i));%use only nearest data
if any(~iOK)
%delete Gaussians(i) where iOK(i)==0, because of too few data
pD(i).Gaussians=pD(i).Gaussians(iOK==1);
warning('GaussMixD:Init:ReducedSize',...
['GaussMixD no.',num2str(i),' reduced to ',num2str(length(pD(i).Gaussians)),' components']);
end;
nGaussians=length(pD(i).Gaussians);%number of sub-objects in this mix
pD(i).MixWeight=ones(nGaussians,1)./nGaussians;%equal mixweights
end;
end;
| github | txzhao/QbH-Demo-master | adaptSet.m | .m | QbH-Demo-master/scripts/@GaussMixD/adaptSet.m | 993 | utf_8 | 4ca6665d6b9e85c675f31abffd865fd3 |
%pD=adaptSet(pD,aState)
%method to finally adapt a GaussMixD object
%using accumulated statistics from observed data.
%
%Input:
%pD= GaussMixD object or array of GaussMixD objects
%aState= accumulated statistics from previous calls of adaptAccum
%
%Result:
%pD= adapted version of the GaussMixD object
%
%Theory and Method:
%The sub-object GaussD array pD(i).Gaussians has its own adaptSet method.
%In addition, this method adjusts the pD(i).MixWeight vector,
% simply by normalizing the accumulated sum of MixWeight vectors,
% for the observed data.
%
%References:
% Leijon (200x). Pattern Recognition
% Bilmes (1998). A gentle tutorial of the EM algorithm.
%
%Arne Leijon 2004-11-15 tested
function pD=adaptSet(pD,aState)
for i=1:numel(pD)%for all GaussMixD objects
pD(i).Gaussians=adaptSet(pD(i).Gaussians,aState(i).Gaussians);%sub-GaussD sets itself
pD(i).MixWeight=aState(i).MixWeight./sum(aState(i).MixWeight);%set normalized MixWeight
end;%easy!!!
| github | txzhao/QbH-Demo-master | logprob.m | .m | QbH-Demo-master/scripts/@GaussMixD/logprob.m | 1,334 | utf_8 | f8724eb6c7a8d8d4bcddbd8f85a932e3 |
%logP=logprob(pD,x) gives log(probability densities) for given vectors
%assumed to be drawn from a given GaussMixD object
%
%Input:
%pD= GaussMixD object or array of such objects
%x= matrix with given vectors stored columnwise
%
%Result:
%logP= log(probability densities for x)
% size(logP)== [numel(pD),size(x,2)]
%exp(logP)= true probability densities for x
%
%The log representation is useful because the probability densities may be
%extremely small for random vectors with many elements
%
%Arne Leijon 2004-11-15 tested
% 2011-05-24, more robust version, tested
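%
%Example (illustrative sketch; gmms is assumed to be a trained column array
%of GaussMixD objects, one per class, and x a matching data matrix):
% logP = logprob(gmms, x);            % size [numel(gmms), size(x,2)]
% [~, bestClass] = max(logP, [], 1);  % most likely mixture for each vector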
function logP=logprob(pD,x)
%pDsize=size(pD);%size of GaussMixD array
nObj=numel(pD);%number of GaussMixD objects
nx=size(x,2);%number of observed vectors
logP=zeros(nObj,nx);
for n=1:nObj
logPn=logprob(pD(n).Gaussians,x);%prob from all sub-Gaussians
logS=max(logPn);
%if length(pD(n).Gaussians)==1, logS is scalar, otherwise
%size(logS)==[1,nx]; might be -Inf or +Inf at some places
logPn=bsxfun(@minus,logPn,logS);%=logPn-logS expanded to matching size
%logPn(k,t) may be NaN for some k, if logS(t)==-Inf, or logS(t)==+Inf
logPn(isnan(logPn(:)))=0;%corrected
logP(n,:)=logS+log(pD(n).MixWeight'*exp(logPn));
%may be +Inf or -Inf at some places, but this is OK
end;
end
| github | txzhao/QbH-Demo-master | rand.m | .m | QbH-Demo-master/scripts/@GaussMixD/rand.m | 766 | utf_8 | b780c5bb11fdaef5b6a99aec30b89b82 |
%[X,S]=rand(pD,nSamples) returns random vectors drawn from a single GaussMixD object.
%
%Input:
%pD= the GaussMixD object
%nSamples= scalar defining number of wanted random data vectors
%
%Result:
%X= matrix with data vectors drawn from object pD
% size(X)== [DataSize, nSamples]
%S= row vector with indices of the GaussD sub-objects randomly chosen
% size(S)== [1, nSamples]
%
%Arne Leijon 2009-07-21 tested
function [X,S]=rand(pD,nSamples)
if length(pD)>1
error('This method works only for a single GaussMixD object');
end;
S=rand(DiscreteD(pD.MixWeight),nSamples);%random integer sequence, MixWeight distribution
X=zeros(pD.DataSize,nSamples);
for s=1:max(S)
X(:,S==s)=rand(pD.Gaussians(s),sum(S==s));%get from randomly chosen sub-object
end;
| github | txzhao/QbH-Demo-master | adaptStart.m | .m | QbH-Demo-master/scripts/@HMM/adaptStart.m | 795 | utf_8 | bc9660ec5d20fd009c1bcda6006c231e |
%aState=adaptStart(hmm)
% initialises adaptation data structure for a single HMM object,
% to be saved between subsequent calls to method adaptAccum.
%
%Input:
%hmm= single HMM object
%
%Result:
%aState= struct representing zero weight of previous observed data,
% with fields
%aState.MC for the StateGen sub-object
%aState.Out for the OutputDistr sub-object
%aState.LogProb for accumulated log(prob(observations))
%
%Arne Leijon 2009-07-23 tested
function aState=adaptStart(hmm)
if length(hmm)>1
error('Method works only for a single object');end;
aState.MC=adaptStart(hmm.StateGen);%data to adapt the MarkovChain
aState.Out=adaptStart(hmm.OutputDistr);%data to adapt the OutputDistr
aState.LogProb=0;%to store accumulated observation logprob, to use as stop crit
| github | txzhao/QbH-Demo-master | adaptAccum.m | .m | QbH-Demo-master/scripts/@HMM/adaptAccum.m | 2,197 | utf_8 | 6a9249a64c2447d744dce7c50a9dcf77 |
%[aState,logP]=adaptAccum(hmm,aState,obsData)
%method to adapt a single HMM object to observed data,
%by accumulating sufficient statistics from the data,
%for later updating of the object by method adaptSet.
%
%Usage:
%First obtain the storage data structure aState from method adaptStart.
%Then, adaptAccum can be called several times with different observation data subsets.
%The aState data structure must be saved externally between calls to adaptAccum.
%Finally, the HMM object is updated by method adaptSet.
%
%Input:
%hmm= a single HMM object
%obsData= matrix with a sequence of data vectors, stored columnwise,
% supposed to be drawn from this HMM.
%aState= accumulated adaptation state from previous calls
%
%Result:
%aState= accumulated adaptation state, incl. this subset of observed data,
% must be saved externally until next call
%logP= accumulated log( P(obsData | hmm) )
% may be used externally as training stop criterion.
%
%Method: Obtain from sub-object OutputDistr separate observation probabilities
% These are used by sub-object StateGen, which also provides
% conditional state probabilities, given whole obs.sequence.
% These are then used as weights to adapt OutputDistr.
%
%Arne Leijon 2009-07-23 tested
% 2011-05-26, generalized prob method
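%
%Example (illustrative sketch of repeated training passes; hmm and obsData
%are assumptions, and the stopping threshold and pass limit are arbitrary):
% logPold = -Inf;
% for pass = 1:20
%     aS = adaptStart(hmm);
%     [aS, logP] = adaptAccum(hmm, aS, obsData);
%     hmm = adaptSet(hmm, aS);
%     if logP - logPold < 1e-3, break; end  % log-likelihood has converged
%     logPold = logP;
% end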
function [aState,logP]=adaptAccum(hmm,aState,obsData)
% if length(hmm)>1%enough to test this in adaptStart!
% error('Method works only for a single object');end;
[pX,lScale]=prob(hmm.OutputDistr,obsData);%scaled obs.probabilities
%pX(i,t)*exp(lScale(t)) == P[obsData(:,t) | hmm.OutputDistr(i)]
[aState.MC,gamma,logP]=adaptAccum(hmm.StateGen,aState.MC,pX);
%gamma(i,t)=P[HMMstate(t)=j | obsData, hmm]; obtained as side-result to save computation
aState.Out=adaptAccum(hmm.OutputDistr,aState.Out,obsData,gamma);
if length(lScale)==1%can happen only if length(hmm.OutputDistr)==1
aState.LogProb=aState.LogProb+logP+size(obsData,2)*lScale;%=accum. logprob(hmm,obsData)
else
aState.LogProb=aState.LogProb+logP+sum(lScale);%=accum. logprob(hmm,obsData)
end;
logP=aState.LogProb;%return separately, for external code clarity
| github | txzhao/QbH-Demo-master | adaptSet.m | .m | QbH-Demo-master/scripts/@HMM/adaptSet.m | 524 | utf_8 | 614d518ee94beac4b3b5ca9eecec1b81 |
%hmm=adaptSet(hmm,aState)
%method to finally adapt a single HMM object
%using accumulated statistics from observed training data sets.
%
%Input:
%hmm= single HMM object
%aState= accumulated statistics from previous calls of adaptAccum
%
%Result:
%hmm= adapted version of the HMM object
%
%Theory and Method:
%
%Arne Leijon 2009-07-23 tested
function hmm=adaptSet(hmm,aState)%just dispatch to sub-objects
hmm.StateGen=adaptSet(hmm.StateGen,aState.MC);
hmm.OutputDistr=adaptSet(hmm.OutputDistr,aState.Out);
| github | txzhao/QbH-Demo-master | logprob.m | .m | QbH-Demo-master/scripts/@HMM/logprob.m | 1,762 | utf_8 | eda55425f67dfb12d14f892a2d14fbf3 |
%logP=logprob(hmm,x) gives conditional log(probability densities)
%for an observed sequence of (possibly vector-valued) samples,
%for each HMM object in an array of HMM objects.
%This can be used to compare how well HMMs can explain data from an unknown source.
%
%Input:
%hmm= array of HMM objects
%x= matrix with a sequence of observed vectors, stored columnwise
%NOTE: hmm DataSize must be same as observed vector length, i.e.
% hmm(i).DataSize == size(x,1), for each hmm(i).
% Otherwise, the probability is, of course, ZERO.
%
%Result:
%logP= array with log probabilities of the complete observed sequence.
%logP(i)= log P[x | hmm(i)]
% size(logP)== size(hmm)
%
%The log representation is useful because the probability densities
%exp(logP) may be extremely small for random vectors with many elements
%
%Method: run the forward algorithm with each hmm on the data.
%
%Ref: Arne Leijon (20xx): Pattern Recognition.
%
%----------------------------------------------------
%Code Authors: Tianxiao Zhao
%----------------------------------------------------
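%
%Example (illustrative sketch; hmms is assumed to be an array of trained
%HMM objects, one per melody, and xTest a feature sequence for a query):
% logP = logprob(hmms, xTest);    % one log-likelihood per HMM
% [~, bestModel] = max(logP(:));  % index of the best-matching HMM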
function logP = logprob(hmm, x)
hmmSize = size(hmm);%size of hmm array
logP = zeros(hmmSize);%space for result
for i = 1 : numel(hmm)%for all HMM objects
%Note: array elements can always be accessed as hmm(i),
%regardless of hmmSize, even with multi-dimensional array.
%
%logP(i)= result for hmm(i)
%continue coding from here, and delete the error message.
if hmm(i).DataSize == size(x, 1)
[p, logS] = prob(hmm(i).OutputDistr, x);
[~, c] = forward(hmm(i).StateGen, p.*repmat(exp(logS), size(p, 1), 1));
logP(i) = sum(log(c));
else
logP(i) = log(0);
end
end;
| github | txzhao/QbH-Demo-master | viterbi.m | .m | QbH-Demo-master/scripts/@HMM/viterbi.m | 1,569 | utf_8 | bd3bf13e506663061de4c7d697f181a3 |
%[S,logP]=viterbi(hmm,x)
%calculates optimal HMM state sequence
%for an observed sequence of (possibly vector-valued) samples,
%for each HMM object in an array of HMM objects.
%
%Input:
%hmm= array of HMM objects
%x= matrix with a sequence of observed vectors, stored columnwise
%NOTE: hmm DataSize must be same as vector length, i.e.
% get(hmm(i),'DataSize') == size(x,1), for every hmm(i),
% otherwise probability is ZERO.
%
%Result:
%S= matrix with best state sequences
% S(i,t)= best state of hmm(i) for x(:,t)
% size(S)== [numel(hmm),size(x,2)]
%logP= column vector with log prob of found best state sequence
%logP(i)= log P(x, S(i,:) | HMM(i) )
% logP can be used to compare HMM:s, BUT NOTE
% logP(i) is NOT log P(x | HMM(i) )
%
%Method: for each hmm, calculate logprob for each state, and
%call MarkovChain/viterbi for the actual search algorithm.
%
%Ref: Arne Leijon (200x): Pattern Recognition.
%
%Arne Leijon 2009-07-23
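%
%Example (illustrative sketch; hmm and the observation matrix x are
%placeholders):
% [S, logP] = viterbi(hmm, x);  % S(1,t) = best state of hmm(1) at frame t
% plot(S(1,:));                 % inspect the decoded state path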
function [S,logP]=viterbi(hmm,x)
hmmLength=numel(hmm);%total number of HMM objects in the array
T=size(x,2);%number of vector samples in observed sequence
S=zeros(hmmLength,T);%space for result
logP=zeros(hmmLength,1);
for i=1:hmmLength%for all HMM objects
if hmm(i).DataSize==size(x,1)
lPx=logprob(hmm(i).OutputDistr,x);
[S(i,:),logP(i)]=viterbi(hmm(i).StateGen,lPx);
else
warning('HMM:viterbi:WrongDataSize',...
['Incompatible DataSize in HMM #',num2str(i)]);%but we can still continue
end;
end;
| github | txzhao/QbH-Demo-master | double.m | .m | QbH-Demo-master/scripts/@DiscreteD/double.m | 584 | utf_8 | c202f8b43b6862ff03458f6960a07b30 |
%pMass=double(pD)
%converts a DiscreteD object or column vector of such objects
%to an array with ProbMass values.
%i.e. inverse of pD=DiscreteD(pMass).
%
%Result:
%pMass(i,z)= P(Z(i)=z), with Z(i)= the i-th discrete random variable.
%
%Arne Leijon 2006-09-03 tested
function pMass=double(pD)
M=0;%max discrete random integer
for i=1:numel(pD)%just in case M is not equal for all distr.
M=max(M,length(pD(i).ProbMass));
end;
pMass=zeros(numel(pD),M);%space for ProbMass matrix
for i=1:numel(pD)
pMass(i,1:length(pD(i).ProbMass))=pD(i).ProbMass';%row ProbMass values
end;
| github | txzhao/QbH-Demo-master | adaptStart.m | .m | QbH-Demo-master/scripts/@DiscreteD/adaptStart.m | 683 | utf_8 | ac7fd76163fba59a09d6d64f8502f5df |
%aState=adaptStart(pD)
%starts DiscreteD object adaptation to observed data,
%by initializing accumulator data structure for sufficient statistics,
%to be used in subsequent calls to method adaptAccum and adaptSet.
%
%Input:
%pD= DiscreteD object or array of such objects
%
%Result:
%aState= data structure to be used by methods adaptAccum and adaptSet.
%
%Theory is discussed in method adaptSet
%
%Arne Leijon 2005-10-25 tested
function aState=adaptStart(pD)
nObj=numel(pD);
aState=repmat(struct('sumWeight',0),nObj,1);%init storage
% for i=1:nObj%one storage set for each object in the array
% aState(i).sumWeight=0;%sum of all weight factors, already zeroed
% end;
| github | txzhao/QbH-Demo-master | adaptAccum.m | .m | QbH-Demo-master/scripts/@DiscreteD/adaptAccum.m | 2,271 | utf_8 | c66cbdd91fa34e74c190349a907c2346 |
%aState=adaptAccum(pD,aState,obsData,obsWeight)
%method to adapt DiscreteD object, or object array, to observed data,
%by accumulating sufficient statistics from the data,
%for later updating of the object by method adaptSet.
%
%Usage:
%First obtain the storage data structure aState from method adaptStart.
%Then, adaptAccum can be called several times with different observation data subsets.
%The aState data structure must be saved externally between calls to adaptAccum.
%Finally, the DiscreteD object is updated by method adaptSet.
%
%Input:
%pD= a DiscreteD object or multidim array of DiscreteD objects
%obsData= row vector with observed scalar samples,
% each assumed to be drawn from one of the DiscreteD objects
%obsWeight= (optional) matrix with weight factors, one column for each sample in obsData,
% and one row for each object in the DiscreteD array.
% size(obsWeight)== [length(pD(:)), size(obsData,2)]
% obsWeight must have consistent values for all calls.
% No obsWeight given <=> all weights=1.
%aState= accumulated adaptation state preserved from previous calls,
% first obtained from method adaptStart
%
%Result:
%aState= accumulated adaptation data, incl. this observation data set.
%
%Arne Leijon 2011-08-29 tested
function aState=adaptAccum(pD,aState,obsData,obsWeight)
if size(obsData,1) > 1
error('DiscreteD object: only scalar data');
end;
obsData=round(obsData);%quantize to integer values
if min(obsData)<1
error('Data samples out of range');
end;
maxObs=max(obsData);
nData=size(obsData,2);%number of given samples
nObj=numel(pD);%n of objects in array
if nargin<4%no external obsWeight given
obsWeight=ones(nObj,nData);%use all data with equal weight
if nObj>1
warning('Several DiscreteD objects with same training data?');
end;
end;
for i=1:nObj%for all objects
maxM=max(maxObs,length(pD(i).ProbMass));
M=size(aState(i).sumWeight,1);%previous max observed data value
if M<maxM
aState(i).sumWeight=[aState(i).sumWeight;zeros(maxM-M,1)];%extend size as needed
end;
for m=1:maxM%each possible observed value
aState(i).sumWeight(m)=aState(i).sumWeight(m)+sum(obsWeight(i,obsData==m),2);
end;
end;
| github | txzhao/QbH-Demo-master | init.m | .m | QbH-Demo-master/scripts/@DiscreteD/init.m | 1,531 | utf_8 | e68d9547cf0f68b4958d54442b76079d |
%pD=init(pD,x);
%initializes DiscreteD object or array of such objects
%to conform with a set of given observed data values.
%The agreement is crude, and should be further refined by training,
%using methods adaptStart, adaptAccum, and adaptSet.
%
%Input:
%pD= a single DiscreteD object or multidim array of DiscreteD objects
%x= row vector with observed data samples
%
%Result:
%pD= initialized DiscreteD object or multidim DiscreteD array
% size(pD)== same as input
%
%Method:
%For a single DiscreteD object: Set ProbMass using all observations.
%For a DiscreteD array: Use all observations for each object,
% and increase probability P[X=i] in pD(i),
%This is crude, but there is no general way to determine
% how "close" observations X=m and X=n are,
% so we cannot define "clusters" in the observed data.
%
%Arne Leijon 2009-07-21
function pD=init(pD,x)
%sizObj=size(pD);
nObj=numel(pD);
if size(x,1)>1
error('DiscreteD object can have only scalar data');end;
x=round(x);
maxObs=max(x);
%collect observation frequencies
fObs=zeros(maxObs,1);%observation frequencies
for m=1:maxObs
fObs(m)=1+sum(x==m);%no zero frequencies
end;
if nObj==1
pD.ProbMass=fObs;
else
if nObj>maxObs
warning('Some DiscreteD objects initialized equal');
end;
for i=1:nObj
m=1+mod(i-1,maxObs);%obs value to be emphasized
p=fObs;
p(m)=2*p(m);%what emphasis factor to use???
pD(i).ProbMass=p;
end;
end;
| github | txzhao/QbH-Demo-master | adaptSet.m | .m | QbH-Demo-master/scripts/@DiscreteD/adaptSet.m | 1,310 | utf_8 | 34015b2276663c72b6439b33c8f1065c |
%pD=adaptSet(pD,aState)
%method to finally adapt a DiscreteD object
%using accumulated statistics from observed data.
%
%Input:
%pD= DiscreteD object or array of such objects
%aState= accumulated statistics from previous calls of adaptAccum
%
%Result:
%pD= adapted version of the DiscreteD object
%
%Theory and Method:
%From observed sample data X(n), n=1....N, we are using the
%accumulated sum of relative weights (relative frequencies)
%
%We have an accumulated weight (column) vector
%sumWeight, with one element for each observed integer value of Z=round(X):
%sumWeight(z)= sum[w(z==Z(n))]
%
%Arne Leijon 2011-08-29, tested
% 2012-06-12, modified use of PseudoCount
function pD=adaptSet(pD,aState)
for i=1:numel(pD)%for all objects in the array
aState(i).sumWeight=aState(i).sumWeight+pD(i).PseudoCount;%/length(aState(i).sumWeight);
%Arne Leijon, 2012-06-12: scalar PseudoCount added to each sumWeight element
%Reasonable, because a Jeffreys prior for the DiscreteD.Weight is
%equivalent to 0.5 "unobserved" count for each possible outcome of the DiscreteD.
pD(i).ProbMass=aState(i).sumWeight;%direct ML estimate
% pD(i).ProbMass=pD(i).ProbMass./sum(pD(i).ProbMass);%normalize probability mass sum
% normalized by DiscreteD.set.ProbMass
end;
| github | txzhao/QbH-Demo-master | prob.m | .m | QbH-Demo-master/scripts/@DiscreteD/prob.m | 1,350 | utf_8 | 3c9694ec22c8663ba9577c76bace7113 |
%[p,logS]=prob(pD,Z)
%method to give the probability of a data sequence,
%assumed to be drawn from given Discrete Distribution(s).
%
%Input:
%pD= DiscreteD object or array of DiscreteD objects
%Z= row vector with data assumed to be drawn from a Discrete Distribution
% (Z may be real-valued, but is always rounded to integer values)
%
%Result:
%p= array with probability values for each element in Z,
% for each given DiscreteD object
% size(p)== [length(pD),size(Z,2)], if pD is a one-dimensional vector
% size(p)== [size(pD),size(Z,2)], if pD is a multidim array
%logS= scalar log scalefactor, for HMM compatibility, always==0
%
%Arne Leijon 2005-10-06 tested
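%
%Example (illustrative sketch, assuming the DiscreteD constructor accepts a
%probability-mass vector, as indicated by the double method of this class):
% pD = DiscreteD([0.5 0.3 0.2]);  % P(Z=1)=0.5, P(Z=2)=0.3, P(Z=3)=0.2
% p = prob(pD, [1 3 4]);          % returns [0.5 0.2 0]; out-of-range values give 0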
function [p,logS]=prob(pD,Z)
if size(Z,1)>1
error('Data must be row vector with scalar values');end;
pDsize=size(pD);%size of DiscreteD array
pDlength=prod(pDsize);%number of DiscreteD objects
nZ=size(Z,2);%number of observed data
p=zeros(pDlength,nZ);%zero prob if Z is out of range
Z=round(Z);%make sure it is integer
for i=1:pDlength%for all objects in pD
iDataOK=(Z>=1) & (Z <=length(pD(i).ProbMass));%within range of the DiscreteD
p(i,iDataOK )=pD(i).ProbMass(Z(iDataOK))';
end;
if pDlength>1
p=squeeze(reshape(p,[pDsize,nZ]));%restore array format
end;
logS=0;%always no scaling, only for compatibility with other ProbDistr classes
| github | txzhao/QbH-Demo-master | finiteDuration.m | .m | QbH-Demo-master/scripts/@MarkovChain/finiteDuration.m | 450 | utf_8 | f56a08b101840c331ce18109640c62cf |
%fd=finiteDuration(mc)
% tests if a given MarkovChain object has finite duration.
%
%Input:
%mc= single MarkovChain object
%
%Result:
%fd= true, if duration is finite.
%
%Arne Leijon 2009-07-19 tested
function fd=finiteDuration(mc)
fd=size(mc.TransitionProb,2)==size(mc.TransitionProb,1)+1;%first condition
if fd
%we use full() just because left-right TransitionProb may be stored as sparse)
fd=full(sum(mc.TransitionProb(:,end)))>0;
end;
| github | txzhao/QbH-Demo-master | adaptStart.m | .m | QbH-Demo-master/scripts/@MarkovChain/adaptStart.m | 468 | utf_8 | 366c886f8600a036eead166ba927a598 |
%aS=adaptStart(mc)
% initialises adaptation data structure,
% to be saved externally between subsequent calls to method adaptAccum.
%
%Input:
%mc= single MarkovChain object
%
%Result:
%aS= initialised adaptation data structure.
%
%Arne Leijon 2004-11-10 tested
function aS=adaptStart(mc)
aS.pI=zeros(size(mc.InitialProb));%for sum of P[S(1)=j | each training sub-sequence]
aS.pS=zeros(size(mc.TransitionProb));%for sum of P[S(t)=i & S(t+1)=j | all training data]
| github | txzhao/QbH-Demo-master | adaptAccum.m | .m | QbH-Demo-master/scripts/@MarkovChain/adaptAccum.m | 3,245 | utf_8 | c928e31dfb69d14aaaf35a1820cd4e5c |
%[aState,gamma,lP]=adaptAccum(mc,aState,pX)
%method to adapt a single MarkovChain object to observed data,
%by accumulating sufficient statistics from the data,
%for later updating of the object by method adaptSet.
%
%Usage:
%First obtain the storage data structure aState from method adaptStart.
%Then, adaptAccum can be called several times with different observation data subsets.
%The aState data structure must be saved externally between calls to adaptAccum.
%Finally, the MarkovChain object is updated by method adaptSet.
%
%Input:
%mc= single MarkovChain object
%pX= matrix prop. to state-conditional observation probabilities, calculated externally,
%pX(j,t)= ScaleFactor* P( X(t)= observed x(t) | S(t)= j ); j=1..N; t=1..T
% Must be pre-calculated externally.
% ScaleFactor is known only externally
%aState= accumulated adaptation state from previous calls
%
%Result:
%aState= accumulated adaptation state, incl. this step,
% must be saved externally until next call
%aState.pI= accumulated sum of P[S(1)=j | each training sub-sequence]
%aState.pS= accumulated sum of P[S(t)=i & S(t+1)=j | all training data]
%gamma= conditional state probability matrix, with elements
%gamma(i,t)=P[ S(t)= i | pX for complete observation sequence]
% returned for external use.
%lP= scalar log(Prob(observed sequence)),
% for external use, to save computation.
% (NOT including external ScaleFactor of given pX)
%
%Method: Results of forward-backward algorithm
% are combined with Baum-Welch update rules.
%
%Ref: Arne Leijon (200x): Pattern Recognition
% Rabiner (1989): Tutorial on HMM. Proc IEEE, 77, 257-286.
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%Elegant solution provided by Niklas Bergstrom, 2008
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
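%Example (illustrative sketch, mirroring how @HMM/adaptAccum calls this
%method; mc, outDistr and obsData are placeholders):
% aS = adaptStart(mc);                       % zeroed accumulators
% [pX, ~] = prob(outDistr, obsData);         % scaled state-conditional obs. prob.
% [aS, gamma, lP] = adaptAccum(mc, aS, pX);  % accumulate statistics
% mc = adaptSet(mc, aS);                     % update the MarkovChain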
function [aState,gamma,lP]=adaptAccum(mc,aState,pX)
T=size(pX,2);
nStates=mc.nStates;%Arne Leijon, 2011-08-03
% Fetch variables from the markov chain
A=mc.TransitionProb;
% Get the scaled forward and backward variables
[alfaHat c] = forward(mc,pX);
betaHat = backward(mc,pX,c);
% Calculate gamma
gamma = alfaHat.*betaHat.*repmat(c(1:T),nStates,1);
% Initial probabilities, aState.pI += gamma(t=1)
aState.pI = aState.pI + gamma(:,1);
% Calculate xi for the current sequence r
% xi(i,j,t) = alfaHat(i,t)*A(i,j)*pX(j,t+1)*betaHat(j,t+1)
% First it's possible to multiply pX and betaHat element wise since they
% correspond to each other
pXbH = pX(:,2:end).*betaHat(:,2:end);
% Then multiply alfaHat with the transpose of the result in order to get
% a matrix of size nStates x nStates with each element summed over t
aHpXbH = alfaHat(:,1:T-1)*pXbH';
% Finally multiply element wise with the previous transition probabilities
xi = aHpXbH.*A(:,1:nStates);
% Add the result to the accumulating variable, aState.pS += xi
aState.pS(:,1:nStates) = aState.pS(:,1:nStates) + xi;
% For finite duration HMM
if(finiteDuration(mc))
aState.pS(:,nStates+1) = aState.pS(:,nStates+1) + alfaHat(:,T).*betaHat(:,T)*c(T);
end
% Calculate log probability for observing the current sequence given the
% current hmm
lP = sum(log(c));%scalar sum
| github | txzhao/QbH-Demo-master | adaptSet.m | .m | QbH-Demo-master/scripts/@MarkovChain/adaptSet.m | 1,325 | utf_8 | cfde9d8065852cfca77a09199016915d |
%mc=adaptSet(mc,aState)
%method to finally adapt a single MarkovChain object
%using accumulated statistics from observed training data sets.
%
%Input:
%mc= single MarkovChain object
%aState= struct with accumulated statistics from previous calls of adaptAccum
%
%Result:
%mc= adapted version of the MarkovChain object
%
%Method:
%We have accumulated, in aState:
%pI= vector of initial state probabilities, with elements
% pI(i)= scalefactor* P[S(1)=i | all training sub-sequences]
%pS= state-pair probability matrix, with elements
% pS(i,j)=scalefactor* P[S(t)=i & S(t+1)=j | all training data]
%These data directly define the new MarkovChain, after necessary normalization.
%
%Ref: Arne Leijon (20xx) Pattern Recognition, KTH-SIP
%
%Arne Leijon 2004-11-10 tested
% 2011-08-02 keep sparsity
function mc=adaptSet(mc,aState)
if issparse(mc.InitialProb)%keep the sparsity structure
mc.InitialProb=sparse(aState.pI./sum(aState.pI));%normalised
else
mc.InitialProb=aState.pI./sum(aState.pI);%normalised
end;
if issparse(mc.TransitionProb)%keep the sparsity structure
mc.TransitionProb=sparse(aState.pS./repmat(sum(aState.pS,2),1,size(aState.pS,2)));%normalized
else
mc.TransitionProb=aState.pS./repmat(sum(aState.pS,2),1,size(aState.pS,2));%normalized
end;
end
| github | txzhao/QbH-Demo-master | logprob.m | .m | QbH-Demo-master/scripts/@MarkovChain/logprob.m | 1,280 | utf_8 | fdbdaa81b486e2b4dd249ed545208d99 |
%lP=logprob(mc, S)
%calculates log probability of complete observed state sequence
%
%Input:
%mc= the MarkovChain object(s)
%S= row vector with integer state-index sequence.
% For a finite-duration Markov chain,
% S(end) may or may not be the END state flag = nStates+1.
%
%Result:
%lP= vector with log probabilities
% length(lP)== numel(mc)
%
%Arne Leijon, 2009-07-23
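%
%Example (illustrative sketch; mc is assumed to be a MarkovChain with at
%least two states):
% S = [1 1 2 2];        % an observed state-index sequence
% lP = logprob(mc, S);  % log P[S(1)] + sum of log transition probabilities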
function lP=logprob(mc, S)
if isempty(S)
lP=[];
return;
end;
if any(S<1) || any(S ~= round(S)) %not a proper index vector (states are indexed from 1)
lP=repmat(-Inf,size(mc));
return;
end
lP=zeros(size(mc));%space
fromS=S(1:end-1);%from S(t)
toS=S(2:end);%to S(t+1)
for i=1:numel(mc)
if S(1)>length(mc(i).InitialProb)
lP(i)=-Inf;%non-existing initial state index
else
lP(i)=log(mc(i).InitialProb(S(1)));%Initial state
end;
if ~isempty(fromS)
if max(fromS)> mc(i).nStates || S(end)>size(mc(i).TransitionProb,2)
lP(i)=-Inf;%encountered a non-existing state index
else
iTrans=sub2ind(size(mc(i).TransitionProb),fromS,toS);
lP(i)=lP(i)+sum(log(mc(i).TransitionProb(iTrans)));
end;
end;
end
end
| github | txzhao/QbH-Demo-master | adaptStart.m | .m | QbH-Demo-master/scripts/@GaussD/adaptStart.m | 888 | utf_8 | ba37a1185528355dc6b4272ea2c74620 |
%aState=adaptStart(pD)
%starts GaussD object adaptation to observed data,
%by initializing accumulator data structure for sufficient statistics,
%to be used in subsequent calls to method adaptAccum and adaptSet.
%
%Input:
%pD= GaussD object or array of GaussD objects
%
%Result:
%aState= data structure to be used by methods adaptAccum and adaptSet.
%
%Theory is discussed in method adaptSet
%
%Arne Leijon 2005-11-16 tested
function aState=adaptStart(pD)
nObj=numel(pD);
aState=repmat(struct('sumDev',0,'sumSqDev',0,'sumWeight',0),nObj,1);%init storage
for i=1:nObj%one storage set for each object in the array
dSize=length(pD(i).Mean);
aState(i).sumDev=zeros(dSize,1);%weighted sum of observed deviations from OLD mean
aState(i).sumSqDev=zeros(dSize,dSize);%matrix with sum of square deviations from OLD mean
aState(i).sumWeight=0;%sum of weight factors
end;
| github | txzhao/QbH-Demo-master | adaptAccum.m | .m | QbH-Demo-master/scripts/@GaussD/adaptAccum.m | 2,202 | utf_8 | 4c1025eee7755cca5152ee32a6d44753 |
%aState=adaptAccum(pD,aState,obsData,obsWeight)
%method to adapt GaussD object to observed data,
%by accumulating sufficient statistics from the data,
%for later updating of the object by method adaptSet.
%
%Usage:
%First obtain the storage data structure aState from method adaptStart.
%Then, adaptAccum can be called several times with different observation data subsets.
%The aState data structure must be saved externally between calls to adaptAccum.
%Finally, the GaussD object is updated by method adaptSet.
%
%Input:
%pD= a GaussD object or multidim array of GaussD objects
%obsData= matrix with observed column vectors,
% each assumed to be drawn from one of the GaussD objects
%obsWeight= (optional) matrix with weight factors, one column for each vector in obsData,
% and one row for each object in the GaussD array.
% size(obsWeight)== [length(pD(:)), size(obsData,2)]
% obsWeight must have consistent values for all calls.
% No obsWeight given <=> all weights=1.
%aState= accumulated adaptation state preserved from previous calls,
% first obtained from method adaptStart
%
%Result:
%aState= accumulated adaptation data, incl. this observation data set.
%
%Arne Leijon 2005-11-16 NOT tested for full covariance matrix
function aState=adaptAccum(pD,aState,obsData,obsWeight)
[dSize,nData]=size(obsData);%dataSize, number of given vector samples
nObj=numel(pD);%n of GaussD objects in array
if nargin<4%no external obsWeight given
if nObj==1
obsWeight=ones(nObj,nData);%use all data with equal weight
else
obsWeight=prob(pD,obsData);%assign weight to each GaussD object
obsWeight=obsWeight./repmat(sum(obsWeight),nObj,1);%normalize
%obsWeight(i,t)= P(objS(t)= i | X(t))
end;
end;
for i=1:nObj%for all GaussD objects
Dev=obsData-repmat(pD(i).Mean,1,nData);%deviations from old mean
wDev=Dev.*repmat(obsWeight(i,:),dSize,1);%weighted -"-
aState(i).sumDev=aState(i).sumDev+sum(wDev,2);%for later mean estimation
aState(i).sumSqDev=aState(i).sumSqDev+Dev*wDev';%for later covar. estim.
aState(i).sumWeight=aState(i).sumWeight+sum(obsWeight(i,:));
end;
| github | txzhao/QbH-Demo-master | init.m | .m | QbH-Demo-master/scripts/@GaussD/init.m | 4,062 | utf_8 | 6ac396786af0110b43e3a2b56b07cd24 |
%[pD,iOK]=init(pD,x);
%initializes GaussD object or array of GaussD objects
%to conform with a set of given observed vectors.
%The agreement is very crude, and should be refined by training,
%using methods adaptStart, adaptAccum, and adaptSet.
%
%*****REQUIRES: VQ class ********
%
%Input:
%pD= a single GaussD object or multidim array of GaussD objects
%x= matrix with observed vectors stored columnwise
%
%Result:
%pD= initialized GaussD object or multidim GaussD array
% size(pD)== same as input
%iOK= logical array with element== 1, where corresponding
% pD element was properly initialized,
% and ==0, if there was not enough data for good initialization.
%
%Method:
%For a single GaussD object: set Mean and Variance, based on all observations,
% Previous AllowCorr property is preserved,
% but only diagonal covariance values are initialized,
% because there may not be enough data to set complete cov matrix.
%For a GaussD array: each element initialized to observation sub-set.
% Use crude VQ initialization:
% set Mean vectors at VQ cluster centers,
% and set Variance to variance within each VQ cell.
%
%Arne Leijon 2006-04-21 tested
% 2008-10-09, var() change for compatibility with Matlab v.6.5
% 2009-07-20, changed for Matlab R2008a class definitions
% 2011-05-26, minor cleanup
function [pD,iOK]=init(pD,x)
nObj=numel(pD);
iOK=zeros(size(pD));%space for success indicators
%dSize=size(x,1);
if nObj>size(x,2)
error('Too few data vectors');end;
if size(x,2)==1%var cannot be estimated
warning('GaussD:Init:TooFewData','Only one data point: default variance =1');
varX=ones(size(x));%default variance =1
iOK(1)=0;%variance incorrect
else
% varX=var(x,1,2);%ML (biased) estim var of all observed data
varX=var(x,1,2);%ML (biased) estim var of all observed data
iOK(1)=1;%OK
end;
if nObj==1
pD.Mean=mean(x,2);
if allowsCorr(pD)%set Covariance, to still allow correlations
pD.Covariance=diag(varX);
else
pD.Variance=varX;%set variance
end;
else
% SD=SD./nObj;%assuming evenly spread, maybe this is too small???
% m=selectRandom(x,nObj);
%This method often worked OK, but sometimes very odd start
%
%Use VQ methods instead,
%although first test with VQ method locked on a local maximum.
xVQ=create(VQ,x,nObj);%make VQ
xCenters=xVQ.CodeBook;%VQ centroids
xCode=encode(xVQ,x);%nearest codebook index for each vector
for i=1:nObj
nData=sum(xCode==i);%usable observations for this cluster
if nData<=1%actually ==1: VQ cannot give zero data points
warning('GaussD:Init:TooFewData',['Too few data for GaussD no.',num2str(i)]);
iOK(i)=0;%variance incorrect
%simply use previous (first=total) varX value again
else
% varX=var(x(:,xCode==i),1,2);%variance of VQ sub-cluster
varX=var(x(:,xCode==i),1,2);%variance of VQ sub-cluster
iOK(i)=1;%OK
end;
%use only diag variances, because there may not be enough data to
%estimate all correlations, and cov matrix might become singular
pD(i).Mean=xCenters(:,i);
if allowsCorr(pD(i))
pD(i).Covariance=diag(varX);%full cov
else
pD(i).Variance=varX;%diag cov
end;
end;
%Another attempt to use random initialization and rely on later EM training.
%This is much slower than VQ init,
%but sometimes found correct global maximum, when the VQ did not!
%Arne Leijon, 2004-11-18
%
%For this method, we just throw out some random points near global mean point:
% SD=SD./2;%???????
% xCenter=mean(x,2);
% xCenters=rand(GaussD(xCenter,SD),nObj);%random points near center
% for i=1:nObj
% pD(i)=set(pD(i),'Mean',xCenters(:,i),'StDev',SD);
% end;
end;
% function r=selectRandom(x,n);
% nx=size(x,2);
% nr=round([1:n]*nx./n);
% r=x(:,nr);
| github | txzhao/QbH-Demo-master | adaptSet.m | .m | QbH-Demo-master/scripts/@GaussD/adaptSet.m | 3,179 | utf_8 | c0326b7bc4a99562959e208f63675714 |
%pD=adaptSet(pD,aState)
%method to finally adapt a GaussD object
%using accumulated statistics from observed data.
%
%Input:
%pD= GaussD object or array of GaussD objects
%aState= accumulated statistics from previous calls of adaptAccum
%
%Result:
%pD= adapted version of the GaussD object
%
%Theory and Method:
%From observed sample data X(n), n=1....N, we are using the deviations
% Z(n)=X(n)-myOld, where myOld is the old mean.
%Obviously, E[Z(n)]= E[X(n)]-myOld, and cov[Z(n)]=cov[X(n)]
%
%We have accumulated weighted deviations:
%sumDev= sum[w(n).*Z(n)]
%sumSqDev= sum[w(n).* (Z(n)*Z(n)')]
%sumWeight= sum[w(n)]
%
%Here, weight factor w(n) is the probability that the observation X(n)
% was actually drawn from this GaussD.
%
%To obtain a new unbiased estimate of the GaussD true MEAN, we see that
% E[sumDev]=sum[w(n)] .*E[Z(n)].
%Thus, an unbiased estimate of the MEAN is
%newMean= myOld + sumDev/sumWeight;
%
%To obtain a new estimate of the GaussD covariance,
% we first calculate sq deviations from the new sample MEAN, as:
% Y(n)= Z(n) - sumDev/sumWeight
% S2= sum[w(n).* (Y(n)*Y(n)')]=
% = sumSqDev - sumDev*sumDev'./sumWeight
%The ML estimate of the variance is then
% covEstim= S2./sumWeight;
% (If all w(n)=1, this is the usual newVarML=S2/N)
%
%However, this UNDERESTIMATES the GaussD true VARIANCE, as
% E[S2]= var[Z(n)] .* (sumWeight - sumSqWeight/sumWeight)
%Therefore, an unbiased variance estimate would be, instead,
%newVar= S2/(sumWeight - sumSqWeight/sumWeight)
% (If all w(n)=1, this is the usual estimate newVar= S2/(N-1) )
%However, the purpose of the GaussD training is usually not to estimate
%the covariance parameter in isolation,
%but rather to make the total density function fit the training data.
%For this purpose, the ML mean and covariance are indeed optimal.
%
%Arne Leijon 2005-11-16 tested
%Arne Leijon 2010-07-30, corrected for degenerate case with only one data point
% In this case, we just set StDev=Inf,
% to effectively kill the affected GaussD object.
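%
%Numerical illustration of the update rule above (made-up numbers, scalar case):
% with myOld = 0, observations X = [1 3] and weights w = [1 1]:
%   sumDev = 4, sumSqDev = 10, sumWeight = 2
%   newMean = 0 + 4/2 = 2,  S2 = 10 - 4^2/2 = 2,  covEstim = 2/2 = 1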
function pD=adaptSet(pD,aState)
for i=1:numel(pD)%for all GaussD objects
if aState(i).sumWeight>max(eps(pD(i).Mean))%We had at least some data
pD(i).Mean=pD(i).Mean+ aState(i).sumDev/aState(i).sumWeight;%new mean value
S2=aState(i).sumSqDev-(aState(i).sumDev*aState(i).sumDev')./aState(i).sumWeight;%sqsum around new mean
covEstim=S2./aState(i).sumWeight;%ML covariance estimate
if any(diag(covEstim)<eps(pD(i).Mean))%near zero, or negative
warning('GaussD:ZeroVar',['Not enough data for GaussD #',num2str(i),'. StDev forced to Inf']);
covEstim=diag(repmat(Inf,size(pD(i).Mean)));
%Force ZERO probability for this GaussD, for any data
end;
if allowsCorr(pD(i))
pD(i)=setCov(pD(i),covEstim);%adapt full covariance matrix
else
pD(i).StDev=sqrt(diag(covEstim));%keep it diagonal
end;
end;%else no observations here, do nothing
end;
| github | txzhao/QbH-Demo-master | logprob.m | .m | QbH-Demo-master/scripts/@GaussD/logprob.m | 1,536 | utf_8 | b605f3e2165466af2693cc2cbfb225b3 |
%logP=logprob(pD,x) gives log(probability densities) for given vectors
%assumed to be drawn from a given GaussD object
%
%Input:
%pD= GaussD object or array of GaussD objects
%x= matrix with given vectors stored columnwise
%
%Result:
%logP= log(probability densities for x)
% size(logP)== [numel(pD),size(x,2)]
%exp(logP)= true probability densities for x
%
%The log representation is useful because the probability densities may be
%extremely small for random vectors with many elements
%
%Arne Leijon 2005-11-16 tested
function logP=logprob(pD,x)
%pDsize=size(pD);%size of GaussD array
nObj=numel(pD);%number of GaussD objects
nx=size(x,2);%number of observed vectors
logP=zeros(nObj,nx);%space for result
for i=1:nObj%for all GaussD objects in pD
dSize=length(pD(i).Mean);%GaussD random vector size
if dSize==size(x,1)%observed vector size OK
z=pD(i).CovEigen'*(x-repmat(pD(i).Mean,1,nx));%transform to uncorrelated zero-mean elements
z=z./repmat(pD(i).StDev,1,nx);%and normalized StDev
logP(i,:)=-sum(z.*z,1)/2;%normalized Gaussian exponent
logP(i,:)=logP(i,:)-sum(log(pD(i).StDev))-dSize*log(2*pi)/2;%include log(determinant) scale factor
else
warning('GaussD:WrongDataSize','Incompatible data size');
logP(i,:)=repmat(-Inf,1,nx);%zero probability
end;
end;
%*** reshape removed 2011-05-26, for compatibility. Arne Leijon
% if nObj>1
% logP=squeeze(reshape(logP,[pDsize,nx]));%restore array format
% end;
| github | txzhao/QbH-Demo-master | rand.m | .m | QbH-Demo-master/scripts/@GaussD/rand.m | 887 | utf_8 | 0040c8e3bac1a02ac5ab547250da5bf4 |
%R=rand(pD,nData) returns random vectors drawn from a single GaussD object.
%
%Input:
%pD= the GaussD object
%nData= scalar defining number of wanted random data vectors
%
%Result:
%R= matrix with data vectors drawn from object pD
% size(R)== [length(pD.Mean), nData]
%
%Arne Leijon 2005-11-16 tested
% 2006-04-18 sub-state output added for compatibility
% 2009-07-20 sub-state output removed again
%function [R,U]=rand(pD,nData)%OLD version
function R=rand(pD,nData)
if length(pD)>1
error('This method works only for a single GaussD object');
end;
R=randn(length(pD.Mean),nData);%normalized independent Gaussian random variables
% if nargout>1
% U=zeros(1,nData);end;%GaussD has no sub-states
R=diag(pD.StDev)*R;%scaled to correct standard deviations
R=pD.CovEigen*R;%rotate to proper correlations
R=R+repmat(pD.Mean,1,nData);%translate to desired mean
| github | MarcoSaerens/networkDLA_matlab-master | Alg_10_07_ForcedirectedLayoutGraph.m | .m | networkDLA_matlab-master/ToReview/Chapter10_GraphEmbedding/Alg_10_07_ForcedirectedLayoutGraph.m | 4,271 | utf_8 | ba5ca9c2bc40a1f85943daedf9870531 |
function X = Alg_10_07_ForcedirectedLayoutGraph(W, w, X_0, a, r)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Authors: Gilissen & Edouard Lardinois, revised by Guillaume Guex (2017).
%
% Source: Francois Fouss, Marco Saerens and Masashi Shimbo (2016).
% "Algorithms and models for network data and link analysis".
% Cambridge University Press.
%
% Description: Computes (a, r) force-directed layout for a graph
%
% INPUT:
% -------
% - W: an n x n matrix containing the edge weights of a weighted,
%      undirected graph G.
% - w: an n x 1 vector containing non-negative node weights
% - X_0: n x p initial position for the n nodes
% - a: the attraction power parameter
% - r: the repulsion power parameter
%
% OUTPUT:
% -------
% - X : the (n x p) data matrix containing the coordinates of the nodes
% on its rows.
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
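% Example (illustrative sketch; the graph, the node weights and the power
% parameters below are placeholders chosen only for demonstration):
%   W = [0 1 1; 1 0 1; 1 1 0];   % small complete graph
%   w = sum(W, 2);               % node weights, e.g. (weighted) degrees
%   X_0 = rand(3, 2);            % random 2-D initial layout
%   X = Alg_10_07_ForcedirectedLayoutGraph(W, w, X_0, 2, -1);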
%% Checks of arguments
[n, ~] = size(W);
% Check if W is a symmetric matrix
if ~isequal(W, W')
error('The edge weights matrix is not symmetric.')
end
% Check if W is non-negative
if min(min(W)) < 0
    error('The edge weights matrix must be non-negative.')
end
% Check if w is non-negative
if min(min(w)) < 0
    error('The node weights vector must be non-negative.')
end
% Check if w correspond to W
n_w = length(w);
if n ~= n_w
error('The node weight vector length must correspond to the edge weights matrix')
end
% Check the dimension of Initial matrix
[n_0, p] = size(X_0);
if n ~= n_0
error('Input X_0 must have a number of rows equal to the size of W')
end
%% Algorithm
% Initialize the embedding
X = X_0;
energy_prev = realmax;
energy = 1e15;
B = eye(n*p);
gradient_unfold_prev = false;
while abs(energy_prev - energy) > 1e-6
% Store previous energy value
energy_prev = energy;
% Compute the gradient
Gradient_X = zeros(n, p);
random_index = randperm(n);
for k = random_index
for j = 1:n
if j ~= k
xk_xj = X(k, :) - X(j, :);
norm_xk_xj = (norm(xk_xj) + 1e-20);
Gradient_X(k, :) = Gradient_X(k, :) + W(j, k)*(norm_xk_xj^(a-1) - w(j)*w(k)*norm_xk_xj^(r-1))*xk_xj;
end
end
end
%%% Perform a min search of a BFGS quasi-Newton step on potential energy
% Unfold gradient
gradient_unfold = Gradient_X(:);
% Compute B (if this is not the first step)
if gradient_unfold_prev ~= false
y = gradient_unfold - gradient_unfold_prev;
B = B + y*y' / (y'*dir_unfold) - (B*dir_unfold)*(dir_unfold'*B) / (dir_unfold'*B*dir_unfold);
end
% Obtain direction
dir_unfold = linsolve(B, -gradient_unfold);
% Fold
Dir_X = reshape(dir_unfold, [n, p]);
% Find stepsize
[beta_opt, ~] = fminsearch(@(beta) potential_energy(W, w, X, Dir_X, a, r, beta), 1e-2);
% Make step
X = X + beta_opt*Dir_X;
% Store new energy
energy = potential_energy(W, w, X, Dir_X, a, r, 0);
% Keep the current gradient so the next iteration can update B (quasi-Newton step)
gradient_unfold_prev = gradient_unfold;
end
end
function e = potential_energy(W, w, X, Dir_X, a, r, beta)
[n, ~] = size(X);
X_beta = X + beta*Dir_X;
e = 0;
for i = 1:(n-1)
for j = (i+1):n
norm_xi_xj = norm(X_beta(i, :) - X_beta(j, :)) + 1e-20;
if a == -1
e = e + W(i, j) * log(norm_xi_xj);
else
e = e + W(i, j) * norm_xi_xj^(a + 1) / (a + 1);
end
if r == -1
e = e - w(i)*w(j) * log(norm_xi_xj);
else
e = e - w(i)*w(j) * norm_xi_xj^(r + 1) / (r + 1);
end
end
end
end
| github | MarcoSaerens/networkDLA_matlab-master | Alg_10_06_SpringNetworkLayout.m | .m | networkDLA_matlab-master/ToReview/Chapter10_GraphEmbedding/Alg_10_06_SpringNetworkLayout.m | 3,796 | utf_8 | a24847cc20a410bd55af38f55fa8091d |
function X = Alg_10_06_SpringNetworkLayout(D, X_0, l_0, k)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Authors: Gilissen & Edouard Lardinois, revised by Guillaume Guex (2017).
%
% Source: Francois Fouss, Marco Saerens and Masashi Shimbo (2016).
% "Algorithms and models for network data and link analysis".
% Cambridge University Press.
%
% Description: Computes the spring network layout of a graph.
%
% INPUT:
% -------
% - D: n x n symmetric all-pairs shortest-path distances matrix
% associated with graph G, providing distances Delta_ij
% for each pair of nodes i, j
% - X_0: n x p initial position for the n nodes
% - l_0: drawing length constant
% - k: global strength constant
%
% OUTPUT:
% -------
% - X : the (n x p) data matrix containing the coordinates of the nodes
% on its rows.
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
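% Example (illustrative sketch; the distance matrix, drawing length and
% strength constant are placeholders):
%   D = [0 1 2; 1 0 1; 2 1 0];   % shortest-path distances of a 3-node path graph
%   X_0 = rand(3, 2);            % random 2-D initial positions
%   X = Alg_10_06_SpringNetworkLayout(D, X_0, 1, 1);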
%% Checks of arguments
[n, ~] = size(D);
% Check if D is a symmetric matrix
if ~isequal(D, D')
    error('The distance matrix is not symmetric.')
end
% Check the dimension of Initial matrix
[n_0, p] = size(X_0);
if n ~= n_0
error('Input X_0 must have a number of rows equal to the size of D')
end
% Check if display length is positive
if l_0 <= 0
error('Display length l_0 must be more than zero')
end
% Check if global strength constant is positive
if k <= 0
error('Global strength constant k must be more than zero')
end
%% Algorithm
% Set equilibrium length of each spring
L_0 = l_0 * D / max(max(D));
% Set stiffness of each spring
K_stiff = k ./ D.^2 ;
% Initialize the embedding
X = X_0;
energy_prev = realmax;
energy = 1e15;
B = eye(n*p);
gradient_unfold_prev = false;
while abs(energy_prev - energy) > 1e-6
% Store previous energy value
energy_prev = energy;
% Compute the gradient
Gradient_X = zeros(n, p);
for k = 1:n
for j = 1:n
if j ~= k
xk_xj = X(k, :) - X(j, :);
Gradient_X(k, :) = Gradient_X(k, :) + K_stiff(k,j) * (1 - L_0(k,j)/(norm(xk_xj) + 1e-20)) * xk_xj;
end
end
end
%%% Perform a min search of a BFGS quasi-Newton step on springs energy
% Unfold gradient
gradient_unfold = Gradient_X(:);
% Compute B (if this is not the first step)
if gradient_unfold_prev ~= false
y = gradient_unfold - gradient_unfold_prev;
B = B + y*y' / (y'*dir_unfold) - (B*dir_unfold)*(dir_unfold'*B) / (dir_unfold'*B*dir_unfold);
end
% Obtain direction
dir_unfold = linsolve(B, -gradient_unfold);
% Fold
Dir_X = reshape(dir_unfold, [n, p]);
% Find stepsize
[beta_opt, ~] = fminsearch(@(beta) energy_spring(K_stiff, L_0, X, Dir_X, beta), 1e-3);
% Make step
X = X + beta_opt*Dir_X;
% Store new energy
energy = energy_spring(K_stiff, L_0, X, Gradient_X, 0);
% Keep the current gradient so the next iteration can update B (quasi-Newton step)
gradient_unfold_prev = gradient_unfold;
end
end
%% Energy spring function
function e = energy_spring(K_stiff, L_0, X, Dir_X, beta)
[n, ~] = size(X);
X_beta = X + beta*Dir_X;
e = 0;
for i = 1:(n-1)
for j = (i+1):n
e = e + K_stiff(i, j)/2 * ( norm(X_beta(i, :)-X_beta(j, :)) - L_0(i, j) )^2;
end
end
end
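%% Usage sketch (editor's addition, not part of the original file): a minimal,
% hedged example of calling Alg_10_06_SpringNetworkLayout on a small graph.
% The toy adjacency matrix, the use of MATLAB's graph/distances (R2015b or
% later) to build the shortest-path matrix D, and the constants l_0 = 1 and
% k = 1 are illustrative assumptions, not values prescribed by the source.
%
% A_toy = [0 1 1 0; 1 0 1 0; 1 1 0 1; 0 0 1 0]; % small undirected graph
% D_toy = distances(graph(A_toy)); % all-pairs shortest-path distances
% X_toy = Alg_10_06_SpringNetworkLayout(D_toy, rand(4, 2), 1, 1);
% scatter(X_toy(:,1), X_toy(:,2), 'filled'); % inspect the 2-D layout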
|
github
|
MarcoSaerens/networkDLA_matlab-master
|
Alg_10_05_LatentSocialMap.m
|
.m
|
networkDLA_matlab-master/ToReview/Chapter10_GraphEmbedding/Alg_10_05_LatentSocialMap.m
| 4,891 |
utf_8
|
076cc66bd262c1997e3ac511abf9853e
|
function X = Alg_10_05_LatentSocialMap(A, p, X_0)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Authors: Gilissen & Edouard Lardinois, revised by Guillaume Guex (2017).
%
% Source: Francois Fouss, Marco Saerens and Masashi Shimbo (2016).
% "Algorithms and models for network data and link analysis".
% Cambridge University Press.
%
% Description: Computes the latent social embedding of a graph.
%
% INPUT:
% -------
% - A : a (n x n) unweighted adjacency matrix associated with an undirected
% graph G.
% - p : an integer containing the number of dimensions kept for
% the embedding.
% - X_0 : a (n x p_0) initial position for the n nodes.
%
% OUTPUT:
% -------
% - X : the (n x p) data matrix containing the coordinates of the nodes
% for the embedding.
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Checks of arguments
% Check if squared matrix
[n, m] = size(A);
if n ~= m
error('The adjacency matrix must be a square matrix')
end
% Check if symmetric matrix / graph is undirected
if ~isequal(A, A')
error('The adjacency matrix is not symmetric.')
end
% Check if binary matrix
if ~isequal(A, (A > 0))
error('The adjacency matrix is not binary.')
end
% Check the dimension of Initial matrix
[n_0, p_0] = size(X_0);
if n_0 ~= n
error('The initial position matrix must have a number of rows equal to the size of the adjacency matrix')
end
% Check if number of dimensions is pertinent
if p > p_0
error('The number of kept dimensions p must be less than or equal to the number of columns of X_0');
end
%% Algorithm
% Setting initial parameters
alpha = 1;
X = X_0(:,1:p);
D = zeros(n);
Y_hat = zeros(n);
B = eye(n*p + 1);
gradient_unfold_prev = []; % no gradient from a previous iteration yet
minus_l_prev = realmax;
minus_l = 1e15;
% Main loop
while abs(minus_l_prev - minus_l) > 1e-4
% Store previous minus likelihood
minus_l_prev = minus_l;
for i = 1:n
for j = 1:n
% Compute the distances in the latent space
D(i, j) = sqrt( (X(i, :) - X(j, :)) * (X(i, :) - X(j, :))' );
% Compute the predicted probability of each link
Y_hat(i, j) = 1 / ( 1 + exp( -alpha * (1 - D(i, j)) ) );
end
end
% Compute the gradient with respect to alpha
G_alpha_mat = (D - 1) .* (Y_hat - A);
G_alpha_mat(1:n+1:end) = 0;
gradient_alpha = 1/2 * sum(sum(G_alpha_mat));
% Compute the gradient with respect to position vector x_k
Gradient_X = zeros(n, p);
for k = 1:n
for j = 1:n
if j ~= k
xk_xj = (X(k, :) - X(j, :));
Gradient_X(k, :) = Gradient_X(k, :) + alpha * (Y_hat(k, j) - A(k, j)) * xk_xj / (norm(xk_xj) + 1e-20) ;
end
end
end
%%% Perform a min search of a BFGS quasi-Newton step on minus likelihood
% Unfold gradient
gradient_unfold = [gradient_alpha; Gradient_X(:)];
% Update B with the BFGS formula (skipped on the first iteration)
if ~isempty(gradient_unfold_prev)
y = gradient_unfold - gradient_unfold_prev;
B = B + y*y' / (y'*dir_unfold) - (B*dir_unfold)*(dir_unfold'*B) / (dir_unfold'*B*dir_unfold);
end
% Obtain direction
dir_unfold = linsolve(B, -gradient_unfold);
% Fold
dir_alpha = dir_unfold(1);
Dir_X = reshape(dir_unfold(2:end), [n, p]);
% Find stepsize
[beta_opt, ~] = fminsearch(@(beta) minus_likelihood(A, alpha, X, dir_alpha, Dir_X, beta), 0.5);
% Make step
X = X + beta_opt*Dir_X;
alpha = alpha + beta_opt*dir_alpha;
% Store new minus likelihood
minus_l = minus_likelihood(A, alpha, X, dir_alpha, Dir_X, 0);
% Keep the current gradient for the BFGS update of the next iteration
gradient_unfold_prev = gradient_unfold;
end
end
%% Minus likelihood function
function minus_l = minus_likelihood(A, alpha, X, dir_alpha, Dir_X, beta)
n = size(X, 1);
X_beta = X + beta*Dir_X;
alpha_beta = alpha + beta*dir_alpha;
D = zeros(n);
for i=1:n
for j=1:n
D(i,j) = sqrt( (X_beta(i,:)-X_beta(j,:))*(X_beta(i,:)-X_beta(j,:))' );
end
end
components_of_likelihood = (alpha_beta * A .* (1 - D) - log(1 + exp(alpha_beta * (1 - D))) );
components_of_likelihood(1:n+1:end) = 0;
minus_l = - 1/2 * sum(sum(components_of_likelihood));
end
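%% Usage sketch (editor's addition, not part of the original file): a hedged
% example of embedding a small binary undirected graph in two dimensions.
% The toy adjacency matrix and the random 3-D initial positions are
% illustrative assumptions; any (n x p_0) initial configuration with
% p_0 >= p could be used instead.
%
% A_toy = [0 1 1 0 0; 1 0 1 0 0; 1 1 0 1 0; 0 0 1 0 1; 0 0 0 1 0];
% X_emb = Alg_10_05_LatentSocialMap(A_toy, 2, randn(5, 3));
% scatter(X_emb(:,1), X_emb(:,2), 'filled'); % latent 2-D positions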
|
github
|
MarcoSaerens/networkDLA_matlab-master
|
Alg_08_07_LouvainMethod.m
|
.m
|
networkDLA_matlab-master/ToReview/Chapter08_DenseRegions/Alg_08_07_LouvainMethod.m
| 7,334 |
utf_8
|
b8688a9c52465d0f82da551516d8f79e
|
function U = Alg_08_07_LouvainMethod(A,mix)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Authors: Ilkka Kivimaki (2015),revised by Sylvain Courtain (2017).
% Direct source: Francois Fouss, Marco Saerens and Masashi Shimbo (2016).
% "Algorithms and models for network data and link analysis".
% Cambridge University Press.
%
% Performs the Louvain Method
%
% INPUT:
% ------
% - A weighted undirected graph containing n nodes
% - A, the n x n adjacency matrix of the graph
% - mix, a flag; if true, the nodes are processed in a randomized order
%
% OUTPUT:
% -------
% - U, the n x m membership matrix.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% CHECK ERRORS:
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if nargin == 0
error('The algorithm needs an adjacency matrix as input!');
end
[N,n] = size(A);
if (N ~= n)
error('Non-square matrix A!');
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Step 1 : Local optimization
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Copy the matrix A before mix
Adj = A;
% Mix the nodes in random order:
if mix
N = size(A, 1);
randidx = randperm(N);
A = A(randidx, :);
A = A(:, randidx);
else
randidx = 1:N;
end
level=1;
% Initialize the binary membership matrix
N = size(A, 1); % N = # of nodes
assigns = 1:N; % Assign each node initially to its own cluster.
U = eye(N); % Cluster membership matrix
% Compute the modularity matrix
Q = ComputeModularityMatrix(A);
i=0; % Initialize node index
conv=1; % Initialize convergence counter
while conv<N % Repeat iterations until no move of a node occurs
i = i+1; % Choose node i
clust_i = assigns(i); % Which cluster does i belong to?
U_clust_i = U(:, clust_i); % Membership vector of i's cluster
% Contribution to modularity of having i in clust_i:
Qloss = U_clust_i'*Q(:, i) + Q(i, :)*U_clust_i - 2*Q(i, i);
% Compute the set of all adjacent clusters of node i
neigh_i = (Adj(i, :) > 0); % Neighbors of i (according to A)
clust_neigh_i = unique(assigns(neigh_i)); % Cluster nhood of i
clust_neigh_i = setdiff(clust_neigh_i, clust_i); % Don't consider the cluster of i
% Combine i with all neighboring clusters and find the best:
best_Qgain = -Inf; % Initialize best modularity change
for J=clust_neigh_i
% Contribution to modularity of having i in cluster J:
Qgain = U(:,J)'*Q(:, i) + Q(i, :)*U(:, J);
% Find the best move for node i
if Qgain > best_Qgain % Check if better than earlier
best_Qgain = Qgain;
best_J = J; % Save index
end
end
% Move i to cluster best_J, if it increases modularity:
if best_Qgain > max(0, Qloss)
assigns(i) = best_J;
U(i,clust_i) = 0;
U(i,best_J) = 1;
conv = 1; % Start the countdown from the beginning
% Otherwise increase the counter:
else
conv = conv+1;
end
% After node N, go back to node 1:
if i==N
i=0;
end
end
% Reorganize clusters, rebuild U and calculate cluster sizes:
[U, assigns] = LMBOP_reorganize(assigns);
% If the indexes were mixed, then reorganize them:
if mix
origassigns = 0*assigns; % allocation
for i = 1:N
origassigns(i) = assigns(randidx==i);
end
else
origassigns = assigns; % no mixing: randidx is the identity permutation
end
fullassigns = assigns';
while true
%% Step 2 and Convergence
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Construct the new graph by merging clusters:
A_merge = U'*A*U;
%% Step 1 : Local optimization
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Initialize the binary membership matrix
M = size(A_merge, 1); % M = # of merged clusters (nodes of the new graph)
newassigns = 1:M; % Assign each node initially to its own cluster.
U = eye(M); % Cluster membership matrix
level = level + 1;
% Compute the modularity matrix
Q = ComputeModularityMatrix(A_merge);
i=0; % Initialize node index
conv=1; % Initialize convergence counter
while conv<M % Repeat iterations until no move of a node occurs
i = i+1; % Choose node i
clust_i = newassigns(i); % Which cluster does i belong to?
U_clust_i = U(:, clust_i); % Membership vector of i's cluster
% Contribution to modularity of having i in clust_i:
Qloss = U_clust_i'*Q(:, i) + Q(i, :)*U_clust_i - 2*Q(i, i);
% Compute the set of all adjacent clusters of node i
neigh_i = (A_merge(i, :) > 0); % Neighbors of i (according to A)
clust_neigh_i = unique(newassigns(neigh_i)); % Cluster nhood of i
clust_neigh_i = setdiff(clust_neigh_i, clust_i); % Don't consider the cluster of i
% Combine i with all neighboring clusters and find the best:
best_Qgain = -Inf; % Initialize best modularity change
for J=clust_neigh_i
% Contribution to modularity of having i in cluster J:
Qgain = U(:, J)'*Q(:, i) + Q(i, :)*U(:, J);
% Find the best move for node i
if Qgain > best_Qgain % Check if better than earlier
best_Qgain = Qgain;
best_J = J; % Save index
end
end
% Move i to cluster best_J, if it increases modularity:
if best_Qgain > max(0, Qloss)
newassigns(i) = best_J;
U(i, clust_i) = 0;
U(i, best_J) = 1;
conv = 1; % Start the countdown from the beginning
% Otherwise increase the counter:
else
conv = conv+1;
end
% After node N, go back to node 1:
if i==M
i=0;
end
end
[U, newassigns] = LMBOP_reorganize(newassigns);
%% Convergence Step
% Announce the clustering in terms of the original graph:
oldassigns = fullassigns;
for i = 1:max(oldassigns)
J = (oldassigns == i);
fullassigns(J) = newassigns(i);
end
% Check convergence:
if isequal(fullassigns, oldassigns)
U = LMBOP_reorganize(origassigns);
break;
end
% If not converged, continue:
% Reorganize clusters, rebuild U and compute cluster sizes:
[U, fullassigns] = LMBOP_reorganize(fullassigns);
% Reorganize according to original indices:
origassigns = fullassigns;
for i = 1:N
origassigns(i) = fullassigns(randidx==i);
end
end
function Q = ComputeModularityMatrix(A)
% Compute the modularity matrix
vol = sum(sum(A));
din = sum(A,1); % the indegree vector
dout = sum(A,2); % the outdegree vector
D = (dout * din)/vol;
Q = (A - D); % Compute the modularity matrix
function [U, newassigns] = LMBOP_reorganize(oldassigns)
N = numel(oldassigns);
U = zeros(N);
for i = 1:N,
U(:,i) = (oldassigns == i);
end;
n_of_clusters = numel(unique(oldassigns));
[clsizes, idx] = sort(sum(U), 'descend');
idx = idx(1:n_of_clusters);
U = U(:,idx);
newassigns = 0*oldassigns;
for i = 1:n_of_clusters
old_id = idx(i);
newassigns(oldassigns == old_id) = i;
end;
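%% Usage sketch (editor's addition, not part of the original file): a hedged
% example on a toy graph made of two triangles joined by a single edge; the
% graph, the mix = true setting and the conversion of the membership matrix
% U to integer labels are illustrative assumptions.
%
% A_toy = [0 1 1 0 0 0; 1 0 1 0 0 0; 1 1 0 1 0 0;
% 0 0 1 0 1 1; 0 0 0 1 0 1; 0 0 0 1 1 0];
% U_toy = Alg_08_07_LouvainMethod(A_toy, true);
% [~, labels] = max(U_toy, [], 2); % cluster index of each node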
|
github
|
MarcoSaerens/networkDLA_matlab-master
|
Alg_04_02_Brandes.m
|
.m
|
networkDLA_matlab-master/ToReview/Chapter04_CentralityMeasures/Alg_04_02_Brandes/Alg_04_02_Brandes.m
| 4,966 |
ibm852
|
acadc590c403c640520a474836f74692
|
function bet = Alg_04_02_Brandes(C)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Author: Masashi Shimbo (2018).
%
% Source: François Fouss, Marco Saerens and Masashi Shimbo (2016).
% "Algorithms and models for network data and link analysis".
% Cambridge University Press.
%
% Description: Computes the vector of Freeman's betweenness centrality scores
% using Brandes's algorithm.
%
% INPUT:
% -------
% - C: the (n x n) cost matrix of a directed graph.
% To allow a sparse matrix to represent C, components 0 in C are taken
% as Inf; that is, corresponding edges are nonexistent.
%
% OUTPUT:
% -------
% - bet: The (n x 1) vector of betweenness scores.
%
% NOTE: Requires PriorityQueue.m.
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
[n, nc] = size(C);
if (n ~= nc)
error('Cost matrix not square');
end
% Both 0 and +Inf in cost matrix C represent "no edge".
% C(C == Inf) = 0; % use 0 uniformly to represent "no edge" from now on.
bet = zeros(n, 1);
for i = 1:n
% pass 1: run Dijkstra's method with node i as the origin
[~, sigma, pred, S] = Dijkstra(C, i);
% pass 2: collect dependency scores
bet = collectDeps(bet, S, sigma, pred, i, n);
end
% N.B. According to the original definition of Freeman's betweenness,
% 'bet' must be halved for undirected graphs, but this process is omitted.
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function [dist, sigma, pred, S] = Dijkstra(C, origin)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Description: A variation of Dijkstra's shortest-path algorithm that
% computes all shortest paths from a given origin node.
%
% INPUT:
% -------
% - C : the (n x n) cost matrix of a directed graph
% : A 0 entry represents no edge exists between the corresponding nodes
% : (equivalent of +Inf)
% - origin : the initial node
%
% OUTPUT:
% -------
% - dist : (1 x n) vector of shortest distance to each node from origin
% - sigma: (1 x n) vector holding the number of shortest-distance paths
% to each node
% - pred : (n x n) binary matrix representing the shortest-path DAG
% pred(i, j) == 1 implies that node j is a predecessor of
% node i on a shortest path from the origin
% - S : list of nodes reachable from the origin, in descending order
% of the distance from the origin (i.e., furthest first, origin
% last)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
n = size(C, 1); % number of nodes
% pred(k, :) = vector indicating the optimal predecessors of node k
pred = zeros(n, n); % pred = sparse(n, n); % for huge graphs, use sparse
sigma = zeros(1, n);
sigma(origin) = 1;
% '\delta' on the book
dist = Inf(1, n);
dist(origin) = 0;
S = []; % stack of "closed" nodes
Q = PriorityQueue(n);
Q.insert(origin, 0);
while Q.size ~= 0
[j, ~] = Q.extractMin;
S = [j, S];
% C(:, j) = 0; % optional; not sure how effective this is for speed up
% "relax" edges emanating from j
succs = (C(j, :) > 0 & C(j, :) ~= Inf);
altDist = C(j, :) + dist(j) * succs;
updatedOrTied = succs & (altDist <= dist);
updated = updatedOrTied & (altDist < dist);
for k = find(updated)
sigma(k) = 0;
pred(k, :) = 0;
if dist(k) == Inf
Q.insert(k, altDist(k));
else
Q.decreaseKey(k, altDist(k));
end
dist(k) = altDist(k);
end
pred(updatedOrTied, j) = 1;
sigma = sigma + sigma(j) * updatedOrTied;
end
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function bet = collectDeps(bet, S, sigma, pred, origin, n)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Description: Collect the dependency scores of nodes, traversing the
% shortest-path DAG backward towards the origin.
%
% INPUT:
% -------
% - bet : (n x 1) vector holding the betweenness scores of nodes.
% - sigma : (1 x n) vector holding the number of shortest-distance paths
% to each node.
% - pred : (n x n) matrix holding the set of predecessors along shortest
% paths representing a shortest-path DAG from the origin.
% - origin: the initial node.
% - S : list of nodes reachable from the origin, in descending order
% of the distance from the origin (i.e., furthest node first).
%
% OUTPUT:
% -------
% - bet: updated vector of betweenness scores (n x 1).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
dep = zeros(1, n);
pred(:, origin) = 0;
for k = S
preds = find(pred(k, :));
dep(preds) = dep(preds) + (sigma(preds) / sigma(k)) * (1 + dep(k));
bet(k) = bet(k) + dep(k);
end
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
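% Usage sketch (editor's addition, not part of the original file): a hedged
% example on a tiny directed graph with unit edge costs. The cost matrix is
% illustrative; recall that both 0 and +Inf entries mean "no edge", and that
% PriorityQueue.m from the same toolbox must be on the MATLAB path.
%
% C_toy = [0 1 1 0; 0 0 1 1; 0 0 0 1; 0 0 0 0]; % directed, unit costs
% bet_toy = Alg_04_02_Brandes(C_toy); % (4 x 1) betweenness scores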
|
github
|
fangq/brain2mesh-master
|
polylinesimplify.m
|
.m
|
brain2mesh-master/polylinesimplify.m
| 1,639 |
utf_8
|
775bed53f6eef19868915b208d8f9c92
|
function [newnodes, len]=polylinesimplify(nodes, minangle)
%
% [newnodes, len]=polylinesimplify(nodes, minangle)
%
% Calculate a simplified polyline by removing nodes where two adjacent
% segment have an angle less than a specified limit
%
% author: Qianqian Fang (q.fang at neu.edu)
%
% input:
% nodes: an N x 3 array defining each vertex of the polyline in
% sequential order
% minangle:(optional) minimum segment angle in radian, if not given, use
% 0.75*pi
%
% output:
% newnodes: the updated node list; start/end will not be removed
% len: the length of each segment between the start and the end points
%
%
% -- this function is part of brain2mesh toolbox (http://mcx.space/brain2mesh)
% License: GPL v3 or later, see LICENSE.txt for details
%
if(nargin<2)
minangle=0.75*pi;
end
v=segvec(nodes(1:end-1,:), nodes(2:end,:));
ang=acos(max(min(sum(-v(1:end-1,:).*(v(2:end,:)),2),1),-1));
newnodes=nodes;
newv=v;
newang=ang;
idx=find(newang<minangle);
while(~isempty(idx))
newnodes(idx+1,:)=[];
newv(idx+1,:)=[];
newang(idx)=[];
idx=unique(idx-(0:(length(idx)-1))');
idx1=idx(idx<size(newnodes,1));
newv(idx1,:) =segvec(newnodes(idx1,:),newnodes(idx1+1,:));
idx1=idx(idx<size(newv,1));
newang(idx1) =acos(sum(-newv(idx1,:).*(newv(idx1+1,:)),2));
idx0=idx(idx>1);
newang(idx0-1)=acos(sum(-newv(idx0-1,:).*(newv(idx0,:)),2));
idx=find(newang<minangle);
end
if(nargout>1)
len=newnodes(1:end-1,:) - newnodes(2:end,:);
len=sqrt(sum(len.*len,2));
end
function v=segvec(n1, n2)
v=n2-n1;
normals=sqrt(sum(v.*v,2));
v=v./repmat(normals,1,size(v,2));
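% -- usage sketch (editor's addition, not part of the original file): a hedged,
% illustrative call. nodes whose two adjacent segments meet at an angle below
% minangle (here 0.6*pi, i.e. turns sharper than about 108 degrees) are removed
% iteratively; with this threshold only the right-angle corner node is dropped.
% the coordinates below are arbitrary test data.
%
% pts=[0 0 0; 1 0 0; 2 0 0; 2 1 0; 2 2 0]; % L-shaped N x 3 polyline
% [newpts, seglen]=polylinesimplify(pts, 0.6*pi);
% plot3(pts(:,1),pts(:,2),pts(:,3),'o-',newpts(:,1),newpts(:,2),newpts(:,3),'r*-');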
|
github
|
fangq/brain2mesh-master
|
brain1020.m
|
.m
|
brain2mesh-master/brain1020.m
| 16,256 |
utf_8
|
699e84568f37319e6530f1b69a9217cd
|
function [landmarks, curves, initpoints]=brain1020(node, face, initpoints, perc1, perc2, varargin)
%
% landmarks=brain1020(node, face)
% or
% landmarks=brain1020(node, face, [], perc1, perc2)
% landmarks=brain1020(node, face, initpoints)
% landmarks=brain1020(node, face, initpoints, perc1, perc2)
% [landmarks, curves, initpoints]=brain1020(node, face, initpoints, perc1, perc2, options)
%
% compute 10-20-like scalp landmarks with user-specified density on a head mesh
%
% author: Qianqian Fang (q.fang at neu.edu)
%
% == Input ==
% node: full head mesh node list
% face: full head mesh element list- a 3-column array defines face list
% for the exterior (scalp) surface; a 4-column array defines the
% tetrahedral mesh of the full head.
% initpoints:(optional) one can provide the 3-D coordinates of the below
% 5 landmarks: nz, iz, lpa, rpa, cz0 (cz0 is the initial guess of cz)
% initpoints can be a struct with the above landmark names
% as subfield, or a 5x3 array definining these points in the above
% mentioned order (one can use the output landmarks as initpoints)
% perc1:(optional) the percentage of geodesic distance towards the rim of
% the landmarks; this is the first number of the 10-20 or 10-10 or
% 10-5 systems, in this case, it is 10 (for 10%). default is 10.
% perc2:(optional) the percentage of geodesic distance towards the center
% of the landmarks; this is the 2nd number of the 10-20 or 10-10 or
% 10-5 systems, which are 20, 10, 5, respectively, default is 20
% options: one can add additional 'name',value pairs to the function
% call to provide additional control. Supported optional names
% include
% 'display' : [1] or 0, if set to 1, plot landmarks and curves
% 'cztol' : [1e-6], the tolerance for searching cz that bisects
% saggital and coronal reference curves
% 'maxcziter' : [10] the maximum number of iterations to update
% cz to bisect both cm and sm curves
% 'baseplane' : [1] or 0, if set to 1, create the reference
% curves along the primary control points (nz,iz,lpa,rpa)
% 'minangle' : [0] if set to a positive number, this specifies
% the minimum angle (radian) between adjacent segments in
% the reference curves to avoid sharp turns (such as the
% dips near ear canals), this parameter will be
% passed to polylinesimplify to simplify the curve first.
% Please be noted that the landmarks generated with
% simplified curves may not land exactly on the surface.
%
% == Output ==
% landmarks: a structure storing all computed landmarks. The subfields
% include two sections:
% 1) 'nz','iz','lpa','rpa','cz': individual 3D positions defining
% the 5 principle reference points: nasion (nz), inion (in),
% left-pre-auricular-point (lpa), right-pre-auricular-point
% (rpa) and vertex (cz) - cz is updated from initpoints to bisect
% the saggital and coronal ref. curves.
% 2) landmarks along specific cross-sections, each cross section
% may contain more than 1 position. The cross-sections are
% named in the below format:
% 2.1: a fieldname starting from 'c', 's' and 'a' indicates
% the cut is along coronal, saggital and axial directions,
% respectively;
% 2.2: a name starting from 'pa' indicates the cut is along the
% axial plane crossing the principle reference points
% 2.3: the following letter 'm', 'a','p' suggests the 'medial',
% 'anterior' and 'posterior', respectively
% 2.4: the last letter 'l' or 'r' suggests the 'left' and
% 'right' side, respectively
% 2.5: non-medial coronal cuts are divided into two groups, the
% anterior group (ca{l,r}) and the posterior group
% (cp{lr}), with a number indicates the node spacing
% stepping away from the medial plane.
%
% for example, landmarks.cm refers to the landmarks along the
% medial-coronal plane, anterior-to-posteior order
% similarly, landmarks.cpl_3 refers to the landmarks along the
% coronal (c) cut plane located in the posterior-left side
% of the head, with 3 saggital landmark spacing from the
% medial-coronal reference curve.
% curves: a structure storing all computed cross-section curves. The
% subfields are named similarly to landmarks, except that
% landmarks stores the 10-? points, and curves stores the
% detailed cross-sectional curves
% initpoints: a 5x3 array storing the principle reference points in the
% orders of 'nz','iz','lpa','rpa','cz'
%
%
% == Example ==
% See brain2mesh/examples/SPM_example_brain.m for an example
% https://github.com/fangq/brain2mesh/blob/master/examples/SPM_example_brain.m
%
% == Dependency ==
% This function requires a pre-installed Iso2Mesh Toolbox
% Download URL: http://github.com/fangq/iso2mesh
% Website: http://iso2mesh.sf.net
%
% == Reference ==
% If you use this function in your publication, the authors of this toolbox
% appreciate if you can cite the below paper
%
% Anh Phong Tran, Shijie Yan and Qianqian Fang, "Improving model-based
% fNIRS analysis using mesh-based anatomical and light-transport models,"
% Neurophotonics, 7(1), 015008, 2020
% URL: http://dx.doi.org/10.1117/1.NPh.7.1.015008
%
%
% -- this function is part of brain2mesh toolbox (http://mcx.space/brain2mesh)
% License: GPL v3 or later, see LICENSE.txt for details
%
if(nargin<2)
error('one must provide a head-mesh to call this function');
end
if(isempty(node) || isempty(face) || size(face,2)<=2 || size(node,2)<3)
error('input node must have 3 columns, face must have at least 3 columns');
end
if(nargin<5)
perc2=20;
if(nargin<4)
perc1=10;
if(nargin<3)
initpoints=[];
end
end
end
% parse user options
opt=varargin2struct(varargin{:});
showplot=jsonopt('display',1,opt);
baseplane=jsonopt('baseplane',1,opt);
tol=jsonopt('cztol',1e-6,opt);
dosimplify=jsonopt('minangle',0,opt);
maxcziter=jsonopt('maxcziter',10,opt);
if(nargin >=2 && ...
((isstruct(initpoints) && ~isfield(initpoints, 'iz')) ...
|| (~isstruct(initpoints) && size(initpoints,1)==3)))
if(isstruct(initpoints))
nz=initpoints.nz(:).';
lpa=initpoints.lpa(:).';
rpa=initpoints.rpa(:).';
else
nz=initpoints(1,:);
lpa=initpoints(2,:);
rpa=initpoints(3,:);
end
% This assumes nz, lpa, rpa, iz are on the same plane to find iz on the head
% surface
pa_mid = mean([lpa;rpa]);
v0 = pa_mid-nz;
[iz, e0] = ray2surf(node, face, nz, v0, '>');
% To find cz, we assume that the vector from iz nz midpoint to cz is perpendicular
% to the plane defined by nz, lpa, and rpa.
iznz_mid = (nz+iz)*0.5;
v0 = cross(nz-rpa, lpa-rpa);
[cz, e0] = ray2surf(node, face, iznz_mid, v0, '>');
if(isstruct(initpoints))
initpoints.iz=iz;
initpoints.cz=cz;
else
initpoints=[initpoints(1,:); iz; initpoints(2:3,:); cz];
end
end
% convert initpoints input to a 5x3 array
if(isstruct(initpoints))
initpoints=struct('nz', initpoints.nz(:).','iz', initpoints.iz(:).',...
'lpa',initpoints.lpa(:).','rpa',initpoints.rpa(:).',...
'cz', initpoints.cz(:).');
landmarks=initpoints;
if(exist('struct2array','file'))
initpoints=struct2array(initpoints);
initpoints=reshape(initpoints(:),3,length(initpoints(:))/3)';
else
initpoints=[initpoints.nz(:).'; initpoints.iz(:).';initpoints.lpa(:).';initpoints.rpa(:).';initpoints.cz(:).'];
end
end
% convert tetrahedral mesh into a surface mesh
if(size(face,2)>=4)
face=volface(face(:,1:4));
end
% remove nodes not located in the surface
[node,face]=removeisolatednode(node,face);
% if initpoints is not sufficient, ask user to interactively select nz, iz, lpa, rpa and cz first
if(isempty(initpoints) || size(initpoints,1)<5)
hf=figure;
plotmesh(node,face);
set(hf,'userdata',initpoints);
if(~isempty(initpoints))
hold on;
plotmesh(initpoints,'gs', 'LineWidth',4);
end
idx=size(initpoints,1)+1;
landmarkname={'Nasion','Inion','Left-pre-auricular-point','Right-pre-auricular-point','Vertex/Cz','Done'};
title(sprintf('Rotate the mesh, select data cursor, click on P%d: %s',idx, landmarkname{idx}));
rotate3d('on');
set(datacursormode(hf),'UpdateFcn',@myupdatefcn);
end
% wait until all 5 points are defined
if(exist('hf','var'))
try
while(size(get(hf,'userdata'),1)<5)
pause(0.1);
end
catch
error('user aborted');
end
datacursormode(hf,'off');
initpoints=get(hf,'userdata');
close(hf);
end
if(showplot)
disp(initpoints);
end
% save input initpoints to landmarks output, cz is not finalized
if(size(initpoints,1)>=5)
landmarks=struct('nz', initpoints(1,:),'iz', initpoints(2,:),...
'lpa',initpoints(3,:),'rpa',initpoints(4,:),...
'cz', initpoints(5,:));
end
% at this point, initpoints contains {nz, iz, lpa, rpa, cz0}
% plot the head mesh
if(showplot)
figure;
hp=plotmesh(node,face,'facealpha',0.6,'facecolor',[1 0.8 0.7]);
if(~isoctavemesh)
set(hp,'linestyle','none');
end
camlight;
lighting gouraud
hold on;
end
lastcz=[1 1 1]*inf;
cziter=0;
%% Find cz that bisects cm and sm curves within a tolerance, using UI 10-10 approach
while(norm(initpoints(5,:)-lastcz)>tol && cziter<maxcziter)
%% Step 1: nz, iz and cz0 to determine saggital reference curve
nsagg=slicesurf(node, face, initpoints([1,2,5],:));
%% Step 1.1: get cz1 as the mid-point between iz and nz
[slen, nsagg]=polylinelen(nsagg, initpoints(1,:), initpoints(2,:), initpoints(5,:));
if(dosimplify)
[nsagg, slen]=polylinesimplify(nsagg,dosimplify);
end
[idx, weight, cz]=polylineinterp(slen, sum(slen)*0.5, nsagg);
initpoints(5,:)=cz(1,:);
%% Step 1.2: lpa, rpa and cz1 to determine coronal reference curve, update cz1
curves.cm=slicesurf(node, face, initpoints([3,4,5],:));
[len, curves.cm]=polylinelen(curves.cm, initpoints(3,:), initpoints(4,:), initpoints(5,:));
if(dosimplify)
[curves.cm, len]=polylinesimplify(curves.cm,dosimplify);
end
[idx, weight, coro]=polylineinterp(len, sum(len)*0.5, curves.cm);
lastcz=initpoints(5,:);
initpoints(5,:)=coro(1,:);
cziter=cziter+1;
if(showplot)
fprintf('cz iteration %d error %e\n',cziter, norm(initpoints(5,:)-lastcz));
end
end
% set the finalized cz to output
landmarks.cz=initpoints(5,:);
if(showplot)
disp(initpoints);
end
%% Step 2: subdivide saggital (sm) and coronal (cm) ref curves
[idx, weight, coro]=polylineinterp(len, sum(len)*(perc1:perc2:(100-perc1))*0.01, curves.cm);
landmarks.cm=coro; % t7, c3, cz, c4, t8
curves.sm=slicesurf(node, face, initpoints([1,2,5],:));
[slen, curves.sm]=polylinelen(curves.sm, initpoints(1,:), initpoints(2,:), initpoints(5,:));
if(dosimplify)
[curves.sm, slen]=polylinesimplify(curves.sm,dosimplify);
end
[idx, weight, sagg]=polylineinterp(slen, sum(slen)*(perc1:perc2:(100-perc1))*0.01, curves.sm);
landmarks.sm=sagg; % fpz, fz, cz, pz, oz
%% Step 3: fpz, t7 and oz to determine left 10% axial reference curve
[landmarks.aal, curves.aal, landmarks.apl, curves.apl]=slicesurf3(node,face,landmarks.sm(1,:), landmarks.cm(1,:), landmarks.sm(end,:),perc2*2);
%% Step 4: fpz, t8 and oz to determine right 10% axial reference curve
[landmarks.aar, curves.aar, landmarks.apr, curves.apr]=slicesurf3(node,face,landmarks.sm(1,:), landmarks.cm(end,:),landmarks.sm(end,:), perc2*2);
%% show plots of the landmarks
if(showplot)
plotmesh(curves.sm,'r-','LineWidth',1);
plotmesh(curves.cm,'g-','LineWidth',1);
plotmesh(curves.aal,'k-','LineWidth',1);
plotmesh(curves.aar,'k-','LineWidth',1);
plotmesh(curves.apl,'b-','LineWidth',1);
plotmesh(curves.apr,'b-','LineWidth',1);
plotmesh(landmarks.sm,'ro','LineWidth',2);
plotmesh(landmarks.cm,'go','LineWidth',2);
plotmesh(landmarks.aal,'ko','LineWidth',2);
plotmesh(landmarks.aar,'mo','LineWidth',2);
plotmesh(landmarks.apl,'ko','LineWidth',2);
plotmesh(landmarks.apr,'mo','LineWidth',2);
end
%% Step 5: computing all anterior coronal cuts, moving away from the medial cut (cm) toward frontal
idxcz=closestnode(landmarks.sm,landmarks.cz);
skipcount=max(floor(10/perc2),1);
for i=1:size(landmarks.aal,1)-skipcount
step=(perc2*25)*0.1*(1+((perc2<20 + perc2<10) && i==size(landmarks.aal,1)-skipcount));
[landmarks.(sprintf('cal_%d',i)), leftpart, landmarks.(sprintf('car_%d',i)), rightpart]=slicesurf3(node,face,landmarks.aal(i,:), landmarks.sm(idxcz-i,:), landmarks.aar(i,:),step);
if(showplot)
plotmesh(leftpart,'k-','LineWidth',1);
plotmesh(rightpart,'k-','LineWidth',1);
plotmesh(landmarks.(sprintf('cal_%d',i)),'yo','LineWidth',2);
plotmesh(landmarks.(sprintf('car_%d',i)),'co','LineWidth',2);
end
end
%% Step 6: computing all posterior coronal cuts, moving away from the medial cut (cm) toward occipital
for i=1:size(landmarks.apl,1)-skipcount
step=(perc2*25)*0.1*(1+((perc2<20 + perc2<10) && i==size(landmarks.apl,1)-skipcount));
[landmarks.(sprintf('cpl_%d',i)), leftpart, landmarks.(sprintf('cpr_%d',i)), rightpart]=slicesurf3(node,face,landmarks.apl(i,:), landmarks.sm(idxcz+i,:), landmarks.apr(i,:),step);
if(showplot)
plotmesh(leftpart,'k-','LineWidth',1);
plotmesh(rightpart,'k-','LineWidth',1);
plotmesh(landmarks.(sprintf('cpl_%d',i)),'yo','LineWidth',2);
plotmesh(landmarks.(sprintf('cpr_%d',i)),'co','LineWidth',2);
end
end
%% Step 7: create the axial cuts across principle ref. points: left: nz, lpa, iz, right: nz, rpa, iz
if(baseplane && perc2<=10)
[landmarks.paal, curves.paal, landmarks.papl, curves.papl]=slicesurf3(node,face,landmarks.nz, landmarks.lpa, landmarks.iz, perc2*2);
[landmarks.paar, curves.paar, landmarks.papr, curves.papr]=slicesurf3(node,face,landmarks.nz, landmarks.rpa, landmarks.iz, perc2*2);
if(showplot)
plotmesh(curves.paal,'k-','LineWidth',1);
plotmesh(curves.paar,'k-','LineWidth',1);
plotmesh(curves.papl,'k-','LineWidth',1);
plotmesh(curves.papr,'k-','LineWidth',1);
plotmesh(landmarks.paal,'yo','LineWidth',2);
plotmesh(landmarks.papl,'co','LineWidth',2);
plotmesh(landmarks.paar,'yo','LineWidth',2);
plotmesh(landmarks.papr,'co','LineWidth',2);
end
end
%%-------------------------------------------------------------------------------------
% helper functions
%--------------------------------------------------------------------------------------
% the respond function when a data-cursor tip to popup
function txt=myupdatefcn(empt,event_obj)
pt=get(gcf,'userdata');
pos = get(event_obj,'Position');
%idx= get(event_obj,'DataIndex');
txt = {['x: ',num2str(pos(1))],...
['y: ',num2str(pos(2))],['z: ',num2str(pos(3))]};
if(~isempty(pt) && ismember(pos,pt,'rows'))
return;
end
targetup=get(get(event_obj,'Target'),'parent');
idx=size(pt,1)+2;
landmarkname={'Nasion','Inion','Left-pre-auricular-point','Right-pre-auricular-point','Vertex/Cz','Done'};
title(sprintf('Rotate the mesh, select data cursor, click on P%d: %s',idx, landmarkname{idx}));
set(targetup,'userdata',struct('pos',pos));
pt=[pt;pos];
if(size(pt,1)<6)
set(gcf,'userdata',pt);
end
hpt=findobj(gcf,'type','line');
if(~isempty(hpt))
set(hpt,'xdata',pt(:,1),'ydata',pt(:,2),'zdata',pt(:,3));
else
hold on;
plotmesh([pt;pos],'gs', 'LineWidth',4);
end
disp(['Adding landmark ' landmarkname{idx-1} ':' txt]);
datacursormode(gcf,'off');
rotate3d('on');
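%% usage sketch (editor's addition, not part of the original file): a hedged
% outline of a typical call. the mesh variables are placeholders: node/face
% must come from an actual scalp surface (e.g. meshed with the iso2mesh
% toolbox), and without an initpoints argument the function asks the user to
% pick the five reference points interactively before computing the landmarks.
%
% % [node, face] = ... % load or build a closed head-surface mesh first
% % [landmarks, curves, pts] = brain1020(node, face, [], 10, 10, 'display', 1);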
|
github
|
fangq/brain2mesh-master
|
intriangulation.m
|
.m
|
brain2mesh-master/intriangulation.m
| 19,161 |
utf_8
|
91270a58325ab59566ceae6f98ae81f1
|
function in = intriangulation(vertices,faces,testp,heavytest)
% intriangulation: Test whether points in 3d are inside or outside a (closed) triangulation
% usage: in = intriangulation(vertices,faces,testp,heavytest)
%
% arguments: (input)
% vertices - points in 3d as matrix with three columns
%
% faces - description of triangles as matrix with three columns.
% Each row contains three indices into the matrix of vertices
% which gives the three cornerpoints of the triangle.
%
% testp - points in 3d as matrix with three columns
%
% heavytest - int n >= 0. Perform n additional randomized rotation tests.
%
% IMPORTANT: the set of vertices and faces has to form a watertight surface!
%
% arguments: (output)
% in - a vector of length size(testp,1), containing 0 and 1.
% in(nr) = 0: testp(nr,:) is outside the triangulation
% in(nr) = 1: testp(nr,:) is inside the triangulation
% in(nr) = -1: unable to decide for testp(nr,:)
%
% Thanks to Adam A for providing the FEX submission voxelise. The
% algorithms of voxelise form the algorithmic kernel of intriangulation.
%
% Thanks to Sven to discussions about speed and avoiding problems in
% special cases.
%
% Example usage:
%
% n = 10;
% vertices = rand(n, 3)-0.5; % Generate random points
% tetra = delaunayn(vertices); % Generate delaunay triangulization
% faces = freeBoundary(TriRep(tetra,vertices)); % use free boundary as triangulation
% n = 1000;
% testp = 2*rand(n,3)-1; % Generate random testpoints
% in = intriangulation(vertices,faces,testp);
% % Plot results
% h = trisurf(faces,vertices(:,1),vertices(:,2),vertices(:,3));
% set(h,'FaceColor','black','FaceAlpha',1/3,'EdgeColor','none');
% hold on;
% plot3(testp(:,1),testp(:,2),testp(:,3),'b.');
% plot3(testp(in==1,1),testp(in==1,2),testp(in==1,3),'ro');
%
% See also: intetrahedron, tsearchn, inpolygon
%
% Author: Johannes Korsawe, heavily based on voxelise from Adam A.
% E-mail: [email protected]
% Release: 1.3
% Release date: 25/09/2013
% check number of inputs
if nargin<3,
fprintf('??? Error using ==> intriangulation\nThree input matrices are needed.\n');in=[];return;
end
if nargin==3,
heavytest = 0;
end
% check size of inputs
if size(vertices,2)~=3 || size(faces,2)~=3 || size(testp,2)~=3,
fprintf('??? Error using ==> intriangulation\nAll input matrices must have three columns.\n');in=[];return;
end
ipmax = max(faces(:));zerofound = ~isempty(find(faces(:)==0, 1));
if ipmax>size(vertices,1) || zerofound,
fprintf('??? Error using ==> intriangulation\nThe triangulation data is defective. Use trisurf(faces,vertices(:,1),vertices(:,2),vertices(:,3)) to check it.\n');in=[];return;
end
% loop for heavytest
inreturn = zeros(size(testp,1),1);VER = vertices;TESTP = testp;
for n = 1:heavytest+1,
% Randomize
if n>1,
v=rand(1,3);D=rotmatrix(v/norm(v),rand*180/pi);vertices=VER*D;testp = TESTP*D;
else,
vertices=VER;
end
% Preprocessing data
meshXYZ = zeros(size(faces,1),3,3);
for loop = 1:3,
meshXYZ(:,:,loop) = vertices(faces(:,loop),:);
end
% Basic idea (ingenious from FeX-submission voxelise):
% If point is inside, it will cross the triangulation an uneven number of times in each direction (x, -x, y, -y, z, -z).
% The function VOXELISEinternal is about 98% identical to its version inside voxelise.m.
% This includes the elaborate comments. Thanks to Adam A!
% z-direction:
% initialization of results and correction list
[in,cl] = VOXELISEinternal(testp(:,1),testp(:,2),testp(:,3),meshXYZ);
% x-direction:
% has only to be done for those points, that were not determinable in the first step --> cl
[in2,cl2] = VOXELISEinternal(testp(cl,2),testp(cl,3),testp(cl,1),meshXYZ(:,[2,3,1],:));
% Use results of x-direction that determined "inside"
in(cl(in2==1)) = 1;
% remaining indices with unclear result
cl = cl(cl2);
% y-direction:
% has only to be done for those points, that were not determinable in the first and second step --> cl
[in3,cl3] = VOXELISEinternal(testp(cl,3),testp(cl,1),testp(cl,2),meshXYZ(:,[3,1,2],:));
% Use results of y-direction that determined "inside"
in(cl(in3==1)) = 1;
% remaining indices with unclear result
cl = cl(cl3);
% mark those indices, where all three tests have failed
in(cl) = -1;
if n==1,
inreturn = in; % Starting guess
else,
% if ALWAYS inside, use as inside!
% I = find(inreturn ~= in);
% inreturn(I(in(I)==0)) = 0;
% if AT LEAST ONCE inside, use as inside!
I = find(inreturn ~= in);
inreturn(I(in(I)==1)) = 1;
end
end
in = inreturn;
end
%==========================================================================
function [OUTPUT,correctionLIST] = VOXELISEinternal(testx,testy,testz,meshXYZ)
% Prepare logical array to hold the logical data:
OUTPUT = false(size(testx,1),1);
%Identify the min and max x,y coordinates of the mesh:
meshZmin = min(min(meshXYZ(:,3,:)));meshZmax = max(max(meshXYZ(:,3,:)));
%Identify the min and max x,y,z coordinates of each facet:
meshXYZmin = min(meshXYZ,[],3);meshXYZmax = max(meshXYZ,[],3);
%======================================================
% TURN OFF DIVIDE-BY-ZERO WARNINGS
%======================================================
%This prevents the Y1predicted, Y2predicted, Y3predicted and YRpredicted
%calculations creating divide-by-zero warnings. Suppressing these warnings
%doesn't affect the code, because only the sign of the result is important.
%That is, 'Inf' and '-Inf' results are ok.
%The warning will be returned to its original state at the end of the code.
warningrestorestate = warning('query', 'MATLAB:divideByZero');
%warning off MATLAB:divideByZero
%======================================================
% START COMPUTATION
%======================================================
correctionLIST = []; %Prepare to record all rays that fail the voxelisation. This array is built on-the-fly, but since
%it ought to be relatively small should not incur too much of a speed penalty.
% Loop through each testpoint.
% The testpoint-array will be tested by passing rays in the z-direction through
% each x,y coordinate of the testpoints, and finding the locations where the rays cross the mesh.
facetCROSSLIST = zeros(1,1e3); % uses countindex: nf
nm = size(meshXYZmin,1);
for loop = 1:length(OUTPUT),
nf = 0;
% % - 1a - Find which mesh facets could possibly be crossed by the ray:
% possibleCROSSLISTy = find( meshXYZmin(:,2)<=testy(loop) & meshXYZmax(:,2)>=testy(loop) );
% % - 1b - Find which mesh facets could possibly be crossed by the ray:
% possibleCROSSLIST = possibleCROSSLISTy( meshXYZmin(possibleCROSSLISTy,1)<=testx(loop) & meshXYZmax(possibleCROSSLISTy,1)>=testx(loop) );
% Do - 1a - and - 1b - faster
possibleCROSSLISTy = find((testy(loop)-meshXYZmin(:,2)).*(meshXYZmax(:,2)-testy(loop))>0);
possibleCROSSLISTx = (testx(loop)-meshXYZmin(possibleCROSSLISTy,1)).*(meshXYZmax(possibleCROSSLISTy,1)-testx(loop))>0;
possibleCROSSLIST = possibleCROSSLISTy(possibleCROSSLISTx);
if isempty(possibleCROSSLIST)==0 %Only continue the analysis if some nearby facets were actually identified
% - 2 - For each facet, check if the ray really does cross the facet rather than just passing it close-by:
% GENERAL METHOD:
% 1. Take each edge of the facet in turn.
% 2. Find the position of the opposing vertex to that edge.
% 3. Find the position of the ray relative to that edge.
% 4. Check if ray is on the same side of the edge as the opposing vertex.
% 5. If this is true for all three edges, then the ray definitely passes through the facet.
%
% NOTES:
% 1. If the ray crosses exactly on an edge, this is counted as crossing the facet.
% 2. If a ray crosses exactly on a vertex, this is also taken into account.
for loopCHECKFACET = possibleCROSSLIST'
%Check if ray crosses the facet. This method is much (>>10 times) faster than using the built-in function 'inpolygon'.
%Taking each edge of the facet in turn, check if the ray is on the same side as the opposing vertex. If so, let testVn=1
Y1predicted = meshXYZ(loopCHECKFACET,2,2) - ((meshXYZ(loopCHECKFACET,2,2)-meshXYZ(loopCHECKFACET,2,3)) * (meshXYZ(loopCHECKFACET,1,2)-meshXYZ(loopCHECKFACET,1,1))/(meshXYZ(loopCHECKFACET,1,2)-meshXYZ(loopCHECKFACET,1,3)));
YRpredicted = meshXYZ(loopCHECKFACET,2,2) - ((meshXYZ(loopCHECKFACET,2,2)-meshXYZ(loopCHECKFACET,2,3)) * (meshXYZ(loopCHECKFACET,1,2)-testx(loop))/(meshXYZ(loopCHECKFACET,1,2)-meshXYZ(loopCHECKFACET,1,3)));
if (Y1predicted > meshXYZ(loopCHECKFACET,2,1) && YRpredicted > testy(loop)) || (Y1predicted < meshXYZ(loopCHECKFACET,2,1) && YRpredicted < testy(loop)) || (meshXYZ(loopCHECKFACET,2,2)-meshXYZ(loopCHECKFACET,2,3)) * (meshXYZ(loopCHECKFACET,1,2)-testx(loop)) == 0
% testV1 = 1; %The ray is on the same side of the 2-3 edge as the 1st vertex.
else
% testV1 = 0; %The ray is on the opposite side of the 2-3 edge to the 1st vertex.
% As the check is for ALL three checks to be true, we can continue here, if only one check fails
continue;
end %if
Y2predicted = meshXYZ(loopCHECKFACET,2,3) - ((meshXYZ(loopCHECKFACET,2,3)-meshXYZ(loopCHECKFACET,2,1)) * (meshXYZ(loopCHECKFACET,1,3)-meshXYZ(loopCHECKFACET,1,2))/(meshXYZ(loopCHECKFACET,1,3)-meshXYZ(loopCHECKFACET,1,1)));
YRpredicted = meshXYZ(loopCHECKFACET,2,3) - ((meshXYZ(loopCHECKFACET,2,3)-meshXYZ(loopCHECKFACET,2,1)) * (meshXYZ(loopCHECKFACET,1,3)-testx(loop))/(meshXYZ(loopCHECKFACET,1,3)-meshXYZ(loopCHECKFACET,1,1)));
if (Y2predicted > meshXYZ(loopCHECKFACET,2,2) && YRpredicted > testy(loop)) || (Y2predicted < meshXYZ(loopCHECKFACET,2,2) && YRpredicted < testy(loop)) || (meshXYZ(loopCHECKFACET,2,3)-meshXYZ(loopCHECKFACET,2,1)) * (meshXYZ(loopCHECKFACET,1,3)-testx(loop)) == 0
% testV2 = 1; %The ray is on the same side of the 3-1 edge as the 2nd vertex.
else
% testV2 = 0; %The ray is on the opposite side of the 3-1 edge to the 2nd vertex.
% As the check is for ALL three checks to be true, we can continue here, if only one check fails
continue;
end %if
Y3predicted = meshXYZ(loopCHECKFACET,2,1) - ((meshXYZ(loopCHECKFACET,2,1)-meshXYZ(loopCHECKFACET,2,2)) * (meshXYZ(loopCHECKFACET,1,1)-meshXYZ(loopCHECKFACET,1,3))/(meshXYZ(loopCHECKFACET,1,1)-meshXYZ(loopCHECKFACET,1,2)));
YRpredicted = meshXYZ(loopCHECKFACET,2,1) - ((meshXYZ(loopCHECKFACET,2,1)-meshXYZ(loopCHECKFACET,2,2)) * (meshXYZ(loopCHECKFACET,1,1)-testx(loop))/(meshXYZ(loopCHECKFACET,1,1)-meshXYZ(loopCHECKFACET,1,2)));
if (Y3predicted > meshXYZ(loopCHECKFACET,2,3) && YRpredicted > testy(loop)) || (Y3predicted < meshXYZ(loopCHECKFACET,2,3) && YRpredicted < testy(loop)) || (meshXYZ(loopCHECKFACET,2,1)-meshXYZ(loopCHECKFACET,2,2)) * (meshXYZ(loopCHECKFACET,1,1)-testx(loop)) == 0
% testV3 = 1; %The ray is on the same side of the 1-2 edge as the 3rd vertex.
else
% testV3 = 0; %The ray is on the opposite side of the 1-2 edge to the 3rd vertex.
% As the check is for ALL three checks to be true, we can continue here, if only one check fails
continue;
end %if
nf=nf+1;facetCROSSLIST(nf)=loopCHECKFACET;
end %for
% Use only values ~=0
facetCROSSLIST = facetCROSSLIST(1:nf);
% - 3 - Find the z coordinate of the locations where the ray crosses each facet:
gridCOzCROSS = zeros(1,nf);
for loopFINDZ = facetCROSSLIST
% METHOD:
% 1. Define the equation describing the plane of the facet. For a
% more detailed outline of the maths, see:
% http://local.wasp.uwa.edu.au/~pbourke/geometry/planeeq/
% Ax + By + Cz + D = 0
% where A = y1 (z2 - z3) + y2 (z3 - z1) + y3 (z1 - z2)
% B = z1 (x2 - x3) + z2 (x3 - x1) + z3 (x1 - x2)
% C = x1 (y2 - y3) + x2 (y3 - y1) + x3 (y1 - y2)
% D = - x1 (y2 z3 - y3 z2) - x2 (y3 z1 - y1 z3) - x3 (y1 z2 - y2 z1)
% 2. For the x and y coordinates of the ray, solve these equations to find the z coordinate in this plane.
planecoA = meshXYZ(loopFINDZ,2,1)*(meshXYZ(loopFINDZ,3,2)-meshXYZ(loopFINDZ,3,3)) + meshXYZ(loopFINDZ,2,2)*(meshXYZ(loopFINDZ,3,3)-meshXYZ(loopFINDZ,3,1)) + meshXYZ(loopFINDZ,2,3)*(meshXYZ(loopFINDZ,3,1)-meshXYZ(loopFINDZ,3,2));
planecoB = meshXYZ(loopFINDZ,3,1)*(meshXYZ(loopFINDZ,1,2)-meshXYZ(loopFINDZ,1,3)) + meshXYZ(loopFINDZ,3,2)*(meshXYZ(loopFINDZ,1,3)-meshXYZ(loopFINDZ,1,1)) + meshXYZ(loopFINDZ,3,3)*(meshXYZ(loopFINDZ,1,1)-meshXYZ(loopFINDZ,1,2));
planecoC = meshXYZ(loopFINDZ,1,1)*(meshXYZ(loopFINDZ,2,2)-meshXYZ(loopFINDZ,2,3)) + meshXYZ(loopFINDZ,1,2)*(meshXYZ(loopFINDZ,2,3)-meshXYZ(loopFINDZ,2,1)) + meshXYZ(loopFINDZ,1,3)*(meshXYZ(loopFINDZ,2,1)-meshXYZ(loopFINDZ,2,2));
planecoD = - meshXYZ(loopFINDZ,1,1)*(meshXYZ(loopFINDZ,2,2)*meshXYZ(loopFINDZ,3,3)-meshXYZ(loopFINDZ,2,3)*meshXYZ(loopFINDZ,3,2)) - meshXYZ(loopFINDZ,1,2)*(meshXYZ(loopFINDZ,2,3)*meshXYZ(loopFINDZ,3,1)-meshXYZ(loopFINDZ,2,1)*meshXYZ(loopFINDZ,3,3)) - meshXYZ(loopFINDZ,1,3)*(meshXYZ(loopFINDZ,2,1)*meshXYZ(loopFINDZ,3,2)-meshXYZ(loopFINDZ,2,2)*meshXYZ(loopFINDZ,3,1));
if abs(planecoC) < 1e-14
planecoC=0;
end
gridCOzCROSS(facetCROSSLIST==loopFINDZ) = (- planecoD - planecoA*testx(loop) - planecoB*testy(loop)) / planecoC;
end %for
if isempty(gridCOzCROSS),continue;end
%Remove values of gridCOzCROSS which are outside of the mesh limits (including a 1e-12 margin for error).
gridCOzCROSS = gridCOzCROSS( gridCOzCROSS>=meshZmin-1e-12 & gridCOzCROSS<=meshZmax+1e-12 );
%Round gridCOzCROSS to remove any rounding errors, and take only the unique values:
gridCOzCROSS = round(gridCOzCROSS*1e10)/1e10;
% Replacement of the call to unique (gridCOzCROSS = unique(gridCOzCROSS);) by the following line:
tmp = sort(gridCOzCROSS);I=[0,tmp(2:end)-tmp(1:end-1)]~=0;gridCOzCROSS = [tmp(1),tmp(I)];
% - 4 - Label as being inside the mesh all the voxels that the ray passes through after crossing one facet before crossing another facet:
if rem(numel(gridCOzCROSS),2)==0 % Only rays which cross an even number of facets are voxelised
for loopASSIGN = 1:(numel(gridCOzCROSS)/2)
voxelsINSIDE = (testz(loop)>gridCOzCROSS(2*loopASSIGN-1) & testz(loop)<gridCOzCROSS(2*loopASSIGN));
OUTPUT(loop) = voxelsINSIDE;
if voxelsINSIDE,break;end
end %for
elseif numel(gridCOzCROSS)~=0 % Remaining rays which meet the mesh in some way are not voxelised, but are labelled for correction later.
correctionLIST = [ correctionLIST; loop ];
end %if
end %if
end %for
%======================================================
% RESTORE DIVIDE-BY-ZERO WARNINGS TO THE ORIGINAL STATE
%======================================================
warning(warningrestorestate)
% J.Korsawe: A correction is not possible as the testpoints need not be
% ordered in any way.
% voxelise contains a correction algorithm which is appended here
% without changes in syntax.
return
%======================================================
% USE INTERPOLATION TO FILL IN THE RAYS WHICH COULD NOT BE VOXELISED
%======================================================
%For rays where the voxelisation did not give a clear result, the ray is
%computed by interpolating from the surrounding rays.
countCORRECTIONLIST = size(correctionLIST,1);
if countCORRECTIONLIST>0
%If necessary, add a one-pixel border around the x and y edges of the
%array. This prevents an error if the code tries to interpolate a ray at
%the edge of the x,y grid.
if min(correctionLIST(:,1))==1 || max(correctionLIST(:,1))==numel(gridCOx) || min(correctionLIST(:,2))==1 || max(correctionLIST(:,2))==numel(gridCOy)
gridOUTPUT = [zeros(1,voxcountY+2,voxcountZ);zeros(voxcountX,1,voxcountZ),gridOUTPUT,zeros(voxcountX,1,voxcountZ);zeros(1,voxcountY+2,voxcountZ)];
correctionLIST = correctionLIST + 1;
end
for loopC = 1:countCORRECTIONLIST
voxelsforcorrection = squeeze( sum( [ gridOUTPUT(correctionLIST(loopC,1)-1,correctionLIST(loopC,2)-1,:) ,...
gridOUTPUT(correctionLIST(loopC,1)-1,correctionLIST(loopC,2),:) ,...
gridOUTPUT(correctionLIST(loopC,1)-1,correctionLIST(loopC,2)+1,:) ,...
gridOUTPUT(correctionLIST(loopC,1),correctionLIST(loopC,2)-1,:) ,...
gridOUTPUT(correctionLIST(loopC,1),correctionLIST(loopC,2)+1,:) ,...
gridOUTPUT(correctionLIST(loopC,1)+1,correctionLIST(loopC,2)-1,:) ,...
gridOUTPUT(correctionLIST(loopC,1)+1,correctionLIST(loopC,2),:) ,...
gridOUTPUT(correctionLIST(loopC,1)+1,correctionLIST(loopC,2)+1,:) ,...
] ) );
voxelsforcorrection = (voxelsforcorrection>=4);
gridOUTPUT(correctionLIST(loopC,1),correctionLIST(loopC,2),voxelsforcorrection) = 1;
end %for
%Remove the one-pixel border surrounding the array, if this was added
%previously.
if size(gridOUTPUT,1)>numel(gridCOx) || size(gridOUTPUT,2)>numel(gridCOy)
gridOUTPUT = gridOUTPUT(2:end-1,2:end-1,:);
end
end %if
%disp([' Ray tracing result: ',num2str(countCORRECTIONLIST),' rays (',num2str(countCORRECTIONLIST/(voxcountX*voxcountY)*100,'%5.1f'),'% of all rays) exactly crossed a facet edge and had to be computed by interpolation.'])
end %function
%==========================================================================
function D = rotmatrix(v,deg)
% calculate the rotation matrix about v by deg degrees
deg=deg/180*pi;
if deg~=0,
v=v/norm(v);
v1=v(1);v2=v(2);v3=v(3);ca=cos(deg);sa=sin(deg);
D=[ca+v1*v1*(1-ca),v1*v2*(1-ca)-v3*sa,v1*v3*(1-ca)+v2*sa;
v2*v1*(1-ca)+v3*sa,ca+v2*v2*(1-ca),v2*v3*(1-ca)-v1*sa;
v3*v1*(1-ca)-v2*sa,v3*v2*(1-ca)+v1*sa,ca+v3*v3*(1-ca)];
else,
D=eye(3,3);
end
end
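% usage note (editor's addition, not part of the original file): if some test
% points come back as -1 (undecidable, e.g. rays hitting facet edges exactly),
% a hedged remedy is to re-run with a positive heavytest count, for example
% in = intriangulation(vertices,faces,testp,2); which repeats the test under
% random rotations and accepts a point as inside if any rotation says so.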
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
MakeObj.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/Dubins_SBONL/plotting/MakeObj.m
| 683 |
utf_8
|
a81a3fae729eca365c5bffe0df3d8124
|
% returns convex hull from point cloud
function obj = MakeObj(points, color)
%figure()
% create face representation and create convex hull
F = convhull(points(1,:), points(2,:));
obj = fill(points(1,F),points(2,F),color); % return the patch handle as the declared output
%{
S.Vertices = transpose(points(1:2,:));
S.Faces = F;
S.FaceVertexCData = jet(size(points,1));
S.FaceColor = 'interp';
if(strcmp(color,'red'))
obj = patch('Faces',S.Faces,'Vertices',S.Vertices,'FaceColor','red');
elseif(strcmp(color,'green'))
obj = patch('Faces',S.Faces,'Vertices',S.Vertices,'FaceColor','green');
else
obj = patch(S);
end
%}
end
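% usage sketch (editor's addition, not part of the original file): a hedged
% example drawing the convex hull of a random 2-D point cloud; the point
% cloud and the color string are illustrative.
%
% pts = rand(2, 20); % 2 x N point cloud (row 1: x, row 2: y)
% figure; hold on; axis equal
% MakeObj(pts, 'g');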
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
PlotSetBasedSim.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/Dubins_SBONL/plotting/PlotSetBasedSim.m
| 1,537 |
utf_8
|
41159ed792610c3e67a56c471efe2ea5
|
% PlotSetBasedSim(agentPos, obst, threshold, target)
%
% plots associated sets with the simulation
function PlotSetBasedSim(agentPos, obst, threshold, target)
figure()
hold on
% mA - coordinate (usually size 3)
% nA - time step, equal to iterations in simulation
[mA,nA] = size(agentPos);
% create objects/convex hulls for each set at each time step
for i = 1:nA
% format points for MakeObj function
curSet = zeros(3,4);
curSet(:,1) = [agentPos(1,i);agentPos(3,i);0];
curSet(:,2) = [agentPos(1,i);agentPos(4,i);0];
curSet(:,3) = [agentPos(2,i);agentPos(3,i);0];
curSet(:,4) = [agentPos(2,i);agentPos(4,i);0];
% make the object
MakeObj(curSet, 'g');
end
[mO,nO,pO] = size(obst);
% NOTE: pA == nO
% plot obstacle with threshold
for i = 1:nO
for j = 1:pO
% create sphere for obstacle (threshold and obstacle location)
[x,y,z] = sphere;
x = threshold*x+obst(1,i,j);
y = threshold*y+obst(2,i,j);
z = threshold*z+obst(3,i,j);
[mS,nS] = size(z);
% plot obst
C = zeros(mS,nS,3);
C(:,:,1) = C(:,:,1) + 1;
s = surf(x,y,z,C);
scatter3(obst(1,i,j), obst(2,i,j), obst(3,i,j), '*')
end
end
scatter3(target(1), target(2), target(3), '*')
xlabel('x axis')
ylabel('y axis')
zlabel('z axis')
grid on
end
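% usage sketch (editor's addition, not part of the original file): a hedged
% example with a fabricated two-step reachable-set trajectory; the box bounds
% (rows of agentPos are taken here as [xmin; xmax; ymin; ymax]), the single
% static obstacle and the threshold are illustrative, and MakeObj.m must be
% on the path.
%
% agentPos = [0 1; 0.5 1.5; 0 1; 0.5 1.5]; % 4 x 2: two time steps
% obst = zeros(3, 2, 1); obst(:,:,1) = [2 2; 2 2; 0 0]; % one obstacle
% PlotSetBasedSim(agentPos, obst, 0.25, [3; 3; 0]);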
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
PlotSimDistance.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/Dubins_SBONL/plotting/PlotSimDistance.m
| 2,137 |
utf_8
|
247c9aed4e54b4c7c47b5b92b7ddae44
|
% PlotSimDistance(agentPos, obst, threshold, target)
%
% plots max distance to target and min distance to projectile throughout
% the simulation
function PlotSimDistance(agentPos, obst, threshold, target)
figure()
hold on
% mA - coordinate (usually size 3)
% nA - time step, equal to iterations in simulation
[mA,nA] = size(agentPos);
% mO - coordinate (usually size 3)
% nO - time step, equal to iterations in simulation
% pO - obstacle number
[mO,nO,pO] = size(obst);
ObstDist = zeros(nA,pO);
for i = 1:pO
% create objects/convex hulls for each set at each time step
for j = 1:nA
% format points for MakeObj function
curSet = zeros(3,4);
curSet(:,1) = [agentPos(1,j);agentPos(3,j);0];
curSet(:,2) = [agentPos(1,j);agentPos(4,j);0];
curSet(:,3) = [agentPos(2,j);agentPos(3,j);0];
curSet(:,4) = [agentPos(2,j);agentPos(4,j);0];
% min distance to projectile
temp_xObst = [obst(:,j,i),obst(:,j,i)]; % polytope dist function has trouble with only one point
polyOptions = optimoptions('fmincon','Display','notify-detailed','algorithm','active-set');
ObstDist(j,i) = PolytopeMinDist(curSet,temp_xObst,polyOptions);
end
plot(0:1:(nA-1),transpose(ObstDist(:,i)));
plot(0:1:(nA-1),threshold*ones(1,(nA)));
end
title('Minimum distance between vehicle polytope and obstacle polytope');
xlabel('time');
ylabel('distance');
figure()
hold on;
dist = zeros(nA,1);
for i = 1:nA
dist(i) = dist(i) + norm([agentPos(1,i);agentPos(3,i);0]-target)^2 ...
+ norm([agentPos(1,i);agentPos(4,i);0]-target)^2 ...
+ norm([agentPos(2,i);agentPos(3,i);0]-target)^2 ...
+ norm([agentPos(2,i);agentPos(4,i);0]-target)^2;
end
plot(0:1:(nA-1),dist);
title('Sum of squared distances from vehicle polytope vertices to the target');
xlabel('time');
ylabel('distance');
end
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
PlotOptimalPredicted.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/Dubins_SBONL/plotting/PlotOptimalPredicted.m
| 1,446 |
utf_8
|
c9b3ae67ab8a11a7f45b94800201e13c
|
function PlotOptimalPredicted(agentPos, obst, threshold, target)
figure()
hold on
% mA - coordinate (usually size 3)
% nA - time step, equal to iterations in simulation
[mA,nA] = size(agentPos);
% create objects/convex hulls for each set at each time step
for i = 1:nA
% format points for MakeObj function
curSet = zeros(3,4);
curSet(:,1) = [agentPos(1,i);agentPos(3,i);0];
curSet(:,2) = [agentPos(1,i);agentPos(4,i);0];
curSet(:,3) = [agentPos(2,i);agentPos(3,i);0];
curSet(:,4) = [agentPos(2,i);agentPos(4,i);0];
% make the object
MakeObj(curSet, 'g');
end
[mO,nO,pO] = size(obst);
% NOTE: pA == nO
% plot obstacle with threshold
for i = 1:nO
for j = 1:pO
% create sphere for obstacle (threshold and obstacle location)
[x,y,z] = sphere;
x = threshold*x+obst(1,i,j);
y = threshold*y+obst(2,i,j);
z = threshold*z+obst(3,i,j);
[mS,nS] = size(z);
% plot obst
C = zeros(mS,nS,3);
C(:,:,1) = C(:,:,1) + 1;
s = surf(x,y,z,C);
scatter3(obst(1,i,j), obst(2,i,j), obst(3,i,j), '*')
end
end
scatter3(target(1), target(2), target(3), '*')
xlabel('x axis')
ylabel('y axis')
zlabel('z axis')
grid on
end
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
Cost.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/Dubins_SBONL/system/Cost.m
| 1,054 |
utf_8
|
143e65321cbb4cc55eae7ee3646a6247
|
% c = Cost(x0_set, u, ts, target, L, terminalWeight)
%
% custom cost function - sum of distance from each vertex to target squared
function c = Cost(x0_set, u, ts, target, L, terminalWeight)
% predict system state with set based dynamics
x_set = Dubin(x0_set,u,ts,L);
% calculate cost of prediction for set based dynamics
% - iterate through each vertex at each time step and calculate the
% distance between that point and the target
[m,n] = size(x_set);
c = 0;
% stage cost
for i = 1:n-1
c = c + norm([x_set(1,i);x_set(3,i);0]-target)^2 ...
+ norm([x_set(1,i);x_set(4,i);0]-target)^2 ...
+ norm([x_set(2,i);x_set(3,i);0]-target)^2 ...
+ norm([x_set(2,i);x_set(4,i);0]-target)^2;
end
% terminal cost
c = c + terminalWeight*norm([x_set(1,n);x_set(3,n);0]-target)^2 ...
+ terminalWeight*norm([x_set(1,n);x_set(4,n);0]-target)^2 ...
+ terminalWeight*norm([x_set(2,n);x_set(3,n);0]-target)^2 ...
+ terminalWeight*norm([x_set(2,n);x_set(4,n);0]-target)^2;
end
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
FindOptimalInput.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/Dubins_SBONL/system/FindOptimalInput.m
| 1,220 |
utf_8
|
4d99d1329218ef002c43fdbd6e4312f2
|
% uopt = FindOptimalInput(x0_set, N, ts, target, xObst, threshold, L, speedBound, steeringBound, terminalWeight)
%
% uses fmincon to minimize cost function given system dynamics and
% nonlinear constraints, returns optimal input sequence
function uopt = FindOptimalInput(x0_set, N, ts, target, xObst, threshold, L, speedBound, steeringBound, terminalWeight)
A = [];
b = [];
Aeq = [];
beq = [];
% set lower and upper bounds on inputs to dubins model
lb(1,:) = zeros(1,N); % lower bound is zero (speed)
lb(2,:) = -steeringBound*ones(1,N);
ub(1,:) = speedBound*ones(1,N);
ub(2,:) = steeringBound*ones(1,N);
% initial input guess
uInit = zeros(2,N);
uInit(1,:) = speedBound*ones(1,N);
% solve optimization
options = optimoptions('fmincon','Display','notify-detailed','algorithm','active-set','MaxFunEvals',1000,'ConstraintTolerance',1e-04);
polyOptions = optimoptions('fmincon','Display','notify-detailed','algorithm','active-set');
[uopt ,fval,exitflag,output] = fmincon(@(u) Cost(x0_set,u,ts,target,L,terminalWeight),uInit,A,b,Aeq,beq,lb,ub, @(u) ObstConstraint(x0_set,u,ts,xObst,threshold,L,polyOptions),options);
output % display fmincon diagnostics (intentionally not suppressed)
% return optimal input sequence
end
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
ObstConstraint.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/Dubins_SBONL/system/ObstConstraint.m
| 1,482 |
utf_8
|
04e6c3e7aa641c9244adfd3c6e4d36c9
|
% [c,ceq] = ObstConstraint(x0_set, u, ts, xObst, threshold, L, options)
%
% defines the nonlinear constraint - the agent polytope must maintain
% a distance above the threshold from the obstacle position
function [c,ceq] = ObstConstraint(x0_set, u, ts, xObst, threshold,L,options)
% predict agent with set based dynamics
% coords X time X set points
x_set = Dubin(x0_set, u, ts,L);
% find distance between agent polytope and obstacle
[mA,nA] = size(x_set);
[mO,nO,pO] = size(xObst);
ObstDist = zeros(1,pO*nA);
obstDistCount = 1;
for i = 1:nA % time
for j = 1:pO % number of obstacles
% format the agents set for polytope minimization for time step i
xPolytope = zeros(3,4);
xPolytope(:,:) = [x_set(1,i), x_set(1,i), x_set(2,i), x_set(2,i);
x_set(3,i), x_set(4,i), x_set(3,i), x_set(4,i);
0, 0, 0, 0];
% calculate distance between agent and obstacle
% xPolytope is a 3xp matrix
% xObst(:,i,j) is a 3x1 matrix
temp_xObst = [xObst(:,i,j),xObst(:,i,j)]; % polytope dist function has trouble with only one point
ObstDist(obstDistCount) = PolytopeMinDist(xPolytope,temp_xObst,options);
obstDistCount = obstDistCount+1;
end
end
% calculate constraints
c = -ObstDist+threshold;
ceq = [];
end
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
SingleIntegrator.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/Dubins_SBONL/system/SingleIntegrator.m
| 412 |
utf_8
|
103d816561c56f118cee17686dc4c4c4
|
% x = SingleIntegrator(x0_set, u, ts)
%
% set based dynamics of single integrator
function x = SingleIntegrator(x0_set, u, ts)
[mP,nP] = size(x0_set);
[mH,nH] = size(u);
% coords X time X set points
x = zeros(3,nH+1,nP);
x(:,1,:) = x0_set;
% apply integrator dynamics
for j = 1:nP
for i = 1:nH
x(:,i+1,j) = x(:,i,j) + ts*u(:,i);
end
end
end
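% Example (illustrative sketch; the initial set and input values below are
% assumed, not taken from the simulation files):
%   x0_set = [zeros(3,1), [0.1;0.1;0]];          % two vertices of the initial set
%   u      = repmat([1;0;0], 1, 5);              % constant input over 5 steps
%   x      = SingleIntegrator(x0_set, u, 0.1);   % 3 x 6 x 2 trajectory tube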
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
SimulationProjectilePredict.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/Dubins_SBONL/projectile/SimulationProjectilePredict.m
| 764 |
utf_8
|
8ea559140e8b2248afec5e16b1990cb5
|
% [trajectory, velocity] = SimulationProjectilePredict(p_0, simTime)
%
% calls on simulink to predict projectile state given initial conditions
function [trajectory, velocity] = SimulationProjectilePredict(p_0, simTime)
% set up simulink
set_param('projectile/rx','Value',num2str(p_0(1)));
set_param('projectile/ry','Value',num2str(p_0(2)));
set_param('projectile/rz','Value',num2str(p_0(3)));
set_param('projectile/vx','Value',num2str(p_0(4)));
set_param('projectile/vy','Value',num2str(p_0(5)));
set_param('projectile/vz','Value',num2str(p_0(6)));
set_param('projectile', 'StopTime', num2str(simTime));
% run simulation
sim('projectile');
trajectory = projectilePos;
velocity = projectileVel;
end
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
CreateSphere.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/Dubins_SBONL/polytope/CreateSphere.m
| 913 |
utf_8
|
e7fc2c7730cbb8e66f70c29e702d9c20
|
% creates a point cloud in a sphere around the center
function points = CreateSphere(center, r, thetadis, phidis)
% angle discretization
thetas = linspace(0,2*pi,thetadis);
phis = linspace(0,pi,phidis);
% point calculation
points = [];
x = [];
y = [];
z = [];
for i = 1:length(phis)
for j = 1:length(thetas)
% removes duplicate point at theta = 2*pi
if(thetas(j) == 2*pi)
break
end
x = (r * sin(phis(i)) * cos(thetas(j))) + center(1);
y = (r * sin(phis(i)) * sin(thetas(j))) + center(2);
z = (r * cos(phis(i))) + center(3);
nextPoint = [x;y;z];
points = [points, nextPoint];
% removes duplicate points at the top and bottom of sphere
if(phis(i) == 0 || phis(i) == pi)
break
end
end
end
end
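% Example (illustrative sketch; radius and discretization values are assumed):
%   pts = CreateSphere([0;0;0], 2, 11, 6);       % 3 x N matrix, points as columns
%   scatter3(pts(1,:), pts(2,:), pts(3,:), '.')  % visualize the point cloud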
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
PolytopeMinDist.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/Dubins_SBONL/polytope/PolytopeMinDist.m
| 1,150 |
utf_8
|
5a4decdbc742e1a03013ff2627495551
|
% minDist = PolytopeMinDist(X1,X2,options)
%
% finds the minimum distance between two polytopes X1 and X2
function minDist = PolytopeMinDist(X1,X2,options)
% declare constraints for fmincon
lb = [];
ub = [];
% get sizes of vertices for polytopes
[m1,n1] = size(X1);
[m2,n2] = size(X2);
if(m1 ~= m2)
error('Incorrect Dimensions');
end
n = n1+n2;
A = [eye(n); -eye(n)];
b = [ones(n,1); zeros(n,1)];
Aeq = [ones(1,n1) zeros(1,n2);
zeros(1,n1) ones(1,n2)];
beq = [1;1];
nonlcon = [];
% create lambda vectors
x0 = zeros(n,1);
x0(1) = 1;
x0(n1+1) = 1;
% declare function to be minimized
%fun = @(lambda)(norm((X1 * lambda(1:n1))-(X2 * lambda(n1+1:n)))^2);
% evaluate fmincon
%x = fmincon(@(lambda) PolytopeDist(X1,X2,lambda,n1,n2,n),x0,A,b,Aeq,beq,lb,ub,nonlcon,options);
[x,fval,exitflag,output] = fmincon(@(lambda) PolytopeDist(X1,X2,lambda,n1,n2,n),x0,A,b,Aeq,beq,lb,ub,nonlcon,options);
%output
% return min distance
minDist = sqrt(PolytopeDist(X1,X2,x,n1,n2,n));
end
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
PolytopeDist.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/Dubins_SBONL/polytope/PolytopeDist.m
| 668 |
utf_8
|
34f5332d650d2fb60f349903cb7edf17
|
function [f,g] = PolytopeDist(X1,X2,lambda,n1,n2,n)
f = norm((X1 * lambda(1:n1))-(X2 * lambda(n1+1:n)))^2;
%{
g = zeros(n,1);
for i = 1:n1
g(i) = 2*((X1(1,:)*lambda(1:n1))-(X2(1,:)*lambda(n1+1:n)))*X1(1,i) ...
+ 2*((X1(2,:)*lambda(1:n1))-(X2(2,:)*lambda(n1+1:n)))*X1(2,i) ...
+ 2*((X1(3,:)*lambda(1:n1))-(X2(3,:)*lambda(n1+1:n)))*X1(3,i);
end
for i = 1:n2
g(n1+i) = 2*((X1(1,:)*lambda(1:n1))-(X2(1,:)*lambda(n1+1:n)))*(-X2(1,i)) ...
+ 2*((X1(2,:)*lambda(1:n1))-(X2(2,:)*lambda(n1+1:n)))*(-X2(2,i)) ...
+ 2*((X1(3,:)*lambda(1:n1))-(X2(3,:)*lambda(n1+1:n)))*(-X2(3,i));
end
%}
end
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
GJK.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/GJK/GJK.m
| 5,909 |
utf_8
|
acc17476d868c4bb652640495a721180
|
function flag = GJK(shape1,shape2,iterations)
% GJK Gilbert-Johnson-Keerthi Collision detection implementation.
% Returns whether two convex shapes are penetrating or not
% (true/false). Only works for CONVEX shapes.
%
% Inputs:
% shape1:
% must have fields for XData,YData,ZData, which are the x,y,z
% coordinates of the vertices. Can be the same as what comes out of a
% PATCH object. It isn't required that the points form faces like patch
% data. This algorithm will assume the convex hull of the x,y,z points
% given.
%
% shape2:
% Other shape to test collision against. Same info as shape1.
%
% iterations:
% The algorithm tries to construct a tetrahedron encompassing
% the origin. This proves the objects have collided. If we fail within a
% certain number of iterations, we give up and say the objects are not
% penetrating. Low iterations means a higher chance of false-NEGATIVES
% but faster computation. As the objects penetrate more, it takes fewer
% iterations anyway, so low iterations is not a huge disadvantage.
%
% Outputs:
% flag:
% true - objects collided
% false - objects not collided
%
%
% This video helped me a lot when making this: https://mollyrocket.com/849
% Not my video, but very useful.
%
% Matthew Sheen, 2016
%
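% Example (illustrative sketch; the patch vertex data below is made up):
%   s1 = patch('XData',rand(3,4),'YData',rand(3,4),'ZData',rand(3,4),'FaceColor','none');
%   s2 = patch('XData',rand(3,4)+5,'YData',rand(3,4),'ZData',rand(3,4),'FaceColor','none');
%   hit = GJK(s1, s2, 6)   % flag = 0 here: the hulls are well separated
%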
%Point 1 and 2 selection (line segment)
v = [0.8 0.5 1];
[a,b] = pickLine(v,shape2,shape1);
%Point 3 selection (triangle)
[a,b,c,flag] = pickTriangle(a,b,shape2,shape1,iterations);
%Point 4 selection (tetrahedron)
if flag == 1 %Only bother if we could find a viable triangle.
[a,b,c,d,flag] = pickTetrahedron(a,b,c,shape2,shape1,iterations);
end
end
function [a,b] = pickLine(v,shape1,shape2)
%Construct the first line of the simplex
b = support(shape2,shape1,v);
a = support(shape2,shape1,-v);
end
function [a,b,c,flag] = pickTriangle(a,b,shape1,shape2,IterationAllowed)
flag = 0; %So far, we don't have a successful triangle.
%First try:
ab = b-a;
ao = -a;
v = cross(cross(ab,ao),ab); % v is perpendicular to ab pointing in the general direction of the origin.
c = b;
b = a;
a = support(shape2,shape1,v);
for i = 1:IterationAllowed %iterations to see if we can draw a good triangle.
%Time to check if we got it:
ab = b-a;
ao = -a;
ac = c-a;
%Normal to face of triangle
abc = cross(ab,ac);
%Perpendicular to AB going away from triangle
abp = cross(ab,abc);
%Perpendicular to AC going away from triangle
acp = cross(abc,ac);
%First, make sure our triangle "contains" the origin in a 2d projection
%sense.
%Is origin above (outside) AB?
if dot(abp,ao) > 0
c = b; %Throw away the furthest point and grab a new one in the right direction
b = a;
v = abp; %cross(cross(ab,ao),ab);
%Is origin above (outside) AC?
elseif dot(acp, ao) > 0
b = a;
v = acp; %cross(cross(ac,ao),ac);
else
flag = 1;
break; %We got a good one.
end
a = support(shape2,shape1,v);
end
end
function [a,b,c,d,flag] = pickTetrahedron(a,b,c,shape1,shape2,IterationAllowed)
%Now, if we're here, we have a successful 2D simplex, and we need to check
%if the origin is inside a successful 3D simplex.
%So, is the origin above or below the triangle?
flag = 0;
ab = b-a;
ac = c-a;
%Normal to face of triangle
abc = cross(ab,ac);
ao = -a;
if dot(abc, ao) > 0 %Above
d = c;
c = b;
b = a;
v = abc;
a = support(shape2,shape1,v); %Tetrahedron new point
else %below
d = b;
b = a;
v = -abc;
a = support(shape2,shape1,v); %Tetrahedron new point
end
for i = 1:IterationAllowed %Allow up to IterationAllowed tries to make a good tetrahedron.
%Check the tetrahedron:
ab = b-a;
ao = -a;
ac = c-a;
ad = d-a;
%We KNOW that the origin is not under the base of the tetrahedron based on
%the way we picked a. So we need to check faces ABC, ABD, and ACD.
%Normal to face of triangle
abc = cross(ab,ac);
if dot(abc, ao) > 0 %Above triangle ABC
%No need to change anything, we'll just iterate again with this face as
%default.
else
acd = cross(ac,ad);%Normal to face of triangle
if dot(acd, ao) > 0 %Above triangle ACD
%Make this the new base triangle.
b = c;
c = d;
ab = ac;
ac = ad;
abc = acd;
else
adb = cross(ad,ab);%Normal to face of triangle
if dot(adb, ao) > 0 %Above triangle ADB
%Make this the new base triangle.
c = b;
b = d;
ac = ab;
ab = ad;
abc = adb;
else
flag = 1;
break; %It's inside the tetrahedron.
end
end
end
%try again:
if dot(abc, ao) > 0 %Above
d = c;
c = b;
b = a;
v = abc;
a = support(shape2,shape1,v); %Tetrahedron new point
else %below
d = b;
b = a;
v = -abc;
a = support(shape2,shape1,v); %Tetrahedron new point
end
end
end
function point = getFarthestInDir(shape, v)
%Find the furthest point in a given direction for a shape
XData = get(shape,'XData'); % Making it more compatible with previous MATLAB releases.
YData = get(shape,'YData');
ZData = get(shape,'ZData');
dotted = XData*v(1) + YData*v(2) + ZData*v(3);
[maxInCol,rowIdxSet] = max(dotted);
[maxInRow,colIdx] = max(maxInCol);
rowIdx = rowIdxSet(colIdx);
point = [XData(rowIdx,colIdx), YData(rowIdx,colIdx), ZData(rowIdx,colIdx)];
end
function point = support(shape1,shape2,v)
%Support function to get the Minkowski difference.
point1 = getFarthestInDir(shape1, v);
point2 = getFarthestInDir(shape2, -v);
point = point1 - point2;
end
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
convexhull.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/GJK/convexhull.m
| 1,701 |
utf_8
|
cb09453005f6a6fce441276524c4e5c7
|
%How many iterations to allow for collision detection.
iterationsAllowed = 6;
% Make a figure
figure(1)
hold on
% constants for set making
cntr_1 = [0.0, 0.0, 0.0];
cntr_2 = [1.0, 0.0, 0.0];
r_1 = 0.5;
r_2 = 0.2;
tdis = 11;
pdis = 6;
% create point cloud
sphere_1 = CreateSphere(cntr_1, r_1, tdis, pdis);
sphere_2 = CreateSphere(cntr_2, r_2, tdis, pdis);
% make individual convex hulls
S1Obj = makeObj(sphere_1);
S2Obj = makeObj(sphere_2);
% Make tube
figure(2)
% create points cloud
sphere_3 = [sphere_1; sphere_2];
% make combined convex hull
S3Obj = makeObj(sphere_3);
% check for collision
flag = GJK(S1Obj, S2Obj, iterationsAllowed)
% returns convex hull from point cloud
function obj = makeObj(points)
% create face representation and create convex hull
F = convhull(points(:,1), points(:,2), points(:,3));
S.Vertices = points;
S.Faces = F;
S.FaceVertexCData = jet(size(points,1));
S.FaceColor = 'interp';
obj = patch(S);
end
% creates a point cloud in a sphere around the center
function points = CreateSphere(center, r, thetadis, phidis)
% angle discretization
thetas = linspace(0,2*pi,thetadis);
phis = linspace(0,pi,phidis);
% point calculation
points = [];
x = [];
y = [];
z = [];
for i = 1:length(phis)
for j = 1:length(thetas)
x = (r * sin(phis(i)) * cos(thetas(j))) + center(1);
y = (r * sin(phis(i)) * sin(thetas(j))) + center(2);
z = (r * cos(phis(i))) + center(3);
points = [points; x, y, z];
% removes duplicate points at the top and bottom of sphere
if(phis(i) == 0 || phis(i) == pi)
break
end
end
end
end
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
CreateSphere.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/GJK/functions/CreateSphere.m
| 866 |
utf_8
|
3ea485e3c5956a7fef00a0fe4c32bddb
|
% creates a point cloud in a sphere around the center
function points = CreateSphere(center, r, thetadis, phidis)
% angle discretization
thetas = linspace(0,2*pi,thetadis);
phis = linspace(0,pi,phidis);
% point calculation
points = [];
x = [];
y = [];
z = [];
for i = 1:length(phis)
for j = 1:length(thetas)
% removes duplicate point at theta = 2*pi
if(thetas(j) == 2*pi)
break
end
x = (r * sin(phis(i)) * cos(thetas(j))) + center(1);
y = (r * sin(phis(i)) * sin(thetas(j))) + center(2);
z = (r * cos(phis(i))) + center(3);
points = [points; x, y, z];
% removes duplicate points at the top and bottom of sphere
if(phis(i) == 0 || phis(i) == pi)
break
end
end
end
end
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
SBPC.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/GJK/functions/SBPC.m
| 5,030 |
utf_8
|
949b15cab9a8feb3e5d93327897e1e38
|
% prediction algorithm
% state - [x, y, z, px, py, pz, pxdot, pydot, pzdot]
function input = SBPC(state,target,QR,PR,TDIS,PDIS,N,K,TIMESTEP,VELOCITY)
% number of iterations to allow for collision detection.
iterationsAllowed = 6;
% get possible velocities
S = length(VELOCITY);
%% projectile set based prediction
% get initial coordinates of projectile
projcntr = state(4:6);
velcntr = state(7:9);
% create point cloud from initial condition
s_0 = CreateSphere(projcntr, PR, TDIS, PDIS); % position
v_0 = CreateSphere(velcntr, PR, TDIS, PDIS); % velocity
% middle point with simulink
simLength = double(N*TIMESTEP);
projtraj = ProjectilePredict(state(4:9), simLength);
% set points
[m,n] = size(s_0);
for i = 1:m
% create initial condition for point in set
setState = [s_0(i,:), v_0(i,:)];
% predict trajectory
projtraj(:,:,i+1) = ProjectilePredict(setState, simLength);
end
%% quadrotor set based prediction
% get initial coordinates of quad
quadcntr = state(1:3);
% create point cloud from initial condition
s_1 = CreateSphere(quadcntr, QR, TDIS, PDIS);
[m,n] = size(s_1);
% N+1 because of initial condition
% K-1 because 0 and 2pi given equivalent trajectories
quadtraj = ones(N+1,3,S,K-1);
theta = linspace(0, 2*pi, K);
j = linspace(0,N,N+1);
for i = 1:m+1
for k = 1:K-1
% first is middle point then the set
if(i == 1)
% integrator dynamics
x = quadcntr(1)+transpose(j)*TIMESTEP*VELOCITY*cos(theta(k));
y = quadcntr(2)+transpose(j)*TIMESTEP*VELOCITY*sin(theta(k));
z = quadcntr(3)*ones(N+1,S);
quadtraj(:,1,:,k) = x;
quadtraj(:,2,:,k) = y;
quadtraj(:,3,:,k) = z;
else
% integrator dynamics
x = s_1(i-1,1)+transpose(j)*TIMESTEP*VELOCITY*cos(theta(k));
y = s_1(i-1,2)+transpose(j)*TIMESTEP*VELOCITY*sin(theta(k));
z = s_1(i-1,3)*ones(N+1,S);
quadtraj(:,1,:,k) = x;
quadtraj(:,2,:,k) = y;
quadtraj(:,3,:,k) = z;
end
end
setquadtraj(:,:,:,:,i) = quadtraj;
end
%% collision detection
safeTraj = ones(S,K-1);
for i = 1:N-1
% put data in right form for making convex hull
for j = 1:m+1
projset1(j,:) = projtraj(i,:,j);
projset2(j,:) = projtraj(i+1,:,j);
end
% create intersample convex hull for projectile
projintersampleSet = [projset1; projset2];
projectileConvexHull = MakeObj(projintersampleSet, 'red');
% check which trajectories are safe
for k = 1:K-1
for s = 1:S
% only continue to calculate for the trajectory if no previous
% collision - saves computation time
if(safeTraj(s,k))
% put data in right form for making convex hull
for j = 1:m
quadset1(j,:) = setquadtraj(i,:,s,k,j);
quadset2(j,:) = setquadtraj(i+1,:,s,k,j);
end
% create intersample convex hull for trajectory k for quadrotor
quadIntersampleSet = [quadset1; quadset2];
quadrotorConvexHull = MakeObj(quadIntersampleSet, 'green');
% run collision detection algorithm
collisionFlag = GJK(projectileConvexHull, quadrotorConvexHull, iterationsAllowed);
if(collisionFlag)
fprintf('Collision in trajectory %d at speed %d\n\r', k,VELOCITY(s));
safeTraj(s,k) = 0;
end
end
end
end
end
%% soft constraint - optimization
trajCost = zeros(S,K-1);
% check each trajectory
for k = 1:K-1
for s = 1:S
% calculate cost of trajectory
% cost is inf if the trajectory results in collision
if(safeTraj(s,k))
trajCost(s,k) = CostSum(setquadtraj(:,:,s,k,1), target, N);
else
trajCost(s,k) = inf;
end
end
end
% find minimum cost and return first coordinate in that trajectory
[minCostList, minCostIndexList] = min(trajCost);
[minCost, minCostKIndex] = min(minCostList);
minCostSIndex = minCostIndexList(minCostKIndex);
u_opt = setquadtraj(2,:,minCostSIndex,minCostKIndex,1);
input = u_opt;
xlabel('x axis');
ylabel('y axis');
zlabel('z axis');
grid on
end
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
MakeObj.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/GJK/functions/MakeObj.m
| 613 |
utf_8
|
9842e71e7c6229282ca128ecd7b965bf
|
% returns convex hull from point cloud
function obj = MakeObj(points, color)
%figure()
% create face representation and create convex hull
F = convhull(points(:,1), points(:,2), points(:,3));
S.Vertices = points;
S.Faces = F;
S.FaceVertexCData = jet(size(points,1));
S.FaceColor = 'interp';
if(strcmp(color,'red'))
obj = patch('Faces',S.Faces,'Vertices',S.Vertices,'FaceColor','red');
elseif(strcmp(color,'green'))
obj = patch('Faces',S.Faces,'Vertices',S.Vertices,'FaceColor','green');
else
obj = patch(S);
end
end
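% Example (illustrative sketch; the point cloud is random):
%   pts = rand(20,3);            % 20 points as rows
%   h = MakeObj(pts, 'green');   % patch handle for the convex hull, drawn in green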
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
SimulationMakeObj.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/GJK/functions/SimulationMakeObj.m
| 367 |
utf_8
|
d4f1152a27a0887bcaaf5135543da40a
|
% returns convex hull from point cloud
function obj = SimulationMakeObj(points)
%figure()
% create face representation and create convex hull
F = convhull(points(:,1), points(:,2), points(:,3));
S.Vertices = points;
S.Faces = F;
S.FaceVertexCData = jet(size(points,1));
S.FaceColor = 'interp';
obj = patch(S,'visible','off');
end
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
SimulationSBPC.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/GJK/functions/SimulationSBPC.m
| 5,434 |
utf_8
|
dd7ff212697c5fe6aad585ea6b6e6848
|
% prediction algorithm
% state - [x, y, z, px, py, pz, pxdot, pydot, pzdot]
function input = SimulationSBPC(state,target,QR,PR,TDIS,PDIS,N,K,TIMESTEP,VELOCITY)
% number of iterations to allow for collision detection.
iterationsAllowed = 6;
% get possible velocities
S = length(VELOCITY);
%% projectile set based prediction
% get initial coordinates of projectile
projcntr = state(4:6);
velcntr = state(7:9);
% create point cloud from initial condition
s_0 = CreateSphere(projcntr, PR, TDIS, PDIS); % position
v_0 = CreateSphere(velcntr, PR, TDIS, PDIS); % velocity
% middle point with simulink
simLength = double(N*TIMESTEP);
[projtraj, projvel] = SimulationProjectilePredict(state(4:9), simLength);
% set points
[m,n] = size(s_0);
for i = 1:m
% create initial condition for point in set
setState = [s_0(i,:), v_0(i,:)];
% predict trajectory
[projtraj(:,:,i+1), projvel(:,:,i+1)] = SimulationProjectilePredict(setState, simLength);
end
%% quadrotor set based prediction
% get initial coordinates of quad
quadcntr = state(1:3);
% create point cloud from initial condition
s_1 = CreateSphere(quadcntr, QR, TDIS, PDIS);
[m,n] = size(s_1);
% N+1 because of initial condition
% K-1 because 0 and 2pi given equivalent trajectories
quadtraj = ones(N+1,3,S,K-1);
theta = linspace(0, 2*pi, K);
j = linspace(0,N,N+1);
for i = 1:m+1
for k = 1:K-1
% first is middle point then the set
if(i == 1)
% integrator dynamics
x = quadcntr(1)+transpose(j)*TIMESTEP*VELOCITY*cos(theta(k));
y = quadcntr(2)+transpose(j)*TIMESTEP*VELOCITY*sin(theta(k));
z = quadcntr(3)*ones(N+1,S);
quadtraj(:,1,:,k) = x;
quadtraj(:,2,:,k) = y;
quadtraj(:,3,:,k) = z;
else
% integrator dynamics
x = s_1(i-1,1)+transpose(j)*TIMESTEP*VELOCITY*cos(theta(k));
y = s_1(i-1,2)+transpose(j)*TIMESTEP*VELOCITY*sin(theta(k));
z = s_1(i-1,3)*ones(N+1,S);
quadtraj(:,1,:,k) = x;
quadtraj(:,2,:,k) = y;
quadtraj(:,3,:,k) = z;
end
end
setquadtraj(:,:,:,:,i) = quadtraj;
end
%% collision detection
safeTraj = ones(S,K-1);
for i = 1:N-1
% put data in right form for making convex hull
for j = 1:m+1
projset1(j,:) = projtraj(i,:,j);
projset2(j,:) = projtraj(i+1,:,j);
end
% create intersample convex hull for projectile
projintersampleSet = [projset1; projset2];
projectileConvexHull = SimulationMakeObj(projintersampleSet);
% check which trajectories are safe
for k = 1:K-1
for s = 1:S
% only continue to calculate for the trajectory if no previous
% collision - saves computation time
if(safeTraj(s,k))
% put data in right form for making convex hull
for j = 1:m
quadset1(j,:) = setquadtraj(i,:,s,k,j);
quadset2(j,:) = setquadtraj(i+1,:,s,k,j);
end
% create intersample convex hull for trajectory k for quadrotor
quadIntersampleSet = [quadset1; quadset2];
quadrotorConvexHull = SimulationMakeObj(quadIntersampleSet);
% run collision detection algorithm
collisionFlag = GJK(projectileConvexHull, quadrotorConvexHull, iterationsAllowed);
if(collisionFlag)
fprintf('Collision in trajectory %d at speed %d\n\r', k,VELOCITY(s));
safeTraj(s,k) = 0;
end
end
end
end
end
%% soft constraint - optimization
trajCost = zeros(S,K-1);
% check each trajectory
for k = 1:K-1
for s = 1:S
% calculate cost of trajectory
% cost is inf if the trajectory results in collision
if(safeTraj(s,k))
trajCost(s,k) = CostSum(setquadtraj(:,:,s,k,1), target, N);
else
trajCost(s,k) = inf;
end
end
end
% throw error if all trajectories have infinite cost (every candidate
% trajectory results in a collision)
if(all(isinf(trajCost(:))))
msg = 'No collision free trajectories available';
error(msg)
end
% find minimum cost and return first coordinate in that trajectory
[minCostList, minCostIndexList] = min(trajCost);
[minCost, minCostKIndex] = min(minCostList);
minCostSIndex = minCostIndexList(minCostKIndex);
u_opt = setquadtraj(2,:,minCostSIndex,minCostKIndex,1);
input = [u_opt,projtraj(2,:,1), projvel(2,:,1)];
xlabel('x axis');
ylabel('y axis');
zlabel('z axis');
grid on
end
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
CostSum.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/GJK/functions/CostSum.m
| 313 |
utf_8
|
49502b1d0e6bda46cdf68a30ef0c6178
|
% calculates total cost of a trajectory
function totalCost = CostSum(trajectory, target, N)
totalCost = 0;
% sum distances between each point in trajectory and target
for i = 1:N
cost = pdist([trajectory(i,:); target], 'euclidean');
totalCost = totalCost + cost;
end
end
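% Example (illustrative sketch; trajectory and target values are assumed):
%   traj = [0 0 0; 1 0 0; 2 0 0];    % points as rows
%   c = CostSum(traj, [3 0 0], 3)    % 3 + 2 + 1 = 6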
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
Cost.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/OptimizationNonlinear/system/Cost.m
| 310 |
utf_8
|
51f685855353dc4fd9e48dfe8b08777d
|
% function c = Cost(x0, u, ts, target)
%
% custom cost function - distance to target squared
function c = Cost(x0, u, ts, target)
% predict system state
x = SingleIntegrator(x0,u,ts);
% calculate cost of prediction
[m,n] = size(x);
c = norm(x-target*ones(1,n))^2;
end
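% Example (illustrative sketch; assumes the 3-D SingleIntegrator in this
% folder is on the path, and the values below are made up):
%   u = zeros(3,4);                        % stay put for 4 steps
%   c = Cost([1;1;0], u, 0.1, [0;0;0])     % equals 10 here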
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
FindOptimalInput.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/OptimizationNonlinear/system/FindOptimalInput.m
| 592 |
utf_8
|
618a4324d35b4942fef032b67ed76eb4
|
% function u0 = FindOptimalInput(x0, N, ts, target, xObst, threshold)
%
% uses fmincon to minimize cost function given system dynamics and
% nonlinear constraints
function u0 = FindOptimalInput(x0, N, ts, target, xObst, threshold)
A = [];
b = [];
Aeq = [];
beq = [];
% set lower and upper bounds on inputs to integrator
lb = -1*ones(3,N);
ub = ones(3,N);
uInit = ones(3,N);
% solve optimization
uopt = fmincon(@(u) Cost(x0,u,ts,target),uInit,A,b,Aeq,beq,lb,ub, @(u) ObstConstraint(x0,u,ts,xObst,threshold));
% return first input
u0 = uopt(:,1);
end
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
ObstConstraint.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/OptimizationNonlinear/system/ObstConstraint.m
| 537 |
utf_8
|
5da3f5721088cc5ffbd7bbdd7ef040a2
|
% [c,ceq] = ObstConstraint(x0, u, ts, xObst, threshold)
%
% defines the nonlinear constraint - maintain a distance above the
% threshold from the obstacle position
function [c,ceq] = ObstConstraint(x0, u, ts, xObst, threshold)
% predict agent
x = SingleIntegrator(x0, u, ts);
% calculate distance between agent and obstacle
[m,n] = size(x);
ObstDist = zeros(1,n);
for i = 1:n
ObstDist(i) = norm(x(:,i)-xObst(:,i));
end
% define constraints
c = -ObstDist+threshold;
ceq = [];
end
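% Example (illustrative sketch; the obstacle is held fixed at the origin and
% all values are assumed):
%   u     = repmat([1;0;0], 1, 3);     % drive away from the obstacle
%   xObst = zeros(3,4);                % one obstacle position per predicted state
%   [c, ceq] = ObstConstraint([1;0;0], u, 0.1, xObst, 0.5);   % c < 0: constraint satisfied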
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
SingleIntegrator.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/OptimizationNonlinear/system/SingleIntegrator.m
| 293 |
utf_8
|
7d9d571566f2467b1d62668899c1f5d6
|
% function x = SingleIntegrator(x0, u, ts)
%
% dynamics of single integrator
function x = SingleIntegrator(x0, u, ts)
[m,n] = size(u);
x = zeros(3,n+1);
x(:,1) = x0;
% apply integrator dynamics
for i = 1:n
x(:,i+1) = x(:,i) + ts*u(:,i);
end
end
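% Example (illustrative sketch; values are assumed):
%   u = repmat([1;0;0], 1, 4);               % 1 m/s along x for 4 steps
%   x = SingleIntegrator([0;0;0], u, 0.5)    % 3 x 5 trajectory ending at [2;0;0]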
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
SimulationProjectilePredict.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/OptimizationNonlinear/projectile/SimulationProjectilePredict.m
| 764 |
utf_8
|
8ea559140e8b2248afec5e16b1990cb5
|
% [trajectory, velocity] = SimulationProjectilePredict(p_0, simTime)
%
% calls on simulink to predict projectile state given initial conditions
function [trajectory, velocity] = SimulationProjectilePredict(p_0, simTime)
% set up simulink
set_param('projectile/rx','Value',num2str(p_0(1)));
set_param('projectile/ry','Value',num2str(p_0(2)));
set_param('projectile/rz','Value',num2str(p_0(3)));
set_param('projectile/vx','Value',num2str(p_0(4)));
set_param('projectile/vy','Value',num2str(p_0(5)));
set_param('projectile/vz','Value',num2str(p_0(6)));
set_param('projectile', 'StopTime', num2str(simTime));
% run simulation
sim('projectile');
trajectory = projectilePos;
velocity = projectileVel;
end
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
PolytopeMinDist.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/OptimizationNonlinear/polytope/PolytopeMinDist.m
| 863 |
utf_8
|
072ed9efcd311aebe3e16b4c074dbc1d
|
% minDist = PolytopeMinDist(X1,X2)
%
% finds the minimum distance between two polytopes X1 and X2
function minDist = PolytopeMinDist(X1,X2)
% declare constraints for fmincon
lb = [];
ub = [];
% get sizes of vertices for polytopes
[m1,n1] = size(X1);
[m2,n2] = size(X2);
if(m1 ~= m2)
error('Incorrect Dimensions');
end
n = n1+n2;
A = [eye(n); -eye(n)];
b = [ones(n,1); zeros(n,1)];
Aeq = [ones(1,n1) zeros(1,n2);
zeros(1,n1) ones(1,n2)];
beq = [1;1];
% create lambda vectors
x0 = zeros(n,1);
x0(1) = 1;
x0(n1+1) = 1;
fun = @(lambda)(norm((X1 * lambda(1:n1))-(X2 * lambda(n1+1:n)))^2);
% evaluate fmincon
x = fmincon(fun,x0,A,b,Aeq,beq,lb,ub);
% return min distance
minDist = sqrt(fun(x));
end
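% Example (illustrative sketch): minimum distance between the unit square
% and a copy shifted by 3 along x (both in the z = 0 plane).
%   X1 = [0 1 1 0; 0 0 1 1; 0 0 0 0];        % vertices as columns
%   X2 = X1 + repmat([3;0;0], 1, 4);
%   d = PolytopeMinDist(X1, X2)              % approximately 2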
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
MakeObj.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/SetBasedOptimizationNonlinear/plotting/MakeObj.m
| 624 |
utf_8
|
21b5894435118a86bc40d17143893710
|
% returns convex hull from point cloud
function obj = MakeObj(points, color)
%figure()
% create face representation and create convex hull
F = convhull(points(1,:), points(2,:), points(3,:));
S.Vertices = transpose(points);
S.Faces = F;
S.FaceVertexCData = jet(size(points,2)); % one color per vertex (points are stored as columns here)
S.FaceColor = 'interp';
if(strcmp(color,'red'))
obj = patch('Faces',S.Faces,'Vertices',S.Vertices,'FaceColor','red');
elseif(strcmp(color,'green'))
obj = patch('Faces',S.Faces,'Vertices',S.Vertices,'FaceColor','green');
else
obj = patch(S);
end
end
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
PlotSetBasedSim.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/SetBasedOptimizationNonlinear/plotting/PlotSetBasedSim.m
| 1,233 |
utf_8
|
b30a722967dfb8f695b2211db0d86669
|
% PlotSetBasedSim(agentPos, obst, threshold)
%
% plots associated sets with the simulation
function PlotSetBasedSim(agentPos, obst, threshold, target)
figure()
hold on
% mA - coordinate (usually size 3)
% nA - number of points in each set
% pA - time step, equal to iterations in simulation
[mA,nA,pA] = size(agentPos);
% create objects/convex hulls for each set at each time step
for i = 1:pA
% format points for MakeObj function
curSet = zeros(mA,nA);
for j = 1:nA
curSet(:,j) = agentPos(:,j,i);
end
% make the object
MakeObj(curSet, 'green');
end
% plot obstacle with threshold
for i = 1:pA
% create sphere for obstacle (threshold and obstacle location)
[x,y,z] = sphere;
x = threshold*x+obst(1,i);
y = threshold*y+obst(2,i);
z = threshold*z+obst(3,i);
[mS,nS] = size(z);
% plot obst
C = zeros(mS,nS,3);
C(:,:,1) = C(:,:,1) + 1;
s = surf(x,y,z,C);
end
scatter3(obst(1,1), obst(2,1), obst(3,1), '*')
scatter3(target(1), target(2), target(3), '*')
grid on
end
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
Cost.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/SetBasedOptimizationNonlinear/system/Cost.m
| 804 |
utf_8
|
12eaf7fb318a8f3e60546adda1dbf9e0
|
% c = Cost(x0_set, u, ts, target)
%
% custom cost function - sum of squared distances from each vertex to the target
function c = Cost(x0_set, u, ts, target)
% predict system state with set based dynamics
x_set = SingleIntegrator(x0_set,u,ts);
% calculate cost of prediction for set based dynamics
% - iterate through each vertex at each time step and calculate the
% distance between that point and the target
[m,n,p] = size(x_set);
c = 0;
for i = 1:p
for j = 1:n
c = c + norm(x_set(:,j,i)-target)^2;
end
end
%{
c = 0;
for i = 1:n
polySet = zeros(3,p);
for j = 1:p
polySet(:,j) = x_set(:,i,j);
end
c = c + PolytopeMinDist(polySet, target);
end
%}
end
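% Example (illustrative sketch; assumes the set-based SingleIntegrator in
% this folder is on the path, and the values below are made up):
%   x0_set = [zeros(3,1), [0.1;0;0]];        % two vertices of the initial set
%   u      = repmat([0.1;0;0], 1, 3);        % constant input over 3 steps
%   c = Cost(x0_set, u, 0.1, [1;0;0]);       % sum of squared vertex-to-target distances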
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
FindOptimalInput.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/SetBasedOptimizationNonlinear/system/FindOptimalInput.m
| 790 |
utf_8
|
c5a509e96bfdd548f829af064cad15c3
|
% function u0 = FindOptimalInput(x0_set, N, ts, target, xObst, threshold)
%
% uses fmincon to minimize cost function given system dynamics and
% nonlinear constraints, returns optimal input sequence
function u0 = FindOptimalInput(x0_set, N, ts, target, xObst, threshold)
A = [];
b = [];
Aeq = [];
beq = [];
% set lower and upper bounds on inputs to integrator
bound = 0.1;
lb = -bound*ones(3,N);
ub = bound*ones(3,N);
uInit = zeros(3,N);
% solve optimization
options = optimoptions('fmincon','Display','notify-detailed','algorithm','sqp','MaxFunEvals',1000);
uopt = fmincon(@(u) Cost(x0_set,u,ts,target),uInit,A,b,Aeq,beq,lb,ub, @(u) ObstConstraint(x0_set,u,ts,xObst,threshold),options);
% return optimal input sequence
u0 = uopt;
end
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
ObstConstraint.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/SetBasedOptimizationNonlinear/system/ObstConstraint.m
| 945 |
utf_8
|
6c6d8b6f1fcfb85521cf4e989817ad15
|
% [c,ceq] = ObstConstraint(x0_set, u, ts, xObst, threshold)
%
% defines the nonlinear constraint - the agent polytope must maintain
% a distance above the threshold from the obstacle position
function [c,ceq] = ObstConstraint(x0_set, u, ts, xObst, threshold)
% predict agent with set based dynamics
x_set = SingleIntegrator(x0_set, u, ts);
% find distance between agent polytope and obstacle
[m,n,p] = size(x_set);
ObstDist = zeros(1,n);
for i = 1:n
% format the set for polytope minimization for time step i
xPolytope = zeros(3,p);
for j = 1:p
xPolytope(:,j) = x_set(:,i,j);
end
% calculate distance between agent and obstacle
% xPolytope is a 3xp matrix
% xObst(:,1:2) is a 3x2 matrix (two obstacle points, since the distance routine needs more than one column)
ObstDist(i) = PolytopeMinDist(xPolytope,xObst(:,1:2));
end
% define constraints
c = -ObstDist+threshold;
ceq = [];
end
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
SingleIntegrator.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/SetBasedOptimizationNonlinear/system/SingleIntegrator.m
| 379 |
utf_8
|
9f0d16760b6c1ffa95bb300f5b505a9f
|
% x = SingleIntegrator(x0_set, u, ts)
%
% set based dynamics of single integrator
function x = SingleIntegrator(x0_set, u, ts)
[mP,nP] = size(x0_set);
[mH,nH] = size(u);
x = zeros(3,nH+1,nP);
x(:,1,:) = x0_set;
% apply integrator dynamics
for j = 1:nP
for i = 1:nH
x(:,i+1,j) = x(:,i,j) + ts*u(:,i);
end
end
end
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
SimulationProjectilePredict.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/SetBasedOptimizationNonlinear/projectile/SimulationProjectilePredict.m
| 764 |
utf_8
|
8ea559140e8b2248afec5e16b1990cb5
|
% [trajectory, velocity] = SimulationProjectilePredict(p_0, simTime)
%
% calls on simulink to predict projectile state given initial conditions
function [trajectory, velocity] = SimulationProjectilePredict(p_0, simTime)
% set up simulink
set_param('projectile/rx','Value',num2str(p_0(1)));
set_param('projectile/ry','Value',num2str(p_0(2)));
set_param('projectile/rz','Value',num2str(p_0(3)));
set_param('projectile/vx','Value',num2str(p_0(4)));
set_param('projectile/vy','Value',num2str(p_0(5)));
set_param('projectile/vz','Value',num2str(p_0(6)));
set_param('projectile', 'StopTime', num2str(simTime));
% run simulation
sim('projectile');
trajectory = projectilePos;
velocity = projectileVel;
end
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
CreateSphere.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/SetBasedOptimizationNonlinear/polytope/CreateSphere.m
| 913 |
utf_8
|
e7fc2c7730cbb8e66f70c29e702d9c20
|
% creates a point cloud in a sphere around the center
function points = CreateSphere(center, r, thetadis, phidis)
% angle discretization
thetas = linspace(0,2*pi,thetadis);
phis = linspace(0,pi,phidis);
% point calculation
points = [];
x = [];
y = [];
z = [];
for i = 1:length(phis)
for j = 1:length(thetas)
% removes duplicate point at theta = 2*pi
if(thetas(j) == 2*pi)
break
end
x = (r * sin(phis(i)) * cos(thetas(j))) + center(1);
y = (r * sin(phis(i)) * sin(thetas(j))) + center(2);
z = (r * cos(phis(i))) + center(3);
nextPoint = [x;y;z];
points = [points, nextPoint];
% removes duplicate points at the top and bottom of sphere
if(phis(i) == 0 || phis(i) == pi)
break
end
end
end
end
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
PolytopeMinDist.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/SetBasedOptimizationNonlinear/polytope/PolytopeMinDist.m
| 960 |
utf_8
|
20c5308cdee25a3c8505d60826371706
|
% minDist = PolytopeMinDist(X1,X2)
%
% finds the minimum distance between two polytopes X1 and X2
function minDist = PolytopeMinDist(X1,X2)
% declare constraints for fmincon
lb = [];
ub = [];
% get sizes of vertices for polytopes
[m1,n1] = size(X1);
[m2,n2] = size(X2);
if(m1 ~= m2)
error('Incorrect Dimensions');
end
n = n1+n2;
A = [eye(n); -eye(n)];
b = [ones(n,1); zeros(n,1)];
Aeq = [ones(1,n1) zeros(1,n2);
zeros(1,n1) ones(1,n2)];
beq = [1;1];
nonlcon = [];
% create lambda vectors
x0 = zeros(n,1);
x0(1) = 1;
x0(n1+1) = 1;
fun = @(lambda)(norm((X1 * lambda(1:n1))-(X2 * lambda(n1+1:n)))^2);
% evaluate fmincon
options = optimoptions('fmincon','Display','notify-detailed');
x = fmincon(fun,x0,A,b,Aeq,beq,lb,ub,nonlcon,options);
% return min distance
minDist = sqrt(fun(x));
end
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
Cost.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/Optimization/system/Cost.m
| 311 |
utf_8
|
fc4f6c88e62c79980b6e20d8f3cd646a
|
% function c = Cost(x0, u, ts, target)
%
% custom cost function - distance to target squared
function c = Cost(x0, u, ts, target)
% predict system state
x = SingleIntegrator(x0,u,ts);
% calculate cost of prediction
[m,n] = size(x);
c = norm(x-target*ones(1,n))^2;
end
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
FindOptimalInput.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/Optimization/system/FindOptimalInput.m
| 502 |
utf_8
|
411fc54f433b2b1cf39f577e1e47e3cc
|
% function u0 = FindOptimalInput(x0, N, ts, target)
%
% uses fmincon to minimize cost function given system dynamics
function u0 = FindOptimalInput(x0, N, ts, target)
A = [];
b = [];
Aeq = [];
beq = [];
% set lower and upper bounds on inputs to integrator
lb = -1*ones(2,N);
ub = ones(2,N);
u_init = ones(2,N);
% solve optimization
uopt = fmincon(@(u) Cost(x0,u,ts,target),u_init,A,b,Aeq,beq,lb,ub);
% return first input
u0 = uopt(:,1);
end
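% Example (illustrative sketch; assumes the 2-D Cost and SingleIntegrator in
% this folder are on the path, and the values below are made up):
%   u0 = FindOptimalInput([2;2], 5, 0.1, [0;0])   % first optimal input; fmincon steers toward the origin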
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
SingleIntegrator.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/Optimization/system/SingleIntegrator.m
| 293 |
utf_8
|
68f06aba97ead41f96c992494e24fba4
|
% function x = SingleIntegrator(x0, u, ts)
%
% dynamics of single integrator
function x = SingleIntegrator(x0, u, ts)
[m,n] = size(u);
x = zeros(2,n+1);
x(:,1) = x0;
% apply integrator dynamics
for i = 1:n
x(:,i+1) = x(:,i) + ts*u(:,i);
end
end
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
PolytopeMinDist.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/Optimization/polytope/PolytopeMinDist.m
| 863 |
utf_8
|
072ed9efcd311aebe3e16b4c074dbc1d
|
% minDist = PolytopeMinDist(X1,X2)
%
% finds the minimum distance between two polytopes X1 and X2
function minDist = PolytopeMinDist(X1,X2)
% declare constraints for fmincon
lb = [];
ub = [];
% get sizes of vertices for polytopes
[m1,n1] = size(X1);
[m2,n2] = size(X2);
if(m1 ~= m2)
error('Incorrect Dimensions');
end
n = n1+n2;
A = [eye(n); -eye(n)];
b = [ones(n,1); zeros(n,1)];
Aeq = [ones(1,n1) zeros(1,n2);
zeros(1,n1) ones(1,n2)];
beq = [1;1];
% create lambda vectors
x0 = zeros(n,1);
x0(1) = 1;
x0(n1+1) = 1;
fun = @(lambda)(norm((X1 * lambda(1:n1))-(X2 * lambda(n1+1:n)))^2);
% evaluate fmincon
x = fmincon(fun,x0,A,b,Aeq,beq,lb,ub);
% return min distance
minDist = sqrt(fun(x));
end
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
GJK.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/GJK_Distance/GJK.m
| 5,909 |
utf_8
|
acc17476d868c4bb652640495a721180
|
function flag = GJK(shape1,shape2,iterations)
% GJK Gilbert-Johnson-Keerthi Collision detection implementation.
% Returns whether two convex shapes are penetrating or not
% (true/false). Only works for CONVEX shapes.
%
% Inputs:
% shape1:
% must have fields for XData,YData,ZData, which are the x,y,z
% coordinates of the vertices. Can be the same as what comes out of a
% PATCH object. It isn't required that the points form faces like patch
% data. This algorithm will assume the convex hull of the x,y,z points
% given.
%
% shape2:
% Other shape to test collision against. Same info as shape1.
%
% iterations:
% The algorithm tries to construct a tetrahedron encompassing
% the origin. This proves the objects have collided. If we fail within a
% certain number of iterations, we give up and say the objects are not
% penetrating. Low iterations means a higher chance of false-NEGATIVES
% but faster computation. As the objects penetrate more, it takes fewer
% iterations anyway, so low iterations is not a huge disadvantage.
%
% Outputs:
% flag:
% true - objects collided
% false - objects not collided
%
%
% This video helped me a lot when making this: https://mollyrocket.com/849
% Not my video, but very useful.
%
% Matthew Sheen, 2016
%
%Point 1 and 2 selection (line segment)
v = [0.8 0.5 1];
[a,b] = pickLine(v,shape2,shape1);
%Point 3 selection (triangle)
[a,b,c,flag] = pickTriangle(a,b,shape2,shape1,iterations);
%Point 4 selection (tetrahedron)
if flag == 1 %Only bother if we could find a viable triangle.
[a,b,c,d,flag] = pickTetrahedron(a,b,c,shape2,shape1,iterations);
end
end
function [a,b] = pickLine(v,shape1,shape2)
%Construct the first line of the simplex
b = support(shape2,shape1,v);
a = support(shape2,shape1,-v);
end
function [a,b,c,flag] = pickTriangle(a,b,shape1,shape2,IterationAllowed)
flag = 0; %So far, we don't have a successful triangle.
%First try:
ab = b-a;
ao = -a;
v = cross(cross(ab,ao),ab); % v is perpendicular to ab pointing in the general direction of the origin.
c = b;
b = a;
a = support(shape2,shape1,v);
for i = 1:IterationAllowed %iterations to see if we can draw a good triangle.
%Time to check if we got it:
ab = b-a;
ao = -a;
ac = c-a;
%Normal to face of triangle
abc = cross(ab,ac);
%Perpendicular to AB going away from triangle
abp = cross(ab,abc);
%Perpendicular to AC going away from triangle
acp = cross(abc,ac);
%First, make sure our triangle "contains" the origin in a 2d projection
%sense.
%Is origin above (outside) AB?
if dot(abp,ao) > 0
c = b; %Throw away the furthest point and grab a new one in the right direction
b = a;
v = abp; %cross(cross(ab,ao),ab);
%Is origin above (outside) AC?
elseif dot(acp, ao) > 0
b = a;
v = acp; %cross(cross(ac,ao),ac);
else
flag = 1;
break; %We got a good one.
end
a = support(shape2,shape1,v);
end
end
function [a,b,c,d,flag] = pickTetrahedron(a,b,c,shape1,shape2,IterationAllowed)
%Now, if we're here, we have a successful 2D simplex, and we need to check
%if the origin is inside a successful 3D simplex.
%So, is the origin above or below the triangle?
flag = 0;
ab = b-a;
ac = c-a;
%Normal to face of triangle
abc = cross(ab,ac);
ao = -a;
if dot(abc, ao) > 0 %Above
d = c;
c = b;
b = a;
v = abc;
a = support(shape2,shape1,v); %Tetrahedron new point
else %below
d = b;
b = a;
v = -abc;
a = support(shape2,shape1,v); %Tetrahedron new point
end
for i = 1:IterationAllowed %Allow up to IterationAllowed tries to make a good tetrahedron.
%Check the tetrahedron:
ab = b-a;
ao = -a;
ac = c-a;
ad = d-a;
%We KNOW that the origin is not under the base of the tetrahedron based on
%the way we picked a. So we need to check faces ABC, ABD, and ACD.
%Normal to face of triangle
abc = cross(ab,ac);
if dot(abc, ao) > 0 %Above triangle ABC
%No need to change anything, we'll just iterate again with this face as
%default.
else
acd = cross(ac,ad);%Normal to face of triangle
if dot(acd, ao) > 0 %Above triangle ACD
%Make this the new base triangle.
b = c;
c = d;
ab = ac;
ac = ad;
abc = acd;
else
adb = cross(ad,ab);%Normal to face of triangle
if dot(adb, ao) > 0 %Above triangle ADB
%Make this the new base triangle.
c = b;
b = d;
ac = ab;
ab = ad;
abc = adb;
else
flag = 1;
break; %It's inside the tetrahedron.
end
end
end
%try again:
if dot(abc, ao) > 0 %Above
d = c;
c = b;
b = a;
v = abc;
a = support(shape2,shape1,v); %Tetrahedron new point
else %below
d = b;
b = a;
v = -abc;
a = support(shape2,shape1,v); %Tetrahedron new point
end
end
end
function point = getFarthestInDir(shape, v)
%Find the furthest point in a given direction for a shape
XData = get(shape,'XData'); % Making it more compatible with previous MATLAB releases.
YData = get(shape,'YData');
ZData = get(shape,'ZData');
dotted = XData*v(1) + YData*v(2) + ZData*v(3);
[maxInCol,rowIdxSet] = max(dotted);
[maxInRow,colIdx] = max(maxInCol);
rowIdx = rowIdxSet(colIdx);
point = [XData(rowIdx,colIdx), YData(rowIdx,colIdx), ZData(rowIdx,colIdx)];
end
function point = support(shape1,shape2,v)
%Support function to get the Minkowski difference.
point1 = getFarthestInDir(shape1, v);
point2 = getFarthestInDir(shape2, -v);
point = point1 - point2;
end
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
convexhull.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/GJK_Distance/convexhull.m
| 1,701 |
utf_8
|
cb09453005f6a6fce441276524c4e5c7
|
%How many iterations to allow for collision detection.
iterationsAllowed = 6;
% Make a figure
figure(1)
hold on
% constants for set making
cntr_1 = [0.0, 0.0, 0.0];
cntr_2 = [1.0, 0.0, 0.0];
r_1 = 0.5;
r_2 = 0.2;
tdis = 11;
pdis = 6;
% create point cloud
sphere_1 = CreateSphere(cntr_1, r_1, tdis, pdis);
sphere_2 = CreateSphere(cntr_2, r_2, tdis, pdis);
% make individual convex hulls
S1Obj = makeObj(sphere_1);
S2Obj = makeObj(sphere_2);
% Make tube
figure(2)
% create points cloud
sphere_3 = [sphere_1; sphere_2];
% make combined convex hull
S3Obj = makeObj(sphere_3);
% check for collision
flag = GJK(S1Obj, S2Obj, iterationsAllowed)
% returns convex hull from point cloud
function obj = makeObj(points)
% create face representation and create convex hull
F = convhull(points(:,1), points(:,2), points(:,3));
S.Vertices = points;
S.Faces = F;
S.FaceVertexCData = jet(size(points,1));
S.FaceColor = 'interp';
obj = patch(S);
end
% creates a point cloud in a sphere around the center
function points = CreateSphere(center, r, thetadis, phidis)
% angle discretization
thetas = linspace(0,2*pi,thetadis);
phis = linspace(0,pi,phidis);
% point calculation
points = [];
x = [];
y = [];
z = [];
for i = 1:length(phis)
for j = 1:length(thetas)
x = (r * sin(phis(i)) * cos(thetas(j))) + center(1);
y = (r * sin(phis(i)) * sin(thetas(j))) + center(2);
z = (r * cos(phis(i))) + center(3);
points = [points; x, y, z];
% removes duplicate points at the top and bottom of sphere
if(phis(i) == 0 || phis(i) == pi)
break
end
end
end
end
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
CreateSphere.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/GJK_Distance/functions/CreateSphere.m
| 866 |
utf_8
|
3ea485e3c5956a7fef00a0fe4c32bddb
|
% creates a point cloud in a sphere around the center
function points = CreateSphere(center, r, thetadis, phidis)
% angle discretization
thetas = linspace(0,2*pi,thetadis);
phis = linspace(0,pi,phidis);
% point calculation
points = [];
x = [];
y = [];
z = [];
for i = 1:length(phis)
for j = 1:length(thetas)
% removes duplicate point at theta = 2*pi
if(thetas(j) == 2*pi)
break
end
x = (r * sin(phis(i)) * cos(thetas(j))) + center(1);
y = (r * sin(phis(i)) * sin(thetas(j))) + center(2);
z = (r * cos(phis(i))) + center(3);
points = [points; x, y, z];
% removes duplicate points at the top and bottom of sphere
if(phis(i) == 0 || phis(i) == pi)
break
end
end
end
end
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
SBPC.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/GJK_Distance/functions/SBPC.m
| 5,186 |
utf_8
|
9465ecd091b943cce8a5775605bcae44
|
% prediction algorithm
% state - [x, y, z, px, py, pz, pxdot, pydot, pzdot]
function input = SBPC(state,target,sigma,QR,PR,TDIS,PDIS,N,K,TIMESTEP,VELOCITY)
% number of iterations to allow for collision detection.
iterationsAllowed = 6;
% target object
targetSet = CreateSphere(target, 0.001, 5, 5);
targetObj = MakeObj(targetSet, 'none');
% get possible velocities
S = length(VELOCITY);
%% projectile set based prediction
% get initial coordinates of projectile
projcntr = state(4:6);
velcntr = state(7:9);
% create point cloud from initial condition
s_0 = CreateSphere(projcntr, PR, TDIS, PDIS); % position
v_0 = CreateSphere(velcntr, PR, TDIS, PDIS); % velocity
% middle point with simulink
simLength = double(N*TIMESTEP);
projtraj = ProjectilePredict(state(4:9), simLength);
% set points
[m,n] = size(s_0);
for i = 1:m
% create initial condition for point in set
setState = [s_0(i,:), v_0(i,:)];
% predict trajectory
projtraj(:,:,i+1) = ProjectilePredict(setState, simLength);
end
%% quadrotor set based prediction
% get initial coordinates of quad
quadcntr = state(1:3);
% create point cloud from initial condition
s_1 = CreateSphere(quadcntr, QR, TDIS, PDIS);
[m,n] = size(s_1);
% N+1 because of initial condition
% K-1 because 0 and 2pi give equivalent trajectories
quadtraj = ones(N+1,3,S,K-1);
theta = linspace(0, 2*pi, K);
j = linspace(0,N,N+1);
for i = 1:m+1
for k = 1:K-1
% first is middle point then the set
if(i == 1)
% integrator dynamics
x = quadcntr(1)+transpose(j)*TIMESTEP*VELOCITY*cos(theta(k));
y = quadcntr(2)+transpose(j)*TIMESTEP*VELOCITY*sin(theta(k));
z = quadcntr(3)*ones(N+1,S);
quadtraj(:,1,:,k) = x;
quadtraj(:,2,:,k) = y;
quadtraj(:,3,:,k) = z;
else
% integrator dynamics
x = s_1(i-1,1)+transpose(j)*TIMESTEP*VELOCITY*cos(theta(k));
y = s_1(i-1,2)+transpose(j)*TIMESTEP*VELOCITY*sin(theta(k));
z = s_1(i-1,3)*ones(N+1,S);
quadtraj(:,1,:,k) = x;
quadtraj(:,2,:,k) = y;
quadtraj(:,3,:,k) = z;
end
end
setquadtraj(:,:,:,:,i) = quadtraj;
end
%% collision detection and trajectory optimization
safeTraj = ones(S,K-1);
for i = 1:N-1
% put data in right form for making convex hull
for j = 1:m+1
projset1(j,:) = projtraj(i,:,j);
projset2(j,:) = projtraj(i+1,:,j);
end
% create intersample convex hull for projectile
projintersampleSet = [projset1; projset2];
projectileConvexHull(i) = MakeObj(projintersampleSet, 'red');
end
% evaluate each trajectory
trajCost = zeros(S,K-1);
for k = 1:K-1
for s = 1:S
% run collision detection algorithm
for i = 1:N-1
% put data in right form for making convex hull
% all points from successive sets
for j = 1:m
quadset1(j,:) = setquadtraj(i,:,s,k,j);
quadset2(j,:) = setquadtraj(i+1,:,s,k,j);
end
% create intersample convex hull for trajectory k for quadrotor
quadIntersampleSet = [quadset1; quadset2];
quadrotorConvexHull = MakeObj(quadIntersampleSet, 'green');
%collisionFlag = GJK(projectileConvexHull(i), quadrotorConvexHull, iterationsAllowed);
[dist,~,~,~]=GJK_dist(projectileConvexHull(i),quadrotorConvexHull);
if(dist < sigma)
fprintf('Collision in trajectory %d at speed %d at time step %d\n\r', k,VELOCITY(s),i);
safeTraj(s,k) = 0;
trajCost(s,k) = inf;
break;
else
[cost,~,~,~] = GJK_dist(targetObj,quadrotorConvexHull);
collisionFlag = GJK(targetObj,quadrotorConvexHull,iterationsAllowed);
if(collisionFlag)
cost
cost = 0;
error('must increase minimum cost');
end
trajCost(s,k) = trajCost(s,k) + cost;
end
end
end
end
%% find optimal input
% find minimum cost and return first coordinate in that trajectory
[minCostList, minCostIndexList] = min(trajCost);
[minCost, minCostKIndex] = min(minCostList);
minCostSIndex = minCostIndexList(minCostKIndex);
u_opt = setquadtraj(2,:,minCostSIndex,minCostKIndex,1);
input = u_opt;
xlabel('x axis');
ylabel('y axis');
zlabel('z axis');
grid on
end
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
MakeObj.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/GJK_Distance/functions/MakeObj.m
| 613 |
utf_8
|
9842e71e7c6229282ca128ecd7b965bf
|
% returns convex hull from point cloud
function obj = MakeObj(points, color)
%figure()
% create face representation and create convex hull
F = convhull(points(:,1), points(:,2), points(:,3));
S.Vertices = points;
S.Faces = F;
S.FaceVertexCData = jet(size(points,1));
S.FaceColor = 'interp';
if(strcmp(color,'red'))
obj = patch('Faces',S.Faces,'Vertices',S.Vertices,'FaceColor','red');
elseif(strcmp(color,'green'))
obj = patch('Faces',S.Faces,'Vertices',S.Vertices,'FaceColor','green');
else
obj = patch(S);
end
end
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
SimulationMakeObj.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/GJK_Distance/functions/SimulationMakeObj.m
| 367 |
utf_8
|
d4f1152a27a0887bcaaf5135543da40a
|
% returns convex hull from point cloud
function obj = SimulationMakeObj(points)
%figure()
% create face representation and create convex hull
F = convhull(points(:,1), points(:,2), points(:,3));
S.Vertices = points;
S.Faces = F;
S.FaceVertexCData = jet(size(points,1));
S.FaceColor = 'interp';
obj = patch(S,'visible','off');
end
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
SimulationSBPC.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/GJK_Distance/functions/SimulationSBPC.m
| 5,284 |
utf_8
|
60c22e645f032eb5bd2dfa58890c8fa7
|
% prediction algorithm
% state - [x, y, z, px, py, pz, pxdot, pydot, pzdot]
function input = SimulationSBPC(state,target,sigma,QR,PR,TDIS,PDIS,N,K,TIMESTEP,VELOCITY)
% number of iterations to allow for collision detection.
iterationsAllowed = 3;
% target object
targetSet = CreateSphere(target, 0.001, 5, 5);
targetObj = MakeObj(targetSet, 'none');
% get possible velocities
S = length(VELOCITY);
%% projectile set based prediction
% get initial coordinates of projectile
projcntr = state(4:6);
velcntr = state(7:9);
% create point cloud from initial condition
s_0 = CreateSphere(projcntr, PR, TDIS, PDIS); % position
v_0 = CreateSphere(velcntr, PR, TDIS, PDIS); % velocity
% middle point with simulink
simLength = double(N*TIMESTEP);
[projtraj, projvel] = SimulationProjectilePredict(state(4:9), simLength);
% set points
[m,n] = size(s_0);
for i = 1:m
% create initial condition for point in set
setState = [s_0(i,:), v_0(i,:)];
% predict trajectory
[projtraj(:,:,i+1), projvel(:,:,i+1)] = SimulationProjectilePredict(setState, simLength);
end
%% quadrotor set based prediction
% get initial coordinates of quad
quadcntr = state(1:3);
% create point cloud from initial condition
s_1 = CreateSphere(quadcntr, QR, TDIS, PDIS);
[m,n] = size(s_1);
% N+1 because of initial condition
% K-1 because 0 and 2pi give equivalent trajectories
quadtraj = ones(N+1,3,S,K-1);
theta = linspace(0, 2*pi, K);
j = linspace(0,N,N+1);
for i = 1:m+1
for k = 1:K-1
% first is middle point then the set
if(i == 1)
% integrator dynamics
x = quadcntr(1)+transpose(j)*TIMESTEP*VELOCITY*cos(theta(k));
y = quadcntr(2)+transpose(j)*TIMESTEP*VELOCITY*sin(theta(k));
z = quadcntr(3)*ones(N+1,S);
quadtraj(:,1,:,k) = x;
quadtraj(:,2,:,k) = y;
quadtraj(:,3,:,k) = z;
else
% integrator dynamics
x = s_1(i-1,1)+transpose(j)*TIMESTEP*VELOCITY*cos(theta(k));
y = s_1(i-1,2)+transpose(j)*TIMESTEP*VELOCITY*sin(theta(k));
z = s_1(i-1,3)*ones(N+1,S);
quadtraj(:,1,:,k) = x;
quadtraj(:,2,:,k) = y;
quadtraj(:,3,:,k) = z;
end
end
setquadtraj(:,:,:,:,i) = quadtraj;
end
%% collision detection and trajectory optimization
safeTraj = ones(S,K-1);
for i = 1:N-1
% put data in right form for making convex hull
for j = 1:m+1
projset1(j,:) = projtraj(i,:,j);
projset2(j,:) = projtraj(i+1,:,j);
end
% create intersample convex hull for projectile
projintersampleSet = [projset1; projset2];
projectileConvexHull(i) = SimulationMakeObj(projintersampleSet);
end
% evaluate each trajectory
trajCost = zeros(S,K-1);
for k = 1:K-1
for s = 1:S
% run collision detection algorithm
for i = 1:N-1
% put data in right form for making convex hull
% all points from successive sets
for j = 1:m
quadset1(j,:) = setquadtraj(i,:,s,k,j);
quadset2(j,:) = setquadtraj(i+1,:,s,k,j);
end
% create intersample convex hull for trajectory k for quadrotor
quadIntersampleSet = [quadset1; quadset2];
quadrotorConvexHull = SimulationMakeObj(quadIntersampleSet);
%collisionFlag = GJK(projectileConvexHull(i), quadrotorConvexHull, iterationsAllowed);
[dist,~,~,~]=GJK_dist(projectileConvexHull(i),quadrotorConvexHull);
if(dist < sigma)
%fprintf('Collision in trajectory %d at speed %d at time step %d\n\r', k,VELOCITY(s),i);
safeTraj(s,k) = 0;
trajCost(s,k) = inf;
break;
else
[cost,~,~,~] = GJK_dist(targetObj,quadrotorConvexHull);
collisionFlag = GJK(targetObj,quadrotorConvexHull,iterationsAllowed);
if(collisionFlag)
cost
cost = 0;
error('must increase minimum cost');
end
trajCost(s,k) = trajCost(s,k) + cost;
end
end
end
end
%% find optimal input
% find minimum cost and return first coordinate in that trajectory
[minCostList, minCostIndexList] = min(trajCost);
[minCost, minCostKIndex] = min(minCostList);
minCostSIndex = minCostIndexList(minCostKIndex);
u_opt = setquadtraj(2,:,minCostSIndex,minCostKIndex,1);
input = [u_opt,projtraj(2,:,1), projvel(2,:,1)];
xlabel('x axis');
ylabel('y axis');
zlabel('z axis');
grid on
end
|
github
|
HybridSystemsLab/SetBasedPredictionCollisionAndEvasion-master
|
CostSum.m
|
.m
|
SetBasedPredictionCollisionAndEvasion-master/OldSimFiles/MatlabSim/GJK_Distance/functions/CostSum.m
| 313 |
utf_8
|
49502b1d0e6bda46cdf68a30ef0c6178
|
% calculates total cost of a trajectory
function totalCost = CostSum(trajectory, target, N)
totalCost = 0;
% sum distances between each point in trajectory and target
for i = 1:N
cost = pdist([trajectory(i,:); target], 'euclidean');
totalCost = totalCost + cost;
end
end
|