platform | repo_name | name | ext | path | size | source_encoding | md5 | text
---|---|---|---|---|---|---|---|---|
github | atcollab/at-master | mpi_sweep_octave_example.m | .m | at-master/utils/mpi_sweep/mpi_sweep_octave_example.m | 1,866 | utf_8 | e5088c7fd19ff8ffbebb60ba590827e1 |
#!/usr/bin/env octave
function mpi_sweep_octave_example
## Make sure that the AT source files are on the Octave path.
## To do so, go to the atoctave folder and run
## > octave --eval 'bootstrap;savepath'
## This code will run on all MPI nodes.
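## A typical launch (a sketch only: it assumes an mpirun-style MPI launcher
## and the Octave MPI bindings used by mpi_sweep_octave are installed):
## > mpirun -np 4 octave mpi_sweep_octave_example.m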
D1.FamName = 'DR01';
D1.Length = 3;
D1.PassMethod = 'DriftPass';
QF.FamName = 'QF';
QF.Length = 1;
QF.K = 0.2;
QF.PassMethod= 'QuadLinearPass';
D2.FamName = 'DR02';
D2.Length = 3;
D2.PassMethod = 'DriftPass';
QD.FamName = 'QD';
QD.Length = 1;
QD.K = -0.2;
QD.PassMethod= 'QuadLinearPass';
FODOCELL = {D1 QF D2 QD};
THERING = [FODOCELL];
function output=generator()
## Generate input parameter list for worker function.
## It will only be executed on node 0 to save on allocations.
output = {{0.1,-0.1}, {0.2,-0.2}, {0.3,-0.3}};
end
function output=worker(input, info)
## additional arguments may be used
## info.name; # name of the processor
## info.rank; # rank (id) of the node
## info.size; # total number of nodes
THERING{findcells(THERING,'FamName','QF')}.K = input{1,1};
THERING{findcells(THERING,'FamName','QD')}.K = input{1,2};
output = findm44(THERING,0);
end
function collector(input)
## Collect computed data.
## This function will only be executed on node 0.
fid = fopen("output.csv", "w");
fprintf(fid, "Kqf,Kqd");
for i=1:4
for j=1:4
fprintf(fid, ",m%d%d", i, j);
endfor
endfor
fprintf(fid, "\n");
for ind=1:size(input)(2)
params = input{1,ind}{1,1};
output = input{1,ind}{1,2};
fprintf(fid, "%d,%d", params{1,1}, params{1,2});
for i=1:4
for j=1:4
fprintf(fid, ",%d", output(i,j));
endfor
endfor
fprintf(fid, "\n");
endfor
fclose(fid);
end
mpi_sweep_octave(@generator, @worker, @collector);
end
|
github | atcollab/at-master | updateContents.m | .m | at-master/atmat/updateContents.m | 5,941 | utf_8 | 8148907889c137a3c59883ffcfc38592 |
function updateContents(folder)
%UPDATECONTENTS Create a Contents.m file including subdirectories
%
% UPDATECONTENTS scans through the current directory, and
% its subdirectories, and builds a Contents file similar to Matlab's
% report-generated Contents.m files. Any existing Contents.m file will be
% overwritten.
%
% UPDATECONTENTS(FOLDER) scans through the directory FOLDER.
%
% Typing
% help(FOLDER)
% or
% help path/to/folder
%
% will display Contents.m in the Command Window, and display links to the
% help for any functions that are in Matlab's search path.
%
% NB: Do not use Matlab's Contents Report generator to edit the
% Contents.m file. Execute this function to update it.
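%
% Example (a minimal sketch; the folder argument is illustrative):
%   updateContents(pwd)   % rebuild Contents.m for the current folder
%   help(pwd)             % display the generated file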
% Copyright 2016 University of Surrey (Mathworks exchange).
% Modified by L. Nadolski
% apply function in current directory
if nargin<1
folder = cd;
end
% check input is valid
assert(ischar(folder), 'invalidPath', '''directory'' should be a character array (string)')
assert(exist(folder,'dir')==7, 'invalidPath', [folder ' does not exist'])
% check last character in path is not filesep (e.g. '/')
if strcmp(folder(end),filesep)
folder = folder(1:end-1);
end
fIX = strfind(folder,filesep); fIX = fIX(end);
% name of file to create
filename = 'Contents.m';
% Name of the folder
[~,name] = fileparts(folder);
% delete if it already exists
if exist([folder filesep filename],'file')==2
delete([folder filesep filename])
end
% get subfolders
dirs = getContents(folder,'filter','folders','rec',true,'path','full','sort',true);
dirs = [{folder}; dirs];
% get files
files = cell(0,1);
H1_lines = cell(0,1);
for d = 1:length(dirs)
temp = getContents(dirs{d},'filter','*.m','sort',true);
if ~isempty(temp)
temp = temp(cellfun(@(x) isempty(strfind(x,'~')),temp)); % remove temporary files
temp = temp(cellfun(@(x) isempty(strfind(x,'.mex')),temp)); % remove compiled mex files
temp = temp(cellfun(@(x) ~strcmp(x,filename),temp)); % remove Contents.m
H1_lines = [H1_lines; {''}; {''}]; %#ok<AGROW> % insert blank lines where no functions will be
% determine package prefix
pkgprefix = strrep(dirs{d},[filesep '+'],'.');
pkgprefix = strrep(pkgprefix,[filesep '@'],'.');
dots = strfind(pkgprefix,'.');
if ~isempty(dots)
pkgprefix = [pkgprefix(dots(1)+1:end) '.'];
else
pkgprefix = '';
end
for f = 1:length(temp) % read H1 lines
H1_lines = [H1_lines; {get_H1_line([dirs{d} filesep temp{f}])}]; %#ok<AGROW> % add H1 lines
% remove extension from and add package prefix to m-files
[~,fname,ext] = fileparts(temp{f});
if strcmpi(ext,'.m')
temp{f} = [pkgprefix fname];
end
end
files = [files; {''}; {upper(dirs{d}(fIX+1:end))}; temp;]; %#ok<AGROW> % add filenames
end
end
% longest file name (so appropriate space can be added between files and H1 lines)
longest_word = max(cellfun(@length,files(cellfun(@(x) ~isempty(x),H1_lines))));
% write to output
nrows = length(files);
fid = fopen(fullfile(folder,filename), 'w'); % open file for writing in the target folder
fprintf(fid, '%s\n%% See also \n%% \n', ['% ' upper(name)]);
fprintf(fid, '%s\n', ['% Contents file for ' upper(folder(fIX+1:end)) ' and its subfolders.']);
for row=1:nrows
if isempty(H1_lines{row})
fprintf(fid, '%s\n', ['% ' files{row,:}]);
else
rowfilename = files{row,:};
[~,name,ext] = fileparts(rowfilename);
if strcmpi(ext,'.m') % remove extension from m files
rowfilename = name;
end
fprintf(fid, '%s\n',['% ' rowfilename repmat(' ',1,longest_word-length(rowfilename)) ' - ' H1_lines{row,:}]);
end
end
fprintf(fid, '%% \n%% %s on %s at %s.\n', 'This file was generated by updateContents.m',datestr(datenum(now),'dd mmm yyyy'),datestr(datenum(now),'HH:MM:SS'));
fclose(fid);
end
function H1_line = get_H1_line(filename)
%GET_H1_LINE get the H1 line for a file
[~,name,ext] = fileparts(filename);
H1_line = ''; % default output
if strcmp(ext,'.m')
fid = fopen(filename); % open file
tline = fgetl(fid); % read first line
while ischar(tline)
k = strfind(tline,'%'); % find comment
if ~isempty(k) % if it is found
k = k(1);
ispercents = false(size(tline(k:end)));
ispercents(strfind(tline(k:end),'%'))=true;
start = k+find(~(isspace(tline(k:end)) | ispercents),1,'first')-1;
if ~isempty(start)
tline = tline(start:end); % remove leading space/percent
IX = strfind(lower(tline),lower(name));
if ~isempty(IX)
if IX(1)==1
tline = tline(length(name)+1:end); % remove function name
end
tline = strtrim(tline); % remove any leading/trailing space
end
H1_line = tline;
H1_line = strtrim(H1_line);
if ~isempty(H1_line)
if strcmp(H1_line(end),'.') % remove trailing period
H1_line = H1_line(1:end-1);
end
H1_line(1) = upper(H1_line(1)); % capitalize first letter
end
end
tline = -1; % set tline to numeric
else
tline = fgetl(fid); % read next line
end
end
fclose(fid);
end
end
|
github | atcollab/at-master | getContents.m | .m | at-master/atmat/getContents.m | 6,308 | utf_8 | 3b7c2a87d47634bac780fdaf77e0a825 |
function [cont,dirflag] = getContents(directory,varargin)
%GETCONTENTS Get the contents of a specified directory
%
% This function returns the contents of a specified directory.
%
% CONT = IOSR.GENERAL.GETCONTENTS(DIRECTORY) returns the files and
% folders in a directory and returns them to the cell array cont. It
% ignores hidden files and folders (those starting '.'). DIRECTORY must
% be a character array (string).
%
% CONT = IOSR.GENERAL.GETCONTENTS(DIRECTORY,'PARAMETER',VALUE) allows
% search options to be specified. The options include:
% 'rec' {false} | true
% Search recursively within the subfolders of the
% specified directory.
% 'path' {'relative'} | 'full'
% Specifies whether returned paths are full or relative
% to the specified directory.
% 'sort' {false} | true
% Specify whether the output is sorted alphabetically.
% 'filter' {'all'} | 'files' | 'folders' | '*.ext' | str
% This option allows a filter to be specified. 'files'
% returns names of all files in the directory. 'folders'
% returns names of all folders in the directory. '*.ext',
% where 'ext' is a user-specified file extension, returns
% all files with the extension '.ext'. str may be any
% string; only elements that contain str will be returned
% (files or folders). str is case-sensitive.
%
% [CONT,DIRFLAG] = IOSR.GENERAL.GETCONTENTS(...) returns a logical array
% DIRFLAG, the same size as CONT, indicating whether each element is a
% directory.
%
% Examples
%
% Ex. 1
%
% % Return all m-files in the current directory
%
% cont = iosr.general.getContents(cd,'filter','*.m')
%
% Ex. 2
%
% % Return all files in the current directory and its
% % sub-directories
%
% cont = iosr.general.getContents(cd,'rec',true)
%
% Ex. 3
%
% % Return all files in current directory with names
% % containing 'foo'
%
% % may return files and folders:
% [cont,dirflag] = iosr.general.getContents(cd,'filter','foo')
%
% % use dirflag to limit:
% cont = cont(~dirflag);
% Copyright 2016 University of Surrey.
% parse input arguments and arrange call(s) to 'main', which
% does the actual searching of directories
assert(ischar(directory), 'iosr:getContents:invalidDir', 'directory must be a character array')
% Switch trap parses the varargin inputs
% default values
recflag = false;
pathflag = 'relative';
sortflag = false;
str = 'all';
% find values
for i = 1:2:length(varargin)
switch lower(varargin{i})
case 'path'
pathflag=varargin{i+1};
case 'rec'
recflag=varargin{i+1};
case 'sort'
sortflag=varargin{i+1};
case 'filter'
str=varargin{i+1};
otherwise
error('iosr:getContents:unknownOption','Unknown option: %s\n',varargin{i});
end
end
% check input options
assert(ischar(pathflag), 'iosr:getContents:invalidPath', '''path'' option must be a string')
assert(strcmp(pathflag,'relative') | strcmp(pathflag,'full'),...
'iosr:getContents:invalidPath', ...
    '''path'' option must be ''relative'' or ''full''')
assert(islogical(recflag) & numel(recflag)==1, 'iosr:getContents:invalidRec', '''rec'' option must be logical')
assert(islogical(sortflag) & numel(sortflag)==1, 'iosr:getContents:invalidSoftFlag', '''sort'' option must be a logical')
assert(ischar(str), 'iosr:getContents:invalidStr', 'str must be a character array')
% first pass: contents of top-level folder
[cont,dirflag] = main(directory,str);
% do the recursive bit, if recursion is requested
if recflag
dirs = main(directory,'folders');
count = length(dirs);
n = 1;
while n <= count % recursion requested
[cont_temp,dirflag_temp] = main(dirs{n},str); % search them
cont = [cont; cont_temp]; %#ok<AGROW> append search results
dirflag = [dirflag; dirflag_temp]; %#ok<AGROW> append search results
sdirs = main(dirs{n},'folders');
dirs = [dirs; sdirs]; %#ok<AGROW>
count = length(dirs);
n = n+1;
end
end
% remove full path
if strcmp(pathflag,'relative')
if ~strcmp(directory(end),filesep)
directory = [directory filesep];
end
for n = 1:length(cont)
cont{n} = strrep(cont{n}, directory, '');
end
end
% sort output (case insensitive)
if sortflag
[~,IX] = sort(lower(cont));
cont = cont(IX);
dirflag = dirflag(IX);
end
end
function [cont,dirflag] = main(directory,str)
%MAIN get the contents
list = struct2cell(dir(directory));
dirbool = cell2mat(list(cellfun(@islogical,list(:,1)),:)); % return directory flags
list = list(1,:); % keep only file names
X = ~strncmp(list, '.', 1); % remove hidden files (those starting '.')
list = list(X);
list = list(:); % make column vector
dirbool = dirbool(X);
dirbool = dirbool(:); % make column vector
for n = 1:length(list)
list{n} = fullfile(directory,list{n});
end
if nargin > 1
% find filename extensions
exts = cell(size(list));
for n = 1:length(list)
[~,~,exts{n}] = fileparts(list{n});
end
% filter
if strncmp(str,'*.',2) % if extensions are requested
ext = str(2:end);
str = 'ext';
end
switch lower(str)
case 'files'
Y = ~dirbool;
case 'folders'
Y = dirbool;
case 'ext'
Y = strcmp(exts,ext);
case 'all'
Y = true(size(dirbool));
otherwise % use literal search string
Y = ~cellfun(@isempty,strfind(list,str));
end
else
Y = true(size(list));
end
% return search results
cont = list(Y);
dirflag = dirbool(Y);
end
|
github | atcollab/at-master | atsurvey2spos.m | .m | at-master/atmat/pubtools/atsurvey2spos.m | 1,453 | utf_8 | e8ffa61b0458986160f7eafd9cd58d4e |
function [s,distance]=atsurvey2spos(r,xycoord,varargin)
% returns the closest lattice s coordinates to the xycoord points
%
% input:
% r: AT lattice
% xycoord: 2xN vector of [x,y] cartesian coordinates
% 'slices', value: number of slices to split r
% (more slices = more precision, longer computation time)
%
% output:
% s: 1xN vector of s positions in r, closest to xycoord
% distance: 1xN vector of distances of s to xycoord
%
%see also: distance2curve
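%
% Example (illustrative; r is any AT lattice cell array, and the points
% follow the 2xN [x;y] convention described above):
%   xy = [10.0 25.0; 2.5 7.1];
%   [s,distance] = atsurvey2spos(r, xy, 'slices', 1e4);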
% parse inputs
p = inputParser;
defaultslices = 10^5;
addRequired(p,'r',@iscell);
addRequired(p,'xycoord',@isnumeric);
addOptional(p,'slices',defaultslices,@isnumeric);
parse(p,r,xycoord,varargin{:});
r = p.Results.r;
mapxy = p.Results.xycoord;
npts= p.Results.slices;
% split lattice
rs=splitlattice(r,npts);
G=atgeometry(rs,1:length(rs)+1);
% lattice cartesian coordinates
rx=[G.x];
ry=[G.y];
curvexy=[rx;ry]';
[xy,distance,~] = distance2curve(curvexy,mapxy,'linear');
indmin=arrayfun(@(x)find(curvexy(:,1)>x,1,'first'),xy(:,1));
s=findspos(rs,indmin);
end
function rsplit=splitlattice(ring0,npts)
elmlength=findspos(ring0,1+length(ring0))/npts;
r2=cellfun(@(a)splitelem(a,elmlength),ring0,'UniformOutput',false);
rsplit=cat(1,r2{:});
end
function newelems=splitelem(elem,elmlength)
if isfield(elem,'Length') && elem.Length > 0
nslices=ceil(elem.Length/elmlength);
newelems=atdivelem(elem,ones(1,nslices)./nslices);
else
newelems={elem};
end
end
|
github | atcollab/at-master | freqsearch.m | .m | at-master/atmat/pubtools/freqsearch.m | 12,397 | utf_8 | 1143d29bbd7a626f9c9191e6855bc291 |
function varargout = freqsearch(data, varargin)
% =========================================================================
% Find the frequency terms in the data set with the use of filters and FFT
% or a "search" algorithm (slower but more accurate). The function returns
% the number of oscillations per unit time, where the unit time is defined by
% DT. E.g. if DT is in seconds, then freq is the number of oscillations per
% second. If DT is the number of turns, then freq is the number of
% oscillations per turn.
%
% [freq(s) amplitude(s) eigenvector(s) time_vec_used] = ...
% FREQSEARCH(DATA, [FILTER, METHOD, DT, ORDER, RANGE, TOLERANCE, windowfraction])
%
% DATA : input data, can be complex.
% FILTER : 'hanning','none' (default)
% METHOD : 'fft' (default),'search','spectrum'
% DT : timestep between each data point. (default: 1)
%
% The options below are only applicable to the 'search' method.
%
% ORDER (vector) : number of frequency terms to extract, ordered by relative
% strength. (default: [1])
% RANGE : frequency range over which to scan. (default: [0 0.5])
% TOLERANCE : Search until freq_(n) - freq_(n-1) < TOLERANCE (default:
% 1e-10)
% windowfraction : How wide the subsequent search range should be.
% (default: 0.04)
%
% Examples:
% >> [f a] = freqsearch(data,'hanning','search',1,[1 2 3])
%
% 24/01/2006
% Eugene
% v2.0 - fft and "search" methods combined to increase the speed with which
% one can analyse the frequency components.
% - Use of filters, modified hanning type filter only at the moment.
% - Multiple orders for comparisons of multiple resonant frequencies.
% The frequency is ordered by amplitude/strength. The first
% frequency being the dominant one followed by the second strongest.
% 19/08/2010 Eugene: added 'spectrum' option to return a spectrogram. When
% in this mode, frequency and amplitude will be vectors
% that represent the spectrogram. Range will need to be
% specified and has to be a vector at the frequencies of
% interest.
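%
% Example ('spectrum' mode; the values are illustrative):
% >> range = 0:1e-4:0.5;                        % frequencies of interest
% >> spec = freqsearch(data,'hanning','spectrum',1,1,range);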
DEBUG = false;
%======================================================================
% Parse input
% Set some defaults (mainly for the search method)
% Min and Max number of iterations for search method
MAXIT = 23;
% What filter to use
Nparam = 1;
if nargin >= Nparam + 1 & ischar(varargin{Nparam})
switch lower(varargin{Nparam})
case 'hanning'
filter = 'hanning';
case 'none'
filter = 'none';
otherwise
error(sprintf('Unknown filter option %s',varargin{Nparam}));
end
else
filter = 'none';
end
% What method to use
Nparam = Nparam + 1;
if nargin >= Nparam + 1 & ischar(varargin{Nparam})
method = lower(varargin{Nparam});
else
method = 'fft';
end
% Time step between each sample
Nparam = Nparam + 1;
if nargin >= Nparam + 1
dt = varargin{Nparam};
else
dt = 1;
end
% Number of terms to extract
Nparam = Nparam + 1;
if nargin >= Nparam + 1
order = varargin{Nparam};
else
order = 1;
end
% Tune range
Nparam = Nparam + 1;
if nargin >= Nparam + 1
range = varargin{Nparam};
else
range = [0 0.5];
end
Nparam = Nparam + 1;
if nargin >= Nparam + 1
tolerance = varargin{Nparam};
else
tolerance = 1e-10;
end
% Determines how much to zoom in when narrowing the search.
% Depending on the tolerance, the optimal value for the windowfraction
% changes. However 4% seems good enough.
Nparam = Nparam + 1;
if nargin >= Nparam + 1
windowfraction = varargin{Nparam};
else
windowfraction = 0.04;
end
if DEBUG
fprintf('Options selected: filter(%s) method(%s) dt(%11.3e)\n',...
filter, method, dt);
fprintf(' order(%d) range(%f %f) tolerance(%11.3e) windowfraction(%f)\n',...
order(end), range(1), range(2), tolerance, windowfraction);
end
% Finished parsing input
%======================================================================
% Define variables
% Define the time or running parameter against which to calculate the
% freqrange.
neval = length(data);
T2 = dt*(neval-1)/2; %-T/2 --> T/2
t = [-T2:dt:T2]';
eigenvec = zeros(neval,max(order));
orthvec = zeros(neval,max(order));
orthvec_ = zeros(neval,max(order));
a = zeros(1,max(order));
nu = zeros(1,max(order));
% Ensure that data and t are column vectors;
data = reshape(data,neval,1);
datareal = isreal(data);
%======================================================================
% Remove any DC component in the signal
% data = data - 0.5/T2*local_midpointsum(data);
% What filter to apply to the data
if DEBUG; disp('Calculating filter'); end;
usefilter = 0;
switch filter
case 'hanning'
% Window function that increases the height of the fundamental peak to make it
% easier to pick out.
p = 1; % cosine window order
kai = complex( 2^p*(factorial(p))^2*(1+cos(pi*(t/T2))).^p/factorial(2*p) );
usefilter = 1;
end
% Finished applying filter
%======================================================================
% What method to use
if DEBUG; disp('Starting calculation'); end;
switch method
case 'fft'
[nu a] = local_calculate_with_fft(data,dt,range);
order = 1;
case 'search'
for k=1:max(order)
% Start the frequency search using a two step approach, first
% use the FFT to get a coarse measurement of the frequency
% followed by the correlation analysis to get a more accurate
% measure of the dominant frequency component.
% FFT
if usefilter
prelim_freq = local_calculate_with_fft(data.*kai,dt,range);
else
prelim_freq = local_calculate_with_fft(data,dt,range);
end
% Will scan this range of frequencies.
freqrange = local_find_new_range(prelim_freq,range(2),range(1),windowfraction);
% Some initial variables. Start with some guess at the
% frequency, mainly for the first difference calculation.
% This is the power spectrum/frequency scan.
psi = zeros(1,length(freqrange));
freq = zeros(1,MAXIT);
omega_prev = median(freqrange);
difference = 1;
for j=1:MAXIT
% Do the integral that calculates the average <f(t), e^i*freqrange*t>. Not
% including multiplication of some factors like dt, since we only
% need to find where psi is a maximum and extract the corresponding
% freqrange. Vectorising this loop does not help,
% evaluated already.
if usefilter
psi = local_psi_integral(data.*kai,t,freqrange);
else
psi = local_psi_integral(data,t,freqrange);
end
if j == 1 && DEBUG
figure; plot(freqrange,abs(psi));
xlabel('freq / Frequency'); ylabel('Arb. Units');
end
% Calculate the value of freqrange for the maximum psi.
[maxpsi maxind] = max(psi(:));
freq(j) = freqrange(maxind);
difference = abs(freq(j) - omega_prev);
if difference < tolerance
if DEBUG; fprintf('Difference less than specified tolerance. j=%d\n',j); end;
break;
else
omega_prev = freq(j);
end
% Find new range to search, zoom in.
freqrange = local_find_new_range(freq(j),freqrange(end),freqrange(1),windowfraction);
psi = zeros(size(freqrange));
end
if DEBUG; fprintf('FREQ = %20.10e\n',freq(1:j)); end;
% Orthogonal projection to determine the coefficients, using the basis
% function e^(i*2*pi*freq*t).
eigenvec(:,k) = exp(complex(0,2*pi*freq(j).*(t)));
% Orthogonalize
% sumprojections = zeros(neval,1);
% for ii=1:k-1
% sumprojections = sumprojections + dot(eigenvec(:,k),orthvec(:,ii))/dot(orthvec(:,ii),orthvec(:,ii))*orthvec(:,ii);
% end
% orthvec(:,k) = eigenvec(:,k) - sumprojections;
a(k) = ((0.5/T2)*local_midpointsum(data.*conj(eigenvec(:,k))))*dt;
% a(k) = (0.5/T2)*maxpsi;
nu(k) = freq(j);
% Subtract the component from 'f' function.
data = data - a(k)*eigenvec(:,k);
end
case 'spectrum'
% Return the power spectrum
if usefilter
psi = local_psi_integral(data.*kai,t,range);
else
psi = local_psi_integral(data,t,range);
end
if datareal
nu = psi*dt/T2;
else
nu = 0.5*psi*dt/T2;
end
order = 1:length(psi);
otherwise
error('Unknown method option %s',method);
end
varargout{1} = nu(order);
if nargout > 1
if datareal
% With only real data the returned amplitudes should also be real.
% And the factor 2 is needed here but not quite sure why just yet.
varargout{2} = 2*abs(a(order));
else
varargout{2} = a(order);
end
end
if nargout > 2
varargout{3} = eigenvec(:,order);
end
if nargout > 3
varargout{4} = t;
end
% temp = freq(find(freq ~= 0));
% varargout{1} = temp(end)/(2*pi);
% DEBUG
% fprintf('%i %17.15g\n',j, difference);
function fctnsum = local_midpointsum(fctn)
% Vectorise the midpoint method of integrating a numerical function.
% f_n = [fctn 0];
% f_nplus1 = [0 fctn]; % shift all the numbers one "space" to the right.
% midpoints = 0.5*(f_n + f_nplus1);
midpoints = 0.5.*(fctn(1:end-1) + fctn(2:end));
fctnsum = sum(midpoints);
function psi = local_psi_integral(data,t,freqrange)
midpoints = zeros(1,length(t)-1);
omegarange = -freqrange*2*pi;
fctn = zeros(1,length(t));
psi = zeros(1,length(omegarange));
for k=1:length(freqrange)
fctn = data.*exp(complex(0,omegarange(k)).*t);
% fctn = data.*complex(cos(freqrange(k).*t),-sin(freqrange(k).*t));
midpoints = 0.5.*(fctn(1:end-1) + fctn(2:end));
psi(k) = sum(midpoints);
end
function [freq varargout] = local_calculate_with_fft(data,dt,range)
% Find peak with FFT within "range" of frequencies.
% Auto calculate number of points to calculate fft. Use maximum
nn = [4:15];
ind = max(find(2.^nn - length(data) < 0));
Nfft = 2^nn(ind);
% Calculate FFT and the power spectrum
yy = fft(data,Nfft);
Pyy = yy.*conj(yy);
% Corresponding frequency range
f = 1/(dt*Nfft).*(0:Nfft/2);
ii = find(f > range(1) & f < range(2));
% Find peak
[maxval maxind] = max(Pyy(ii));
freq = f(ii(maxind));
if nargout > 1
varargout{1} = abs(yy(ii(maxind)))/(Nfft/2);
end
function freqrange = local_find_new_range(centre,upper,lower,windowfraction)
% Find new range to search, zoom in.
new_width = (upper - lower)*windowfraction;
% minimum frequency separation
min_freq_sep = new_width/500;
if centre == lower
lowerbound = centre - new_width*2/windowfraction;
upperbound = centre;
freqrange = [lowerbound:(upperbound-lowerbound)/100:upperbound];
elseif centre == upper
lowerbound = centre;
upperbound = centre + new_width*2/windowfraction;
freqrange = [lowerbound:(upperbound-lowerbound)/100:upperbound];
else
lowerbound = centre - new_width;
upperbound = centre + new_width;
% num = 15;
% scalefactor = (2*new_width - min_freq_sep*(num+1))/(num+2);
% freqrange = lowerbound + cumsum((1 + cos(0:2*pi/num:2*pi))*scalefactor + min_freq_sep);
scalefactor = (2*new_width - min_freq_sep*16)/17;
freqrange = lowerbound + cumsum((1 + cos(0:2*pi/15:2*pi))*scalefactor + min_freq_sep);
end
% freqrange = freqrange*2*pi;
|
github | atcollab/at-master | calc_TouschekPM.m | .m | at-master/atmat/pubtools/calc_TouschekPM.m | 4,625 | utf_8 | d66ee65b0c9d7c29acb6ab9a729fc914 |
function tauT = calc_TouschekPM(TD,dppPM,Trf,Ib,U0,coupling, sigE, emit_x)
%tauT = calc_TouschekPM(TD,dppPM,Trf,Ib,U0,coupling, sigE, emit_x)
%tauT = calc_TouschekPM(TD,dppPM,alpha,Ib,U0,coupling, sigE, emit_x)
% Ib, mA, single bunch current
% U0, MeV, one-turn energy loss
% emit_x, nm-rad
% coupling, average emit_y/emit_x
% TD, lattice function structure (same as twissring output)
% dppPM, Nx2, positive/negative momentum aperture
% Trf, a structure or a scalar; when a structure, it consists of [Frequency, HarmNumber, Energy, alpha,
% Voltage, circum]; when a scalar it is the alpha of the lattice (the other
% parameters are assumed to take the defaults hard-coded below).
%ex:
% load('dppAP_LAT_DW_withIDs_Mar20_07_normrf.mat')
% [td, tune,chrom] = twissring(THERING,0,indextab, 'chrom', 1e-5);
% dppPM = [deltap' deltam'];
% tauT = calc_TouschekPM(td,dppPM,a1,100/280,1.04,0.064e-2, 0.001, 18)/3600; %hrs
% disp(tauT)
e0 = PhysConstant.elementary_charge.value; %Coulomb
cspeed = PhysConstant.speed_of_light_in_vacuum.value;
r0 = PhysConstant.classical_electron_radius.value; %m
U0=U0*1e6;
emit_x = emit_x*1.0e-9; %convert nm-rad to m-rad
%cavity related parameters
if isstruct(Trf)
freq = Trf.Frequency;
harm = Trf.HarmNumber;
E0 = Trf.Energy;
alpha = Trf.alpha;
Vrf = Trf.Voltage;
circ = Trf.circum;
else
freq = 352.202e6; %Hz
harm = 992;
E0 = 6.04e9; %eV
alpha = Trf;
Vrf = 9e6;
circ = 844.39;
end
gamma = E0/PhysConstant.electron_mass_energy_equivalent_in_MeV.value*1e6;
N0 = 0.001/(freq/harm)/e0; %Number of particles per 1mA bunch.
%bunch length
phi_s = asin(U0/Vrf);
nus = sqrt(harm*Vrf*alpha*cos(phi_s)/2/pi/E0);
sigZ = sigE/nus*harm*alpha/2/pi/freq*cspeed;
%rf bucket height
delta_max_rf = sqrt(2*U0/pi/alpha/harm/E0)*sqrt( sqrt((Vrf/U0).^2-1) - acos(U0./Vrf));
%---------------------------------
%beam size around the ring
%[td, tune,chrom] = twissring(THERING,0,1:length(THERING)+1, 'chrom', 1e-5);
td = TD;
Dx = cat(2, td.Dispersion)';
betxy = cat(1, td.beta);
alfxy = cat(1, td.alpha);
spos = cat(1,td.SPos);
sigX = sqrt(betxy(:,1)*emit_x+Dx(:,1).^2*sigE^2);
sigY = sqrt(betxy(:,2)*emit_x*coupling);
sigXp = sqrt(emit_x*(1+alfxy(:,1).^2)./betxy(:,1)+Dx(:,2).^2*sigE^2);
%--------------------------------
curH = (Dx(:,1).^2 + (betxy(:,1).*Dx(:,2)+alfxy(:,1).*Dx(:,1)).^2)./betxy(:,1);
%delta_max_perp = hori_acceptance./sqrt(curH);
deltap = dppPM(:,1);
deltam = dppPM(:,2);
delta_maxp = min([deltap, ones(size(curH))*delta_max_rf]')';
delta_maxm = min([-deltam, ones(size(curH))*delta_max_rf]')';
xip = (delta_maxp/gamma.*betxy(:,1)./sigX).^2;
xim = (delta_maxm/gamma.*betxy(:,1)./sigX).^2;
Dvalp = funcD(xip);
Dvalm = funcD(xim);
ds = diff(spos);
n=1:length(ds);
avgfacp = sum(Dvalp(n)./sigX(n)./sigY(n)/sigZ./delta_maxp(n).^3.*ds)/circ;
avgfacm = sum(Dvalm(n)./sigX(n)./sigY(n)/sigZ./delta_maxm(n).^3.*ds)/circ;
lossrate = Ib*N0*r0^2*cspeed/8/gamma^2/pi*(avgfacp+avgfacm)/2.;
tauT = 1/lossrate;
if 0
figure
h = plot(spos, delta_maxp, spos, -delta_maxm); %delta_max_rf*ones(size(spos)));
set(gca,'fontsize', 16,'xlim',[0,240])
xlabel('s (m)')
ylabel('\delta_{max}')
grid
set(gca,'ylim',[-0.03,0.03]);
end
function D=funcD(xi)
%a look-up table
DfunTable = [
%xi Dfunc
0.000500 0.123802
0.001000 0.153464
0.001500 0.172578
0.002000 0.186757
0.002500 0.198008
0.003000 0.207298
0.003500 0.215179
0.004000 0.221992
0.004500 0.227968
0.005000 0.233269
0.005500 0.238015
0.006000 0.242294
0.006500 0.246176
0.007000 0.249717
0.007500 0.252961
0.008000 0.255944
0.008500 0.258697
0.009000 0.261244
0.009500 0.263607
0.010000 0.265805
0.010500 0.267852
0.011000 0.269763
0.011500 0.271549
0.012000 0.273221
0.012500 0.274788
0.013000 0.276259
0.013500 0.277640
0.014000 0.278938
0.014500 0.280159
0.015000 0.281308
0.015500 0.282391
0.016000 0.283411
0.016500 0.284372
0.017000 0.285278
0.017500 0.286132
0.018000 0.286938
0.018500 0.287698
0.019000 0.288415
0.019500 0.289090
0.020000 0.289727
0.020500 0.290327
0.021000 0.290893
0.021500 0.291425
0.022000 0.291926
0.022500 0.292397
0.023000 0.292840
0.023500 0.293256
0.024000 0.293646
0.024500 0.294011
0.025000 0.294352 ];
ximin = DfunTable(1,1);
ximax = DfunTable(end,1);
xi(find(xi<ximin)) = ximin;
xi(find(xi>ximax)) = ximax;
D = interp1(DfunTable(:,1), DfunTable(:,2), xi,'linear');
|
github | atcollab/at-master | atundulator.m | .m | at-master/atmat/pubtools/atundulator.m | 3,079 | utf_8 | d56ceb8dae400fe56478e891f72882d5 |
function undulator=atundulator(LUnd,nperiod,varargin)
% define undulator model
%
% input:
% Lund= undulator length
% nperiod = number of periods
% 'BendAngle', value : half pole bending angle in rad
% 'B0andEnergy', value (2x1): [half pole B0 field in T, Energy in eV]
% converts to bending angle in rad.
% 'magnetmodel', value : 'multipoles' (default) or 'rectangularbend'
% 'PoleGap', value : drift space between poles (default: 0.0)
%
% if neither BendAngle nor B0andEnergy are provided then 'BendAngle' is 0.0
%
% output:
% cell array of elements describing an undulator of length LUnd divided
% in nperiod periods, each described as follows:
% [negpole,drift,pospole,pospole,drift,negpole]
% if 'PoleGap' is 0.0 (default), then
% [negpole,pospole,pospole,negpole]
%
% example:
% 1) und=atundulator(1.6,61,'B0andEnergy',[0.4 6.04e9])
% 2) und=atundulator(1.6,61,'BendAngle',-0.007984472464733)
% 3) und=atundulator(1.6,61,'B0andEnergy',[0.4 6.04e9],'magnetmodel','rectangularbend')
% 4) und=atundulator(1.6,61,'B0andEnergy',[0.4 6.04e9],'PoleGap',0.001);
%
%see also:
defaultAngPole=NaN;
defaultB0andEnergy=[NaN NaN];
defaultPoleGap=0;
expectedmagmodels={'multipoles','rectangularbend'};
p= inputParser;
addRequired(p,'LUnd',@isnumeric);
addRequired(p,'nperiod',@isnumeric);
addParameter(p,'magnetmodel',expectedmagmodels{1},@(x)any(validatestring(x,expectedmagmodels)));
addParameter(p,'BendAngle',defaultAngPole,@isnumeric);
addParameter(p,'B0andEnergy',defaultB0andEnergy,@isnumeric);
addParameter(p,'PoleGap',defaultPoleGap,@isnumeric);
parse(p,LUnd,nperiod,varargin{:});
magnetmodel=p.Results.magnetmodel;
BendAngle=p.Results.BendAngle;
B0andEnergy=p.Results.B0andEnergy;
PoleGap=p.Results.PoleGap;
% length of one period
periodL=LUnd/nperiod;
DistPole=PoleGap;
LPole=(periodL-2*DistPole)/4;
if ~isnan(B0andEnergy)
B=B0andEnergy(1);%0.6;
Brho=B0andEnergy(2)/299792458;
AngPole=(B*LPole)/Brho;
elseif ~isnan(BendAngle)
AngPole=BendAngle;
else
AngPole=0.0;
end
switch magnetmodel
case 'multipoles'
undperiod=makeundperiod(...
atmultipole('NegPole',LPole,0,-AngPole/LPole),...
atmultipole('PosPole',LPole,0,AngPole/LPole),...
atdrift('PoleGap',DistPole));
case 'rectangularbend'
undperiod=makeundperiod(...
atrbend('NegPole',LPole,-AngPole,0,'BndMPoleSymplectic4Pass'),...
atrbend('PosPole',LPole,AngPole,0,'BndMPoleSymplectic4Pass'),...
atdrift('PoleGap',DistPole));
end
undulator=repmat(undperiod,nperiod,1);
end
function undper=makeundperiod(halfnegpole,halfpospole,driftpole)
if driftpole.Length>0
undper={...
halfnegpole;...
driftpole;...
halfpospole;...
halfpospole;...
driftpole;...
halfnegpole;...
};
elseif driftpole.Length==0
undper={...
halfnegpole;...
halfpospole;...
halfpospole;...
halfnegpole;...
};
end
end
|
github | atcollab/at-master | atdynap.m | .m | at-master/atmat/pubtools/atdynap.m | 1,887 | utf_8 | 48612afda2e9f15e132e97e7e074d0ff |
function [xx,zz]=atdynap(ring,nt,dpp,rfrac)
%ATDYNAP Compute the dynamic aperture
%
%
%[XX,ZZ]=ATDYNAP(RING,NTURNS,DPP,RFRAC)
%
%XX,ZZ : limit of the dynamic aperture (betatron amplitudes in m)
%RING : Structure for tracking
%NTURNS: Number of turns
%DPP : Off-momentum value (default: 0)
%RFRAC : Resolution of the grid for checking the stability
% as a fraction of the maximum stable amplitude
% (default: 0.02)
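%
%Example (illustrative; RING is a valid AT lattice):
% [xx,zz]=atdynap(RING,500,0.0,0.02);
% plot(xx,zz,'.-');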
np=5;
rlist=0:0.001:0.1;
if nargin < 4, rfrac=0.02; end
if nargin < 3, dpp=0.0; end
if isnumeric(dpp)
clorb=[findorbit4(ring,dpp);dpp;0];
else
clorb=findorbit6(ring);
end
t1=linspace(0,pi,2*np+3);
xpmax=ascan(ring,nt,clorb,0,rlist);
zmax=ascan(ring,nt,clorb,0.5*pi,rlist);
xmmax=ascan(ring,nt,clorb,pi,rlist);
%
% x1=[xpmax*ones(1,np+2) xmmax*ones(1,np+1)];
% z1=zmax*ones(1,2*np+3);
% tlist=atan2(sin(t1).*z1,cos(t1).*x1)';
%
% rr=NaN(2*np+3,1);
% rr(1)=xpmax;
% rr(np+2)=zmax;
% rr(2*np+3)=xmmax;
% for i=[2:np+1 np+3:2*np+2]
% rr(i)=ascan(ring,nt,clorb,tlist(i),rlist);
% end
% xx=rr.*cos(tlist);
% zz=rr.*sin(tlist);
slist=0.5:rfrac:2;
xx=NaN(2*np+3,1);
zz=xx;
for i=1:np+3
[xx(i),zz(i)]=bscan(ring,nt,clorb,...
xpmax*cos(t1(i))*slist,zmax*sin(t1(i))*slist);
end
for i=np+4:2*np+3
[xx(i),zz(i)]=bscan(ring,nt,clorb,...
xmmax*cos(t1(i))*slist,zmax*sin(t1(i))*slist);
end
function rmax=ascan(ring,nt,clorb,theta,rlist)
for rr=rlist
rin=clorb+[rr*cos(theta);0;rr*sin(theta);0;0;0];
[dummy,lost]=ringpass(ring,rin,nt,'KeepLattice'); %#ok<ASGLU>
if lost, break; end
rmax=rr;
end
fprintf('theta: %g, r: %g\n',theta,rmax);
function [xmax,zmax]=bscan(ring,nt,clorb,xlist,zlist)
xmax = 0.0;
zmax = 0.0;
for i=1:length(xlist)
rin=clorb+[xlist(i);0;zlist(i);0;0;0];
[dummy,lost]=ringpass(ring,rin,nt,'KeepLattice'); %#ok<ASGLU>
if lost, break; end
xmax=xlist(i);
zmax=zlist(i);
end
fprintf('xm: %g, zm: %g\n',xmax,zmax);
|
github | atcollab/at-master | calc_Touschek.m | .m | at-master/atmat/pubtools/calc_Touschek.m | 4,772 | utf_8 | 723aa4acb63d395f0cb6103c70bbc674 |
function tauT = calc_Touschek(THERING,Ib,varargin)
%tauT = calc_Touschek(THERING, Ib)
%tauT = calc_Touschek(THERING, Ib,hori_acceptance)
%tauT = calc_Touschek(THERING, Ib,hori_acceptance,U0)
%tauT = calc_Touschek(THERING, Ib,hori_acceptance,U0,coupling)
%tauT = calc_Touschek(THERING, Ib,hori_acceptance,U0,coupling,sigE,emit_x)
% hori_acceptance = Min(X/sqrt(beta)) around the ring
% Ib, mA, single bunch current
% Nb, number of bunches
% U0, MeV, one-turn energy loss
% emit_x, nm-rad
%ex:
%tauT = calc_Touschek(THERING, 100/280, 0.015/sqrt(10.37),1.04,0.064e-2,0.001, 18)/3600
%
alpha=1.7e-4;
hori_acceptance = Inf;
if nargin>=3
hori_acceptance = varargin{1};
end
if nargin<7
atsum = atsummary;
end
if nargin>=4
U0 = varargin{2}*1e6; %eV
else
U0 = atsum.radiation*1e9; %eV
%U0 = 1.04e6; %eV
end
coupling = 0.05*1e-2; %by default
if nargin>=5
coupling = varargin{3};
end
if nargin>=7
sigE = varargin{4};
emit_x = varargin{5}*1e-9; %m-rad
else
sigE = atsum.naturalEnergySpread; %sigma_delta
emit_x = atsum.naturalEmittance;
end
e0 = PhysConstant.elementary_charge.value; %Coulomb
cspeed = PhysConstant.speed_of_light_in_vacuum.value;
r0 = PhysConstant.classical_electron_radius.value; %m
%cavity related parameters
cava = findcells(THERING,'PassMethod','RFCavityPass');
cavb = findcells(THERING,'PassMethod','CavityPass');
CAVINDEX = sort([cava,cavb]); %ati1.RF;
if isempty(CAVINDEX)
error('cavity not defined')
end
freq = THERING{CAVINDEX(1)}.Frequency;
harm = THERING{CAVINDEX(1)}.HarmNumber;
E0 = THERING{CAVINDEX(1)}.Energy;
gamma = THERING{CAVINDEX(1)}.Energy/PhysConstant.electron_mass_energy_equivalent_in_MeV.value*1e6;
Vrf = 0;
for ii=1:length(CAVINDEX)
Vrf = Vrf + THERING{CAVINDEX(ii)}.Voltage;
end
%Vrf = 3.2e6;
Vrf = 1.6e6;
%[alpha,a2] = findmcf3(THERING);
%bunch length
phi_s = asin(U0/Vrf);
nus = sqrt(harm*Vrf*alpha*cos(phi_s)/2/pi/E0);
sigZ = sigE/nus*harm*alpha/2/pi/freq*cspeed;
%rf bucket height
delta_max_rf = sqrt(2*U0/pi/alpha/harm/E0)*sqrt( sqrt((Vrf/U0).^2-1) - acos(U0./Vrf));
%---------------------------------
%beam size around the ring
[td, ~,~] = twissring(THERING,0,1:length(THERING)+1, 'chrom', 1e-5);
Dx = cat(2, td.Dispersion)';
betxy = cat(1, td.beta);
alfxy = cat(1, td.alpha);
spos = cat(1,td.SPos);
circ = spos(end);
sigX = sqrt(betxy(:,1)*emit_x+Dx(:,1).^2*sigE^2);
sigY = sqrt(betxy(:,2)*emit_x*coupling);
%sigXp = sqrt(emit_x*(1+alfxy(:,1).^2)./betxy(:,1)+Dx(:,2).^2*sigE^2);
%--------------------------------
curH = (Dx(:,1).^2 + (betxy(:,1).*Dx(:,2)+alfxy(:,1).*Dx(:,1)).^2)./betxy(:,1);
disp('delta_max_perp data: ');
delta_max_perp = hori_acceptance./sqrt(curH);
disp('delta_max data: ');
delta_max = min([delta_max_perp, ones(size(curH))*delta_max_rf]')';
disp('xi data: ');
xi = (delta_max/gamma.*betxy(:,1)./sigX).^2;
Dval = funcD(xi);
N0 = 0.001/(freq/harm)/e0; %Number of particles per 1mA bunch.
ds = diff(spos);
n=1:length(THERING);
avgfac = sum(Dval(n)./sigX(n)./sigY(n)/sigZ./delta_max(n).^3.*ds)/circ;
lossrate = Ib*N0*r0^2*cspeed/8/gamma^2/pi*avgfac;
tauT = 1/lossrate;
% if 0
% figure
% plot(spos, delta_max, spos, delta_max_rf*ones(size(spos)));
% %set(gca,'fontsize', 16,'xlim',[0,240])
% set(gca,'fontsize', 16,'xlim',[0,120])
% xlabel('s (m)')
% ylabel('\delta_{max}')
% grid
% %set(gca,'ylim',[0,0.04]);
% set(gca,'ylim',[0,0.15]);
%
% end
function D=funcD(xi)
%a look-up table
DfunTable = [
%xi Dfunc
0.000500 0.123802
0.001000 0.153464
0.001500 0.172578
0.002000 0.186757
0.002500 0.198008
0.003000 0.207298
0.003500 0.215179
0.004000 0.221992
0.004500 0.227968
0.005000 0.233269
0.005500 0.238015
0.006000 0.242294
0.006500 0.246176
0.007000 0.249717
0.007500 0.252961
0.008000 0.255944
0.008500 0.258697
0.009000 0.261244
0.009500 0.263607
0.010000 0.265805
0.010500 0.267852
0.011000 0.269763
0.011500 0.271549
0.012000 0.273221
0.012500 0.274788
0.013000 0.276259
0.013500 0.277640
0.014000 0.278938
0.014500 0.280159
0.015000 0.281308
0.015500 0.282391
0.016000 0.283411
0.016500 0.284372
0.017000 0.285278
0.017500 0.286132
0.018000 0.286938
0.018500 0.287698
0.019000 0.288415
0.019500 0.289090
0.020000 0.289727
0.020500 0.290327
0.021000 0.290893
0.021500 0.291425
0.022000 0.291926
0.022500 0.292397
0.023000 0.292840
0.023500 0.293256
0.024000 0.293646
0.024500 0.294011
0.025000 0.294352 ];
ximin = DfunTable(1,1);
ximax = DfunTable(end,1);
xi(find(xi<ximin)) = ximin;
xi(find(xi>ximax)) = ximax;
D = interp1(DfunTable(:,1), DfunTable(:,2), xi,'linear');
|
github | atcollab/at-master | fitgaussian.m | .m | at-master/atmat/pubtools/haissinski/fitgaussian.m | 7,136 | utf_8 | 209df9e5c867353dfd6bfa850f67c4fa |
function varargout = fitgaussian(varargin)
% [GAUSSIAN_PARAM FITERR GAUSSFIT SIGERROR] = FITGAUSSIAN(DATA,[property_value_pair]);
%
% DATA is a 1D vector to which we want to fit a gaussian profile. The
% function will return a structure with various fitted parameters.
% SCALEFACTOR is optional and is applied in the horizontal axis.
%
% Property value pairs:
%
% scale : 1 (default)
% plot : 0 no plots (default), 1 plots fits against data
%
% Initial Fit parameters are fit automatically unless specified.
% integral : estimated sum integral of the function
% mean : estimate of the mean of the gaussian
% sigma : estimated sigma of the gaussian
% DC : known DC component to remove (set to zero to not fit)
% bg_grad : known background gradient to fit (set to zero to not fit)
% Assym : known gaussian asymmetry (set to zero to not fit)
%
% FITERR Final fit error returned by the internal error function
% GAUSSFIT Final fit function
% SIGERROR Error estimate of the sigma fit in relative terms. Multiply by
% 100 to get in percent.
%
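% Example (a minimal sketch on synthetic data; all values are illustrative):
%   x = 1:200;
%   data = 5*exp(-0.5*((x-100)/12).^2) + 0.01*randn(1,200);
%   [p, fiterr, gfit, sigerr] = fitgaussian(data,'plot',1);
%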
% Original script by R. Dowd
% In functional format by E. Tan 31/07/2009
DEBUG = 0;
ASYM_FLAG = 0;
[reg, prop] = parseparams(varargin);
if nargin > 0
data = reg{1};
else
disp('No data to work with');
return
end
data = data(:)';
% defaults
scalefac = 1;
plotfig = 0;
autofitDC = 1;
autofitGrad = 1;
autofitAssym = 1;
userx = [];
for i=1:length(prop)/2
ind = (i-1)*2+1;
switch lower(prop{ind})
case 'scale'
scalefac = prop{ind+1};
case 'plot'
plotfig = prop{ind+1};
case 'integral'
guess_area = prop{ind+1};
case 'mean'
guess_mu = prop{ind+1};
case 'sigma'
guess_sigma = prop{ind+1};
case 'dc'
DCfit = prop{ind+1};
autofitDC = 0;
case 'bg_grad'
linefit = prop{ind+1};
autofitGrad = 0;
case 'assym'
Assym = prop{ind+1};
autofitAssym = 0;
case 'x'
userx = prop{ind+1};
end
end
% percentage of data at the start and end of the data set assumed to be
% representative of the background noise.
pcbackground = 0.1;
datasize = length(data);
if isempty(userx)
x=[1:datasize]*scalefac;
else
x = userx;
end
% Fit a gaussian; first find some starting parameters
% Used to guess the DC component. Assume flat for the first 10% of data
% points.
startDC = mean(data(1:fix(datasize*pcbackground)));
endDC = mean(data(end-fix(datasize*pcbackground):end));
if ~exist('guess_area','var')
% Guess area AUTOMATICALLY
% guess_area = sum(data) - 0.5*(startDC+endDC)*datasize;
% guess_area = guess_area*scalefac;
guess_area = sum((data(2:end) + data(1:end-1)).*diff(x)/2) - 0.5*(startDC+endDC)*(x(end)-x(1));
end
if ~exist('guess_mu','var')
% Guess the center of mass AUTOMATICALLY
[~, maxind] = max(data);
guess_mu = x(maxind);
end
if ~exist('guess_sigma','var')
% Guess sigma in pixels AUTOMATICALLY
maxval = max(data);
indices = find(data > (maxval+((startDC+endDC)/2) )/2);
guess_sigma = (x(indices(end)) - x(indices(1)))/2.3;
end
% So far everything has been calculated in units of data points. Apply
% scaling factor here.
fixedvals = [NaN NaN NaN];
Starting(1) = guess_area;
Starting(2) = guess_mu;
Starting(3) = guess_sigma;
if autofitDC
Starting(end+1) = startDC;
else
fixedvals(1) = DCfit;
end
if autofitGrad
% Guess if there is a background gradient. Again assuming first and last
% 10% of data set is "background".
Starting(end+1) = -(endDC-startDC)/datasize*scalefac;
else
fixedvals(2) = linefit;
end
if autofitAssym
% Initial Assymetry factor
Starting(end+1) = 0;
else
fixedvals(3) = Assym;
end
if DEBUG
options = optimset('Display','iter','MaxIter',1500,'TolX',1e-6,'TolFun',1e-10);
else
options = optimset('Display','off','MaxIter',1500,'TolX',1e-6,'TolFun',1e-10);
end
[Estimates, fval] = fminsearch(@(p) myfit(p,x,data,fixedvals), Starting, options);
fitparam.xdata = x;
fitparam.rawdata = data;
fitparam.area = Estimates(1);
fitparam.mu = Estimates(2);
fitparam.sigma = Estimates(3);
i = 1;
if autofitDC
fitparam.DC = Estimates(3+i);
i = i + 1;
else
fitparam.DC = fixedvals(1);
end
if autofitGrad
fitparam.bg_gradient = Estimates(3+i);
i = i + 1;
else
fitparam.bg_gradient = fixedvals(2);
end
if autofitAssym
fitparam.Assym_factor = Estimates(3+i);
else
fitparam.Assym_factor = fixedvals(3);
end
fitparam.final_fit_val = fval;
varargout{1} = fitparam;
if nargout > 1
varargout{2} = fval;
end
if nargout > 2
gaussianfit = ones(size(data));
for i = 1:datasize
c = x(i);
gaussianfit(i) = fitparam.area * exp(-0.5*((c-fitparam.mu)./((1+sign(c-fitparam.mu)*fitparam.Assym_factor)*fitparam.sigma)).^2) / sqrt(2*pi*fitparam.sigma^2) + fitparam.bg_gradient*c + fitparam.DC;
end
varargout{3} = gaussianfit;
end
if nargout > 3
% Calculate error in sigma
er=[];
errscale = ones(size(Estimates));
for perc=0.90:0.001:1.1;
errscale(3) = perc; % change the sigma value and see the fit.
er(end+1) = myfit(Estimates.*errscale,x,data,fixedvals);
end
% Normalise
er = er./min(er);
% threshold a 5% change in the error function;
ind = find(er<1.05);
perc = 0.90:0.001:1.1;
% percerror
sigmaerror = (perc(ind(end))-1);
varargout{4} = sigmaerror;
end
if DEBUG || plotfig
gaussianfit = ones(size(data));
for i = 1:datasize
c = x(i);
gaussianfit(i) = fitparam.area * exp(-0.5*((c-fitparam.mu)./((1+sign(c-fitparam.mu)*fitparam.Assym_factor)*fitparam.sigma)).^2) / sqrt(2*pi*fitparam.sigma^2) + fitparam.bg_gradient*c + fitparam.DC;
end
figure(233);
plot(x,data, '.-r');
hold on;
plot(x,gaussianfit, '-b');
hold off;
% title(sprintf('Fitting STD error %g (\\sigma = %f)',...
% fittingerror(end),fitsigmas(end)));
end
function sse=myfit(params, x, Dist, fixedvals)
% if length(params) > 0
Afit = params(1);
% else
% Afit = 1;
% end
% if length(params) > 1
mufit = params(2);
% else
% mufit = length(x)/2;
% end
% if length(params) > 2
sigmafit = params(3);
% else
% sigmafit = length(x)/10;
% end
i = 1;
if isnan(fixedvals(1))
DCfit = params(3+i);
i = i + 1;
else
DCfit = fixedvals(1);
end
if isnan(fixedvals(2))
linefit = params(3+i);
i = i + 1;
else
linefit = fixedvals(2);
end
if isnan(fixedvals(3))
Asym = params(3+i);
else
Asym = fixedvals(3);
end
fittedcurve = Afit * exp(-0.5*((x-mufit)./((1+sign(x-mufit)*Asym)*sigmafit)).^2) / sqrt(2*pi*sigmafit^2) + (linefit*x) + DCfit;
sse = sum((fittedcurve - Dist).^2)/length(Dist);
|
github | atcollab/at-master | atset_s_shift.m | .m | at-master/atmat/pubtools/LatticeTuningFunctions/errors/atset_s_shift.m | 3,577 | utf_8 | a6ba1e6975dcd22aa1ac788c38c0fff6 |
function rerr=atset_s_shift(r,pos,DS)
%ATSET_S_SHIFT Implements DS longitudinal position drift
% by changing drifts at the sides of the
% elements defined by pos in r
%
% for dipoles the T2(1) field is also changed and the outgoing DS is
% modified:
% T2(1)=DS*sin(bendingangle)
% DSout=DS*cos(bendingangle)
%
% pos and DS must be the same size
%
% See also atsetshift atsettilt atsettiltdipole
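%
% Example (illustrative: 100 um longitudinal shift of all quadrupoles):
%   pos  = find(atgetcells(ring,'Class','Quadrupole'))';
%   rerr = atset_s_shift(ring,pos,1e-4*ones(size(pos)));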
if length(pos)~=length(DS)
error('pos and DS must be the same size');
end
rerr=r;
%find dipoles
dipind=findcells(r(pos),'BendingAngle');
posmag=pos;
dsmag=DS;
posdip=pos(dipind);
dsdip=DS(dipind);
posmag(dipind)=[];
dsmag(dipind)=[];
%% STRAIGHT MAGNETS
% find first drift before each element in pos
% and first drift after each element in pos
if ~isempty(posmag)
[driftUP,driftDO]=finddriftsaroundpos(r,posmag);
% shorten drift up
LDUP0=atgetfieldvalues(r,driftUP,'Length',{1,1});
rerr=atsetfieldvalues(rerr,driftUP,'Length',LDUP0-dsmag');
% lengthen drift down
LDDO0=atgetfieldvalues(r,driftDO,'Length',{1,1});
rerr=atsetfieldvalues(rerr,driftDO,'Length',LDDO0+dsmag');
rerr=atsetfieldvalues(rerr,posmag,'DeltaS',dsmag');
end
if ~isempty(posdip)
%% DIPOLES
% find first drift before each element in pos
% and first drift after each element in pos
[driftUP,driftDO]=finddriftsaroundpos(r,posdip);
theta=atgetfieldvalues(rerr,posdip,'BendingAngle',{1,1});
%if dipoles have the same MagNum, move each part and sum effect on T2
%coordinate change. DS assigned to first slice is assumed as DS of the
%whole magnet.
maggr=getMagGroupsFromMagNum(r(posdip));
for imaggr=1:length(maggr)
dipind=maggr{imaggr}; % sliced dipole indexes in posdip
% shorten drift up for first dipole in group
LDUP0=atgetfieldvalues(rerr,driftUP(dipind(1)),'Length',{1,1});
rerr=atsetfieldvalues(rerr,driftUP(dipind(1)),'Length',LDUP0-dsdip(dipind(1))');
dsout=dsdip((dipind(1)))';
dt2out=dsdip((dipind(1)))';
for iiind=1:length(dipind)
dsout=dsout.*cos(theta(dipind(iiind)));
dt2out=dt2out.*sin(theta(dipind(iiind)));
end
try
dt2out0=atgetfieldvalues(rerr,posdip(dipind(end)),'T2',{1,1}); % add to existing if exists
catch
dt2out0=zeros(size(dt2out)); % no existing T2: start from zero
end
% lengthen drift down FOR last DIPOLE in group. ALSO T2 changes!
LDDO0=atgetfieldvalues(rerr,driftDO(dipind(end)),'Length',{1,1});
rerr=atsetfieldvalues(rerr,driftDO(dipind(end)),'Length',LDDO0+dsout);%+dsdip(dipind(1)));%
rerr=atsetfieldvalues(rerr,posdip(dipind(end)),'T2',{1,1},dt2out0-dt2out); %
rerr=atsetfieldvalues(rerr,posdip(dipind),'DeltaS',dsdip(dipind(1))');
rerr=atsetfieldvalues(rerr,posdip(dipind),'DeltaST2',...
[zeros(1,length(dipind)-1) dt2out]);
end
end
return
function [dup,ddo]=finddriftsaroundpos(r,pos)
dup=nan(size(pos));
ddo=nan(size(pos));
for indpos=1:length(pos)
i=pos(indpos);
NEL=length(r);
while ~strcmp(r{i}.Class,'Drift')
if i<NEL
i=i+1;
else
i=1;
end
end
dup(indpos)=i;
i=pos(indpos);
while ~strcmp(r{i}.Class,'Drift')
if i>1
i=i-1;
else
i=NEL;
end
end
ddo(indpos)=i;
end
return
% function a=getmagnumdipole(r,ind)
%
% try
% a=r{ind}.MagNum;
% catch
% a=NaN;
% end
%
% return
|
github | atcollab/at-master | atsettiltdipole.m | .m | at-master/atmat/pubtools/LatticeTuningFunctions/errors/atsettiltdipole.m | 3,004 | utf_8 | 5bd60291c9e71410d38e824b88914a14 |
function ring=atsettiltdipole(varargin)
%ATSETTILTDIPOLE sets the entrance and exit rotation matrices
% of an element or a group of elements in THERING
%
% RING=ATSETTILTDIPOLE(RING,ELEMINDEX, PSI)
% ELEMINDEX contains indexes of elements to be rotated
% PSI - angle(s) of rotation in RADIANS
% POSITIVE PSI corresponds to a CORKSCREW (right)
% rotation of the ELEMENT looking in the direction of the beam.
% (or CORKSCREW, aligned with s-axis) rotation of the ELEMENT
% The misalignment matrices are stored in fields R1 and R2
% R1 = [ cos(PSI) sin(PSI); -sin(PSI) cos(PSI) ]
% R2 = R1'
%
% the rotated dipole gives a kick in the vertical and horizontal plane.
%
% ATSETTILTDIPOLE(ELEMINDEX, PSI) Uses the global variable THERING
%
% See also ATSETSHIFT ATSETTILT
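%
% Example (illustrative: tilt all dipoles by 200 urad):
%   idx  = find(atgetcells(RING,'BendingAngle'))';
%   RING = atsettiltdipole(RING,idx,2e-4);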
global THERING
if ~iscell(varargin{1})
THERING=atsettilt(THERING,varargin{:});
else
[ring,idx,rot]=deal(varargin{:});
if length(rot) == 1
rot=rot*ones(size(idx));
elseif length(rot) ~= length(idx)
error('AT:length','Vector lengths are incompatible: %i/%i.',length(idx),length(rot))
end
% tic;
% for i = 1:length(idx)
% ring{idx(i)}=attiltelemdip(ring{idx(i)},rot(i));
% end
% toc
%tic;
ring(idx)=cellfun(@(el,rot)attiltelemdip(el,rot),ring(idx),num2cell(rot(:)),'un',0);
%toc
end
end
function elem = attiltelemdip(elem,rots)
%ATTILTELEMdip set new rotation parameters
%NEWELEM=ATTILTELEMdip(OLDELEM,ROTS)
%
% ROTS - rotation angle in RADIANS
% POSITIVE ROTS corresponds to a CORKSCREW (right)
% rotation of the ELEMENT looking in the direction of the beam.
% (or CORKSCREW, aligned with s-axis) rotation of the ELEMENT
% The rotation matrixes are stored in fields R1 and R2
% R1 = [ cos(PSI) sin(PSI); -sin(PSI) cos(PSI) ]
% R2 = R1'
%See also: atshiftelem, atmodelem
if ~isfield(elem,'BendingAngle')% rotate reference
C=cos(rots);
S=sin(rots);
RM = diag([C C C C 1 1]);
RM(1,3) = S;
RM(2,4) = S;
RM(3,1) = -S;
RM(4,2) = -S;
elem.R1=RM;
elem.R2=RM';
else% rotate multipoles
% bending angle
bb=-elem.BendingAngle/elem.Length;
% horizontal kick
elem.('PolynomB')(1)=-(1-cos(rots)).*bb;
% vertical kick
elem.('PolynomA')(1)=sin(rots).*bb;
% rotate all other multipole components ( combined function magnets)
Lpb=length(elem.('PolynomB'));
elem=padpol(elem);
rotm=-rots*[2:Lpb];
% use the original PolynomA/B values on both lines so the rotation is consistent
pb=elem.('PolynomB')(2:end);
pa=elem.('PolynomA')(2:end);
elem.('PolynomB')(2:end)=cos(rotm).*pb-sin(rotm).*pa;
elem.('PolynomA')(2:end)=sin(rotm).*pb+cos(rotm).*pa;
end
elem.RotAboutS=rots;
end
function a=padpol(a)
if isfield(a,'PolynomB')
lpa=length(a.PolynomA);
lpb=length(a.PolynomB);
if lpa<lpb
a.PolynomA=[a.PolynomA,zeros(1,lpb-lpa)];
elseif lpa>lpb
a.PolynomB=[a.PolynomB,zeros(1,lpa-lpb)];
end
end
end
|
github | atcollab/at-master | getMagGroupsFromGirderIndex.m | .m | at-master/atmat/pubtools/LatticeTuningFunctions/errors/errorsmanipulation/getMagGroupsFromGirderIndex.m | 559 | utf_8 | 28b01fd7aaceb36986e1ab6c9514de1a |
function maggroups=getMagGroupsFromGirderIndex(r)
%GETMAGGROUPSFROMGIRDERINDEX Gets magnets on a girder
% output maggroups in r with indexes between GS and GE markers.
%
% maggroups is a cell array of index vectors, one per girder: each vector
% lists the indexes of the elements sitting between a GS marker and the
% following GE marker.
%
%see also: UniformGirderErrors
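%
% Example (illustrative):
%   maggroups = getMagGroupsFromGirderIndex(ring);
%   % maggroups{1} holds the element indexes between the first GS/GE pair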
indGS=find(atgetcells(r,'FamName','GS'));
indGE=find(atgetcells(r,'FamName','GE'));
maggroups=arrayfun(@(a,b)makegroup(a,b),indGS,indGE,'un',0);
return
function g=makegroup(a,b)
g=a:1:b;
return
|
github | atcollab/at-master | ThetaPhiGirder.m | .m | at-master/atmat/pubtools/LatticeTuningFunctions/errors/errorsmanipulation/ThetaPhiGirder.m | 1,249 | utf_8 | bdf76010e76c46b04836bfbd30da6bfe |
function rtp=ThetaPhiGirder(r,mag_gr)
%rtp=ThetaPhiGirder(r,mag_gr)
%
% sets misalignment to model theta, phi errors for magnets on girder
%
% mag_gr is the output of getMagGroupsFromGirderIndex(ring)
%
%see also: GetExistingErrors setANYshift setTiltAbout seterrorrand
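%
% Example (illustrative):
%   mag_gr = getMagGroupsFromGirderIndex(ring);
%   rtp    = ThetaPhiGirder(ring,mag_gr);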
% get girder centers
gm=cellfun(@(mg)gmisal(r,mg),mag_gr,'un',0);
magindex=[mag_gr{:}];
allmisg=[gm{:}];
% get existing alignment errors
%[X0,Y0]=GetExistingErrors(r,magindex);
rtp=r;
nsig=2;
errfun=@(r,po,er)setANYshift(r,po,1,er); % sets x errors
%rtp=seterrorrand(rtp,magindex,errfun,0,0,nsig,X0+allmisg(1,:));
rtp=seterrorrand(rtp,magindex,errfun,0,0,nsig,allmisg(1,:));
errfun=@(r,po,er)setANYshift(r,po,3,er); % sets Y errors
%rtp=seterrorrand(rtp,magindex,errfun,0,0,nsig,Y0+allmisg(2,:));
rtp=seterrorrand(rtp,magindex,errfun,0,0,nsig,allmisg(2,:));
return
function gm=gmisal(r,mg)
sg=findspos(r,mg);
gc=(max(sg)+min(sg))/2;
dg=sg-gc; % distance from girder center
tg=atgetfieldvalues(r,mg,'RotAboutX',{1,1}); % sets a vertical misalignment
pg=atgetfieldvalues(r,mg,'RotAboutY',{1,1}); % sets a horizontal misalignment
gm=zeros(length(mg),2);
if ~isempty(tg)
gm(:,2)=dg'.*sin(tg);
end
if ~isempty(pg)
gm(:,1)=dg'.*sin(pg);
end
gm=gm';
return
|
github | atcollab/at-master | EquivalentGradientsFromAlignments6D.m | .m | at-master/atmat/pubtools/LatticeTuningFunctions/correction/RDT/EquivalentGradientsFromAlignments6D.m | 2,854 | utf_8 | 71d7239491abcbbe844a8e780d5a1d84 |
function [kn,ks,ind]=EquivalentGradientsFromAlignments6D(r,inCOD)
%EQUIVALENTGRADIENTSFROMALIGNMENTS6D Estimated normal quad gradients from sext offsets
%[kn, 1) estimated normal quad gradients from sext offsets, quad
% errors in quadrupoles and sextupoles.
% ks, 2) estimated skew quad gradients from sext offsets, quad
% errors in quadrupoles, quadrupole rotation.
% ind 3) indexes of locations at which kn and ks are found
% ]=EquivalentGradientsFromAlignments6D(
% r, 1) AT lattice structure with errors
% inCOD
% )
%
% the function finds the closed orbit at sextupoles and converts it to
% equivalent quadrupole and skew quadrupole gradients for the computation
% of skew and normal quadrupole RDT
% quadrupole rotations are also converted in skew quadrupole gradients.
%
% it returns the complete list of normal (kn) and skew (ks) quadrupole
% gradients at the given indexes (ind) ( not integrated, PolynomB)
%
%
% See also
% quadrupole and skew quadrupole errors are introduced via COD in
% sextupoles
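%
% Example (illustrative; a zero 6x1 vector is a common initial COD guess):
%   inCOD = zeros(6,1);
%   [kn,ks,ind] = EquivalentGradientsFromAlignments6D(ring,inCOD);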
indsext=find(atgetcells(r,'Class','Sextupole'))';
b3=atgetfieldvalues(r,indsext,'PolynomB',{1,3});
oin=findorbit6(r,indsext,inCOD);% orbit at entrance of sextupole DO NOT USE HERE findorbit6Err!
oout=findorbit6(r,indsext+1,inCOD); % orbit at exit of sextupole
xmisal=cellfun(@(a)getT1(a,1),r(indsext));
ymisal=cellfun(@(a)getT1(a,3),r(indsext));
Dx=(oout(1,:)+oin(1,:))/2; % orbit average in sextupole
Dy=(oout(3,:)+oin(3,:))/2; % orbit average in sextupole
% quadrupole errors in sextupoles
kn_sext_err=cellfun(@(a)a.PolynomB(2),r(indsext));
ks_sext_err=cellfun(@(a)a.PolynomA(2),r(indsext));
kn_sext=-2.*b3.*(-Dx+xmisal')'+kn_sext_err;
ks_sext=-2.*b3.*(-Dy+ymisal')'+ks_sext_err;
% quadrupole rotations
indquad=find(atgetcells(r,'Class','Quadrupole'))';
kn2=atgetfieldvalues(r,indquad,'PolynomB',{1,2});
ks2=atgetfieldvalues(r,indquad,'PolynomA',{1,2});
srot=cellfun(@(a)getR1(a),r(indquad));
kn_quad=(1-srot).*kn2;
ks_quad=-srot.*kn2+ks2;
% all elements with PolynomB, not sextupoles or quadrupoles
indPolB=find(atgetcells(r,'PolynomB') & ...
~atgetcells(r,'Class','Quadrupole') & ...
~atgetcells(r,'Class','Sextupole' ))';
NpolB=cellfun(@(a)length(a.PolynomB),r(indPolB));
NpolA=cellfun(@(a)length(a.PolynomA),r(indPolB));
indPolB=indPolB(NpolB>=2 & NpolA>=2 & ~ismember(indPolB,[indquad indsext])');
kn_all=cellfun(@(a)a.PolynomB(2),r(indPolB));%-kn0_all;
ks_all=cellfun(@(a)a.PolynomA(2),r(indPolB));%-ks0_all;
[ind,ord]=sort([indsext,indquad,indPolB]);
kn=[kn_sext;kn_quad;kn_all];
ks=[ks_sext;ks_quad;ks_all];
% integrated strengths
%L=cellfun(@(a)a.Length,r(ind));
kn=kn(ord);%.*L;
ks=ks(ord);%.*L;
return
function t1=getT1(a,ind)
t1=0;
if isfield(a,'T1')
t1=-a.T1(ind);
end
return
function r1=getR1(a)
r1=0;
if isfield(a,'R1')
r1=asin(a.R1(1,3));
end
return
|
github | atcollab/at-master | semrdtresp_mod.m | .m | at-master/atmat/pubtools/LatticeTuningFunctions/correction/RDT/semrdtresp_mod.m | 1,993 | utf_8 | ee46c3c10b71f273b696acabb09f2800 |
function [f1,f2,skew]=semrdtresp_mod(mach,bpmidx,skewidx)
%SEMRDT compute resonance driving terms at BPM locations
%
%[f1,f2,skew]=semrdtresp_mod(mach,bpmidx,skewidx)
%
% mach : AT lattice
% bpmindx : BPM indexes
% skewidx : skew quadrupole indexes
%
% f1 : f1001 RDT
% f2 : f1010 RDT
% skew : skew.beta skew.phase beta and phase at the skew index (averaged)
%
% to obtain rdt for a given set of skewidx strengths (KL)
%
% f1001=f1.*k1s.*Lskew
% f1010=f2.*k1s.*Lskew
%
% this function is an exact copy of semrdtresp by L.Farvacque
%
%see also: atavedata_mod
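%
% example (illustrative; bpmidx and skewidx are user-supplied index vectors):
%   [f1,f2,skew] = semrdtresp_mod(mach,bpmidx,skewidx);
%   f1001 = f1.*(k1s.*Lskew);   % RDT for a given set of skew strengths k1s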
nb=length(bpmidx);
ns=length(skewidx);
% Compute optics
[refpts,ii,kl]=unique([skewidx bpmidx length(mach)+1]);
jsk=kl(1:ns);
jbpm=kl(ns+(1:nb));
jend=kl(end);
[vdata,avebeta,avemu]=atavedata_mod(mach,0,refpts);
mtunes=vdata(jend).mu;
if ~isempty(find(avebeta<0))
bx=arrayfun(@(a)a.beta(1),vdata);
by=arrayfun(@(a)a.beta(2),vdata);
avebeta=[bx,by];
warning('on','all');
warning('negative data in AVEBETA! using beta at entrance!')
save('failingavebetalattice.mat','mach','bpmidx','skewidx')
warning('off','all');
end
% Extract parameters
bpm.phase=cat(1,vdata(jbpm).mu);
skew.beta=avebeta(jsk,:);
skew.phase=avemu(jsk,:);
% Compute terms
jsqb=real(sqrt(skew.beta(:,1).*skew.beta(:,2)));
[dphix,dphiz]=dphase(bpm.phase,skew.phase',mtunes);
re1=jsqb(:,ones(1,nb))'.*cos(dphix-dphiz);
im1=jsqb(:,ones(1,nb))'.*sin(dphix-dphiz);
t1=mtunes(1)-mtunes(2);
denom1=4*(1-complex(cos(t1),sin(t1)));
f1=complex(re1,im1)/denom1;
re2=jsqb(:,ones(1,nb))'.*cos(dphix+dphiz);
im2=jsqb(:,ones(1,nb))'.*sin(dphix+dphiz);
t2=mtunes(1)+mtunes(2);
denom2=4*(1-complex(cos(t2),sin(t2)));
f2=complex(re2,im2)/denom2;
end
function [dphix,dphiz]=dphase(phib,phik,mtune)
nb=length(phib);
nk=length(phik);
dphix=phik( ones(nb,1),:)-phib(:, ones(1,nk));
neg=(dphix < 0);
dphix(neg)=dphix(neg)+mtune(1);
dphiz=phik(2*ones(nb,1),:)-phib(:,2*ones(1,nk));
neg=(dphiz < 0);
dphiz(neg)=dphiz(neg)+mtune(2);
end
|
github
|
atcollab/at-master
|
DisplayCorrectionEffect.m
|
.m
|
at-master/atmat/pubtools/LatticeTuningFunctions/correction/correction_chain/DisplayCorrectionEffect.m
| 7,457 |
utf_8
|
a232fd9497506ab41bf0b0245ff89459
|
function [d0,de,dc]=DisplayCorrectionEffect(...
r0,...
rerr,...
rcor,...
inCODe,...
inCODc,...
refpts,...
indHCor,...
indVCor,...
indQCor,...
indSCor)
% [d0,de,dc]=DisplayCorrectionEffect(...
% r0,... 1) reference lattice
% rerr,... 2) lattice with errors
% rcor,... 3) corrected lattice
% inCODe,... 4) initial COD guess for rerr
% inCODc,... 5) initial COD guess for rcor
% refpts,... 6) reference points for computation
% indHCor,... 7) hor. steerers indexes
% indVCor,... 8) ver. steerers indexes
% indQCor,... 9) normal quad. correctors indexes
% indSCor) 10) skew quad. correctors indexes
%
% display correction effect.
%
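% Example (illustrative sketch; rerr/rcor are assumed to come from an
% error-setting and correction chain, indBPM and the corrector index
% lists from the lattice under study):
%
%   [d0,de,dc]=DisplayCorrectionEffect(r0,rerr,rcor,zeros(6,1),zeros(6,1),...
%       indBPM,indHCor,indVCor,indQCor,indSCor);
%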
compute_emittances=true;
disp(' --- model lattice data --- ')
d0=getdatalattice(r0,inCODe*0,refpts,indHCor,indVCor,indQCor,indSCor,compute_emittances);
disp(' --- errors lattice data --- ')
de=getdatalattice(rerr,inCODe,refpts,indHCor,indVCor,indQCor,indSCor,compute_emittances);
disp(' --- corrected lattice data --- ')
dc=getdatalattice(rcor,inCODc,refpts,indHCor,indVCor,indQCor,indSCor,compute_emittances);
% print out correction effect
oudataforma='%3.2e';
oudataformatune='%2.3f';
oudataformaemit='%3.3f';
oudataformabeta='%2.1f';
%
disp('--------------------------------------------------');
disp('------ total std corrector values applied --------')
disp('------ --------');
disp([' HK (' num2str(length(d0.ch)) ') [1/m]: ' num2str(std(de.ch),oudataforma) ' -> ' num2str(std(dc.ch),oudataforma) ]);
disp([' VK (' num2str(length(d0.cv)) ') [1/m]: ' num2str(std(de.cv),oudataforma) ' -> ' num2str(std(dc.cv),oudataforma) ]);
disp([' SK (' num2str(length(d0.cs)) ') [1/m2]: ' num2str(std(de.cs),oudataforma) ' -> ' num2str(std(dc.cs),oudataforma) ]);
disp([' QK (' num2str(length(d0.cq)) ') [1/m2]: ' num2str(std(de.cq-d0.cq),oudataforma) ' -> ' num2str(std(dc.cq-d0.cq),oudataforma) ]);
if nargin==8
ch=dc.ch.*d0.Lh;
cv=dc.cv.*d0.Lv;
cq=dc.cq.*d0.Lq;
cs=dc.cs.*d0.Ls;
che=de.ch.*d0.Lh;
cve=de.cv.*d0.Lv;
cqe=de.cq.*d0.Lq;
cse=de.cs.*d0.Ls;
disp([' HKL (' num2str(length(ch)) ') [rad]: ' num2str(std(che),oudataforma) ' -> ' num2str(std(ch),oudataforma) ]);
disp([' VKL (' num2str(length(cv)) ') [rad]: ' num2str(std(cve),oudataforma) ' -> ' num2str(std(cv),oudataforma) ]);
disp([' SKL (' num2str(length(cs)) ') [1/m]: ' num2str(std(cse),oudataforma) ' -> ' num2str(std(cs),oudataforma) ]);
disp([' QKL (' num2str(length(cq)) ') [1/m]: ' num2str(std(cqe-d0.cq),oudataforma) ' -> ' num2str(std(cq-d0.cq),oudataforma) ]);
Brho=getBrho(r0);
ch=dc.ch.*d0.Lh.*Brho;
cv=dc.cv.*d0.Lv.*Brho;
cq=dc.cq.*d0.Lq.*Brho;
cs=dc.cs.*d0.Ls.*Brho;
che=de.ch.*d0.Lh.*Brho;
cve=de.cv.*d0.Lv.*Brho;
cqe=de.cq.*d0.Lq.*Brho;
cse=de.cs.*d0.Ls.*Brho;
disp([' HKLBrho (' num2str(length(ch)) ') [Tm]: ' num2str(std(che),oudataforma) ' -> ' num2str(std(ch),oudataforma) ]);
disp([' VKLBrho (' num2str(length(cv)) ') [Tm]: ' num2str(std(cve),oudataforma) ' -> ' num2str(std(cv),oudataforma) ]);
disp([' SKLBrho (' num2str(length(cs)) ') [T]: ' num2str(std(cse),oudataforma) ' -> ' num2str(std(cs),oudataforma) ]);
disp([' QKLBrho (' num2str(length(cq)) ') [T]: ' num2str(std(cqe-d0.cq),oudataforma) ' -> ' num2str(std(cq-d0.cq),oudataforma) ]);
end
disp('------ --------');
disp('------ residual orbit and dispersion --------')
disp('------ --------');
disp([' OH (' num2str(length(d0.monh)) ') [m]: ' num2str(std(de.monh),oudataforma) ' -> ' num2str(std(dc.monh),oudataforma) ]);
disp([' OV (' num2str(length(d0.monv)) ') [m]: ' num2str(std(de.monv),oudataforma) ' -> ' num2str(std(dc.monv),oudataforma) ]);
disp([' DH (' num2str(length(d0.dish)) ') [m]: ' num2str(std(de.dish-d0.dish),oudataforma) ' -> ' num2str(std(dc.dish-d0.dish),oudataforma) ]);
disp([' DV   (' num2str(length(d0.disv)) ') [m]: ' num2str(std(de.disv),oudataforma) ' -> ' num2str(std(dc.disv),oudataforma)  ]);
disp([' BBH (' num2str(length(d0.bbh)) ') %: ' num2str(std((de.bbh-d0.bbh)./d0.bbh)*100,oudataformabeta) ' -> ' num2str(std((dc.bbh-d0.bbh)./d0.bbh)*100,oudataformabeta) ]);
disp([' BBV (' num2str(length(d0.bbv)) ') %: ' num2str(std((de.bbv-d0.bbv)./d0.bbv)*100,oudataformabeta) ' -> ' num2str(std((dc.bbv-d0.bbv)./d0.bbv)*100,oudataformabeta) ]);
disp([' PhH (' num2str(length(d0.mh)) ') : ' num2str(std((de.mh-d0.mh)),oudataforma) ' -> ' num2str(std((dc.mh-d0.mh)),oudataforma) ]);
disp([' PhV (' num2str(length(d0.mv)) ') : ' num2str(std((de.mv-d0.mv)),oudataforma) ' -> ' num2str(std((dc.mv-d0.mv)),oudataforma) ]);
disp('------ --------');
disp('------ tune and emittance --------')
disp('------ --------');
disp([' Qx [' num2str(d0.tune(1),oudataformatune) ']: ' num2str(de.tune(1),oudataformatune) ' -> ' num2str(dc.tune(1),oudataformatune) ]);
disp([' Qy [' num2str(d0.tune(2),oudataformatune) ']: ' num2str(de.tune(2),oudataformatune) ' -> ' num2str(dc.tune(2),oudataformatune) ]);
disp([' Cx [' num2str(d0.crom(1),oudataformatune) ']: ' num2str(de.crom(1),oudataformatune) ' -> ' num2str(dc.crom(1),oudataformatune) ]);
disp([' Cy [' num2str(d0.crom(2),oudataformatune) ']: ' num2str(de.crom(2),oudataformatune) ' -> ' num2str(dc.crom(2),oudataformatune) ]);
if compute_emittances
disp([' EX [' num2str(d0.modemittance(1)*1e12,oudataformaemit) ' pm]: ' num2str(de.modemittance(1)*1e12,oudataformaemit) ' -> ' num2str(dc.modemittance(1)*1e12,oudataformaemit) ]);
disp([' EY [' num2str(d0.modemittance(2)*1e12,oudataformaemit) 'pm]: ' num2str(de.modemittance(2)*1e12,oudataformaemit) ' -> ' num2str(dc.modemittance(2)*1e12,oudataformaemit) ]);
end
disp('------ --------');
disp('--------------------------------------------------');
return
function a=getdatalattice(r0,inCOD,refpts,indHCor,indVCor,indCorQuads,indSCor,emitok)
warning('off','all'); % mcf,atx, findorbit6,... generates warnings
alpha=mcf(r0);
indrfc=find(atgetcells(r0,'Frequency'));
% get initial orbit
o=findorbit6Err(r0,refpts,inCOD);
a.monh=o(1,:);
a.monv=o(3,:);
d=finddispersion6Err(r0,refpts,indrfc,alpha,1e-4,inCOD);
a.dish=d(1,:);
a.disv=d(3,:);
if emitok
try
[~,b0]=atx(r0,0,1:length(r0));
catch exc
getReport(exc,'extended');
warning('atx failed');
b0.modemittance=[NaN NaN];
b0.fulltunes=[NaN NaN];
end
a.tune=b0.fulltunes;
a.modemittance= b0.modemittance;
end
[l,t,a.crom]=atlinopt(r0,0,refpts);
if ~emitok
a.tune=t;
end
a.bbh=arrayfun(@(s)s.beta(1),l);
a.bbv=arrayfun(@(s)s.beta(2),l);
a.mh=arrayfun(@(s)s.mu(1),l);
a.mv=arrayfun(@(s)s.mu(2),l);
a.Lh=getcellstruct(r0,'Length',indHCor);
a.Lv=getcellstruct(r0,'Length',indVCor);
a.Lq=getcellstruct(r0,'Length',indCorQuads);
a.Ls=getcellstruct(r0,'Length',indSCor);
a.ch=getcellstruct(r0,'PolynomB',indHCor,1,1);
a.cv=getcellstruct(r0,'PolynomA',indVCor,1,1);
a.cq=getcellstruct(r0,'PolynomB',indCorQuads,1,2);
a.cs=getcellstruct(r0,'PolynomA',indSCor,1,2);
warning('on','all');
return
|
github
|
atcollab/at-master
|
atmatchtunedelta.m
|
.m
|
at-master/atmat/pubtools/LatticeTuningFunctions/correction/tune/atmatchtunedelta.m
| 1,408 |
utf_8
|
594b0a1ba71cd2b3b662ce2905629a04
|
function arctune0=atmatchtunedelta(arc,tune,quadfams)
% function arctune0=atmatchtunedelta(arc,tune,quadfams)
%
% arc : at lattice
% tune : tune to get (with integer part) size(tune)=[2,1]
% quadfams: {[findcells(arc,'FamName','QF1'),findcells(arc,'FamName','QF2')],...
%            [findcells(arc,'FamName','QD1'),findcells(arc,'FamName','QD2')]}
%
% delta on quadrupole families
%
% fits the tune to the desired values, including the integer part.
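%
% Example (illustrative sketch; family names and target tunes are
% hypothetical):
%   qf=findcells(arc,'FamName','QF1');
%   qd=findcells(arc,'FamName','QD1');
%   arc=atmatchtunedelta(arc,[18.44;6.39],{qf,qd});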
%disp('match tunes')
variabs=[];
for iquadfams=1:length(quadfams)
KQ=cellfun(@(a)a.PolynomB(2),arc(quadfams{iquadfams}),'un',1);
variabs=[variabs, atVariableBuilder(arc,...
{@(r,DKquad)setcellstruct(r,'PolynomB',quadfams{iquadfams},KQ+DKquad,1,2)},...
{[1e-8]})]; %#ok<*AGROW>
end
ConstrQX=struct(...
'Fun',@(~,ld,~)mux(ld),...
'Weight',1,...
'RefPoints',[1:length(arc)+1],...
'Min',tune(1),...
'Max',tune(1));
ConstrQY=struct(...
'Fun',@(~,ld,~)muy(ld),...
'Weight',1,...
'RefPoints',[1:length(arc)+1],...
'Min',tune(2),...
'Max',tune(2));
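% both constraints use the total phase advance at the end of the lattice
% divided by 2*pi (see mux/muy below), so the integer part of the tune is
% matched as well.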
% tol=1e-6;
% arctune0=atmatch(arc,variabs,[ConstrQX ConstrQY],tol,5,3);%,@lsqnonlin); %
tol=1e-8;
arctune0=arc;
arctune0=atmatch(arctune0,variabs,[ConstrQX ConstrQY],tol,50,3); %);%
% arctune0=atmatch(arctune0,variabs,[ConstrQX ConstrQY],tol,50,3,@lsqnonlin); %);%
return
function m=muy(lindata)
m=lindata(end).mu(2)/2/pi;
return
function m=mux(lindata)
m=lindata(end).mu(1)/2/pi;
return
|
github
|
atcollab/at-master
|
BumpAtBPM.m
|
.m
|
at-master/atmat/pubtools/LatticeTuningFunctions/correction/orbitbumps/matching/BumpAtBPM.m
| 2,920 |
utf_8
|
115949ead276c24cea46066103bd887f
|
function [rbump,hs,vs]=BumpAtBPM(ring0,inCOD,bumph,bumpv,indBPMbump,indHCor,indVCor)
% function roff=BumpAtBPM(...
% ring0,... AT lattice structure
% inCOD,... initial 6x1 coordinate guess
% bumph,... hor. bump value at indBPMbump
% bumpv,... ver. bump value at indBPMbump
% indBPMbump, bump position
% indHCor,.... 1x3 correctors to generate bump. last is used for COD=0
% indVCor.... 1x3 correctors to generate bump. last is used for COD=0
% )
%
% ex:
% % the order of correctors does not matter as long as the bpm is within
% % the three correctors. The last corrector index is used to match the
% % position and angle back to zero
% roff=BumpAtBPM(ring0,0.0,1e-3,1e-3,50,[4 78 90],[89 34 1]);
%
% % to match a bump at the first bpm, use the last corrector:
% roff=BumpAtBPM(ring0,0.0,1e-3,1e-7,1,indHCor([end,1,2]),indVCor([end,1,2]));
%
%see also: atmatch findorbit6Err
if ~isequal(size(indBPMbump),[1 1])
error('indBPMbump must be size 1x1')
end
if ~isequal(size(indHCor),[1 3])
error('indHCor must be size 1x3')
end
if ~isequal(size(indVCor),[1 3])
error('indVCor must be size 1x3')
end
h1=atVariableBuilder(ring0,indHCor(1),{'PolynomB',{1,1}});
h2=atVariableBuilder(ring0,indHCor(2),{'PolynomB',{1,1}});
h3=atVariableBuilder(ring0,indHCor(3),{'PolynomB',{1,1}});
v1=atVariableBuilder(ring0,indVCor(1),{'PolynomA',{1,1}});
v2=atVariableBuilder(ring0,indVCor(2),{'PolynomA',{1,1}});
v3=atVariableBuilder(ring0,indVCor(3),{'PolynomA',{1,1}});
VariabH=[h1 h2 h3];
VariabV=[v1 v2 v3];
% 6D orbit
ConstrH6D=struct(...
'Fun',@(r,~,~)get6dx(r,indBPMbump,indHCor(end)+1,inCOD),...
'Weight',[1e-6 1e-6 1e-6],...
'RefPoints',1,...
'Min',[bumph 0.0 0.0],...
'Max',[bumph 0.0 0.0]);
ConstrV6D=struct(...
'Fun',@(r,~,~)get6dy(r,indBPMbump,indVCor(end)+1,inCOD),...
'Weight',[1e-6 1e-6 1e-6],...
'RefPoints',1,...
'Min',[bumpv 0.0 0.0],...
'Max',[bumpv 0.0 0.0]);
rbump=ring0;
try
rbump=atmatch(rbump,VariabH,ConstrH6D,10^-16,10,3,@lsqnonlin);%,'fminsearch');%
rbump=atmatch(rbump,VariabV,ConstrV6D,10^-16,10,3,@lsqnonlin);%,'fminsearch');%
catch
rbump=atmatch(rbump,VariabH,ConstrH6D,10^-10,40,3);%,'fminsearch');%
rbump=atmatch(rbump,VariabV,ConstrV6D,10^-10,40,3);%,'fminsearch');%
rbump=atmatch(rbump,VariabH,ConstrH6D,10^-16,10,3,@lsqnonlin);%,'fminsearch');%
rbump=atmatch(rbump,VariabV,ConstrV6D,10^-16,10,3,@lsqnonlin);%,'fminsearch');%
end
% plot corrector values.
hs=atgetfieldvalues(rbump,indHCor,'PolynomB',{1,1});
vs=atgetfieldvalues(rbump,indVCor,'PolynomA',{1,1});
end
function x=get6dx(r,ind1,ind2,inCOD)
o1=findorbit6Err(r,ind1,inCOD);
o2=findorbit6Err(r,ind2,inCOD);
x=[o1(1,1),o2(1,1),o2(2,1)]; % orbit at ind1, orbit and angle at ind2
end
function x=get6dy(r,ind1,ind2,inCOD)
o1=findorbit6Err(r,ind1,inCOD);
o2=findorbit6Err(r,ind2,inCOD);
x=[o1(3,1),o2(3,1),o2(4,1)]; % orbit at ind1, orbit and angle at ind2
end
|
github
|
atcollab/at-master
|
distance2curve.m
|
.m
|
at-master/atmat/pubtools/distance2curve/distance2curve.m
| 55,867 |
utf_8
|
ad166e4d19008dd21756da4a9d60319e
|
function [xy,distance,t_a] = distance2curve(curvexy,mapxy,interpmethod)
%DISTANCE2CURVE Gets the minimum distance from a point to a general curvilinear n-dimensional arc
% usage: [xy,distance,t] = distance2curve(curvexy,mapxy) % uses linear curve segments
% usage: [xy,distance,t] = distance2curve(curvexy,mapxy,interpmethod)
%
% Identifies the closest point along a general space curve (a 1-d path
% in some space) to some new set of points. The curve may be piecewise
% linear or a parametric spline or pchip model.
%
% arguments: (input)
% curvexy - An nxp real numeric array containing the points of the
% curve. For 2-dimensional curves, p == 2. This will be a list
% of points (each row of the array is a new point) that
% define the curve. The curve may cross itself in space.
% Closed curves are acceptable, in which case the first
% and last points would be identical. (Sorry, but periodic
% end conditions are not an option for the spline at this time.)
%
% Since a curve makes no sense in less than 2 dimensions,
% p >= 2 is required.
%
% mapxy - an mxp real numeric array, where m is the number of new points
%            to be mapped to the curve in terms of their closest distance.
%
%            These points will be mapped to the existing curve
%            in terms of the minimum (Euclidean, 2-norm) distance
% to the curve. Each row of this array will be a different
% point.
%
% interpmethod - (OPTIONAL) string flag - denotes the method
% used to compute the arc length of the curve.
%
% method may be any of 'linear', 'spline', or 'pchip',
% or any simple contraction thereof, such as 'lin',
% 'sp', or even 'p'.
%
% interpmethod == 'linear' --> Uses a linear chordal
% approximation to define the curve.
% This method is the most efficient.
%
% interpmethod == 'pchip' --> Fits a parametric pchip
% approximation.
%
% interpmethod == 'spline' --> Uses a parametric spline
% approximation to fit the curves. Generally for
% a smooth curve, this method may be most accurate.
%
% DEFAULT: 'linear'
%
% arguments: (output)
% xy - an mxp array, contains the closest point identified along
% the curve to each of the points provided in mapxy.
%
% distance - an mx1 vector, the actual distance to the curve,
%        in terms of the minimum Euclidean distance.
%
% t - fractional arc length along the interpolating curve to that
% point. This is the same value that interparc would use to
% produce the points in xy.
%
%
% Example:
% % Find the closest points and the distance to a polygonal line from
% % several test points.
%
% curvexy = [0 0;1 0;2 1;0 .5;0 0];
% mapxy = [3 4;.5 .5;3 -1];
% [xy,distance,t] = distance2curve(curvexy,mapxy,'linear')
% % xy =
% % 2 1
% % 0.470588235294118 0.617647058823529
% % 1.5 0.5
% % distance =
% % 3.16227766016838
% % 0.121267812518166
% % 2.12132034355964
% % t =
% % 0.485194315877587
% % 0.802026225550702
% % 0.34308419095021
%
%
% plot(curvexy(:,1),curvexy(:,2),'k-o',mapxy(:,1),mapxy(:,2),'r*')
% hold on
% plot(xy(:,1),xy(:,2),'g*')
% line([mapxy(:,1),xy(:,1)]',[mapxy(:,2),xy(:,2)]','color',[0 0 1])
% axis equal
%
%
% Example:
% % Solve for the nearest point on the curve of a 3-d quasi-elliptical
% % arc (sampled and interpolated from 20 points) mapping a set of points
% % along a surrounding circle onto the ellipse. This is the example
% % used to generate the screenshot figure.
% t = linspace(0,2*pi,20)';
% curvexy = [cos(t) - 1,3*sin(t) + cos(t) - 1.25,(t/2 + cos(t)).*sin(t)];
%
% s = linspace(0,2*pi,100)';
% mapxy = 5*[cos(s),sin(s),sin(s)];
% xy = distance2curve(curvexy,mapxy,'spline');
%
% plot3(curvexy(:,1),curvexy(:,2),curvexy(:,3),'ko')
% line([mapxy(:,1),xy(:,1)]',[mapxy(:,2),xy(:,2)]',[mapxy(:,3),xy(:,3)]','color',[0 0 1])
% axis equal
% axis square
% box on
% grid on
% view(26,-6)
%
%
% Example:
% % distance2curve is fairly fast, at least for the linear case.
% % Map 1e6 points onto a polygonal curve in 10 dimensions.
% curvexy = cumsum(rand(10,10));
% mapxy = rand(1000000,10)*5;
% tic,[xy,distance] = distance2curve(curvexy,mapxy,'linear');toc
% % Elapsed time is 2.867453 seconds.
%
%
% See also: interparc, spline, pchip, interp1, arclength
%
% Author: John D'Errico
% e-mail: [email protected]
% Release: 1.0
% Release date: 9/22/2010
% check for errors, defaults, etc...
if (nargin < 2)
error('DISTANCE2CURVE:insufficientarguments', ...
'at least curvexy and mapxy must be supplied')
elseif nargin > 3
error('DISTANCE2CURVE:abundantarguments', ...
'Too many arguments were supplied')
end
% get the dimension of the space our points live in
[n,p] = size(curvexy);
if isempty(curvexy) || isempty(mapxy)
% empty begets empty. you might say this was a pointless exercise.
xy = zeros(0,p);
distance = zeros(0,p);
t_a = zeros(0,p);
return
end
% do curvexy and mapxy live in the same space?
if size(mapxy,2) ~= p
error('DISTANCE2CURVE:improperpxorpy', ...
'curvexy and mapxy do not appear to live in the same dimension spaces')
end
% do the points live in at least 2 dimensions?
if p < 2
error('DISTANCE2CURVE:improperpxorpy', ...
'The points MUST live in at least 2 dimensions for any curve to be defined.')
end
% how many points to be mapped to the curve?
m = size(mapxy,1);
% make sure that curvexy and mapxy are doubles, as uint8, etc
% would cause problems down the line.
curvexy = double(curvexy);
mapxy = double(mapxy);
% test for complex inputs
if ~isreal(curvexy) || ~isreal(mapxy)
error('DISTANCE2CURVE:complexinputs','curvexy and mapxy may not be complex')
end
% default for interpmethod
if (nargin < 3) || isempty(interpmethod)
interpmethod = 'linear';
elseif ~ischar(interpmethod)
error('DISTANCE2CURVE:invalidinterpmethod', ...
'Invalid method indicated. Only ''linear'',''pchip'',''spline'' allowed')
else
validmethods = {'linear' 'pchip' 'spline'};
ind = strmatch(lower(interpmethod),validmethods);
if isempty(ind) || (length(ind) > 1)
error('DISTANCE2CURVE:invalidinterpmethod', ...
'Invalid method indicated. Only ''linear'',''pchip'',''spline'' allowed')
end
interpmethod = validmethods{ind};
end
% if the curve is a single point, stop here
if n == 1
% return the appropriate parameters
xy = repmat(curvexy,m,1);
t_a = zeros(m,1);
% 2 norm distance, or sqrt of sum of squares of differences
distance = sqrt(sum(bsxfun(@minus,curvexy,mapxy).^2,2));
% we can drop out here
return
end
% compute the chordal linear arclengths, and scale to [0,1].
seglen = sqrt(sum(diff(curvexy,[],1).^2,2));
t0 = [0;cumsum(seglen)/sum(seglen)];
% We need to build some parametric splines.
% compute the splines, storing the polynomials in one 3-d array
ppsegs = cell(1,p);
% the breaks for the splines will be t0, unless spline got fancy
% on us here.
breaks = t0;
for i = 1:p
switch interpmethod
case 'linear'
dt = diff(t0);
ind = 1:(n-1);
ppsegs{i} = [(curvexy(ind + 1,i) - curvexy(ind,i))./dt,curvexy(ind,i)];
case 'pchip'
spl = pchip(t0,curvexy(:,i));
ppsegs{i} = spl.coefs;
case 'spline'
spl = spline(t0,curvexy(:,i));
breaks = spl.breaks';
nc = numel(spl.coefs);
if nc < 4
% just pretend it has cubic segments
spl.coefs = [zeros(1,4-nc),spl.coefs];
spl.order = 4;
end
ppsegs{i} = spl.coefs;
end
end
% how many breaks did we find in the spline? This is
% only a thing to worry about for a spline based on few points,
% when the function spline.m may choose to use only two breaks.
nbr = numel(breaks);
% for each point in mapxy, find the closest point to those
% in curvexy. This part we can do in a vectorized form.
pointdistances = ipdm(mapxy,curvexy,'metric',2, ...
'result','structure','subset','nearestneighbor');
% initialize the return variables, using the closest point
% found in the set curvexy.
xy = curvexy(pointdistances.columnindex,:);
distance = pointdistances.distance;
t = t0(pointdistances.columnindex);
% we must now do at least some looping, still vectorized where possible.
% the piecewise linear case is simpler though, so do it separately.
if strcmp(interpmethod,'linear');
% loop over the individual points, vectorizing in the number of
% segments, when there are many segments, but not many points to map.
if n >= (5*m)
% many segments, so loop over the points in mapxy
for i = 1:m
% the i'th point in mapxy
xyi = mapxy(i,:);
% Compute the location (in t) of the minimal distance
% point to xyi, for all lines.
tnum = zeros(nbr - 1,1);
tden = tnum;
for j = 1:p
ppj = ppsegs{j};
tden = tden + ppj(:,1).^2;
tnum = tnum + ppj(:,1).*(xyi(j) - ppj(:,2));
end
tmin = tnum./tden;
% toss out any element of tmin that is less than or equal to
% zero, or is greater than dt for that segment.
tmin((tmin <= 0) | (tmin >= diff(t0))) = NaN;
% for any segments with a valid minimum distance inside the
% segment itself, compute that distance.
dmin = zeros(nbr - 1,1);
for j = 1:p
ppi = ppsegs{j};
dmin = dmin + (ppi(:,1).*tmin + ppi(:,2) - xyi(j)).^2;
end
dmin = sqrt(dmin);
% what is the minimum distance among these segments?
[mindist,minind] = min(dmin);
if ~isnan(mindist) && (distance(i) > mindist)
% there is a best segment, better than the
% closest point from curvexy.
distance(i) = mindist;
t(i) = tmin(minind) + t0(minind);
for j = 1:p
ppj = ppsegs{j};
xy(i,j) = ppj(minind,1).*tmin(minind) + ppj(minind,2);
end
end
end
else
for i = 1:(n-1)
% the i'th segment of the curve
t1 = t0(i);
t2 = t0(i+1);
% Compute the location (in t) of the minimal distance
% point to mapxy, for all points.
tnum = zeros(m,1);
tden = 0;
for j = 1:p
ppj = ppsegs{j};
tden = tden + ppj(i,1).^2;
tnum = tnum + ppj(i,1).*(mapxy(:,j) - ppj(i,2));
end
tmin = tnum./tden;
% We only care about those points for this segment where there
% is a minimal distance to the segment that is internal to the
% segment.
k = find((tmin > 0) & (tmin < (t2-t1)));
nk = numel(k);
if nk > 0
% for any points with a valid minimum distance inside the
% segment itself, compute that distance.
dmin = zeros(nk,1);
xymin = zeros(nk,p);
for j = 1:p
ppj = ppsegs{j};
xymin(:,j) = ppj(i,1).*tmin(k) + ppj(i,2);
dmin = dmin + (xymin(:,j) - mapxy(k,j)).^2;
end
dmin = sqrt(dmin);
L = dmin < distance(k);
% this segment has a closer point
% closest point from curvexy.
if any(L)
distance(k(L)) = dmin(L);
t(k(L)) = tmin(k(L)) + t0(i);
xy(k(L),:) = xymin(L,:);
end
end
end
end
% for the linear case, t is identical to the fractional arc length
% along the curve.
t_a = t;
else
% cubic segments. here it is simplest to loop over the
% distinct curve segments. We need not test the endpoints
% of the segments, since the call to ipdm did that part.
xytrans = zeros(1,p);
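% polydiff returns the coefficients of the derivative of a 6th-degree
% polynomial (the constant term drops out); it is used to locate the
% stationary points of the squared distance along each cubic segment.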
polydiff = @(dp) dp(1:6).*[6 5 4 3 2 1];
for j = 1:(n-1)
% the j'th curve segment
t1 = t0(j);
t2 = t0(j+1);
% for a polynomial in t that looks like
% P(t) = a1*t^3 + a2*t^2 + a3*t + a4, in each dimension,
% extract the polynomial pieces for the 6th degree polynomial
% in t for the square of the Euclidean distance to the curve.
% Thus, (P_x(t) - x0)^2 + (P_y(t) - y0)^2 + ...
%
% a1^2*t^6
% 2*a1*a2*t^5
% (2*a1*a3 + a2^2)*t^4
% (2*a2*a3 - 2*a1*x0 + 2*a1*a4)*t^3
% (a3^2 - 2*a2*x0 + 2*a2*a4)*t^2
% (-2*a3*x0 + 2*a3*a4)*t
% x0^2 - 2*a4*x0 + a4^2
%
% here, only the parts of this distance that are independent of
% the point itself are computed. so the x0 terms are not built
% yet. All of the terms with a4 in them will go away because
% of the translation.
distpoly0 = zeros(1,7);
for i = 1:p
ppi = ppsegs{i};
% this will allow us to translate each poly to pass through
% (0,0) (i.e., at t = 0)
xytrans(i) = ppi(j,4);
distpoly0(1:2) = distpoly0(1:2) + ppi(j,1).*[ppi(j,1),2*ppi(j,2)];
distpoly0(3) = distpoly0(3) + 2.*ppi(j,1).*ppi(j,3) + ppi(j,2).^2;
distpoly0(4) = distpoly0(4) + 2.*ppi(j,2).*ppi(j,3);
distpoly0(5) = distpoly0(5) + ppi(j,3).^2;
end
for i = 1:m
% the i'th point, translated by xytrans. The translation does
% not change the distance to this segment, but it does make
% the computations more robust to numerical problems.
xyi = mapxy(i,:) - xytrans;
% update the poly for this particular point
% (-2*a1*x0)*t^3
% (-2*a2*x0)*t^2
% (-2*a3*x0)*t
% x0^2
distpoly = distpoly0;
for k = 1:p
ppk = ppsegs{k};
distpoly(4:6) = distpoly(4:6) - 2.*ppk(j,1:3).*xyi(k);
distpoly(7) = distpoly(7) + xyi(k).^2;
end
% find any minima of this polynomial in the interval (0,t2-t1).
% we can ignore solutions that happen at the endpoints of the
% interval, since those are already covered by ipdm.
%
% merely compute the zeros of the derivative polynomial
diffpoly = polydiff(distpoly);
tstationary = roots(diffpoly);
% discard any with an imaginary part, those that are less
% than 0, or greater than t2-t1.
k = (imag(tstationary) ~= 0) | ...
(real(tstationary) <= 0) | ...
(real(tstationary) >= (t2 - t1));
tstationary(k) = [];
% for any solutions that remain, compute the distance.
if ~isempty(tstationary)
mindist = zeros(size(tstationary));
xyij = zeros(numel(tstationary),p);
for k = 1:p
xyij(:,k) = polyval(ppsegs{k}(j,:),tstationary);
mindist = mindist + (mapxy(i,k) - xyij(:,k)).^2;
end
mindist = sqrt(mindist);
% just in case there is more than one stationary point
[mindist,ind] = min(mindist);
if mindist < distance(i)
% we found a point on this segment that is better
% than the endpoint values for that segment.
distance(i) = mindist;
xy(i,:) = xyij(ind,:);
t(i) = tstationary(ind) + t0(j);
end
end % if ~isempty(tstationary)
end % for i = 1:m
end % for j = 1:(n-1)
% do we need to return t_a? t_a is the same number that interparc
% uses, whereas t as we have computed it so far is just the fractional
% chordal arclength.
%
% Don't bother doing this last piece unless that argument is requested,
% since it takes some additional work to do.
if nargout >= 3
% build new piecewise polynomials for each segment that
% represent (dx/dt)^2 + (dy/dt)^2 + ...
%
% Since each poly must be cubic at this point, the result will be
% a 4th degree piecewise polynomial.
kernelcoefs = zeros(nbr-1,5);
for i = 1:p
ppi = ppsegs{i};
kernelcoefs = kernelcoefs + [9*ppi(:,1).^2, ...
12*ppi(:,1).*ppi(:,2), ...
4*ppi(:,2).^2 + 6*ppi(:,1).*ppi(:,3), ...
4*ppi(:,2).*ppi(:,3), ppi(:,3).^2];
end
% get the arc length for each segment. quadgk will suffice here
% since we need to integrate the sqrt of each poly
arclengths = zeros(nbr-1,1);
for i = 1:(nbr - 1)
lengthfun = @(t) sqrt(polyval(kernelcoefs(i,:),t));
arclengths(i) = quadgk(lengthfun,0,t0(i+1) - t0(i));
end
% get the cumulative arclengths, then scale by the sum
% this gives us fractional arc lengths.
arclengths = cumsum(arclengths);
totallength = arclengths(end);
arclengths = [0;arclengths/totallength];
% where does each point fall in terms of fractional cumulative
% chordal arclength? (i.e., t0?)
[tbin,tbin] = histc(t,t0);
tbin(tbin < 1) = 1; % being careful at the bottom end
tbin(tbin >= nbr) = nbr - 1; % if the point fell at the very top...
% the total length below the segment in question
t_a = arclengths(tbin);
% now get the piece in the tbin segment
for i = 1:m
lengthfun = @(t) sqrt(polyval(kernelcoefs(tbin(i),:),t));
t_a(i) = t_a(i) + quadgk(lengthfun,0,t(i) - t0(tbin(i)))/totallength;
end
end
end % if strcmp(interpmethod,'linear');
% ==========================================================
function d = ipdm(data1,varargin)
% ipdm: Inter-Point Distance Matrix
% usage: d = ipdm(data1)
% usage: d = ipdm(data1,data2)
% usage: d = ipdm(data1,prop,value)
% usage: d = ipdm(data1,data2,prop,value)
%
% Arguments: (input)
% data1 - array of data points, each point is one row. p dimensional
% data will be represented by matrix with p columns.
% If only data1 is provided, then the distance matrix
% is computed between all pairs of rows of data1.
%
% If your data is one dimensional, it MUST form a column
% vector. A row vector of length n will be interpreted as
% an n-dimensional data set.
%
% data2 - second array, supplied only if distances are to be computed
% between two sets of points.
%
%
% Class support: data1 and data2 are assumed to be either
% single or double precision. I have not tested this code to
% verify its success on integer data of any class.
%
%
% Additional parameters are expected to be property/value pairs.
% Property/value pairs are pairs of arguments, the first of which
% (properties) must always be a character string. These strings
% may be shortened as long as the shortening is unambiguous.
% Capitalization is ignored. Valid properties for ipdm are:
%
% 'Metric', 'Subset', 'Limit', 'Result'
%
% 'Metric' - numeric flag - defines the distance metric used
% metric = 2 --> (DEFAULT) Euclidean distance = 2-norm
% The standard distance metric.
%
% metric = 1 --> 1-norm = sum of absolute differences
% Also sometimes known as the "city block
% metric", since this is the sum of the
% differences in each dimension.
%
% metric = inf --> infinity-norm = maximum difference
% over all dimensions. The name refers
% to the limit of the p-norm, as p
% approaches infinity.
%
% metric = 0 --> minimum difference over all dimensions.
% This is not really a useful norm in
% practice.
%
% Note: while other distance metrics exist, IMHO, these
% seemed to be the common ones.
%
%
% 'Result' - A string variable that denotes the style of returned
% result. Valid result types are 'Array', 'Structure'.
% Capitalization is ignored, and the string may be
% shortened if you wish.
%
% result = 'Array' --> (DEFAULT) A matrix of all
% interpoint distances will be generated.
% This array may be large. If this option
% is specified along with a minimum or
% maximum value, then those elements above
% or below the limiting values will be
% set as -inf or +inf, as appropriate.
%
% When any of 'LargestFew', 'SmallestFew',
% or 'NearestNeighbor' are set, then the
% resulting array will be a sparse matrix
% if 'array' is specified as the result.
%
% result = 'Structure' --> A list of all computed distances,
% defined as a structure. This structure
% will have fields named 'rowindex',
% 'columnindex', and 'distance'.
%
% This option will be useful when a subset
% criterion for the distances has been
% specified, since then the distance matrix
% may be very sparsely populated. Distances
% for pairs outside of the criterion will
% not be returned.
%
%
% 'Subset' - Character string, any of:
%
% 'All', 'Maximum', 'Minimum', 'LargestFew', 'SmallestFew',
% 'NearestNeighbor', 'FarthestNeighbor', or empty
%
% Like properties, capitalization is ignored here, and
% any unambiguous shortening of the word is acceptable.
%
% DEFAULT = 'All'
%
% Some interpoint distance matrices can be huge. Often
% these matrices are too large to be fully retained in
% memory, yet only the pair of points with the largest
% or smallest distance may be needed. When only some
% subset of the complete set of distances is of interest,
% these options allow you to specify which distances will
% be returned.
%
% If 'result' is defined to be an array, then a sparse
% matrix will be returned for the 'LargestFew', 'SmallestFew',
% 'NearestNeighbor', and 'FarthestNeighbor' subset classes.
% 'Minimum' and 'Maximum' will yield full matrices by
% default. If a structure is specified, then only those
% elements which have been identified will be returned.
%
% Where a subset is specified, its limiting value is
% specified by the 'Limit' property. Call that value k.
%
%
% 'All' --> (DEFAULT) Return all interpoint distances
%
% 'Minimum' --> Only look for those distances above
% the cutoff k. All other distances will
% be returned as -inf.
%
% 'Maximum' --> Only look for those distances below
% the cutoff k. All other distances will
% be returned as +inf.
%
% 'SmallestFew' --> Only return the subset of the k
% smallest distances. Where only one data
% set is provided, only the upper triangle
% of the inter-point distance matrix will
% be generated since that matrix is symmetric.
%
% 'LargestFew' --> Only return the subset of the k
% largest distances. Where only one data
% set is provided, only the upper triangle
% of the inter-point distance matrix will
% be generated since that matrix is symmetric.
%
% 'NearestNeighbor' --> Only return the single nearest
% neighbor in data2 to each point in data1.
% No limiting value is required for this
% option. If multiple points have the same
% nearest distance, then return the first
% such point found. With only one input set,
% a point will not be its own nearest
% neighbor.
%
% Note that exact replicates in a single set
% will cause problems, since a sparse matrix
% is returned by default. Since they will have
% a zero distance, they will not show up in
% the sparse matrix. A structure return will
% show those points as having a zero distance
% though.
%
% 'FarthestNeighbor' --> Only return the single farthest
% neighbor to each point. No limiting value
% is required for this option. If multiple
% points have the same farthest distance,
% then return the first such point found.
%
%
% 'Limit' - scalar numeric value or []. Used only when some
% Subset is specified.
%
% DEFAULT = []
%
%
% 'ChunkSize' - allows a user with lower RAM limits
% to force the code to only grab smaller chunks of RAM
% at a time (where possible). This parameter is specified
% in bytes of RAM. The default is 32 megabytes, or 2^22
% elements in any piece of the distance matrix. Only some
% options will break the problem into chunks, thus as long
% as a full matrix is expected to be returned, there seems
% no reason to break the problem up into pieces.
%
% DEFAULT = 2^25
%
%
% Arguments: (output)
%  d     - array of interpoint distances, or a struct with the
% fields {'rowindex', 'columnindex', 'distance'}.
%
% d(i,j) represents the distance between point i
% (from data1) and point j (from data2).
%
% If only one (n1 x p) array is supplied, then d will
% be an array of size == [n1,n1].
%
% If two arrays (of sizes n1 x p and n2 x p) then d
% will be an array of size == [n1,n2].
%
%
% Efficiency considerations:
% Where possible, this code will use bsxfun to compute its
% distances.
%
%
% Example:
% Compute the interpoint distances between all pairs of points
% in a list of 5 points, in 2 dimensions and using Euclidean
% distance as the distance metric.
%
% A = randn(5,2);
% d = ipdm(A,'metric',2)
% d =
% 0 2.3295 3.2263 2.0263 2.8244
% 2.3295 0 1.1485 0.31798 1.0086
% 3.2263 1.1485 0 1.4318 1.8479
% 2.0263 0.31798 1.4318 0 1.0716
% 2.8244 1.0086 1.8479 1.0716 0
%
% (see the demo file for many other examples)
%
% See also: pdist
%
% Author: John D'Errico
% e-mail: [email protected]
% Release: 1.0
% Release date: 2/26/08
% Default property values
params.Metric = 2;
params.Result = 'array';
params.Subset = 'all';
params.Limit = [];
params.ChunkSize = 2^25;
% untangle the arguments
if nargin<1
% if called with no arguments, then the user probably
% needs help. Give it to them.
help ipdm
return
end
% were two sets of data provided?
pvpairs = {};
if nargin==1
% only 1 set of data provided
dataflag = 1;
data2 = [];
else
if ischar(varargin{1})
dataflag = 1;
data2 = [];
pvpairs = varargin;
else
dataflag = 2;
data2 = varargin{1};
if nargin>2
pvpairs = varargin(2:end);
end
end
end
% get data sizes for later
[n1,dim] = size(data1);
if dataflag == 2
n2 = size(data2,1);
end
% Test the class of the input variables
if ~(isa(data1,'double') || isa(data1,'single')) || ...
((dataflag == 2) && ~(isa(data2,'double') || isa(data2,'single')))
error('data points must be either single or double precision variables.')
end
% do we need to process any property/value pairs?
if nargin>2
params = parse_pv_pairs(params,pvpairs);
% check for problems in the properties
% was a legal Subset provided?
if ~isempty(params.Subset) && ~ischar(params.Subset)
error('If provided, ''Subset'' must be character')
elseif isempty(params.Subset)
params.Subset = 'all';
end
valid = {'all','maximum','minimum','largestfew','smallestfew', ...
'nearestneighbor','farthestneighbor'};
ind = find(strncmpi(params.Subset,valid,length(params.Subset)));
if (length(ind)==1)
params.Subset = valid{ind};
else
error(['Invalid Subset: ',params.Subset])
end
% was a limit provided?
if ~ismember(params.Subset,{'all','nearestneighbor','farthestneighbor'}) && ...
isempty(params.Limit)
error('No limit provided, but a Subset that requires a limit value was specified')
end
% check the limit values for validity
if length(params.Limit)>1
error('Limit must be scalar or empty')
end
switch params.Subset
case {'largestfew', 'smallestfew'}
% must be at least 1, and an integer
if (params.Limit<1) || (round(params.Limit)~=params.Limit)
error('Limit must be a positive integer for LargestFew or SmallestFew')
end
end
% was a legal Result provided?
if isempty(params.Result)
params.Result = 'array';
elseif ~ischar(params.Result)
error('If provided, ''Result'' must be character or empty')
end
valid = {'array','structure'};
ind = find(strncmpi(params.Result,valid,length(params.Result)));
if (length(ind)==1)
params.Result = valid{ind};
else
error(['Invalid Result: ',params.Result])
end
% check for the metric
if isempty(params.Metric)
params.Metric = 2;
elseif (length(params.Metric)~=1) || ~ismember(params.Metric,[0 1 2 inf])
error('If supplied, ''Metric'' must be a scalar, and one of [0 1 2 inf]')
end
end % if nargin>2
% If Metric was given as 2, but the dimension is only 1, then it will
% be slightly faster (and equivalent) to use the 1-norm Metric.
if (dim == 1) && (params.Metric == 2)
params.Metric = 1;
end
% Can we use bsxfun to compute the interpoint distances?
% Older Matlab releases will not have bsxfun, but if it is
% around, it will be both faster and less of a memory hog.
params.usebsxfun = (5==exist('bsxfun','builtin'));
% check for dimension mismatch if 2 sets
if (dataflag==2) && (size(data2,2)~=dim)
error('If 2 point sets provided, then both must have the same number of columns')
end
% Total number of distances to compute, in case I must do it in batches
if dataflag==1
n2 = n1;
end
ntotal = n1*n2;
% FINALLY!!! Compute inter-point distances
switch params.Subset
case 'all'
% The complete set of interpoint distances. There is no need
% to break this into chunks, since we must return all distances.
% If that is too much to compute in memory, then it will fail
% anyway when we try to store the result. bsxfun will at least
% do the computation efficiently.
% One set or two?
if dataflag == 1
d = distcomp(data1,data1,params);
else
d = distcomp(data1,data2,params);
end
% Must we return it as a struct?
if params.Result(1) == 's'
[rind,cind] = ndgrid(1:size(d,1),1:size(d,2));
ds.rowindex = rind(:);
ds.columnindex = cind(:);
ds.distance = d(:);
d = ds;
end
case {'minimum' 'maximum'}
% There is no reason to break this into pieces if the result
% will be filled in the end with +/- inf. Only break it up
% if the final result is a struct.
if ((ntotal*8)<=params.ChunkSize) || (params.Result(1) == 'a')
% its small enough to do it all at once
% One set or two?
if dataflag == 1
d = distcomp(data1,data1,params);
else
d = distcomp(data1,data2,params);
end
% Must we return it as a struct?
if params.Result(1) == 'a'
% its an array, fill the unwanted distances with +/- inf
if params.Subset(2) == 'i'
% minimum
d(d<=params.Limit) = -inf;
else
% maximum
d(d>=params.Limit) = +inf;
end
else
% a struct will be returned
if params.Subset(2) == 'i'
% minimum
[dist.rowindex,dist.columnindex] = find(d>=params.Limit);
else
% maximum
[dist.rowindex,dist.columnindex] = find(d<=params.Limit);
end
dist.distance = d(dist.rowindex + n1*(dist.columnindex-1));
d = dist;
end
else
% we need to break this into chunks. This branch
% will always return a struct.
% this is the number of rows of data1 that we will
% process at a time.
bs = floor(params.ChunkSize/(8*n2));
bs = min(n1,max(1,bs));
% Accumulate the result into a cell array. Do it this
% way because we don't know in advance how many elements
% that we will find satisfying the minimum or maximum
% limit specified.
accum = cell(0,1);
% now loop over the chunks
batch = 1:bs;
while ~isempty(batch)
% One set or two?
if dataflag == 1
dist = distcomp(data1(batch,:),data1,params);
else
dist = distcomp(data1(batch,:),data2,params);
end
% big or small as requested
if ('i'==params.Subset(2))
% minimum value specified
[I,J,V] = find(dist>=params.Limit);
else
% maximum limit
[I,J] = find(dist<=params.Limit);
I = I(:);
J = J(:);
V = dist(I + (J-1)*length(batch));
I = I + (batch(1)-1);
end
% and stuff them into the cell structure
if ~isempty(V)
accum{end+1,1} = [I,J,V(:)]; %#ok
end
% increment the batch
batch = batch + bs;
if batch(end)>n1
batch(batch>n1) = [];
end
end
% convert the cells into one flat array
accum = cell2mat(accum);
if isempty(accum)
d.rowindex = [];
d.columnindex = [];
d.distance = [];
else
% we found something
% sort on the second column, to put them in a reasonable order
accum = sortrows(accum,[2 1]);
d.rowindex = accum(:,1);
d.columnindex = accum(:,2);
d.distance = accum(:,3);
end
end
case {'smallestfew' 'largestfew'}
% find the k smallest/largest distances. k is
% given by params.Limit
% if only 1 set, params.Limit must be less than n*(n-1)/2
if dataflag == 1
params.Limit = min(params.Limit,n1*(n1-1)/2);
end
% is this a large problem?
if ((ntotal*8) <= params.ChunkSize)
% small potatoes
% One set or two?
if dataflag == 1
dist = distcomp(data1,data1,params);
% if only one data set, set the diagonal and
% below that to +/- inf so we don't find it.
temp = find(tril(ones(n1,n1),0));
if params.Subset(1) == 's'
dist(temp) = inf;
else
dist(temp) = -inf;
end
else
dist = distcomp(data1,data2,params);
end
% sort the distances to find those we need
if ('s'==params.Subset(1))
% smallestfew
[val,tags] = sort(dist(:),'ascend');
else
% largestfew
[val,tags] = sort(dist(:),'descend');
end
val = val(1:params.Limit);
tags = tags(1:params.Limit);
% recover the row and column index from the linear
% index returned by sort in tags.
[d.rowindex,d.columnindex] = ind2sub([n1,size(dist,2)],tags);
% create the matrix as a sparse one or a struct?
if params.Result(1)=='a'
% its an array, so make the array sparse.
d = sparse(d.rowindex,d.columnindex,val,n1,size(dist,2));
else
% a structure
d.distance = val;
end
else
% chunks
% this is the number of rows of data1 that we will
% process at a time.
bs = floor(params.ChunkSize/(8*n2));
bs = min(n1,max(1,bs));
% We need to find the extreme cases. There are two possible
% algorithms, depending on how many total elements we will
% search for.
% 1. Only a very few total elements.
% 2. A relatively large number of total elements, forming
% a significant fraction of the total set.
%
% Case #1 would suggest to retain params.Limit number of
% elements from each batch, then at the end, sort them all
% to find the best few. Case #2 will result in too many
% elements to retain, so we must distinguish between these
% alternatives.
if (8*params.Limit*n1/bs) <= params.ChunkSize
% params.Limit is small enough to fall into case #1.
% Accumulate the result into a cell array. Do it this
% way because we don't know in advance how many elements
% that we will find satisfying the minimum or maximum
% limit specified.
accum = cell(0,1);
% now loop over the chunks
batch = (1:bs)';
while ~isempty(batch)
% One set or two?
if dataflag == 1
dist = distcomp(data1(batch,:),data1,params);
k = find(tril(ones(length(batch),n2),batch(1)-1));
if ('s'==params.Subset(1))
dist(k) = inf;
else
dist(k) = -inf;
end
else
dist = distcomp(data1(batch,:),data2,params);
end
% big or small as requested, keeping only the best
% params.Limit number of elements
if ('s'==params.Subset(1))
% minimum value specified
[tags,tags] = sort(dist(:),1,'ascend'); %#ok
tags = tags(1:bs);
[I,J] = ndgrid(batch,1:n2);
ijv = [I(tags),J(tags),dist(tags)];
else
% maximum limit
[tags,tags] = sort(dist(:),1,'descend'); %#ok
tags = tags(1:bs);
[I,J] = ndgrid(batch,1:n2);
ijv = [I(tags),J(tags),dist(tags)];
end
% and stuff them into the cell structure
accum{end+1,1} = ijv; %#ok
% increment the batch
batch = batch + bs;
if batch(end)>n1
batch(batch>n1) = [];
end
end
% convert the cells into one flat array
accum = cell2mat(accum);
% keep only the params.Limit best of those singled out
accum = sortrows(accum,3);
if ('s'==params.Subset(1))
% minimum value specified
accum = accum(1:params.Limit,:);
else
% maximum value specified
accum = accum(end + 1 - (1:params.Limit),:);
end
d.rowindex = accum(:,1);
d.columnindex = accum(:,2);
d.distance = accum(:,3);
% create the matrix as a sparse one or a struct?
if params.Result(1)=='a'
% its an array, so make the array sparse.
d = sparse(d.rowindex,d.columnindex,d.distance,n1,size(dist,2));
end
else
% params.Limit forces us into the domain of case #2.
% Here we cannot retain params.Limit elements from each chunk.
% so we will grab each chunk and append it to the best elements
% found so far, then filter out the best after each chunk is
% done. This may be slower than we want, but its the only way.
ijv = zeros(0,3);
% loop over the chunks
batch = (1:bs)';
while ~isempty(batch)
% One set or two?
if dataflag == 1
dist = distcomp(data1(batch,:),data1,params);
k = find(tril(ones(length(batch),n2),batch(1)-1));
if ('s'==params.Subset(1))
dist(k) = inf;
else
dist(k) = -inf;
end
else
dist = distcomp(data1(batch,:),data2,params);
end
[I,J] = ndgrid(batch,1:n2);
ijv = [ijv;[I(:),J(:),dist(:)]]; %#ok
% big or small as requested, keeping only the best
% params.Limit number of elements
if size(ijv,1) > params.Limit
if ('s'==params.Subset(1))
% minimum value specified
[tags,tags] = sort(ijv(:,3),1,'ascend'); %#ok
else
% maximum value specified
[tags,tags] = sort(ijv(:,3),1,'descend'); %#ok
end
ijv = ijv(tags(1:params.Limit),:);
end
% increment the batch
batch = batch + bs;
if batch(end)>n1
batch(batch>n1) = [];
end
end
% They are fully trimmed down. stuff a structure
d.rowindex = ijv(:,1);
d.columnindex = ijv(:,2);
d.distance = ijv(:,3);
% create the matrix as a sparse one or a struct?
if params.Result(1)=='a'
% its an array, so make the array sparse.
d = sparse(d.rowindex,d.columnindex,d.distance,n1,size(dist,2));
end
end
end
case {'nearestneighbor' 'farthestneighbor'}
% find the closest/farthest neighbor for every point
% is this a large problem? Or a 1-d problem?
if dim == 1
% its a 1-d nearest/farthest neighbor problem. we can
% special case these easily enough, and all the distance
% metric options are the same in 1-d.
% first split it into the farthest versus nearest cases.
if params.Subset(1) == 'f'
% farthest away
% One set or two?
if dataflag == 1
[d2min,minind] = min(data1);
[d2max,maxind] = max(data1);
else
[d2min,minind] = min(data2);
[d2max,maxind] = max(data2);
end
d.rowindex = (1:n1)';
d.columnindex = repmat(maxind,n1,1);
d.distance = abs(data1 - d2max);
% which endpoint was further away?
k = abs(data1 - d2min) >= abs(data1 - d2max);
if any(k)
d.columnindex(k) = minind;
d.distance(k) = abs(data1(k) - d2min);
end
else
% nearest. this is mainly a sort and some fussing around.
d.rowindex = (1:n1)';
d.columnindex = ones(n1,1);
d.distance = zeros(n1,1);
% One set or two?
if dataflag == 1
% if only one data point, then we are done
if n1 == 2
% if exactly two data points, its trivial
d.columnindex = [2 1];
d.distance = repmat(abs(diff(data1)),2,1);
elseif n1>2
% at least three points. do a sort.
[sorted_data,tags] = sort(data1);
% handle the first and last points separately
d.columnindex(tags(1)) = tags(2);
d.distance(tags(1)) = sorted_data(2) - sorted_data(1);
d.columnindex(tags(end)) = tags(end-1);
d.distance(tags(end)) = sorted_data(end) - sorted_data(end-1);
ind = (2:(n1-1))';
d1 = sorted_data(ind) - sorted_data(ind-1);
d2 = sorted_data(ind+1) - sorted_data(ind);
k = d1 < d2;
d.distance(tags(ind(k))) = d1(k);
d.columnindex(tags(ind(k))) = tags(ind(k)-1);
k = ~k;
d.distance(tags(ind(k))) = d2(k);
d.columnindex(tags(ind(k))) = tags(ind(k)+1);
end % if n1 == 2
else
% Two sets of data. still really a sort and some fuss.
if n2 == 1
% there is only one point in data2
d.distance = abs(data1 - data2);
% d.columnindex is already set correctly
else
% At least two points in data2
% We need to sort all the data points together, but also
% know which points from each set went where. ind12 and
% bool12 will help keep track.
ind12 = [1:n1,1:n2]';
bool12 = [zeros(n1,1);ones(n2,1)];
[sorted_data,tags] = sort([data1;data2]);
ind12 = ind12(tags);
bool12 = bool12(tags);
% where did each point end up after the sort?
loc1 = find(~bool12);
loc2 = find(bool12);
% for each point in data1, what is the (sorted) data2
% element which appears most nearly to the left of it?
cs = cumsum(bool12);
leftelement = cs(loc1);
% any points which fell below the minimum element in data2
% will have a zero for the index of the element on their
% left. fix this.
leftelement = max(1,leftelement);
% likewise, any point greater than the max in data2 will
% have an n2 in left element. this too will be a problem
% later, so fix it.
leftelement = min(n2-1,leftelement);
% distance to the left hand element
dleft = abs(sorted_data(loc1) - sorted_data(loc2(leftelement)));
dright = abs(sorted_data(loc1) - sorted_data(loc2(leftelement+1)));
% find the points which are closer to the left element in data2
k = (dleft < dright);
d.distance(ind12(loc1(k))) = dleft(k);
d.columnindex(ind12(loc1(k))) = ind12(loc2(leftelement(k)));
k = ~k;
d.distance(ind12(loc1(k))) = dright(k);
d.columnindex(ind12(loc1(k))) = ind12(loc2(leftelement(k)+1));
end % if n2 == 1
end % if dataflag == 1
end % if params.Subset(1) == 'f'
% create the matrix as a sparse one or a struct?
if params.Result(1)=='a'
% its an array, so make the array sparse.
d = sparse(d.rowindex,d.columnindex,d.distance,n1,n2);
end
elseif (ntotal>1000) && (((params.Metric == 0) && (params.Subset(1) == 'n')) || ...
((params.Metric == inf) && (params.Subset(1) == 'f')))
% nearest/farthest neighbour in n>1 dimensions, for the 0-norm
% (nearest) or infinity-norm (farthest) metric. Reduce this to a
% sequence of 1-d problems, each of which will be faster in general.
% do this only if the problem is moderately large, since
% we must overcome the extra overhead of the recursive
% calls to ipdm.
% do the first dimension
if dataflag == 1
d = ipdm(data1(:,1),data1(:,1),'subset',params.Subset,'metric',params.Metric,'result','struct');
else
d = ipdm(data1(:,1),data2(:,1),'subset',params.Subset,'metric',params.Metric,'result','struct');
end
% its slightly different for nearest versus farthest here
% now, loop over dimensions
for i = 2:dim
if dataflag == 1
di = ipdm(data1(:,i),data1(:,i),'subset',params.Subset,'metric',params.Metric,'result','struct');
else
di = ipdm(data1(:,i),data2(:,i),'subset',params.Subset,'metric',params.Metric,'result','struct');
end
% did any of the distances change?
if params.Metric == 0
% the 0 norm, with nearest neighbour, so take the
% smallest distance in any dimension.
k = d.distance > di.distance;
else
% inf norm. so take the largest distance across dimensions
k = d.distance < di.distance;
end
if any(k)
d.distance(k) = di.distance(k);
d.columnindex(k) = di.columnindex(k);
end
end
% create the matrix as a sparse one or a struct?
if params.Result(1)=='a'
% its an array, so make the array sparse.
d = sparse(d.rowindex,d.columnindex,d.distance,n1,n2);
end
elseif ((ntotal*8) <= params.ChunkSize)
% None of the other special cases apply, so do it using brute
% force for the small potatoes problem.
% One set or two?
if dataflag == 1
dist = distcomp(data1,data1,params);
else
dist = distcomp(data1,data2,params);
end
% if only one data set and if a nearest neighbor
% problem, set the diagonal to +inf so we don't find it.
if (dataflag==1) && (n1>1) && ('n'==params.Subset(1))
diagind = (1:n1) + (0:n1:(n1^2-1));
dist(diagind) = +inf;
end
if ('n'==params.Subset(1))
% nearest
[val,j] = min(dist,[],2);
else
% farthest
[val,j] = max(dist,[],2);
end
% create the matrix as a sparse one or a struct?
if params.Result(1)=='a'
% its an array, so make the array sparse.
d = sparse((1:n1)',j,val,n1,size(dist,2));
else
% a structure
d.rowindex = (1:n1)';
d.columnindex = j;
d.distance = val;
end
else
% break it into chunks
bs = floor(params.ChunkSize/(8*n2));
bs = min(n1,max(1,bs));
% pre-allocate the result
d.rowindex = (1:n1)';
d.columnindex = zeros(n1,1);
d.distance = zeros(n1,1);
% now loop over the chunks
batch = 1:bs;
while ~isempty(batch)
% One set or two?
if dataflag == 1
dist = distcomp(data1(batch,:),data1,params);
else
dist = distcomp(data1(batch,:),data2,params);
end
% if only one data set and if a nearest neighbor
% problem, set the diagonal to +inf so we don't find it.
if (dataflag==1) && (n1>1) && ('n'==params.Subset(1))
diagind = 1:length(batch);
diagind = diagind + (diagind-2+batch(1))*length(batch);
dist(diagind) = +inf;
end
% big or small as requested
if ('n'==params.Subset(1))
% nearest
[val,j] = min(dist,[],2);
else
% farthest
[val,j] = max(dist,[],2);
end
% and stuff them into the result structure
d.columnindex(batch) = j;
d.distance(batch) = val;
% increment the batch
batch = batch + bs;
if batch(end)>n1
batch(batch>n1) = [];
end
end
% did we need to return a struct or an array?
if params.Result(1) == 'a'
% an array. make it a sparse one
d = sparse(d.rowindex,d.columnindex,d.distance,n1,n2);
end
end % if dim == 1
end % switch params.Subset
% End of mainline
% ======================================================
% begin subfunctions
% ======================================================
function d = distcomp(set1,set2,params)
% Subfunction to compute all distances between two sets of points
dim = size(set1,2);
% can we take advantage of bsxfun?
% Note: in theory, there is no need to loop over the dimensions. We
% could Just let bsxfun do ALL the work, then wrap a sum around the
% outside. In practice, this tends to create large intermediate
% arrays, especially in higher numbers of dimensions. It is also where
% we might gain from fully vectorized code. This will only be
% a serious gain when the number of points is relatively small and
% the dimension is large.
if params.usebsxfun
% its a recent enough version of matlab that we can
% use bsxfun at all.
n1 = size(set1,1);
n2 = size(set2,1);
if (dim>1) && ((n1*n2*dim)<=params.ChunkSize)
% its a small enough problem that we might gain by full
% use of bsxfun
switch params.Metric
case 2
d = sum(bsxfun(@minus,reshape(set1,[n1,1,dim]),reshape(set2,[1,n2,dim])).^2,3);
case 1
d = sum(abs(bsxfun(@minus,reshape(set1,[n1,1,dim]),reshape(set2,[1,n2,dim]))),3);
case inf
d = max(abs(bsxfun(@minus,reshape(set1,[n1,1,dim]),reshape(set2,[1,n2,dim]))),[],3);
case 0
d = min(abs(bsxfun(@minus,reshape(set1,[n1,1,dim]),reshape(set2,[1,n2,dim]))),[],3);
end
else
% too big, so that the ChunkSize will have been exceeded, or just 1-d
if params.Metric == 2
d = bsxfun(@minus,set1(:,1),set2(:,1)').^2;
else
d = abs(bsxfun(@minus,set1(:,1),set2(:,1)'));
end
for i=2:dim
switch params.Metric
case 2
d = d + bsxfun(@minus,set1(:,i),set2(:,i)').^2;
case 1
d = d + abs(bsxfun(@minus,set1(:,i),set2(:,i)'));
case inf
d = max(d,abs(bsxfun(@minus,set1(:,i),set2(:,i)')));
case 0
d = min(d,abs(bsxfun(@minus,set1(:,i),set2(:,i)')));
end
end
end
else
% Cannot use bsxfun. Sigh. Do things the hard (and slower) way.
n1 = size(set1,1);
n2 = size(set2,1);
if params.Metric == 2
% Note: While some people might use a different Euclidean
% norm computation based on expanding the square of the
% difference of two numbers, that computation is inherently
% inaccurate when implemented in floating point arithmetic.
% While it might be faster, I won't use it here. Sorry.
d = (repmat(set1(:,1),1,n2) - repmat(set2(:,1)',n1,1)).^2;
else
d = abs(repmat(set1(:,1),1,n2) - repmat(set2(:,1)',n1,1));
end
for i=2:dim
switch params.Metric
case 2
d = d + (repmat(set1(:,i),1,n2) - repmat(set2(:,i)',n1,1)).^2;
case 1
d = d + abs(repmat(set1(:,i),1,n2) - repmat(set2(:,i)',n1,1));
case inf
d = max(d,abs(repmat(set1(:,i),1,n2) - repmat(set2(:,i)',n1,1)));
case 0
d = min(d,abs(repmat(set1(:,i),1,n2) - repmat(set2(:,i)',n1,1)));
end
end
end
% if 2 norm, then we must sqrt at the end
if params.Metric==2
d = sqrt(d);
end
% ==============================================================
% end main ipdm
% begin included function - parse_pv_pairs
% ==============================================================
function params=parse_pv_pairs(params,pv_pairs)
% parse_pv_pairs: parses sets of property value pairs, allows defaults
% usage: params=parse_pv_pairs(default_params,pv_pairs)
%
% arguments: (input)
% default_params - structure, with one field for every potential
% property/value pair. Each field will contain the default
% value for that property. If no default is supplied for a
% given property, then that field must be empty.
%
% pv_array - cell array of property/value pairs.
% Case is ignored when comparing properties to the list
% of field names. Also, any unambiguous shortening of a
% field/property name is allowed.
%
% arguments: (output)
% params - parameter struct that reflects any updated property/value
% pairs in the pv_array.
%
% Example usage:
% First, set default values for the parameters. Assume we
% have four parameters that we wish to use optionally in
% the function examplefun.
%
% - 'viscosity', which will have a default value of 1
% - 'volume', which will default to 1
% - 'pie' - which will have default value 3.141592653589793
% - 'description' - a text field, left empty by default
%
% The first argument to examplefun is one which will always be
% supplied.
%
% function examplefun(dummyarg1,varargin)
% params.Viscosity = 1;
% params.Volume = 1;
% params.Pie = 3.141592653589793
%
% params.Description = '';
% params=parse_pv_pairs(params,varargin);
% params
%
% Use examplefun, overriding the defaults for 'pie', 'viscosity'
% and 'description'. The 'volume' parameter is left at its default.
%
% examplefun(rand(10),'vis',10,'pie',3,'Description','Hello world')
%
% params =
% Viscosity: 10
% Volume: 1
% Pie: 3
% Description: 'Hello world'
%
% Note that capitalization was ignored, and the property 'viscosity'
% was truncated as supplied. Also note that the order the pairs were
% supplied was arbitrary.
npv = length(pv_pairs);
n = npv/2;
if n~=floor(n)
error 'Property/value pairs must come in PAIRS.'
end
if n<=0
% just return the defaults
return
end
if ~isstruct(params)
error 'No structure for defaults was supplied'
end
% there was at least one pv pair. process any supplied
propnames = fieldnames(params);
lpropnames = lower(propnames);
for i=1:n
p_i = lower(pv_pairs{2*i-1});
v_i = pv_pairs{2*i};
ind = strmatch(p_i,lpropnames,'exact');
if isempty(ind)
ind = find(strncmp(p_i,lpropnames,length(p_i)));
if isempty(ind)
error(['No matching property found for: ',pv_pairs{2*i-1}])
elseif length(ind)>1
error(['Ambiguous property name: ',pv_pairs{2*i-1}])
end
end
p_i = propnames{ind};
% override the corresponding default in params.
% Use setfield for compatibility with older releases.
params = setfield(params,p_i,v_i); %#ok
end
|
github
|
atcollab/at-master
|
atreadbeta.m
|
.m
|
at-master/atmat/pubtools/lattice_tools/atreadbeta.m
| 7,602 |
utf_8
|
b82299ba84340d21116de4990fde8cef
|
function [superp,periods]=atreadbeta(filename,cavipass,bendpass,quadpass,multipass)
%ATREADBETA reads a BETA file
%
%ring=ATREADBETA(fname,cavipass,bendpass,quadpass,multipass)
%
%FILENAME: BETA file
%CAVIPASS: pass method for cavities (default IdentityPass)
%BENDPASS: pass method for dipoles (default BndMPoleSymplectic4Pass)
%QUADPASS: pass method for quadrupoles (default StrMPoleSymplectic4Pass)
%MULTIPASS: pass method for sextupoles (default StrMPoleSymplectic4Pass)
%
%[superp,periods]=ATREADBETA(fname,cavipass,bendpass,quadpass)
% returns only one superperiod and the number of superperiods
%
%See also: ATX, ATLINOPT, ATMODUL
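%
% Example (illustrative sketch; 'ring.str' is a placeholder file name, not a
% file distributed with AT):
%   ring = atreadbeta('ring.str');                     % full ring, default pass methods
%   [sp,nper] = atreadbeta('ring.str','RFCavityPass'); % one superperiod + number of periods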
global GLOBVAL
persistent fpath
if isempty(fpath), fpath=getenv('DBETA'); end
if nargin < 5, multipass='StrMPoleSymplectic4Pass'; end
if nargin < 4, quadpass='StrMPoleSymplectic4Pass'; end
if nargin < 3, bendpass='BndMPoleSymplectic4Pass'; end
if nargin < 2, cavipass='IdentityPass'; end
if nargin < 1, filename=''; end
if isempty(filename)
[fname,fpath]=uigetfile('*.str','BETA structure',[fpath filesep]);
if ~ischar(fname), error('ReadBeta:NoFile','No file selected'); end
filename=fullfile(fpath,fname);
end
fid=fopen(filename,'rt');
if fid < 0
error('ReadBeta:couldNotReadFile','Unable to read file ''%s''. No such file',filename);
end
betadelim(fid,'LIST OF ELEMENTS');
GLOBVAL.E0=1E9;
line=fgetl(fid);
nb_elems=sscanf(line,'%d');
elemtable=struct();
cavilist={};
for el=1:nb_elems
nextelem=readelem(fid,cavipass,bendpass,quadpass,multipass);
try
elemtable.(nextelem.FamName)=nextelem;
catch %#ok<CTCH>
nextelem.FamName=['x' nextelem.FamName];
elemtable.(nextelem.FamName)=nextelem;
end
if isfield(nextelem,'Class') && strcmp(nextelem.Class,'RFCavity')
cavilist{end+1}=nextelem; %#ok<AGROW>
end
end
if isempty(cavilist)% Prepare a default cavity
cavilist{1}=atrfcavity('RFCAV',0,0,0,1,GLOBVAL.E0,cavipass);
end
eledict=fieldnames(elemtable);
disp(['Elements processed (' num2str(nb_elems) ' elements)']);
betadelim(fid,'STRUCTURE');
line=fgetl(fid);
nb_stru=sscanf(line,'%d');
superp=cell(nb_stru,1);
dipelem=[]; % Pending dipole element (waiting for exit face)
anglein=0; % Current dipole face angles
angleout=0;
lff=0; % Current dipole fringe field extension
displ=zeros(1,3); % Current misalignment vector
srot=0; % Current element rotation
id_stru=0;
for el=1:nb_stru
elcode=fscanf(fid,'%s',1); % Select element in the table
try
elnum=str2double(elcode);
if isfinite(elnum)
elcode=eledict{elnum};
end
nextelem=elemtable.(elcode);
catch %#ok<CTCH>
error('ReadBeta:BadElem',['Cannot identify element ' elcode]);
end
switch nextelem.BetaCode % Process the element
case 'CO'
if isempty(dipelem) % Entrance face
anglein=nextelem.Angle;
lff=nextelem.Lff;
else
angleout=nextelem.Angle; % Exit face
id_stru=id_stru+1; % create immediately in case of 2 adjacent CO elems
superp{id_stru}=atelem(dipelem,'EntranceAngle',anglein,...
'ExitAngle',angleout,'FullGap',0,'FringeInt',lff);
anglein=0;
angleout=0;
lff=0;
dipelem=[];
end
case 'RO'
srot=srot+nextelem.Srot;
case 'DE'
displ=displ+nextelem.Displacement;
otherwise
if ~isempty(dipelem)
id_stru=id_stru+1;
superp{id_stru}=atelem(dipelem,'EntranceAngle',anglein,...
'ExitAngle',angleout,'FullGap',0,'FringeInt',lff);
anglein=0;
angleout=0;
lff=0;
dipelem=[];
end
if srot ~= 0
srollmat=mkSRotationMatrix(srot);
nextelem.R1=srollmat;
nextelem.R2=srollmat';
end
if max(abs(displ)) > 0
nextelem.T1([1 3 5])=displ;
nextelem.T2([1 3 5])=-displ;
end
if strcmp(nextelem.BetaCode,'DI')
dipelem=nextelem;
else
id_stru=id_stru+1;
superp{id_stru}=nextelem;
end
end
end
superp(id_stru+1:end)=[];
disp(['Structure processed (' num2str(nb_stru) ' elements)']);
nper=fscanf(fid,'%d',1);
fclose(fid);
cavities=find(atgetcells(superp,'Class','RFCavity'));
if isempty(cavities) % add implicit cavity if necessary
superp{end+1}=cavilist{1};
cavities=length(superp);
end
superp=atsetfieldvalues(superp,'Energy',GLOBVAL.E0);
if nargout >= 2
periods=nper;
for i=cavities' % set cavity frequency
superp{i}=tunecavity(superp{i},...
findspos(superp,id_stru+1),1,nper);
end
else
for i=cavities' % set cavity frequency
superp{i}=tunecavity(superp{i},...
findspos(superp,id_stru+1),nper,nper);
end
if nper > 1
superp=repmat(superp,1,nper);
end
end
evalin('base','global GLOBVAL');
function cav=tunecavity(cav,clength,ncell,ntot)
frev=PhysConstant.speed_of_light_in_vacuum.value/clength;
if cav.HarmNumber > 1
harm=ceil(cav.HarmNumber/ntot);
else
harm=round(1.66678*clength);
end
cav.Frequency=frev*harm;
cav.HarmNumber=ncell*harm;
function newelem=readelem(fid,cavipass,bendpass,quadpass,multipass)
global GLOBVAL
line=fgetl(fid);
next=1;
[elname,count,errmess,nl]=sscanf(line(next:end),'%s',1); %#ok<ASGLU>
next=next+nl;
[code,count,errmess,nl]=sscanf(line(next:end),'%s',1); %#ok<ASGLU>
next=next+nl;
params=sscanf(line(next:end),'%f')';
params((length(params)+1):3)=0;
switch (code)
case 'SD'
newelem=atdrift(elname,params(1));
case 'QP'
newelem=atquadrupole(elname,params(1),params(2),quadpass);
case 'DE'
newelem=atelem(atmarker(elname),'Displacement',params(1:3));
case 'RO'
newelem=atelem(atmarker(elname),'Srot',params(1));
case 'CO'
newelem=atelem(atmarker(elname),'Angle',params(1),'Lff',params(3));
case 'DI'
strength=-params(3)/params(2)/params(2);
newelem=atsbend(elname,params(1)*params(2),params(1),strength,bendpass);
case 'SX'
if params(1) < 0.001
code='LD3';
newelem=atthinmultipole(elname,[],[0 0 params(1)*params(2)]);
else
newelem=atsextupole(elname,params(1),params(2),multipass);
end
case 'LD'
order=params(2)/2;
polb=[];
polb(1,order)=params(1);
code=[code int2str(order)];
newelem=atthinmultipole(elname,[],polb);
case 'LT'
order=params(2)/2;
pola=[];
pola(1,order)=params(1);
code=[code int2str(order)];
newelem=atthinmultipole(elname,pola,[]);
case 'PU'
newelem=atmonitor(elname);
case 'KI'
if params(3) > 0, code='CHV'; end
newelem=atcorrector(elname,0,[params(1) params(2)],'IdentityPass');
case 'CA'
GLOBVAL.E0=params(3);
newelem=atrfcavity(elname,0,abs(params(1)),0,params(2),params(3),cavipass);
otherwise
newelem=atmarker(elname);
end
newelem.BetaCode=code;
function betadelim(fid,code)
while true
while true
line=fgetl(fid);
if ~ischar(line), error('ReadBeta:EndOfFile','Encountered unexpected end of file.'); end
if ~isempty(strfind(line,'***')), break; end
end
if ~isempty(strfind(line,code)), break; end
end
|
github
|
atcollab/at-master
|
intlat.m
|
.m
|
at-master/atmat/atgui/intlat.m
| 12,947 |
utf_8
|
4ae7078bdd2fb416bd61740364f2fee9
|
function intlat(varargin)
%INTLAT Interactive AT lattice editor
% INTLAT(DIRECTION)
% Direction is the initial angle [rad] of the orbit with respect
% to the plot axis
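%
% Example (illustrative sketch; assumes THERING holds a valid AT lattice and
% the intlat figure file is available on the MATLAB path):
%   global THERING;
%   intlat;          % open the editor, orbit initially along the plot axis
%   intlat(pi/4);    % same, with the orbit starting at 45 degrees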
global THERING
if nargin < 1 || isnumeric(varargin{1})
if nargin == 1
STARTANGLE = varargin{1};
else
STARTANGLE = 0;
end
% LAUNCH GUI
fig = openfig(mfilename,'reuse');
set(fig,'ToolBar','none','HandleVisibility','callback');
handles = guihandles(fig);
guidata(fig,handles);
NumElements = length(THERING);
AllFamNames = getcellstruct(THERING,'FamName',1:NumElements);
NumFamilies = 0;
FamNames = {};
for i=1:NumElements
if ~any(strcmp(FamNames,THERING{i}.FamName))
NumFamilies = NumFamilies + 1;
FamNames{NumFamilies} = THERING{i}.FamName;
end
end
FamNames = sort(FamNames);
[Families(1:NumFamilies).FamName] = deal(FamNames{:});
set(handles.FamilyPMenu,'String',FamNames);
set(handles.IconTypePMenu,'String',{'line','rectangle','o','x'});
[x2d, y2d, a2d] = Survey2D(THERING,STARTANGLE);
XScale=max(x2d)-min(x2d);
YScale=max(y2d)-min(y2d);
set(handles.Axes,'DataAspectRatioMode','manual', ...
'DataAspectRatio',[1 1 1],...
'PlotBoxAspectRatioMode','manual', ...
'PlotBoxAspectRatio',[XScale YScale 1]);
FamNumbers = zeros(1,NumElements);
for i =1:NumFamilies
Families(i).KidsList = find(strcmp(Families(i).FamName, AllFamNames));
Families(i).Display = 1;
Families(i).Color = [0 0 0];
Families(i).IconType = 'line';
Families(i).FieldsList = fieldnames(THERING{Families(i).KidsList(1)});
Families(i).SelectedFields = [find(strcmp(Families(i).FieldsList,'FamName')),...
find(strcmp(Families(i).FieldsList,'Length')),...
find(strcmp(Families(i).FieldsList,'PassMethod'))];
Families(i).IconWidth = 0;
FamNumbers(Families(i).KidsList)=i;
end
Elements = struct('FamIndex',num2cell(FamNumbers),'IconHandle',0);
Families = SetDefaultIcons(THERING,Families,Elements);
setappdata(fig,'Families',Families);
setappdata(fig,'Elements',Elements);
setappdata(fig,'X2D',x2d);
setappdata(fig,'Y2D',y2d);
setappdata(fig,'A2D',a2d);
ShowFamilyDisplayMode(handles.FamilyPMenu, [], handles);
% Plot all elements
PlotElements(handles,1:NumElements);
elseif ischar(varargin{1}) % INVOKE NAMED SUBFUNCTION OR CALLBACK
try
if (nargout)
[varargout{1:nargout}] = feval(varargin{:}); % FEVAL switchyard
else
feval(varargin{:}); % FEVAL switchyard
end
catch
disp(lasterr);
end
end
function f = SetDefaultIcons(Lattice,Families,Elements)
L = findspos(Lattice,length(Lattice)+1);
f = Families;
% Make default icons for elements of different physical types
for i=1:length(Families)
Elem = Lattice{Families(i).KidsList(1)};
% make icons for bending magnets
if isfield(Elem,'BendingAngle') && Elem.BendingAngle
f(i).Display = 1;
f(i).Color = [1 1 0];
f(i).IconType = 'rectangle';
f(i).IconWidth = L/300;
% Quadrupoles
elseif isfield(Elem,'K') && Elem.K
if Elem.K > 0 % focusing
f(i).Display = 1;
f(i).Color = [1 0 0];
f(i).IconType = 'rectangle';
f(i).IconWidth = L/400;
else
f(i).Display = 1;
f(i).Color = [0 0 1];
f(i).IconType = 'rectangle';
f(i).IconWidth = L/400;
end
elseif isfield(Elem,'PolynomB') && length(Elem.PolynomB)>2 && Elem.PolynomB(3)
if Elem.PolynomB(3)>0 % focusing sextupole
f(i).Display = 1;
f(i).Color = [1 0 1];
f(i).IconType = 'rectangle';
f(i).IconWidth = L/500;
else
f(i).Display = 1;
f(i).Color = [0 1 0];
f(i).IconType = 'rectangle';
f(i).IconWidth = L/500;
end
elseif isfield(Elem,'Frequency') && isfield(Elem,'Voltage') % RF cavity
f(i).Display = 1;
f(i).Color = [1 0.5 0];
f(i).IconType = 'o';
f(i).IconWidth = 0;
end
end
% --------------------------------------------------------------------
function PlotElements(GUIhandles,INDEX)
% Retrieve application data
% Temporarily set handle visibility to 'on' for drawing elements
set(GUIhandles.IntlatMainFigure,'HandleVisibility','on');
figure(GUIhandles.IntlatMainFigure);
Families = getappdata(GUIhandles.IntlatMainFigure,'Families');
Elements = getappdata(GUIhandles.IntlatMainFigure,'Elements');
x2d = getappdata(GUIhandles.IntlatMainFigure,'X2D');
y2d = getappdata(GUIhandles.IntlatMainFigure,'Y2D');
a2d = getappdata(GUIhandles.IntlatMainFigure,'A2D');
xcorners = [-1 -1 1 1];
ycorners = [ 1 1 -1 -1];
for i=[INDEX(:)]'
FamIndex = Elements(i).FamIndex;
% If Icon already exists
if Elements(i).IconHandle
delete(Elements(i).IconHandle);
Elements(i).IconHandle = 0;
end
if Families(FamIndex).Display
switch Families(FamIndex).IconType
case 'rectangle'
% compute vertex coordinates
IconWidth = Families(FamIndex).IconWidth;
vx = [ x2d(i), x2d(i+1), x2d(i+1), x2d(i)] + IconWidth*xcorners*sin((a2d(i)+a2d(i+1))/2);
vy = [ y2d(i), y2d(i+1), y2d(i+1), y2d(i)] + IconWidth*ycorners*cos((a2d(i)+a2d(i+1))/2);
Elements(i).IconHandle = patch(vx,vy,Families(FamIndex).Color);
case 'line'
Elements(i).IconHandle = line([x2d(i) x2d(i+1)],[y2d(i) y2d(i+1)]);
set(Elements(i).IconHandle,'Color',Families(FamIndex).Color);
case 'o'
Elements(i).IconHandle = line([x2d(i) x2d(i+1)],[y2d(i) y2d(i+1)]);
set(Elements(i).IconHandle,'Color',Families(FamIndex).Color,...
'Marker','o','MarkerFaceColor',Families(FamIndex).Color);
case 'x'
Elements(i).IconHandle = line([x2d(i) x2d(i+1)],[y2d(i) y2d(i+1)]);
set(Elements(i).IconHandle,'Color',Families(FamIndex).Color,...
'Marker','x');
end
% Assign Callback
set(Elements(i).IconHandle,'UserData',i,...
'ButtonDownFcn','intlat(''ElementCallback'',gcbo)');
end
end
setappdata(GUIhandles.IntlatMainFigure,'Elements',Elements);
set(GUIhandles.IntlatMainFigure,'HandleVisibility','callback');
function ShowFamilyDisplayMode(h, eventdata, handles, varargin)
Families = getappdata(handles.IntlatMainFigure,'Families');
FNum = get(h,'Value');
set(handles.DisplayCheckBox,'Value',Families(FNum).Display);
set(handles.ColorSelectionBox,'BackgroundColor',Families(FNum).Color);
set(handles.FieldListBox,'String',Families(FNum).FieldsList);
set(handles.FieldListBox,'Value',Families(FNum).SelectedFields);
set(handles.WidthEditBox,'String',num2str(Families(FNum).IconWidth));
PossibleIconTypes = get(handles.IconTypePMenu,'String');
set(handles.IconTypePMenu,'value',...
find(strcmp(Families(FNum).IconType,PossibleIconTypes)));
if Families(FNum).Display
Visible = 'on';
else
Visible = 'off';
end
set(handles.IconTypePMenu,'Visible',Visible);
set(handles.ColorSelectionBox,'Visible',Visible);
set(handles.WidthEditBox,'Visible',Visible);
set(handles.FieldListBox,'Visible',Visible);
set(handles.DisplayIconLabel,'Visible',Visible);
set(handles.IconColorLabel,'Visible',Visible);
set(handles.IconWidthLabel,'Visible',Visible);
set(handles.FieldsLabel,'Visible',Visible);
function SelectColor(h, eventdata, handles, varargin)
Families = getappdata(handles.IntlatMainFigure,'Families');
FNum = get(handles.FamilyPMenu,'Value');
NewColor = uisetcolor(['Select icon color for ',Families(FNum).FamName,' family']);
set(handles.ColorSelectionBox,'BackgroundColor',NewColor);
Families(FNum).Color = NewColor;
setappdata(handles.IntlatMainFigure,'Families',Families);
PlotElements(handles,Families(FNum).KidsList);
function SetIconType(h, eventdata, handles, varargin)
Families = getappdata(handles.IntlatMainFigure,'Families');
FNum = get(handles.FamilyPMenu,'Value');
PossibleIconTypes = get(handles.IconTypePMenu,'String');
NewIconType = PossibleIconTypes{get(h,'Value')};
Families(FNum).IconType = NewIconType;
setappdata(handles.IntlatMainFigure,'Families',Families);
PlotElements(handles,Families(FNum).KidsList);
function SetDisplay(h, eventdata, handles, varargin)
Families = getappdata(handles.IntlatMainFigure,'Families');
FNum = get(handles.FamilyPMenu,'Value');
Families(FNum).Display = get(h,'Value');
if Families(FNum).Display
Visible = 'on';
else
Visible = 'off';
end
set(handles.IconTypePMenu,'Visible',Visible);
set(handles.ColorSelectionBox,'Visible',Visible);
set(handles.WidthEditBox,'Visible',Visible);
set(handles.FieldListBox,'Visible',Visible);
set(handles.DisplayIconLabel,'Visible',Visible);
set(handles.IconColorLabel,'Visible',Visible);
set(handles.IconWidthLabel,'Visible',Visible);
set(handles.FieldsLabel,'Visible',Visible);
setappdata(handles.IntlatMainFigure,'Families',Families);
PlotElements(handles,Families(FNum).KidsList);
% --------------------------------------------------------------------
function SelectFieldsFromList(h, eventdata, handles, varargin)
Families = getappdata(handles.IntlatMainFigure,'Families');
FNum = get(handles.FamilyPMenu,'Value');
Families(FNum).SelectedFields = get(h,'Value');
setappdata(handles.IntlatMainFigure,'Families',Families);
function SetIconWidth(h, eventdata, handles, varargin)
Families = getappdata(handles.IntlatMainFigure,'Families');
FNum = get(handles.FamilyPMenu,'Value');
NewIconWidth = str2double(get(h,'String'));
Families(FNum).IconWidth = NewIconWidth;
setappdata(handles.IntlatMainFigure,'Families',Families);
PlotElements(handles,Families(FNum).KidsList);
% --------------------------------------------------------------------
function varargout = ElementCallback(h)
index = get(h,'UserData');
handles = guidata(gcbo);
Families = getappdata(handles.IntlatMainFigure,'Families');
Elements = getappdata(handles.IntlatMainFigure,'Elements');
FamIndex= Elements(index).FamIndex;
FieldsList = Families(FamIndex).FieldsList;
Fields2Edit = FieldsList(Families(FamIndex).SelectedFields);
intelem(index,Fields2Edit);
% --------------------------------------------------------------------
function varargout = ColorSelection_Callback(h, eventdata, handles, varargin)
% --------------------------------------------------------------------
function varargout = listbox1_Callback(h, eventdata, handles, varargin)
% --------------------------------------------------------------------
function varargout = Untitled_1_Callback(h, eventdata, handles, varargin)
% --------------------------------------------------------------------
function varargout = ZoomButtonCallback(h, eventdata, handles, varargin)
% --------------------------------------------------------------------
function [x2d, y2d, a2d] = Survey2D(LATTICE,STARTANGLE)
% Determine 2-d geometry of the LATTICE
NumElements = length(LATTICE);
x2d = zeros(1,NumElements+1);
y2d = zeros(1,NumElements+1);
a2d = zeros(1,NumElements+1); % angle of orbit in radians
a2d(1) = STARTANGLE;
for en = 1:NumElements-1
if isfield(LATTICE{en},'BendingAngle')
ba = LATTICE{en}.BendingAngle(1); % bending angle in radians
else
ba = 0;
end
if ba == 0
Lt = LATTICE{en}.Length;
Lp = 0;
else
Lt = LATTICE{en}.Length*sin(ba)/ba;
Lp = -LATTICE{en}.Length*(1-cos(ba))/ba;
end
x2d(en+1) = x2d(en) + Lt*cos(a2d(en)) - Lp*sin(a2d(en));
y2d(en+1) = y2d(en) + Lt*sin(a2d(en)) + Lp*cos(a2d(en));
a2d(en+1)=a2d(en) - ba;
end
x2d(NumElements+1) = x2d(1);
y2d(NumElements+1) = y2d(1);
a2d(NumElements+1) = a2d(1);
X0 = (max(x2d)+min(x2d))/2;
Y0 = (max(y2d)+min(y2d))/2;
x2d = x2d - X0;
y2d = y2d - Y0;
|
github
|
atcollab/at-master
|
ataddmpoleerrors.m
|
.m
|
at-master/atmat/lattice/ataddmpoleerrors.m
| 2,135 |
utf_8
|
fa44b965ae74e661bb1a4b2edcfca94e
|
function newring = ataddmpoleerrors(ring,type,newindex,strength,radius,randflag)
%ATADDMPOLEERRORS adds a multipole error component to all elements of type
%'type' where type can be 'dipole' or 'quadrupole' (the 'sextupole' case is
%not yet handled below and leaves the ring unchanged)
%
%[newring] = ATADDMPOLEERRORS(ring,type,newindex,strength,radius,randflag)
%
%ring = input ring
%type = 'dipole', 'quadrupole' or 'sextupole'
%newindex: index of Multipole to add
%strength: strength of the multipole component at the given radius
%radius: reference radius for defining the absolute strength
%if randflag is set to 1, then the errors will be random, Gaussian
%distributed
% The formula for the added errors is
% B^(N)_(n) = radius^(n-N)*b_n/b_N
% It represents the relative field error to the design value at the ref.
% radius
%For example, to add a random octupole error of 1e-4 at 25 mm, relative to all
%quadrupoles:
% newring = ataddmpoleerrors(ring,'quadrupole',4,1e-4,.025,1);
%
%See also: ataddmpolecomppoly attiltelem atshiftelem
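%
% Example (illustrative sketch): add a fixed (non-random) sextupole error of
% 2e-4 at 30 mm, relative to all dipoles:
%  newring = ataddmpoleerrors(ring,'dipole',3,2e-4,.030,0);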
% first find all elements of the given type.
% then run through them, and
% create a new element with new polynom, using ataddmpolecomppoly to make the new
% PolyNom with a random strength scaled by strength and
% atelem to make the element. Now replace the old element with the new.
if (strcmp(type,'dipole'))
elemindex0=finddipoles(ring);
elemindex=find(elemindex0);
refindex=1;
else
if (strcmp(type,'quadrupole'))
elemindex0=findquadrupoles(ring);
elemindex=find(elemindex0);
refindex=2;
else
elemindex=[];
end
end
newring=ring;
for j=1:length(elemindex)
elmnt=ring{elemindex(j)};
polyB = elmnt.PolynomB;
if(randflag)
strength = strength*randn;
end
polyB2 = ataddmpolecomppoly(polyB,refindex,newindex,strength,radius);
elmnt.PolynomB=polyB2;
elmnt.MaxOrder=length(polyB2);
newring{elemindex(j)}=elmnt;
end
function quads = findquadrupoles(ring)
dipoles = finddipoles(ring);
isquadrupole=@(elem,polyb) length(polyb) >= 2 && polyb(2)~=0;
quads=atgetcells(ring,'PolynomB',isquadrupole) & ~dipoles;
function dipoles = finddipoles(ring)
isdipole=@(elem,bangle) bangle~=0;
dipoles=atgetcells(ring,'BendingAngle',isdipole);
|
github
|
atcollab/at-master
|
readmad.m
|
.m
|
at-master/atmat/lattice/Converters/readmad.m
| 7,722 |
utf_8
|
8357572071d29729e9cebd24c41b84da
|
function ATLATTICE = readmad(FILENAME)
%READMAD reads the file output of MAD commands
% TWISS, STRUCTURE, SURVEY.
%
% ATLATTICE = readmad(FILENAME)
%
% READMAD reads the MAD file header to determine the number of elements
% in the lattice, symmetry flag, the number of superperiods etc.
%
% Then it interprets the entry for each element in the MAD output file.
% The topology of the lattice is completely determined by
% Length, Bending Angle, and Tilt Angle in each element
%
% READMAD uses MAD TYPES and the values of the element parameters to determine
% which pass-method function in AT to use.
%
% MAD TYPE | AT PassMethod
% ----------------------------------
% DRIFT | DriftPass
% SBEND | BendLinearPass, BndMPoleSymplectic4Pass
% QUADRUPOLE | QuadLinearPass
% SEXTUPOLE | StrMPoleSymplectic4Pass
% OCTUPOLE | StrMPoleSymplectic4Pass
% MULTIPOLE | !!! Not implemented, in future - ThinMPolePass
% RFCAVITY | RFCavityPass
% KICKER | CorrectorPass
% HKICKER | CorrectorPass
% VKICKER | CorrectorPass
% MONITOR | IdentityPass
% HMONITOR | IdentityPass
% VMONITOR | IdentityPass
% MARKER | IdentityPass
% -----------------------------------
% all others | Length=0 -> IdentityPass, Length~=0 -> DriftPass
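%
% Example (illustrative sketch; 'machine.twiss' is a placeholder for a MAD
% TWISS output file):
%   ATLATTICE = readmad('machine.twiss');
%   % cavities are created as drifts/markers; see the CAVITYON hint printed
%   % at the end of the conversion to activate them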
[fid, errmsg] = fopen(FILENAME,'r');
if fid==-1
error('Could not open file');
end
warnlevel = warning;
warning on
global READMADCAVITYFLAG
READMADCAVITYFLAG = 0;
LINE1 = fgetl(fid);
LINE2 = fgetl(fid);
S = LINE1(9:16);
nonspaceindex = find(~isspace(S) & (S~=0));
MADFILETYPE = S(nonspaceindex);
% The possibilities for MADFILETYPE are
% TWISS,SURVEY,STRUCTUR,ENVELOPE
NSUPER = str2double(LINE1(41:48));
S = LINE1(56);
SYMFLAG = eq(S,'T');
NPOS = str2double(LINE1(57:64));
disp(['MAD output file: ',FILENAME]);
disp(' ');
disp(['MAD file type: ',MADFILETYPE]);
disp(['Symmetry flag: ',num2str(SYMFLAG)]);
disp(['Number of superperiods: ',num2str(NSUPER)]);
disp(['Number of elements : ',num2str(NPOS)]);
disp(' ');
% Allocate cell array to store AT lattice
% MAD files have one extra entry for the beginning of the lattice
ATNumElements = NPOS-1;
ATLATTICE = cell(1,ATNumElements);
switch MADFILETYPE
case {'STRUCTUR','SURVEY'}
NumLinesPerElement = 4;
case {'TWISS','CHROM'}
NumLinesPerElement = 5;
case 'ENVELOPE'
NumLinesPerElement = 8;
end
ELEMENTDATA = cell(1,NumLinesPerElement);
% Skip the INITIAL element in MAD file
for i = 1:NumLinesPerElement;
LINE = fgetl(fid);
end
for i = 1:ATNumElements
% Read all NumLinesPerElement lines of the element entry
for j= 1:NumLinesPerElement
ELEMENTDATA{j}=fgetl(fid);
end
ATLATTICE{i}=mad2at(ELEMENTDATA,MADFILETYPE);
end
fclose(fid);
warning(warnlevel);
disp(' ');
disp(['AT cell array was successfully created from MAD output file ',FILENAME]);
disp('Some information may not be available in MAD output files')
disp('Some elements may have to be further modified to be consistent with AT element models')
disp(' ');
disp('For RF cavities READMAD creates elements that use DriftPass or IdentityPass (if Length ==0)');
disp('Use CAVITYON(ENERGY) [eV] in order to turn them into cavities');
% ---------------------------------------------------------------------------
function atelement = mad2at(elementdata,madfiletype)
global READMADCAVITYFLAG
MADTYPE = elementdata{1}(1:4);
atelement.FamName = deblank(elementdata{1}(5:20));
atelement.Length = str2double(elementdata{1}(21:32));
% Type specific
switch MADTYPE
case 'DRIF'
atelement.PassMethod = 'DriftPass';
case {'MARK','MONI','HMON','VMON'}
atelement.PassMethod = 'IdentityPass';
case 'RFCA'
% Note MAD determines the RF frequency from the harmonic number HARMON
% defined by the MAD statement BEAM, and the total length of the closed orbit
if ~READMADCAVITYFLAG
warning('MAD lattice contains RF cavities')
READMADCAVITYFLAG = 1;
end
atelement.Frequency = 1e6*str2double(elementdata{2}(17:32)); % MAD uses MHz
atelement.Voltage = 1e6*str2double(elementdata{2}(33:48));
atelement.PhaseLag = str2double(elementdata{2}(49:64));
if atelement.Length
atelement.PassMethod = 'DriftPass';
else
atelement.PassMethod = 'IdentityPass';
end
case 'SBEN'
K1 = str2double(elementdata{1}(49:64));
K2 = str2double(elementdata{1}(65:80));
atelement.BendingAngle = str2double(elementdata{1}(33:48));
atelement.ByError = 0;
atelement.MaxOrder = 3;
atelement.NumIntSteps = 10;
atelement.TiltAngle = str2double(elementdata{2}(1:16));
atelement.EntranceAngle = str2double(elementdata{2}(17:32));
atelement.ExitAngle = str2double(elementdata{2}(33:48));
atelement.K = K1;
atelement.PolynomB = [0 K1 K2 0];
atelement.PolynomA = [0 0 0 0];
atelement.T1 = zeros(1,6);
atelement.T2 = zeros(1,6);
atelement.R1 = eye(6);
atelement.R2 = eye(6);
if atelement.BendingAngle
if K2
atelement.PassMethod = 'BndMPoleSymplectic4Pass';
else
atelement.PassMethod = 'BendLinearPass';
end
else
if K2
atelement.PassMethod = 'StrMPoleSymplectic4Pass';
elseif K1
atelement.PassMethod = 'QuadLinearPass';
else
atelement.PassMethod = 'DriftPass';
end
end
case 'QUAD'
K1 = str2double(elementdata{1}(49:64));
atelement.MaxOrder = 3;
atelement.NumIntSteps = 10;
atelement.K = K1;
atelement.PolynomB = [0 K1 0 0];
atelement.PolynomA = [0 0 0 0];
atelement.T1 = zeros(1,6);
atelement.T2 = zeros(1,6);
TILT = str2double(elementdata{2}(1:16));
atelement.R1 = mkSRotationMatrix(TILT);
atelement.R2 = mkSRotationMatrix(-TILT);
atelement.PassMethod = 'QuadLinearPass';
case 'SEXT'
% MAD multipole strength coefficients K(n) are defined without 1/n!
% Adjust to match AT
K2 = str2double(elementdata{1}(65:80))/2;
atelement.MaxOrder = 3;
atelement.NumIntSteps = 10;
atelement.PolynomB = [0 0 K2 0];
atelement.PolynomA = [0 0 0 0];
atelement.T1 = zeros(1,6);
atelement.T2 = zeros(1,6);
TILT = str2double(elementdata{2}(1:16));
atelement.R1 = mkSRotationMatrix(TILT);
atelement.R2 = mkSRotationMatrix(-TILT);
atelement.PassMethod = 'StrMPoleSymplectic4Pass';
case 'OCTU'
% MAD multipole strength coefficients K(n) are defined without 1/n!
% Adjust to match AT
K3 = str2double(elementdata{2}(17:32))/6;
atelement.MaxOrder = 3 ;
atelement.NumIntSteps = 10;
atelement.PolynomB = [0 0 0 K3];
atelement.PolynomA = [0 0 0 0];
atelement.T1 = zeros(1,6);
atelement.T2 = zeros(1,6);
TILT = str2double(elementdata{2}(1:16));
atelement.R1 = mkSRotationMatrix(TILT);
atelement.R2 = mkSRotationMatrix(-TILT);
atelement.PassMethod = 'StrMPoleSymplectic4Pass';
otherwise
if atelement.Length
atelement.PassMethod = 'DriftPass';
else
atelement.PassMethod = 'IdentityPass';
end
end
|
github
|
atcollab/at-master
|
atfrommadx.m
|
.m
|
at-master/atmat/lattice/Converters/MADX2AT/atfrommadx.m
| 27,333 |
utf_8
|
0e8d4629973f9993e9113666184071bc
|
function atfrommadx(seqfilemadX,E0,outfilename)
%function atfrommadx(seqfilemadX,E0,outfilename)
% transform a madX sequence file (saved sequence) into an AT lattice structure.
%
% This procedure reads a saved lattice (sequence in madx) in madX
% and converts it to an AT lattice
%
% (madx commands to save the sequences :
%
% _______ MADX code _________
% use,period=sequencename1;
% use,period=sequencename2;
% use,period=sequencename3;
% SAVE,FILE='seqfilemadX.seq';
% ___________________________
%
% seqfilemadX.seq will contain sequencename1 sequencename2 sequencename3
% in the correct format in a single file
%
% )
%
% The routine outputs a Matlab macro with all the AT definitions and variables as
% in the madX file
%
% The order of the declarations is the same in the two files.
% declarations that contain other variables are moved to the end. (this may not be enough)
%
%
% Works also with single madX files not containing commands, only
% definitions.
%
% parameters:
% - seqfilemadX = name of the madX lattice file
% - E0 = design energy
% - outfilename (default: seqfilemadX_AT_LATTICE.mat)
%
% default pass methods:
% quadrupoles : StrMPoleSymplectic4Pass
% dipole : BndMPoleSymplectic4Pass
% multipole : StrMPoleSymplectic4Pass
% sextupole : StrMPoleSymplectic4Pass
% thinmultipole : ThinMPolePass
% correctors : ThinMPolePass
% cavity : DriftPass
%
%% changes history
% created 7-sep-2012 by S.M.Liuzzo @ ESRF
%
% updated 12-sep-2012 Cavity DriftPass (not IdentityPass)
% updated 13-sep-2012 Capital letter names
% CorrectorPass
% _red reduced versions
% updated 14-sep-2012 multipoles (1/n!) factor from madx to AT
% updated 21-dec-2012 rbend-sbend length conversion corrected
% updated 05-mar-2013 lattice output no fringes and with fringes (_FF).
%
% updated 20-mar-2013 removed output no fringes and with fringes (_FF).
% removed output reduced (_red).
% use atconstructors.
% call macro directly
% try-catch macro running
% tempname file and fulfile.
%% initial warnings
disp('important notice about conversion:')
disp('');
disp(['1) THE MADX FILE MUST BE OUTPUT OF save, file=seqfilemadX;'...
' THIS GUARANTEES AN APPROPRIATE FILE FORMAT']);
disp('');
disp(['2) THE MADX PROGRAM allows the use of variables that were not previously defined.'...
' Such variables are not known to AT.'...
' If the program fails but generates a ..._macro.m file, '...
' please edit this file, reordering the declarations, and run it.']);
disp('');
disp(['3) If periodname is not specified,' ...
' the name of the file (up to the dot)' ...
' is assumed to be the sequence name that you want to use']);
%% open madX sequence file
sX=fopen(seqfilemadX,'r');
% a ':' marks a definition: every ':' is preceded by the name
% of an element.
% Between two ':' all the parameters of an element or a line are found.
%% read the madX file into a cell array with one cell per statement (split at ';')
%SXCELL=textscan(sX,'%s','delimiter',';');
SXCELL=textscan(sX,'%s','CollectOutput',0,'delimiter','\n');
A=SXCELL{1};
B={};
iia=1;
iib=1;
B{1}=[];
while iia~=length(A)
if ~strcmp(A{iia}(end),';')
B{iib}=[B{iib} A{iia}]; % catenate until ; is found
else
B{iib}=[B{iib} A{iia}(1:end-1)]; % remove ;
iib=iib+1;
B{iib}=[];
end
iia=iia+1;
end
SXSTRING=B'; % still a cell array, but now each statement is stored as a separate cell.
% scroll and reshape to divide atributes
tmp={};
% for i=1:length(SXSTRING)
%
% waitbar(i/length(SXSTRING));
%
% SXSTRING{i}=strrep(SXSTRING{i},' ',''); % no spaces
% SXSTRING{i}=strrep(SXSTRING{i},':=','=');
% SXSTRING{i}=strrep(SXSTRING{i},';','');
%
% c=[1 sort([strfind(SXSTRING{i},',') strfind(SXSTRING{i},':')...
% strfind(SXSTRING{i},'=')]) length(SXSTRING{i})];
%
% % split sub strings
% for jc=1:length(c)-1
% if jc==1
% tmp=[tmp SXSTRING{i}(c(jc):c(jc+1))];
% else
% tmp=[tmp SXSTRING{i}(c(jc)+1:c(jc+1))];
% end
% end
%
% end
% SXSTRING=tmp;
tmp=cellfun(@(a)formatTextMADX(a),SXSTRING,'un',0);
SXSTRING=[tmp{:}];
%% open .m file to output matlab translated code
filemacroname=[tempname '.m'];
mafileout=fopen(filemacroname,'w+');
mst=['%% this is a macro that converts to AT the madX lattice: '...
seqfilemadX '\n%%\n%% Created: ' datestr(now)...
'\n%%\n%%\n\nglobal GLOBVAL;\nGLOBVAL.E0=' num2str(E0)...
';\n\n\n'];
def=['\n\n%%%% DEFINITIONS \n\n']; %#ok<*NBRAK>
lines=['\n\n%%%% LINES \n\n'];
var=['%%%% VARIABLES \n\n sxt_on=1;'];
formulas=['\n\n%%%% RELATIONS \n\n'];
%% convert to a matlab macro
j=1; % used in line block counter (element atributes counter)
h=waitbar(0,'Converting: ');
elemcount=0;
i=1; % skip header in mad8 file (element counter)
while i<length(SXSTRING)-2
if SXSTRING{i}(end)==':' % new element or line
def=[ def ...
]; %#ok<*AGROW> % end line and go to newline add name
SXSTRING{i}(1:end-1)=upper(SXSTRING{i}(1:end-1));
SXSTRING{i+j}=strrep(SXSTRING{i+j},'MARKER','MARKER,');
SXSTRING{i+j}=strrep(SXSTRING{i+j},'MARKER,,','MARKER,');
SXSTRING{i+j}=strrep(SXSTRING{i+j},'KICKER','KICKER,');
SXSTRING{i+j}=strrep(SXSTRING{i+j},'KICKER,,','KICKER,');
SXSTRING{i+j}=strrep(SXSTRING{i+j},'MONITOR','MONITOR,');
SXSTRING{i+j}=strrep(SXSTRING{i+j},'MONITOR,,','MONITOR,');
ElementType=SXSTRING{i+j}(1:end-1);
% display status of conversion
waitbar(i/(length(SXSTRING)-2),h,['Converting: ' ElementType]);
% THERING=[THERING ' ' SXSTRING{i+1}];
%j=2;
nwel=SXSTRING{i+j}(end);
switch ElementType
case {'quadrupole','QUADRUPOLE', 'QUADRUPO'}
def=[def '\n'];
def=[ def ...
SXSTRING{i}(1:end-1)...
'=atquadrupole('''...
SXSTRING{i}(1:end-1)...
''',0,0,''StrMPoleSymplectic4Pass'');\n'...
];
elemcount=elemcount+1;
while nwel~=':' % loops atributes of this element definition
def=ParseAtributesMADX_2_AT(def,SXSTRING{i}(1:end-1),SXSTRING{i+j},SXSTRING{i+j+1});
j=j+1; %go to new atribute
nwel=SXSTRING{i+j}(end);
end
case {'sextupole','SEXTUPOLE'}
def=[def '\n'];
def=[ def ...
SXSTRING{i}(1:end-1)...
'=atsextupole('''...
SXSTRING{i}(1:end-1)...
''',0,0,''StrMPoleSymplectic4Pass'');\n'...
];
elemcount=elemcount+1;
while nwel~=':' % loops atributes of this element definition
def=ParseAtributesMADX_2_AT(def,SXSTRING{i}(1:end-1),SXSTRING{i+j},SXSTRING{i+j+1});
j=j+1; %go to new atribute
nwel=SXSTRING{i+j}(end);
end
def=[ def ...
SXSTRING{i}(1:end-1) '.(''PolynomB'')=' SXSTRING{i}(1:end-1) '.(''PolynomB'')*1/2' ';\n' ...
];
case {'rbend','RBEND'}
def=[def '\n'];
def=[ def ...
SXSTRING{i}(1:end-1)...
'=atrbend('''...
SXSTRING{i}(1:end-1)...
''',0,0,0,''BndMPoleSymplectic4Pass'');\n'...
];
elemcount=elemcount+1;
while nwel~=':' % loops atributes of this element definition
def=ParseAtributesMADX_2_AT(def,SXSTRING{i}(1:end-1),SXSTRING{i+j},SXSTRING{i+j+1});
j=j+1; %go to new atribute
nwel=SXSTRING{i+j}(end);
end
% bendings are sector by default. change to rectangular
def=[ def ...
SXSTRING{i}(1:end-1) '.(''EntranceAngle'')=' SXSTRING{i}(1:end-1) '.(''EntranceAngle'')+' SXSTRING{i}(1:end-1) '.(''BendingAngle'')/2; \n'...
SXSTRING{i}(1:end-1) '.(''ExitAngle'')=' SXSTRING{i}(1:end-1) '.(''ExitAngle'')+' SXSTRING{i}(1:end-1) '.(''BendingAngle'')/2; \n'...
SXSTRING{i}(1:end-1) '.(''Length'')=' SXSTRING{i}(1:end-1)...
'.(''Length'')*(' SXSTRING{i}(1:end-1)...
'.(''BendingAngle'')/2)/sin(' SXSTRING{i}(1:end-1)...
'.(''BendingAngle'')/2); \n'...
SXSTRING{i}(1:end-1) '.(''MaxOrder'')=length(' SXSTRING{i}(1:end-1) '.(''PolynomB''))-1; \n'];
case {'sbend','SBEND'}
def=[def '\n'];
def=[ def ...
SXSTRING{i}(1:end-1)...
'=atsbend('...
'''' SXSTRING{i}(1:end-1)...
''',0,0,0,''BndMPoleSymplectic4Pass'');\n'...
];
elemcount=elemcount+1;
while nwel~=':' % loops atributes of this element definition
def=ParseAtributesMADX_2_AT(def,SXSTRING{i}(1:end-1),...
SXSTRING{i+j},SXSTRING{i+j+1});
j=j+1; %go to new atribute
nwel=SXSTRING{i+j}(end);
end
def=[ def SXSTRING{i}(1:end-1) '.(''MaxOrder'')=length(' SXSTRING{i}(1:end-1) '.(''PolynomB''))-1; \n'];
case {'DRIFT','drift'}
def=[def '\n'];
def=[ def ...
SXSTRING{i}(1:end-1)...
'=atdrift('...
'''' SXSTRING{i}(1:end-1)...
''',0,''DriftPass'');\n'...
];
elemcount=elemcount+1;
while nwel~=':' % loops atributes of this element definition
def=ParseAtributesMADX_2_AT(def,SXSTRING{i}(1:end-1),...
SXSTRING{i+j},SXSTRING{i+j+1});
j=j+1; %go to new atribute
nwel=SXSTRING{i+j}(end);
end
case {'RFCAVITY','rfcavity'}
def=[def '\n'];
def=[ def ...
SXSTRING{i}(1:end-1)...
'=atrfcavity(''' SXSTRING{i}(1:end-1)...
''',0,0,0,0,' num2str(E0) ...
',''DriftPass'');\n'...
];
elemcount=elemcount+1;
while nwel~=':' % loops atributes of this element definition
def=ParseAtributesMADX_2_AT(def,SXSTRING{i}(1:end-1),...
SXSTRING{i+j},SXSTRING{i+j+1});
j=j+1; %go to new atribute
nwel=SXSTRING{i+j}(end);
end
case {'MULTIPOLE','multipole'}
% multipoles should be StrMPoleSymplectic4Pass with short length
% to be compatible with MADX
def=[def '\n'];
def=[ def ...
SXSTRING{i}(1:end-1) '=atthinmultipole('...
'''' SXSTRING{i}(1:end-1) ''''...
',[0 0 0 0],[0 0 0 0],'...
'''ThinMPolePass'');\n'...
SXSTRING{i}(1:end-1) '.(''Length'')=0; \n'...
];
elemcount=elemcount+1;
% MADX--> ocf0: multipole,knl:={ 0, 0, 0,kocf0 };
while nwel~=':' % loops atributes of this element definition
multipoles=[];
if strcmp(SXSTRING{i+j+1}(1),'{') % if opening parenthesis found
multipoles=[multipoles '[' SXSTRING{i+j+1}(2:end)];
k=2;
while ~strcmp(SXSTRING{i+j+k}(end),'}') && k<10 % look for closing parenthesis
multipoles=[multipoles SXSTRING{i+j+k}];
k=k+1;
end
multipoles=[multipoles SXSTRING{i+j+k}(1:(end-1)) ']'];
end
if ~isempty(multipoles)
def=ParseAtributesMADX_2_AT(def,SXSTRING{i}(1:end-1),...
SXSTRING{i+j},multipoles);
else
def=ParseAtributesMADX_2_AT(def,SXSTRING{i}(1:end-1),...
SXSTRING{i+j},SXSTRING{i+j+1});
end
j=j+1; %go to new atribute
nwel=SXSTRING{i+j}(end);
end
def=[ def ...
SXSTRING{i}(1:end-1) '.(''Class'')=''Multipole''; \n'...% max order size polynomb -1
SXSTRING{i}(1:end-1) '.(''MaxOrder'')=numel(' ...
SXSTRING{i}(1:end-1) '.(''PolynomB'')' ')-1; \n'...% max order size polynomb -1
'expansionCoefFixA=1./factorial([1: numel(' ...
SXSTRING{i}(1:end-1) '.(''PolynomA''))]-1); \n'...
'expansionCoefFixB=1./factorial([1: numel(' ...
SXSTRING{i}(1:end-1) '.(''PolynomB''))]-1); \n'...
SXSTRING{i}(1:end-1) '.(''PolynomB'')=(' ...
SXSTRING{i}(1:end-1) '.(''PolynomB'')' ').*expansionCoefFixB; \n'...
SXSTRING{i}(1:end-1) '.(''PolynomA'')=(' ...
SXSTRING{i}(1:end-1) '.(''PolynomA'')' ').*expansionCoefFixA; \n'...
];
case {'OCTUPOLE','octupole'}
def=[def '\n'];
def=[ def ...
SXSTRING{i}(1:end-1) '=atmultipole('...
'''' SXSTRING{i}(1:end-1) ''''...
',0,[0 0 0 0],[0 0 0 0],'...
'''StrMPoleSymplectic4Pass'');\n'...
];
elemcount=elemcount+1;
while nwel~=':' % loops atributes of this element definition
def=ParseAtributesMADX_2_AT(def,SXSTRING{i}(1:end-1),...
SXSTRING{i+j},SXSTRING{i+j+1});
j=j+1; %go to new atribute
nwel=SXSTRING{i+j}(end);
end
% fix madX-AT multipole coefficents
def=[ def ...
SXSTRING{i}(1:end-1) '.(''Class'')=''Octupole''; \n'...% max order size polynomb -1
SXSTRING{i}(1:end-1) '.(''MaxOrder'')=numel(' ...
SXSTRING{i}(1:end-1) '.(''PolynomB'')' ')-1; \n'...% max order size polynomb -1
'expansionCoefFixA=1./factorial([1: numel(' ...
SXSTRING{i}(1:end-1) '.(''PolynomA''))]-1); \n'...
'expansionCoefFixB=1./factorial([1: numel(' ...
SXSTRING{i}(1:end-1) '.(''PolynomB''))]-1); \n'...
SXSTRING{i}(1:end-1) '.(''PolynomB'')=(' ....
SXSTRING{i}(1:end-1) '.(''PolynomB'')' ').*expansionCoefFixB; \n'...
SXSTRING{i}(1:end-1) '.(''PolynomA'')=(' ...
SXSTRING{i}(1:end-1) '.(''PolynomA'')' ').*expansionCoefFixA; \n'...
];
case {'SOLENOID','solenoid'}
def=[def '\n'];
def=[ def ...
SXSTRING{i}(1:end-1) '=atsolenoid('...
'''' SXSTRING{i}(1:end-1) ''''...
',0,0,'...
'''SolenoidLinearPass'');\n'...
];
elemcount=elemcount+1;
while nwel~=':' % loops atributes of this element definition
def=ParseAtributesMADX_2_AT(def,SXSTRING{i}(1:end-1),...
SXSTRING{i+j},SXSTRING{i+j+1});
j=j+1; %go to new atribute
nwel=SXSTRING{i+j}(end);
end
case {'MARKER','marker','marke'}
def=[def '\n'];
def=[ def ...
SXSTRING{i}(1:end-1) '=atmarker('...
'''' SXSTRING{i}(1:end-1) ''');\n'...
SXSTRING{i}(1:end-1) '.(''Length'')=0; \n'...
SXSTRING{i}(1:end-1) '.(''Class'')=''Marker''; \n'...
];
elemcount=elemcount+1;
while nwel~=':' % loops atributes of this element definition
def=ParseAtributesMADX_2_AT(def,SXSTRING{i}(1:end-1),...
SXSTRING{i+j},SXSTRING{i+j+1});
j=j+1; %go to new atribute
nwel=SXSTRING{i+j}(end);
end
case {'MATRIX','matrix','Matrix'}
def=[def '\n'];
def=[ def ...
SXSTRING{i}(1:end-1) '=atM66Tijk('...
'''' SXSTRING{i}(1:end-1) ''');\n'...
SXSTRING{i}(1:end-1) '.(''Length'')=0; \n'...
SXSTRING{i}(1:end-1) '.(''Tijk'')=zeros(6,6,6); \n'...
SXSTRING{i}(1:end-1) '.(''M66'')=eye(6,6); \n'...
SXSTRING{i}(1:end-1) '.(''Class'')=''MatrixTijk''; \n'...
];
elemcount=elemcount+1;
while nwel~=':' % loops atributes of this element definition
def=ParseAtributesMADX_2_AT(def,SXSTRING{i}(1:end-1),...
SXSTRING{i+j},SXSTRING{i+j+1});
j=j+1; %go to new atribute
nwel=SXSTRING{i+j}(end);
end
case {'Collimator','collimator','COLLIMATOR'}
def=[def '\n'];
def=[ def ...
SXSTRING{i}(1:end-1) '=atmarker('...
'''' SXSTRING{i}(1:end-1) ''');\n'...
SXSTRING{i}(1:end-1) '.(''Length'')=0; \n'...
SXSTRING{i}(1:end-1) '.(''Class'')=''Marker''; \n'...
];
elemcount=elemcount+1;
while nwel~=':' % loops atributes of this element definition
def=ParseAtributesMADX_2_AT(def,SXSTRING{i}(1:end-1),...
SXSTRING{i+j},SXSTRING{i+j+1});
j=j+1; %go to new atribute
nwel=SXSTRING{i+j}(end);
end
case {'MONITOR','monitor','monito'}
def=[def '\n'];
def=[ def ...
SXSTRING{i}(1:end-1) '.(''FamName'')=''' SXSTRING{i}(1:end-1) ''';\n' ...
SXSTRING{i}(1:end-1) '.(''Class'')=''Monitor''; \n'...
SXSTRING{i}(1:end-1) '.(''BetaCode'')=''PU''; \n'...
SXSTRING{i}(1:end-1) '.(''PassMethod'')=''IdentityPass''; \n'...
SXSTRING{i}(1:end-1) '.(''MaxOrder'')=1; \n'...
SXSTRING{i}(1:end-1) '.(''NumIntSteps'')=2; \n'...
SXSTRING{i}(1:end-1) '.(''Length'')=0; \n'...
SXSTRING{i}(1:end-1) '.(''Energy'')=' num2str(E0) '; \n'...
SXSTRING{i}(1:end-1) '.(''PolynomB'')=zeros(1,4); \n'...
SXSTRING{i}(1:end-1) '.(''PolynomA'')=zeros(1,4); \n'...
];
elemcount=elemcount+1;
while nwel~=':' % loops atributes of this element definition
def=ParseAtributesMADX_2_AT(def,SXSTRING{i}(1:end-1),...
SXSTRING{i+j},SXSTRING{i+j+1});
j=j+1; %go to new atribute
nwel=SXSTRING{i+j}(end);
end
case {'KICKER','hkicker','vkicker','kicker'}
def=[def '\n'];
def=[ def ...
SXSTRING{i}(1:end-1) '=atmultipole('...
'''' SXSTRING{i}(1:end-1) ''''...
',0,[0 0 0 0],[0 0 0 0],'...
'''StrMPoleSymplectic4Pass'');\n'...
];
def=[ def ...
SXSTRING{i}(1:end-1) '.(''MaxOrder'')=1; \n'...
SXSTRING{i}(1:end-1) '.(''Energy'')=' num2str(E0) '; \n'...
];
elemcount=elemcount+1;
while nwel~=':' % loops atributes of this element definition
def=ParseAtributesMADX_2_AT(def,SXSTRING{i}(1:end-1),...
SXSTRING{i+j},SXSTRING{i+j+1});
j=j+1; %go to new atribute
nwel=SXSTRING{i+j}(end);
end
case {'CONSTAN','constant'}
% disp('constant')
var=[var SXSTRING{i}(1:end-1) '=' SXSTRING{i+3} '; \n '];
j=j+3;
case {'sequence'}
% next element is the length
seqname=SXSTRING{i}(1:end-1);
l=SXSTRING{i+j+2};
j=j+2+1;
waitbar(i/(length(SXSTRING)-2),h,['Convert sequence: ' seqname ' ']);
elname=SXSTRING{i+j};
at=SXSTRING{i+j+1};
lines=[lines seqname '={'];
sposstring=['\n spos=['];
%zero=0;
%driftcounter=1;
while strcmp(at,'at=') && ~strcmp(elname,'endsequence')
elname=SXSTRING{i+j};
% WARNING if fails here add RETURN after madx
% ...
% endsequence;
% RETURN
% -----eof------
if ~strcmp(elname,'endsequence')&& ~isempty(elname)
elname=strrep(elname,',','');
at=SXSTRING{i+j+1};
spos=SXSTRING{i+j+2};
lines=[lines upper(elname) ';...\n'];
sposstring=[sposstring ',...\n ' num2str(spos)];
j=j+3;
else
j=j+1;
end
end
lines=[lines '};\n'];
sposstring=[sposstring '];\n'];
sposstring(10)=[]; % remove extra comma
% call function that builds sequence from list of elements
% and total length of sequence
lines=[lines sposstring '\n %% BUILD LATTICE \n'...
seqname '=buildATLattice(' seqname ',spos,' l ');\n'...
'ind=atgetcells(' seqname ',''Class'',''Drift'',''Bend'',''Quadrupole'',''Sextupole'',''Multipole'');\n'...
seqname '=atsetfieldvalues(' seqname ',ind,''NumIntSteps'',20);\n'...
'ind=atgetcells(' seqname ',''PolynomB'');\n'...
'PbL=cellfun(@(a)length(a.PolynomB),' seqname '(ind));\n'...
seqname '=atsetfieldvalues(' seqname ',ind,''MaxOrder'',PbL-1);\n'...
'ind = find(atgetcells(' seqname ',''Tilt''))'';\n'...
'T=atgetfieldvalues(' seqname ',ind,''Tilt'');\n'...
seqname '= atsettilt(' seqname ',ind,T);\n'...
... seqname '_red=atreduce(' seqname ');\n'...
... seqname '_FF=FringeSwitch(' seqname ',1);\n'...
];
otherwise
disp(['Unknown element type: ' ElementType])
end
else % variable declaration??
if SXSTRING{i}(1)~='!';
% in mad8 declaring a variable before using it is not compulsary.
% check that all definitions are at the begining.
%if sum(ismember('ABCDFGHILMNOPQRSTUVZWYK',SXSTRING{i+2}))>0
if sum(ismember('()/*+-',SXSTRING{i+1}))>0
% if letters (but E for exponential) then it is a formula
formulas=[formulas SXSTRING{i} ' ' SXSTRING{i+1} '; \n '];
else
var=[var SXSTRING{i} ' ' SXSTRING{i+1} '; \n '];
end
j=2;
end
end % if new element
i=i+j;
j=1;
end
%% save close and exit
macroconvertmadXAT=strrep([mst var formulas def lines],';;',';');
fprintf(mafileout,macroconvertmadXAT);
fclose('all');
close(h);
%% clean workspace
clear macroconvertmadXAT formulas def lines var mst
clear SXSTRING i j SXCELL elemcount ElementType elname
%% run macro file and save workspace.
[~,seqfilemadX]=fileparts(seqfilemadX);
try % try to run the macro generated
%%%!!!!! THIS COMMAND MAY FAIL! CHECK THE ORDER OF THE DECLARATIONS IN MADX!
run(filemacroname)
if nargin<3
save([seqfilemadX '_AT_LATTICE']);
else
save(outfilename);
end
%delete(filemacroname);
fileoutname=[seqfilemadX '_AT_macro.m'];
movefile(filemacroname,fileoutname);
catch %#ok<CTCH>
fileoutname=[seqfilemadX '_AT_macro.m'];
movefile(filemacroname,fileoutname);
disp(['saved macro in : ' fileoutname]);
error(['could not run the macro file.'...
' It is now in your current directory.'...
' Please check the order of the definitions'...
' Please check the multipole definitions in .seq have at least 2 elements m: multipole,knl:={ 0.0, 0.0 };']);
end
fclose('all');
return
function tmp=formatTextMADX(str)
tmp={};
str=strrep(str,' ',''); % no spaces
str=strrep(str,':=','=');
str=strrep(str,';','');
c=[1 sort([strfind(str,',') strfind(str,':')...
strfind(str,'=')]) length(str)];
% split in substrings
for jc=1:length(c)-1
if jc==1
tmp{jc}=str(c(jc):c(jc+1));
else
tmp{jc}=str(c(jc)+1:c(jc+1));
end
end
return
|
github
|
atcollab/at-master
|
plotERAperture.m
|
.m
|
at-master/atmat/atplot/plotfunctions/plotERAperture.m
| 1,961 |
utf_8
|
4a9c3a29f1b184a849d8fbaea79cee22
|
function varargout=plotERAperture(varargin)
%PLOTERAPERTURE Plot RApertures EApertures
%
%Helper function for atplot:
% plot the Elliptic and Rectangular physical apertures
%
% USAGE:
% >> atbaseplot(ring,@plotERAperture);
% >> atplot(ring,@plotERAperture); (obsolete)
%
%See also atplot atbaseplot
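%
% Example (illustrative sketch; element 10 and the aperture values are
% arbitrary):
%   ring{10}.EApertures = [0.035 0.015];   % horizontal / vertical half-apertures [m]
%   atbaseplot(ring,@plotERAperture);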
if nargout == 1 % From atplot
ring=varargin{2};
rapind=findcells(ring,'RApertures');
eapind=findcells(ring,'EApertures');
xm=getcellstruct(ring,'RApertures',rapind,1);
ym=getcellstruct(ring,'RApertures',rapind,3);
xp=getcellstruct(ring,'RApertures',rapind,2);
yp=getcellstruct(ring,'RApertures',rapind,4);
eh=getcellstruct(ring,'EApertures',eapind,1);
ev=getcellstruct(ring,'EApertures',eapind,2);
Xp=[nan(size(ring)); nan];
Xm=Xp;
Yp=Xp;
Ym=Xp;
Eh=Xp;
Ev=Xp;
Xp(rapind)=xp;
Xm(rapind)=xm;
Yp(rapind)=yp;
Ym(rapind)=ym;
Eh(eapind)=eh;
Ev(eapind)=ev;
Xp=fixgaps(Xp);
Xm=fixgaps(Xm);
Yp=fixgaps(Yp);
Ym=fixgaps(Ym);
Eh=fixgaps(Eh);
Ev=fixgaps(Ev);
plotdata(1).values=[Xp -Xm Yp -Ym]*1e2;%
plotdata(1).labels={'x ','x','y','y'};
plotdata(1).axislabel='rectangular aperture [cm]';
%
plotdata(2).values=[Eh Ev]*1e2;%
plotdata(2).labels={'hor.','ver.'};
plotdata(2).axislabel='elliptic aperture [cm]';
varargout={plotdata};
else % From atbaseplot
s=findspos(varargin{1},1:length(varargin{1})+1);
varargout={s,plotAperture([],varargin{:})};
end
end
function y=fixgaps(x)
% FIXGAPS Linearly interpolates gaps in a time series
% YOUT=FIXGAPS(YIN) linearly interpolates over NaN
% in the input time series (may be complex), but ignores
% trailing and leading NaN.
%
% R. Pawlowicz 6/Nov/99
y=x;
bd=isnan(x);
gd=find(~bd);
if length(gd)>2 % modify for plot in AT
bd([1:(min(gd)-1) (max(gd)+1):end])=0;
y(bd)=interp1(gd,x(gd),find(bd));
end
end
|
github
|
atcollab/at-master
|
BunchLength.m
|
.m
|
at-master/atmat/atphysics/LongitudinalDynamics/BunchLength.m
| 1,651 |
utf_8
|
b9ba831b0e8ab4233e84dbdc40897841
|
function BL = BunchLength (Ib,Zn,Vrf,U0,E0,h,alpha,sigdelta,circ)
% Bunch length due to the potential well effect.
% The output is the zero-current bunch length times the bunch lengthening factor
%
% BL = BunchLength (Ib,Zn,Vrf,U0,E0,h,alpha,sigdelta,circ)
%
% Ib is the bunch current [A] (it may be a vector for multiple values)
% Zn is the longitudinal broadband impedance [Ohms]
% Vrf is the RF voltage [V] (it may be a vector for multiple values)
% U0 is the energy loss around the ring [eV]
% E0 is the beam energy [eV]
% h is the harmonic number
% alpha is the momentum compaction factor
% sigdelta is the energy spread
% circ is the ring circumference
%
% see also: atBunchLength
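%
% Example (illustrative sketch with arbitrary numbers):
%   BL = BunchLength(1e-3,0.3,6e6,900e3,3e9,992,1.7e-4,1e-3,844);
%   % lengthened bunch length [m] for a 1 mA bunch and a 0.3 Ohm broadband
%   % impedance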
blg = abs(blgrowth(Ib,Zn,Vrf,U0,E0,h,alpha,sigdelta));
phi=pi - asin(U0./Vrf);
nus= sqrt(-(Vrf/E0).*(h * alpha)/(2*pi) .* cos(phi));
zcBL = sigdelta.*(circ * alpha)./(2 * pi .* nus );
BL = zcBL .* blg;
end
function blg = blgrowth(Ib,Zn,Vrf,U0,E0,h,alpha,sigdelta)
% bunch lengthening factor due to the potential well effect
% Ib is the bunch current [A] (it may be a vector for multiple values)
% Zn is the longitudinal broadband impedance [Ohms]
% Vrf is the RF voltage [V] (it may be a vector for multiple values)
% U0 is the energy loss around the ring [eV]
% h is the harmonic number
% alpha is the momentum compaction factor
% sigdelta is the energy spread
phi=pi - asin(U0./Vrf);
nus= sqrt(-(Vrf/E0).*(h * alpha)/(2*pi) .* cos(phi));
Delta = -(2*pi*Ib*Zn)./(Vrf*h.*cos(phi).*(alpha*sigdelta./nus).^3);
Q=Delta/(4*sqrt(pi));
blg = (2/3)^(1/3)./(9*Q + sqrt(3)*sqrt(-4+27*Q.^2)).^(1/3)...
+ (9*Q + sqrt(3)*sqrt(-4+27*Q.^2)).^(1/3)./(2^(1/3)*3^(2/3));
end
|
github
|
atcollab/at-master
|
findtune.m
|
.m
|
at-master/atmat/atphysics/TuneAndChromaticity/findtune.m
| 2,153 |
utf_8
|
430281c9b2bf8858079c67247c3f5100
|
function [tune,spectrum]=findtune(pos,method)
%FINDTUNE get the tune value from turn by turn positions
%
%TUNE=FINDTUNE(POS,METHOD)
%
%POS: Tune-by-turn particle position
%METHOD: Method for tune determination:
% 1: Highest peak in fft
% 2: Interpolation on fft results
% 3: Windowing + interpolation
%
%[TUNE,SPECTRUM]=FINDTUNE(...) Also returns the fft
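%
% Example (illustrative sketch; RING is any AT lattice, tracking done with
% ringpass):
%   X0 = [1e-5;0;0;0;0;0];
%   X  = ringpass(RING,X0,256);     % 6 x 256 turn-by-turn coordinates
%   nu = findtune(X(1,:)',3);       % horizontal tune, windowed method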
if nargin < 2, method=3; end
nturns=size(pos,1);
nparts=size(pos,2);
nt2=fix(nturns/2);
posm=mean(pos);
wrong=~isfinite(posm);
switch method
case 1
methname='highest peak';
pos2=pos-posm(ones(nturns,1),:);
spectrum=fft(pos2);
[vmax,rmax]=max(abs(spectrum(1:nt2,:))); %#ok<ASGLU>
tune=(rmax-1)/nturns;
case 2
methname='interpolation';
pos2=pos-posm(ones(nturns,1),:);
spectrum=fft(pos2);
[vmax,rmax]=max(abs(spectrum(1:nt2,:))); %#ok<ASGLU>
rmax=rmax+(rmax==1);
kmax=sub2ind([nturns nparts],rmax,1:nparts);
back=(spectrum(kmax-1) > spectrum(kmax+1));
k1=kmax-back;
k2=k1+1;
v1=abs(spectrum(k1));
v2=abs(spectrum(k2));
tune=(rmax-back-1 +(v2./(v1+v2)))/nturns;
case 3
methname='window + interp.';
w=hann_window(nturns);
pos2=(pos-posm(ones(nturns,1),:)).*w(:,ones(1,nparts));
spectrum=fft(pos2);
[vmax,rmax]=max(abs(spectrum(1:nt2,:))); %#ok<ASGLU>
rmax=rmax+(rmax==1);
kmax=sub2ind([nturns nparts],rmax,1:nparts);
back=(spectrum(kmax-1) > spectrum(kmax+1));
k1=kmax-back;
k2=k1+1;
v1=abs(spectrum(k1));
v2=abs(spectrum(k2));
tune=(rmax-back-1 +((2*v2-v1)./(v1+v2)))/nturns;
%tune2=(rmax-back-1)/nturns + asin(phi(v1,v2,cos(2*pi/nturns))*sin(2*pi/nturns))/2/pi;
%disp(['method 4 tune: ' num2str(mean2(tune2')) ' (rms: ' num2str(std(tune2')) ')']);
end
tune(wrong)=NaN;
errmax=2.5*std(tune,0,2);
keep=(abs(tune-mean(tune,2))<=errmax);
reject=find(~(keep | wrong));
for bpm=reject
fprintf('rejected BPM %d\n', bpm);
end
fprintf('%20s tune:%g (rms:%g)\n',methname, mean(tune(keep),2),std(tune(keep),0,2));
function vv=phi(a,b,c) %#ok<DEFNU>
d1=c*(a+b);
delt=d1.*d1 - 2*a.*b.*(2*c*c-c-1).*a.*a - b.*b - 2*a.*b*c;
vv=(-(a+b*c).*(a-b) + b.*sqrt(delt))./(a.*a + b.*b +2*a.*b*c);
function w=hann_window(n)
w=0.5*(1-cos(2*pi*(0:n-1)'/n));
|
github
|
atcollab/at-master
|
thinmpoleraddiffm.m
|
.m
|
at-master/atmat/atphysics/Radiation/thinmpoleraddiffm.m
| 3,006 |
utf_8
|
9c934f83e1f4500d880e450a91af7eaf
|
function [B66, M, rout] = findthinmpoleraddiffm(rin, PolynomA, PolynomB, L, irho, E0, max_order)
%FINDTHINMPOLERADDIFFM
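% (descriptive summary added; inferred from the code below)
% Inputs : rin 6x1 phase-space coordinates, PolynomA/PolynomB field expansion,
% L slice length, irho inverse bending radius, E0 energy [eV], max_order
% Outputs: B66 Ohmi diffusion matrix of the thin kick, M 6x6 transfer matrix
% linearized at rin, rout coordinates after the kick including radiation loss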
% Physical constants used in calculations
persistent TWOPI CGAMMA M0C2 LAMBDABAR CER CU
if isempty(TWOPI) %Initialize constants on the first call
TWOPI = 2*pi;
CGAMMA = 8.846056192e-05; % [m]/[GeV^3] Ref[1] (4.1)
M0C2 = 5.10999060e5; % Electron rest mass [eV]
LAMBDABAR = 3.86159323e-13; % Compton wavelength/2pi [m]
CER = 2.81794092e-15; % Classical electron radius [m]
CU = 1.323094366892892; % 55/(24*sqrt(3))
end
% Calculate field from polynomial coefficients
P1 = i*PolynomA(1:max_order+1)+PolynomB(1:max_order+1);
Z1 = cumprod([1, (rin(1)+i*rin(3))*ones(1,max_order)]);
S1 = sum(P1.*Z1);
Bx = real(S1);
By = imag(S1);
B2P = B2perp([Bx; By+irho; 0], irho, rin);
B3P = B2P^(3/2);
p_norm = 1/(1+rin(5));
p_norm2 = p_norm^2;
CRAD = CGAMMA*E0^3/(TWOPI*1e27);
BB = CU * CER * LAMBDABAR * (E0/M0C2)^5 * L * B3P * (1+rin(5))^4*...
(1+rin(1)*irho + (rin(2)^2+rin(4)^2)*p_norm2/2);
% Propagate particle
rout = rin;
% Loss of energy (dp/p) due to radiation
rout(5) = rin(5) - CRAD*(1+rin(5))^2*B2P*...
(1+rin(1)*irho + (rin(1)^2+rin(3)^2)*p_norm2/2)*L;
% Change in transverse momentum due to radiation
% Angle does not change but dp/p changes due to radiation
% and therefore transverse canonical momentum changes
% px = x'*(1+dp/p)
% py = y'*(1+dp/p)
rout([2 4]) = rin([2 4])*(1+rout(5))/(1+rin(5));
% transverse kick due to magnetic field
rout(2) = rout(2) - L*(Bx-(rin(5)-rin(1)*irho)*irho);
rout(4) = rout(4) + L*By;
% pathlength
rout(6) = rout(6) + L*irho*rin(1);
% Calculate transfer matrix at rin
P2 = i*PolynomA(2:max_order+1)+PolynomB(2:max_order+1);
Z2 = cumprod([1, (rin(1)+i*rin(3))*ones(1,max_order-1)]);
S2 = sum(P2.*(1:max_order).*Z2);
M = eye(6);
M(2,1) = -L*real(S2);
M(2,3) = L*imag(S2);
M(4,1) = L*imag(S2);
M(4,3) = L*real(S2);
M(2,5) = L*irho;
M(2,1) = M(2,1) - L*irho*irho;
M(6,1) = L*irho;
% Calculate Ohmi's diffusion matrix of a thin multipole element
% For elements with straight coordinate system irho = 0
% For curved elements the B polynomial (PolynomB in MATLAB)
% MUST NOT include the guide field By0 = irho * E0 /(c*e)
B66 = zeros(6);
B66(2,2) = BB*rin(2)^2*p_norm2;
B66(2,4) = BB*rin(2)*rin(4)*p_norm2;
B66(4,2) = B66(2,4);
B66(4,4) = BB*rin(4)^2*p_norm2;
B66(5,2) = BB*rin(2)*p_norm;
B66(2,5) = B66(5,2);
B66(5,4) = BB*rin(4)*p_norm;
B66(4,5) = B66(5,4);
B66(5,5) = BB;
function b2 = B2perp(B, irho, rin)
% Calculates sqr(|e x B|) , where 'e' is a unit vector in the direction of
% velocity. Components of the velocity vector:
% ex = xpr;
% ey = ypr;
% ez = (1+x*irho);
E = [rin(2)/(1+rin(5));rin(4)/(1+rin(5));1+rin(1)*irho];
b2 = sum(cross(E/norm(E),B).^2);
|
github
|
atcollab/at-master
|
findthinmpoleraddiffm.m
|
.m
|
at-master/atmat/atphysics/Radiation/findthinmpoleraddiffm.m
| 3,006 |
utf_8
|
9c934f83e1f4500d880e450a91af7eaf
|
function [B66, M, rout] = findthinmpoleraddiffm(rin, PolynomA, PolynomB, L, irho, E0, max_order)
%FINDTHINMPOLERADDIFFM
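% (descriptive summary added; inferred from the code below)
% Inputs : rin 6x1 phase-space coordinates, PolynomA/PolynomB field expansion,
% L slice length, irho inverse bending radius, E0 energy [eV], max_order
% Outputs: B66 Ohmi diffusion matrix of the thin kick, M 6x6 transfer matrix
% linearized at rin, rout coordinates after the kick including radiation loss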
% Physical constants used in calculations
persistent TWOPI CGAMMA M0C2 LAMBDABAR CER CU
if isempty(TWOPI) %Initialize constants on the first call
TWOPI = 2*pi;
CGAMMA = 8.846056192e-05; % [m]/[GeV^3] Ref[1] (4.1)
M0C2 = 5.10999060e5; % Electron rest mass [eV]
LAMBDABAR = 3.86159323e-13; % Compton wavelength/2pi [m]
CER = 2.81794092e-15; % Classical electron radius [m]
CU = 1.323094366892892; % 55/(24*sqrt(3))
end
% Calculate field from polynomial coefficients
P1 = i*PolynomA(1:max_order+1)+PolynomB(1:max_order+1);
Z1 = cumprod([1, (rin(1)+i*rin(3))*ones(1,max_order)]);
S1 = sum(P1.*Z1);
Bx = real(S1);
By = imag(S1);
B2P = B2perp([Bx; By+irho; 0], irho, rin);
B3P = B2P^(3/2);
p_norm = 1/(1+rin(5));
p_norm2 = p_norm^2;
CRAD = CGAMMA*E0^3/(TWOPI*1e27);
BB = CU * CER * LAMBDABAR * (E0/M0C2)^5 * L * B3P * (1+rin(5))^4*...
(1+rin(1)*irho + (rin(2)^2+rin(4)^2)*p_norm2/2);
% Propagate particle
rout = rin;
% Loss of energy (dp/p) due to radiation
rout(5) = rin(5) - CRAD*(1+rin(5))^2*B2P*...
(1+rin(1)*irho + (rin(1)^2+rin(3)^2)*p_norm2/2)*L;
% Change in transverse momentum due to radiation
% Angle does not change but dp/p changes due to radiation
% and therefore transverse canonical momentum changes
% px = x'*(1+dp/p)
% py = y'*(1+dp/p)
rout([2 4]) = rin([2 4])*(1+rout(5))/(1+rin(5));
% transverse kick due to magnetic field
rout(2) = rout(2) - L*(Bx-(rin(5)-rin(1)*irho)*irho);
rout(4) = rout(4) + L*By;
% pathlength
rout(6) = rout(6) + L*irho*rin(1);
% Calculate transfer matrix at rin
P2 = 1i*PolynomA(2:max_order+1)+PolynomB(2:max_order+1);
Z2 = cumprod([1, (rin(1)+1i*rin(3))*ones(1,max_order-1)]);
S2 = sum(P2.*(1:max_order).*Z2);
M = eye(6);
M(2,1) = -L*real(S2);
M(2,3) = L*imag(S2);
M(4,1) = L*imag(S2);
M(4,3) = L*real(S2);
M(2,5) = L*irho;
M(2,1) = M(2,1) - L*irho*irho;
M(6,1) = L*irho;
% Calculate Ohmi's diffusion matrix of a thin multipole element
% For elements with straight coordinate system irho = 0
% For curved elements the B polynomial (PolynomB in MATLAB)
% MUST NOT include the guide field By0 = irho * E0 /(c*e)
B66 = zeros(6);
B66(2,2) = BB*rin(2)^2*p_norm2;
B66(2,4) = BB*rin(2)*rin(4)*p_norm2;
B66(4,2) = B66(2,4);
B66(4,4) = BB*rin(4)^2*p_norm2;
B66(5,2) = BB*rin(2)*p_norm;
B66(2,5) = B66(5,2);
B66(5,4) = BB*rin(4)*p_norm;
B66(4,5) = B66(5,4);
B66(5,5) = BB;
function b2 = B2perp(B, irho, rin)
% Calculates sqr(|e x B|) , where 'e' is a unit vector in the direction of
% velocity. Components of the velocity vector:
% ex = xpr;
% ey = ypr;
% ez = (1+x*irho);
E = [rin(2)/(1+rin(5));rin(4)/(1+rin(5));1+rin(1)*irho];
b2 = sum(cross(E/norm(E),B).^2);
|
github
|
atcollab/at-master
|
thickmpoleraddiffm.m
|
.m
|
at-master/atmat/atphysics/Radiation/thickmpoleraddiffm.m
| 1,708 |
utf_8
|
cc43d4f668b8b722cf435a9b6b4b175c
|
function [Bcum, Mcum, r] = findthickmpoleraddifm(rin, PolynomA, PolynomB,L, irho, E0, max_order,num_steps)
%FINDTHICKMPOLERADDIFFM
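% (Summary inferred from the implementation below.)
% [BCUM, MCUM, R] = FINDTHICKMPOLERADDIFFM(RIN, POLYNOMA, POLYNOMB, L, IRHO, E0, MAX_ORDER, NUM_STEPS)
% slices a thick multipole of length L into NUM_STEPS steps of a 4th-order
% symplectic drift-kick integrator, accumulating the transfer matrix MCUM,
% Ohmi's diffusion matrix BCUM and the propagated coordinates R. Each thin
% kick is delegated to FINDTHINMPOLERADDIFFM; the drifts use the linearized
% map DRIFTM66 defined at the end of this file.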
% Fourth order-symplectic integrator constants
persistent DRIFT1 DRIFT2 KICK1 KICK2
if isempty(DRIFT1)
DRIFT1 = 0.6756035959798286638;
DRIFT2 = -0.1756035959798286639;
KICK1 = 1.351207191959657328;
KICK2 = -1.702414383919314656;
end
SL = L/num_steps;
L1 = SL*DRIFT1;
L2 = SL*DRIFT2;
K1 = SL*KICK1;
K2 = SL*KICK2;
Mcum = eye(6);
Bcum = zeros(6);
r = rin;
for m=1:num_steps % Loop over slices
[M, r] = driftm66(L1,r);
Bcum = M*Bcum*M';
Mcum = M*Mcum;
[B, M, r] = findthinmpoleraddiffm(r, PolynomA, PolynomB, K1, irho, E0, max_order);
Bcum = M*Bcum*M' + B;
Mcum = M*Mcum;
[M, r] = driftm66(L2,r);
Bcum = M*Bcum*M';
Mcum = M*Mcum;
[B, M, r] = findthinmpoleraddiffm(r, PolynomA, PolynomB, K2, irho, E0, max_order);
Bcum = M*Bcum*M' + B;
Mcum = M*Mcum;
[M, r] = driftm66(L2,r);
Bcum = M*Bcum*M';
Mcum = M*Mcum;
[B, M, r] = findthinmpoleraddiffm(r, PolynomA, PolynomB, K1, irho, E0, max_order);
Bcum = M*Bcum*M' + B;
Mcum = M*Mcum;
[M, r] = driftm66(L1,r);
Bcum = M*Bcum*M';
Mcum = M*Mcum;
end
function [M, rout] = driftm66(L,r);
% transfer matrix of a drift - map linearized at r
Pnorm = 1/(1+r(5));
NormL = L*Pnorm;
M = eye(6);
M([7 21]) = NormL;
M([1 3],5) = -NormL*r([2,4])*Pnorm;
M(6,[2 4]) = -M([1 3],5)';
M(6,5) = -NormL*Pnorm*sum(r([2,4]).^2);
rout = r;
rout([1 3]) = r([1 3]) + r([2 4])*NormL;
rout(6) = r(6) + NormL*Pnorm*sum(r([2,4]).^2)/2;
|
github
|
atcollab/at-master
|
findthickmpoleraddiffm.m
|
.m
|
at-master/atmat/atphysics/Radiation/findthickmpoleraddiffm.m
| 1,709 |
utf_8
|
ebc35623ce562ec6ce4133a19cb63d42
|
function [Bcum, Mcum, r] = findthickmpoleraddifm(rin, PolynomA, PolynomB,L, irho, E0, max_order,num_steps)
%FINDTHICKMPOLERADDIFFM
% Fourth order-symplectic integrator constants
persistent DRIFT1 DRIFT2 KICK1 KICK2
if isempty(DRIFT1)
DRIFT1 = 0.6756035959798286638;
DRIFT2 = -0.1756035959798286639;
KICK1 = 1.351207191959657328;
KICK2 = -1.702414383919314656;
end
SL = L/num_steps;
L1 = SL*DRIFT1;
L2 = SL*DRIFT2;
K1 = SL*KICK1;
K2 = SL*KICK2;
Mcum = eye(6);
Bcum = zeros(6);
r = rin;
for m=1:num_steps % Loop over slices
[M, r] = driftm66(L1,r);
Bcum = M*Bcum*M';
Mcum = M*Mcum;
[B, M, r] = findthinmpoleraddiffm(r, PolynomA, PolynomB, K1, irho, E0, max_order);
Bcum = M*Bcum*M' + B;
Mcum = M*Mcum;
[M, r] = driftm66(L2,r);
Bcum = M*Bcum*M';
Mcum = M*Mcum;
[B, M, r] = findthinmpoleraddiffm(r, PolynomA, PolynomB, K2, irho, E0, max_order);
Bcum = M*Bcum*M' + B;
Mcum = M*Mcum;
[M, r] = driftm66(L2,r);
Bcum = M*Bcum*M';
Mcum = M*Mcum;
[B, M, r] = findthinmpoleraddiffm(r, PolynomA, PolynomB, K1, irho, E0, max_order);
Bcum = M*Bcum*M' + B;
Mcum = M*Mcum;
[M, r] = driftm66(L1,r);
Bcum = M*Bcum*M';
Mcum = M*Mcum;
end
function [M, rout] = driftm66(L,r);
% transfer matrix of a drift - map linearized at r
Pnorm = 1/(1+r(5));
NormL = L*Pnorm;
M = eye(6);
M([7 21]) = NormL;
M([1 3],5) = -NormL*r([2,4])*Pnorm;
M(6,[2 4]) = -M([1 3],5)';
M(6,5) = -NormL*Pnorm*sum(r([2,4]).^2);
rout = r;
rout([1 3]) = r([1 3]) + r([2 4])*NormL;
rout(6) = r(6) + NormL*Pnorm*sum(r([2,4]).^2)/2;
|
github
|
atcollab/at-master
|
naff_cc.m
|
.m
|
at-master/atmat/atphysics/nafflib/naff_cc.m
| 2,023 |
utf_8
|
f4beeffc5c9f251ee33da68512ec0199
|
function naff_cc
%NAFF_CC Compile nafflibrary for Matlab
%
% Modified by Laurent S. Nadolski
% April 6th, 2007
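% Usage (added note): run
% >> naff_cc
% from any folder on the MATLAB path. The function changes into its own
% directory, compiles modnaff.c, complexe.c and nafflib.c with mex for the
% current platform, and then returns to the original directory.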
cd_old = pwd;
cd(fileparts(which('naff_cc')))
disp(['Compiling NAFF routines on ', computer,'.'])
switch computer
case 'SOL2'
PLATFORMOPTION = ['-D',computer,' '];
case 'GLNXA64'
PLATFORMOPTION = ['-ldl -D',computer,' ']; % added by Laurent April 6th, 2007
case 'GLNX86'
PLATFORMOPTION = ['-ldl -D',computer,' ']; % added by Laurent April 6th, 2007
case 'PCWIN64'
PLATFORMOPTION = ['-D',computer,' LDFLAGS=''-pthread -shared -m64'' '];
case 'PCWIN'
PLATFORMOPTION = ['-D',computer,' '];
case 'MACI64'
PLATFORMOPTION = ['-D',computer,' LDFLAGS=''-pthread -shared -m64'' '];
otherwise
error('Platform not defined');
end
% Object files
disp('Compiling: modnaff.c');
%mex LDFLAGS='-pthread -shared -m64' -I/usr/local/matlab/extern/include -O -c modnaff.c
eval(['mex ', PLATFORMOPTION, '-O -c modnaff.c ']);
disp('Compiling: complexe.c');
%mex LDFLAGS='-pthread -shared -m64' -I/usr/local/matlab/extern/include -O -c complexe.c
eval(['mex ', PLATFORMOPTION, '-O -c complexe.c ']);
disp('Compiling: nafflib.c');
switch computer
case {'MACI64', 'GLNX86', 'GLNX64', 'GLNXA64'}
internal_cc('nafflib.c modnaff.o complexe.o');
case {'PCWIN', 'PCWIN64'}
internal_cc('nafflib.c modnaff.obj complexe.obj');
end
cd(cd_old);
function internal_cc(fn)
% cc(filename)
%
% MAC 64 bits
% TODO WINDOWS
disp(['Compiling: ',fn]);
switch computer
case {'GLNX86', 'GLNX64', 'GLNXA64'}
cmdstr = [ 'mex -I' matlabroot '/extern/include -fPIC -O ', fn ];
case {'MACI64'}
cmdstr = [ 'mex -I' matlabroot '/extern/include -O ', fn ];
case {'PCWIN', 'PCWIN64'}
cmdstr = [ 'mex -I' matlabroot '/extern/include -O ', fn];
otherwise
error('Architecture not defined')
end
disp(cmdstr);
eval(cmdstr);
|
github
|
atcollab/at-master
|
twissring.m
|
.m
|
at-master/atmat/atphysics/ParameterSummaryFunctions/twissring.m
| 4,892 |
utf_8
|
7e51fadcac888999286ae29c5d767c9c
|
function [TD, varargout] = twissring(RING,DP,varargin)
%TWISSRING calculates linear optics functions for an UNCOUPLED ring
%
% [TwissData, tune] = TWISSRING(LATTICE,DP) calculates twiss parameters
% and closed orbit coordinates at the RING entrance assuming
% constant energy deviation DP.
%
% [TwissData, tune] = TWISSRING(LATTICE,DP,REFPTS) calculates Twiss parameters
% and closed orbit coordinates at specified reference points REFPTS.
%
% Note: REFPTS is an array of increasing indexes that
% select elements from range 1 to length(LATTICE)+1.
% See further explanation of REFPTS in the 'help' for FINDSPOS
%
% [TwissData, tune, chrom] = TWISSRING(...,'chrom', DDP) also calculates
% linear dispersion and chromaticity. Dispersion is returned as one
% of the fields in TwissData.
% !!! Last argument DDP is a momentum deviation on top
% of DP (the second argument) used to calculate and normalize
% dispersion and chromaticity. If not supplied
% the default value of 1e-8 is used.
%
% Note: To resolve the integer part of the tune
% and the uncertainty of acos(trace(M)/2) it is necessary to
% supply sufficient number of REFPTS properly spaced in betatron phase.
%
% TwissData is a 1-by-length(REFPTS) (1-by-1 by default) structure array with fields
% (Some are the same as in the output of LINOPT)
% ElemIndex - integer (element number) in the RING
% SPos - longitudinal position [m]
% ClosedOrbit - closed orbit column vector with
% components x, px, y, py (momentums, NOT angles)
% Dispersion - dispersion orbit position 4-by-1 vector with
% components [eta_x, eta_prime_x, eta_y, eta_prime_y]'
% calculated with respect to the closed orbit with
% momentum deviation DP
% M44 - 4x4 transfer matrix M from the beginning of RING
% to the entrance of the element for specified DP [2]
% beta - [betax, betay] horizontal and vertical Twiss parameter beta
% alpha - [alphax, alphay] horizontal and vertical Twiss parameter alpha
% mu - [mux, muy] horizontal and vertical betatron phase
% !!! NOT 2*PI normalized
%
% Use MATLAB function CAT to get the data from fields of TwissData into MATLAB arrays.
% Example:
% >> TD = twissring(THERING,0,1:length(THERING));
% >> BETA = cat(1,TD.beta);
% >> S = cat(1,TD.SPos);
% >> plot(S,BETA(:,1))
%
% See also TWISSLINE, LINOPT, TUNECHROM.
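% Example with chromaticity (added sketch; assumes THERING is a valid AT
% lattice cell array in the workspace):
% >> [TD, tune, chrom] = twissring(THERING, 0, 1:length(THERING), 'chrom', 1e-8);
% >> tune % fractional tunes [nu_x, nu_y]
% >> chrom % chromaticities d(nu)/d(dp/p)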
NE=length(RING);
% Process input arguments
[CHROMFLAG,args]=getflag(varargin,'chrom');
[REFPTS,DDP]=getargs(args,NE+1,1.e-8);
CHROMFLAG=CHROMFLAG || (nargout == 3);
% Include the endpoint if it is not already in REFPTS
if REFPTS(end)==NE+1
[M44, MS, orb] = findm44(RING,DP,REFPTS);
else
[M44, MS, orb] = findm44(RING,DP,[REFPTS,NE+1]);
end
cos_mu_x = (M44(1,1)+M44(2,2))/2;
cos_mu_y = (M44(3,3)+M44(4,4))/2;
sin_mu_x = sign(M44(1,2))*sqrt(-M44(1,2)*M44(2,1)-(M44(1,1)-M44(2,2))^2/4);
sin_mu_y = sign(M44(3,4))*sqrt(-M44(3,4)*M44(4,3)-(M44(3,3)-M44(4,4))^2/4);
ax = (M44(1,1)-M44(2,2))/2/sin_mu_x;
ay = (M44(3,3)-M44(4,4))/2/sin_mu_y;
bx = M44(1,2)/sin_mu_x;
by = M44(3,4)/sin_mu_y;
BX = squeeze((MS(1,1,:)*bx-MS(1,2,:)*ax).^2 + MS(1,2,:).^2)/bx;
BY = squeeze((MS(3,3,:)*by-MS(3,4,:)*ay).^2 + MS(3,4,:).^2)/by;
AX = -squeeze((MS(1,1,:)*bx-MS(1,2,:)*ax).*(MS(2,1,:)*bx-MS(2,2,:)*ax) + MS(1,2,:).*MS(2,2,:))/bx;
AY = -squeeze((MS(3,3,:)*by-MS(3,4,:)*ay).*(MS(4,3,:)*by-MS(4,4,:)*ay) + MS(3,4,:).*MS(4,4,:))/by;
MX = atan2(squeeze(MS(1,2,:)), squeeze(MS(1,1,:)*bx-MS(1,2,:)*ax));
MY = atan2(squeeze(MS(3,4,:)), squeeze(MS(3,3,:)*by-MS(3,4,:)*ay));
MX = BetatronPhaseUnwrap(MX);
MY = BetatronPhaseUnwrap(MY);
tune=mod(atan2([sin_mu_x,sin_mu_y],[cos_mu_x cos_mu_y])/2/pi,1.0);
NR = length(REFPTS);
% Build TD only for points originally referenced in REFPTS
TD = struct('ElemIndex',num2cell(REFPTS),...
'SPos',num2cell(findspos(RING,REFPTS)),...
'ClosedOrbit',num2cell(orb(:,1:NR),1),...
'M44', squeeze(num2cell(MS(:,:,1:NR),[1 2]))',...
'beta', num2cell([BX(1:NR),BY(1:NR)],2)',...
'alpha', num2cell([AX(1:NR),AY(1:NR)],2)',...
'mu', num2cell([MX(1:NR),MY(1:NR)],2)');
if CHROMFLAG
[TD_DDP,tune_DDP] = twissring(RING,DP+DDP,REFPTS);
DORBIT = reshape(cat(1,TD_DDP.ClosedOrbit),4,[]);
DISPERSION = num2cell((DORBIT-orb(:,1:NR))/DDP,1);
[TD.Dispersion] = deal( DISPERSION{:});
varargout{2} = (tune_DDP-tune)/DDP;
end
if nargout>1
varargout{1}=tune;
end
function UP = BetatronPhaseUnwrap(P)
% unwrap negative jumps in betatron
%JUMPS = [0; diff(P)] < -1.e-5;
JUMPS = [0; diff(P)] < -1.e-3;
UP = P+cumsum(JUMPS)*2*pi;
|
github
|
atcollab/at-master
|
twissline.m
|
.m
|
at-master/atmat/atphysics/ParameterSummaryFunctions/twissline.m
| 6,230 |
utf_8
|
83e74ff872e535dae8dba2a0ae9e1efc
|
function [TD, varargout] = twissline(LINE,DP,TWISSDATAIN,varargin)
%TWISSLINE calculates linear optics functions for an UNCOUPLED transport line
%
% TwissData = TWISSLINE(LATTICE,DP,TWISSDATAIN) propagates twiss
% parameters and closed orbit coordinates from the LINE entrance
% given by TWISSDATAIN assuming constant energy deviation DP.
% TWISSDATAIN is a 1-by-1 structure with the same field names
% as the return argument. (See below)
% !!! IMPORTANT: Since TWISSLINE does not search for closed orbit
% its value at the entrance must be supplied in the
% ClosedOrbit field of TWISSDATAIN structure.
%
% TwissData = TWISSLINE(LATTICE,DP,TWISSDATAIN,REFPTS) calculates Twiss parameters
% and closed orbit coordinates at specified reference points REFPTS
%
% Note: REFPTS is an array of increasing indexes that
% select elements from range 1 to length(LATTICE)+1.
% See further explanation of REFPTS in the 'help' for FINDSPOS
%
% TwissData = TWISSLINE(...,'chrom', DDP) also calculates
% linear dispersion. Dispersion is returned as one
% of the fields in TwissData.
% !!! Last argument DDP is a momentum deviation on top
% of DP (the second argument) used to calculate and normalize
% dispersion. If not supplied
% the default value of 1e-8 is used.
%
% TwissData is a 1-by-length(REFPTS) (1-by-1 if no REFPTS specified) structure array with fields:
% ElemIndex - integer (element number) in the LINE
% SPos - longitudinal position [m]
% ClosedOrbit - closed orbit column vector with
% components x, px, y, py (momentums, NOT angles)
% Dispersion - dispersion orbit position 4-by-1 vector with
% components [eta_x, eta_prime_x, eta_y, eta_prime_y]'
% calculated with respect to the closed orbit with
% momentum deviation DP
% M44 - 4x4 transfer matrix M from the beginning of LINE
% to the entrance of the element for specified DP [2]
% beta - [betax, betay] horizontal and vertical Twiss parameter beta
% alpha - [alphax, alphay] horizontal and vertical Twiss parameter alpha
% mu - [mux, muy] horizontal and vertical betatron phase
% !!! NOT 2*PI normalized
%
% Use CAT to get the data from fields of TwissData into MATLAB arrays.
% Example:
% >> TD = twissring(THERING,0,1:length(THERING));
% >> BETA = cat(1,TD.beta);
% >> S = cat(1,TD.SPos);
% >> plot(S,BETA(:,1))
%
% See also TWISSRING, LINOPT, TUNECHROM.
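% Example (added sketch; TLINE is an assumed transport-line lattice cell
% array and the initial optics values are purely illustrative):
% >> TWISSDATAIN.alpha = [0 0];
% >> TWISSDATAIN.beta = [10 10]; % [m]
% >> TWISSDATAIN.mu = [0 0];
% >> TWISSDATAIN.ClosedOrbit = zeros(4,1);
% >> TWISSDATAIN.Dispersion = zeros(4,1); % required when 'chrom' is requested
% >> TD = twissline(TLINE, 0, TWISSDATAIN, 1:length(TLINE)+1);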
DDP_default = 1e-8;
NE=length(LINE);
% Process input arguments
switch nargin
case 3
REFPTS=NE+1;
CHROMFLAG=0;
case 4
if isnumeric(varargin{1})
REFPTS = varargin{1};
CHROMFLAG = 0;
elseif ischar(varargin{1}) & strncmp(lower(varargin{1}),'chrom',5)
CHROMFLAG = 1;
REFPTS = NE+1;
DDP = DDP_default;
else
error('Third argument must be a numeric array or string');
end
case 5
if isnumeric(varargin{1})
REFPTS = varargin{1};
if ischar(varargin{2}) & strncmp(lower(varargin{2}),'chrom',5)
CHROMFLAG = 1;
DDP = DDP_default;
else
error('Fourth argument - wrong type');
end
elseif ischar(varargin{1}) & strncmp(lower(varargin{1}),'chrom',5)
CHROMFLAG = 1;
REFPTS = NE+1;
if isnumeric(varargin{2})
DDP = varargin{2};
else
error('Fourth argument - wrong type');
end
end
case 6
if isnumeric(varargin{1})
REFPTS = varargin{1};
else
error('Fourth argument - wrong type');
end
if ischar(varargin{2}) & strncmp(lower(varargin{2}),'chrom',5)
CHROMFLAG = 1;
else
error('Fifth argument - wrong type');
end
if isnumeric(varargin{3})
DDP = varargin{3};
else
error('Sixth argument - wrong type');
end
otherwise
error('Wrong number of arguments');
end
if isfield(TWISSDATAIN,'alpha')
ax = TWISSDATAIN(end).alpha(1);
ay = TWISSDATAIN(end).alpha(2);
else
error('TWISSDATAIN structure does not have field ''alpha''');
end
if isfield(TWISSDATAIN,'beta')
bx = TWISSDATAIN(end).beta(1);
by = TWISSDATAIN(end).beta(2);
else
error('TWISSDATAIN structure does not have field ''beta''');
end
if isfield(TWISSDATAIN,'mu')
mux = TWISSDATAIN(end).mu(1);
muy = TWISSDATAIN(end).mu(2);
else
error('TWISSDATAIN structure does not have field ''mu''');
end
R0 = [TWISSDATAIN(end).ClosedOrbit;DP;0];
[M44, MS, orb] = findm44(LINE,DP,REFPTS,R0);
BX = squeeze((MS(1,1,:)*bx-MS(1,2,:)*ax).^2 + MS(1,2,:).^2)/bx;
BY = squeeze((MS(3,3,:)*by-MS(3,4,:)*ay).^2 + MS(3,4,:).^2)/by;
AX = -squeeze((MS(1,1,:)*bx-MS(1,2,:)*ax).*(MS(2,1,:)*bx-MS(2,2,:)*ax) + MS(1,2,:).*MS(2,2,:))/bx;
AY = -squeeze((MS(3,3,:)*by-MS(3,4,:)*ay).*(MS(4,3,:)*by-MS(4,4,:)*ay) + MS(3,4,:).*MS(4,4,:))/by;
MX = atan(squeeze( MS(1,2,:)./(MS(1,1,:)*bx-MS(1,2,:)*ax)));
MY = atan(squeeze(MS(3,4,:)./(MS(3,3,:)*by-MS(3,4,:)*ay)));
MX = BetatronPhaseUnwrap(MX);
MY = BetatronPhaseUnwrap(MY);
TD = struct('ElemIndex',num2cell(REFPTS),...
'SPos',num2cell(findspos(LINE,REFPTS)),...
'ClosedOrbit',num2cell(orb,1),...
'M44', squeeze(num2cell(MS,[1 2]))',...
'beta', num2cell([BX,BY],2)',...
'alpha', num2cell([AX,AY],2)',...
'mu', num2cell([MX,MY],2)');
if CHROMFLAG
TWISSDATAIN_DDP = TWISSDATAIN(end);
TWISSDATAIN_DDP.ClosedOrbit = TWISSDATAIN_DDP.ClosedOrbit+TWISSDATAIN_DDP.Dispersion(:)*DDP;
TD_DDP = twissline(LINE,DP+DDP,TWISSDATAIN_DDP,REFPTS);
DORBIT = reshape(cat(1,TD_DDP.ClosedOrbit),4,length(cat(1,TD_DDP.ClosedOrbit))/4);
DISPERSION = num2cell((DORBIT-orb)/DDP,1);
[TD.Dispersion] = deal( DISPERSION{:});
end
function UP = BetatronPhaseUnwrap(P)
% unwrap negative jumps in betatron phase
DP = diff(P);
JUMPS = [0; diff(P)] < -1e-3; % modified! was 0!
UP = P+cumsum(JUMPS)*pi;
|
github
|
smallcorgi/3D-Deepbox-master
|
writeLabels.m
|
.m
|
3D-Deepbox-master/visualization/writeLabels.m
| 2,759 |
utf_8
|
9f25c784ee622dc760d160581deac377
|
function writeLabels(objects,label_dir,img_idx)
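% (Description inferred from the fprintf calls below.)
% writeLabels(OBJECTS, LABEL_DIR, IMG_IDX) writes LABEL_DIR/<IMG_IDX>.txt
% (index zero-padded to 6 digits) with one line per object: type,
% truncation, occlusion, alpha, 2D box (x1 y1 x2 y2), 3D box (h w l,
% center t, rotation ry) and score, which appears to follow the KITTI
% object-detection label format.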
% parse input file
fid = fopen(sprintf('%s/%06d.txt',label_dir,img_idx),'w');
% for all objects do
for o = 1:numel(objects)
% set label, truncation, occlusion
if isfield(objects(o),'type'), fprintf(fid,'%s ',objects(o).type);
else error('ERROR: type not specified!'), end;
if isfield(objects(o),'truncation'), fprintf(fid,'%.2f ',objects(o).truncation);
else fprintf(fid,'-1 '); end; % default
if isfield(objects(o),'occlusion'), fprintf(fid,'%.d ',objects(o).occlusion);
else fprintf(fid,'-1 '); end; % default
if isfield(objects(o),'alpha'), fprintf(fid,'%.2f ',wrapToPi(objects(o).alpha));
else fprintf(fid,'-10 '); end; % default
% set 2D bounding box in 0-based C++ coordinates
if isfield(objects(o),'x1'), fprintf(fid,'%.2f ',objects(o).x1);
else error('ERROR: x1 not specified!'); end;
if isfield(objects(o),'y1'), fprintf(fid,'%.2f ',objects(o).y1);
else error('ERROR: y1 not specified!'); end;
if isfield(objects(o),'x2'), fprintf(fid,'%.2f ',objects(o).x2);
else error('ERROR: x2 not specified!'); end;
if isfield(objects(o),'y2'), fprintf(fid,'%.2f ',objects(o).y2);
else error('ERROR: y2 not specified!'); end;
% set 3D bounding box
if isfield(objects(o),'h'), fprintf(fid,'%.2f ',objects(o).h);
else fprintf(fid,'-1 '); end; % default
if isfield(objects(o),'w'), fprintf(fid,'%.2f ',objects(o).w);
else fprintf(fid,'-1 '); end; % default
if isfield(objects(o),'l'), fprintf(fid,'%.2f ',objects(o).l);
else fprintf(fid,'-1 '); end; % default
if isfield(objects(o),'t'), fprintf(fid,'%.2f %.2f %.2f ',objects(o).t);
else fprintf(fid,'-1000 -1000 -1000 '); end; % default
if isfield(objects(o),'ry'), fprintf(fid,'%.2f ',wrapToPi(objects(o).ry));
else fprintf(fid,'-10 '); end; % default
% set score
if isfield(objects(o),'score'), fprintf(fid,'%.2f ',objects(o).score);
else error('ERROR: score not specified!'); end;
% next line
fprintf(fid,'\n');
end
% close file
fclose(fid);
function alpha = wrapToPi(alpha)
% wrap to [0..2*pi]
alpha = mod(alpha,2*pi);
% wrap to [-pi..pi]
idx = alpha>pi;
alpha(idx) = alpha(idx)-2*pi;
|
github
|
mmclkv/caffe-mask-rcnn-master
|
classification_demo.m
|
.m
|
caffe-mask-rcnn-master/matlab/demo/classification_demo.m
| 5,466 |
utf_8
|
45745fb7cfe37ef723c307dfa06f1b97
|
function [scores, maxlabel] = classification_demo(im, use_gpu)
% [scores, maxlabel] = classification_demo(im, use_gpu)
%
% Image classification demo using BVLC CaffeNet.
%
% IMPORTANT: before you run this demo, you should download BVLC CaffeNet
% from Model Zoo (http://caffe.berkeleyvision.org/model_zoo.html)
%
% ****************************************************************************
% For detailed documentation and usage on Caffe's Matlab interface, please
% refer to the Caffe Interface Tutorial at
% http://caffe.berkeleyvision.org/tutorial/interfaces.html#matlab
% ****************************************************************************
%
% input
% im color image as uint8 HxWx3
% use_gpu 1 to use the GPU, 0 to use the CPU
%
% output
% scores 1000-dimensional ILSVRC score vector
% maxlabel the label of the highest score
%
% You may need to do the following before you start matlab:
% $ export LD_LIBRARY_PATH=/opt/intel/mkl/lib/intel64:/usr/local/cuda-5.5/lib64
% $ export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libstdc++.so.6
% Or the equivalent based on where things are installed on your system
% and what versions are installed.
%
% Usage:
% im = imread('../../examples/images/cat.jpg');
% scores = classification_demo(im, 1);
% [score, class] = max(scores);
% Five things to be aware of:
% caffe uses row-major order
% matlab uses column-major order
% caffe uses BGR color channel order
% matlab uses RGB color channel order
% images need to have the data mean subtracted
% Data coming in from matlab needs to be in the order
% [width, height, channels, images]
% where width is the fastest dimension.
% Here is the rough matlab code for putting image data into the correct
% format in W x H x C with BGR channels:
% % permute channels from RGB to BGR
% im_data = im(:, :, [3, 2, 1]);
% % flip width and height to make width the fastest dimension
% im_data = permute(im_data, [2, 1, 3]);
% % convert from uint8 to single
% im_data = single(im_data);
% % reshape to a fixed size (e.g., 227x227).
% im_data = imresize(im_data, [IMAGE_DIM IMAGE_DIM], 'bilinear');
% % subtract mean_data (already in W x H x C with BGR channels)
% im_data = im_data - mean_data;
% If you have multiple images, cat them with cat(4, ...)
% Add caffe/matlab to your Matlab search PATH in order to use matcaffe
if exist('../+caffe', 'dir')
addpath('..');
else
error('Please run this demo from caffe/matlab/demo');
end
% Set caffe mode
if exist('use_gpu', 'var') && use_gpu
caffe.set_mode_gpu();
gpu_id = 0; % we will use the first gpu in this demo
caffe.set_device(gpu_id);
else
caffe.set_mode_cpu();
end
% Initialize the network using BVLC CaffeNet for image classification
% Weights (parameter) file needs to be downloaded from Model Zoo.
model_dir = '../../models/bvlc_reference_caffenet/';
net_model = [model_dir 'deploy.prototxt'];
net_weights = [model_dir 'bvlc_reference_caffenet.caffemodel'];
phase = 'test'; % run with phase test (so that dropout isn't applied)
if ~exist(net_weights, 'file')
error('Please download CaffeNet from Model Zoo before you run this demo');
end
% Initialize a network
net = caffe.Net(net_model, net_weights, phase);
if nargin < 1
% For demo purposes we will use the cat image
fprintf('using caffe/examples/images/cat.jpg as input image\n');
im = imread('../../examples/images/cat.jpg');
end
% prepare oversampled input
% input_data is Height x Width x Channel x Num
tic;
input_data = {prepare_image(im)};
toc;
% do forward pass to get scores
% scores are now Channels x Num, where Channels == 1000
tic;
% The net forward function. It takes in a cell array of N-D arrays
% (where N == 4 here) containing data of input blob(s) and outputs a cell
% array containing data from output blob(s)
scores = net.forward(input_data);
toc;
scores = scores{1};
scores = mean(scores, 2); % take average scores over 10 crops
[~, maxlabel] = max(scores);
% call caffe.reset_all() to reset caffe
caffe.reset_all();
% ------------------------------------------------------------------------
function crops_data = prepare_image(im)
% ------------------------------------------------------------------------
% caffe/matlab/+caffe/imagenet/ilsvrc_2012_mean.mat contains mean_data that
% is already in W x H x C with BGR channels
d = load('../+caffe/imagenet/ilsvrc_2012_mean.mat');
mean_data = d.mean_data;
IMAGE_DIM = 256;
CROPPED_DIM = 227;
% Convert an image returned by Matlab's imread to im_data in caffe's data
% format: W x H x C with BGR channels
im_data = im(:, :, [3, 2, 1]); % permute channels from RGB to BGR
im_data = permute(im_data, [2, 1, 3]); % flip width and height
im_data = single(im_data); % convert from uint8 to single
im_data = imresize(im_data, [IMAGE_DIM IMAGE_DIM], 'bilinear'); % resize im_data
im_data = im_data - mean_data; % subtract mean_data (already in W x H x C, BGR)
% oversample (4 corners, center, and their x-axis flips)
crops_data = zeros(CROPPED_DIM, CROPPED_DIM, 3, 10, 'single');
indices = [0 IMAGE_DIM-CROPPED_DIM] + 1;
n = 1;
for i = indices
for j = indices
crops_data(:, :, :, n) = im_data(i:i+CROPPED_DIM-1, j:j+CROPPED_DIM-1, :);
crops_data(:, :, :, n+5) = crops_data(end:-1:1, :, :, n);
n = n + 1;
end
end
center = floor(indices(2) / 2) + 1;
crops_data(:,:,:,5) = ...
im_data(center:center+CROPPED_DIM-1,center:center+CROPPED_DIM-1,:);
crops_data(:,:,:,10) = crops_data(end:-1:1, :, :, 5);
|
github
|
rpng/clatt-master
|
isspd.m
|
.m
|
clatt-master/isspd.m
| 717 |
utf_8
|
264b483d9cf3dbe9867b79e3e554a1c4
|
function [t,R] = isspd(Sigma)
%ISSPD Test if a matrix is symmetric positive definite
% T = ISSPD(SIGMA) returns a logical indicating whether the matrix SIGMA is
% square, symmetric, and positive definite, i.e., it is a valid full rank
% covariance matrix.
%
% [T,R] = ISSPD(SIGMA) returns the Cholesky factor of SIGMA in R. If SIGMA
% is not square symmetric, ISSPD returns [] in R.
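% Example (added sketch):
% [t,R] = isspd(eye(3)) % t = true, R = eye(3)
% t2 = isspd([1 2; 2 1]) % t2 = false (symmetric but not positive definite)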
% Test for square, symmetric
%
[nSamples,m] = size(Sigma);
if (nSamples == m) & all(all(abs(Sigma - Sigma') < 10*eps(max(abs(diag(Sigma))))))
% Test for positive definiteness
[R,p] = chol(Sigma);
if p == 0
t = true;
else
t = false;
end
else
R = [];
t = false;
end
|
github
|
rpng/clatt-master
|
rws.m
|
.m
|
clatt-master/rws.m
| 10,484 |
utf_8
|
0102e197e65e4fb724d6fb3ec9d3ea8b
|
%% Real-world simulation
function [v_m,omega_m,v,omega,xR_true,zr,Rr, zl,Rl, xT_true,PHI,Qd,zt,Rt ] = rws(nR,nSteps, nL,xL_true, dt, ...
v_true,omega_true,sigma_v,sigma_w, sigma_r,sigma_th,sigma_p, ...
nT, vt, sigma_a, at, sigma_j,dim_target, ...
max_range,min_range, r_max,omega_max,DORANDOM,SIGPERCENT)
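% (Summary inferred from the code below.)
% Simulates nR robots over nSteps time steps of length dt, together with nT
% targets following a constant-velocity (dim_target==4) or constant-
% acceleration (dim_target==6) model and nL known landmarks xL_true.
% Returns noise-free and noisy odometry (v, omega, v_m, omega_m), the true
% robot and target states, and range-bearing measurements zr/zl/zt with
% their covariances Rr/Rl/Rt from each robot to the other robots, the
% landmarks and the targets.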
%% robots' starting poses
x0 = zeros(3,nR);
xyinit = 0;
if nR>0, x0(:,1) = [xyinit, xyinit, 0]; end
if nR>1, x0(:,2) = [xyinit, -xyinit, 0]; end
if nR>2, x0(:,3) = [-xyinit, -xyinit, 0]; end
if nR>3, x0(:,4) = [-xyinit, xyinit, 0]; end
%% generate robot odometry
for ell = 1:nR
%true velocities w/ noise at each nSteps
v(ell,:) = v_true(ell,:);
omega(ell,:) = omega_true(ell,:);
for k = 1:nSteps
if k==1
xR_true(:,ell,k) = x0(:,ell);
else
xR_true(1,ell,k) = xR_true(1,ell,k-1)+v(ell,k-1)*dt*cos(xR_true(3,ell,k-1));
xR_true(2,ell,k) = xR_true(2,ell,k-1)+v(ell,k-1)*dt*sin(xR_true(3,ell,k-1));
xR_true(3,ell,k) = pi_to_pi(xR_true(3,ell,k-1)+omega(ell,k-1)*dt);
if DORANDOM
max_change = omega_max*dt;
if norm(xR_true(1:2,ell,k),2)>0.98*r_max
%then turn back to the center:
%the direction towards the center:
phi_c=atan2(-xR_true(2,ell,k),-xR_true(1,ell,k));
%we turn as much as the robot can towards the center:
phi1=atan2(sin(xR_true(3,ell,k-1)),cos(xR_true(3,ell,k-1)));
if abs(phi1-phi_c)<max_change
xR_true(3,ell,k)=phi_c;
else
xR_true(3,ell,k)=xR_true(3,ell,k)-max_change*sign(phi1-phi_c);
%crude handling of a special case...
if ((xR_true(1,ell,k)>0.98*r_max) && abs(phi_c)>pi/10)
xR_true(3,ell,k)=phi_c;
end
end
omega(ell,k-1) = pi_to_pi(xR_true(3,ell,k)-xR_true(3,ell,k-1))/dt;
end
end
end
end %k
%noisy odom measurements
nv = sigma_v*randn(size(v(ell,:)));
v_m(ell,:) = v(ell,:) + nv;
nw = sigma_w*randn(size(omega(ell,:)));
omega_m(ell,:) = omega(ell,:) + nw;
end%ell
%% target kinematic model: driven by continuous-time white noise
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% % **constant velocity model / zero acceleration model**
% % see Bar-Shalom pp.269
if dim_target ==4
Q = zeros(2,2,nT,nSteps); %uncertainty of acceleration
for i = 1:nT
for k = 1:nSteps
Q(:,:,i,k) = sigma_a^2*eye(2); % Q can also used for time-varing case
end
end
Qd = zeros(4,4, nT,nSteps); % The Discrete State Propagation Uncertainty
for i=1:nT
for k = 1:nSteps
Qd(:,:,i,k) = kron( [ 1/3*(dt^3) , 1/2*(dt^2); 1/2*(dt^2) , (dt) ] , Q(:,:,i,k) ); %\int [t;1]'*Q*[t;1] dt
end
end
% Generate the Real State of Target
xT_true = zeros(4,nT,nSteps);
for i = 1:nT
ax = zeros(1,nSteps-1);
ay = zeros(1,nSteps-1);
for k = 1:nSteps-1
a = sqrtm(Q(:,:,i,k))*randn(2,1);
ax(k) = a(1);
ay(k) = a(2);
end
PT_init = 1e0*eye(4); %uncertainty of target's initial state
xT_init_true = [10;-10;-vt;vt]; % initial state of targets
xT_true(:,i,1) = xT_init_true * (-1)^0; % mvnrnd(xT_init_true, PT_init)';
for k=2:nSteps
xT_true(1,i,k) = xT_true(1,i,k-1)+xT_true(3,i,k-1)*(dt) +1/2*ax(k-1)*(dt^2);
xT_true(2,i,k) = xT_true(2,i,k-1)+xT_true(4,i,k-1)*(dt) +1/2*ay(k-1)*(dt^2);
xT_true(3,i,k) = xT_true(3,i,k-1)+ax(k-1)*(dt);
xT_true(4,i,k) = xT_true(4,i,k-1)+ay(k-1)*(dt);
end
end
% The State Transition Matrix
PHI = zeros(4,4,nT,nSteps);
FF = [1 dt; 0 1];
perPhi = kron(FF, eye(2));
for i=1:nT
for k=1:nSteps
PHI(:,:,i,k) = perPhi;
end
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% % **constant acceleration model**
% % see Bar-Shalom pp.271
elseif dim_target==6
Qk = sigma_j^2*eye(2); % Q can also used for time-varing case
QQ = [ 1/20*dt^5, 1/8*dt^4, 1/6*dt^3 ;
1/8*dt^4, 1/3*dt^3, 1/2*dt^2 ;
1/6*dt^3, 1/2*dt^2, dt ];
Qd = zeros(6,6, nT,nSteps); % The Discrete State Propagation Uncertainty
for i=1:nT
for k = 1:nSteps
Qd(:,:,i,k) = kron(QQ, Qk);
end
end
% Generate the Real State of Target
xT_true = zeros(6,nT,nSteps);
for i = 1:nT
jx = zeros(1,nSteps-1);
jy = zeros(1,nSteps-1);
for k = 1:nSteps-1
jerk = sqrtm(Qk)*randn(2,1) ;
jx(k) = jerk(1);
jy(k) = jerk(2);
end
PT_init = 1e0*eye(6); %uncertainty of target's initial state
xT_init_true = [10;-10;-vt;vt; -at; at]; % initial state of targets
xT_true(:,i,1) = xT_init_true * (-1)^0; % mvnrnd(xT_init_true, PT_init)';
for k=2:nSteps
xT_true(1,i,k) = xT_true(1,i,k-1)+xT_true(3,i,k-1)*(dt) +1/2*xT_true(5,i,k-1)*(dt^2) +1/6*jx(k-1)*dt^3;
xT_true(2,i,k) = xT_true(2,i,k-1)+xT_true(4,i,k-1)*(dt) +1/2*xT_true(6,i,k-1)*(dt^2) +1/6*jy(k-1)*dt^3;
xT_true(3,i,k) = xT_true(3,i,k-1)+xT_true(5,i,k-1)*(dt) +1/2*jx(k-1)*dt^2;
xT_true(4,i,k) = xT_true(4,i,k-1)+xT_true(6,i,k-1)*(dt) +1/2*jy(k-1)*dt^2;
xT_true(5,i,k) = xT_true(5,i,k-1)+jx(k-1)*dt;
xT_true(6,i,k) = xT_true(6,i,k-1)+jy(k-1)*dt;
end
end
% The State Transition Matrix
FF = [1 dt 1/2*dt^2 ; 0 1 dt; 0 0 1];
perPhi = kron(FF, eye(2));
PHI = zeros(6,6,nT,nSteps);
for i=1:nT
for k=1:nSteps
PHI(:,:,i,k) = perPhi;
end
end
end
%% generate measurements
% Note that only the first curr_meas_num nonzeros are actual measurements
zr = zeros(nR,3, nR, nSteps); %robot-to-robot
zl = zeros(nR,3, nL, nSteps); %robot-to-landmark
zt = zeros(nR,3, nT, nSteps); %robot-to-target
for ell = 1:nR
for k = 1:nSteps
Rr{ell,k} = [];
Rl{ell,k} = [];
Rt{ell,k} = [];
end
end
for ell = 1:nR
for k = 1:nSteps
%% robot-to-robot measurements
curr_meas_num = 0;
for j = 1:nR
if j==ell, continue, end
% measurement wrt *global* frame
[th,r] = cart2pol(xR_true(1,j,k)-xR_true(1,ell,k), xR_true(2,j,k)-xR_true(2,ell,k));
%measurement wrt robot
th = pi_to_pi(th-xR_true(3,ell,k));
if SIGPERCENT, sigma_r = sigma_p*r; end %sigma_p percentage of range
%use measurement only if landmark is closer than max_range
if r<max_range && r>min_range
curr_meas_num = curr_meas_num+1;
%distance-bearing measurement
Rii = diag([sigma_r^2, sigma_th^2]);
Rr{ell,k} = blkdiag(Rr{ell,k},Rii);
%noise = mvnrnd([0;0],Rii);
r = r + sigma_r*randn; %noise(1);%
th = th + sigma_th*randn; %noise(2);%
%store measurement, and landmark id
zr(ell,1:2,curr_meas_num,k) = [r;th]; %[dx;dy];
zr(ell,3,curr_meas_num,k) = j;
end
end%j=nR
%% robot-to-landmark measurements
curr_meas_num = 0;
for j = 1:nL
% measurement wrt *global* frame
[th,r] = cart2pol(xL_true(1,j)-xR_true(1,ell,k), xL_true(2,j)-xR_true(2,ell,k));
%measurement wrt robot
th = pi_to_pi(th-xR_true(3,ell,k));
if SIGPERCENT, sigma_r = sigma_p*r; end %sigma_p percentage of range
%use measurement only if landmark is closer than max_range
if r<max_range && r>min_range
curr_meas_num = curr_meas_num+1;
%distance-bearing measurement
Rii = diag([sigma_r^2,sigma_th^2]);
Rl{ell,k} = blkdiag(Rl{ell,k},Rii);
%noise = mvnrnd([0;0],Rii);
r = r + sigma_r*randn; %noise(1);%
th = th + sigma_th*randn; %noise(2);%
%store measurement, and landmark id
zl(ell,1:2, curr_meas_num,k) = [r;th];%[dx;dy];%
zl(ell,3, curr_meas_num,k) = j;
end
end%nL
%% robot-to-target measurements
curr_meas_num = 0;
for j = 1:nT
% measurement wrt *global* frame
[th,r] = cart2pol(xT_true(1,j,k)-xR_true(1,ell,k), xT_true(2,j,k)-xR_true(2,ell,k));
%measurement wrt robot
th = pi_to_pi(th-xR_true(3,ell,k));
if SIGPERCENT, sigma_r = sigma_p*r; end %sigma_p percentage of range
if 1 %r<max_range && r>min_range %always measures targets!!!
curr_meas_num = curr_meas_num+1;
%distance-bearing measurement
Rii = diag([sigma_r^2,sigma_th^2]);
Rt{ell,k} = blkdiag(Rt{ell,k},Rii);
%noise = mvnrnd([0;0],Rii);
r = r + sigma_r*randn; %noise(1);%
th = th + sigma_th*randn; %noise(2);%
%store measurement, and landmark id
zt(ell,1:2, curr_meas_num,k) = [r;th];%[dx;dy];%
zt(ell,3, curr_meas_num,k) = j;
end
end%nT
end%nSteps
end%ell=nR
|
github
|
rezazad68/Dynamic-3D-Action-Recognition-on-RGB-D-Videos-master
|
elm_kernel.m
|
.m
|
Dynamic-3D-Action-Recognition-on-RGB-D-Videos-master/elm_kernel.m
| 8,062 |
utf_8
|
5f5cf9aaa5bfe2cd1d8e863c871cbbf9
|
function [TrainingTime, TestingTime, TrainingAccuracy, TestingAccuracy, TY, ConfusMatrix] = elm_kernel(train_data, test_data, Elm_Type, Regularization_coefficient, Kernel_type, Kernel_para)
% Usage: elm(TrainingData_File, TestingData_File, Elm_Type, NumberofHiddenNeurons, ActivationFunction)
% OR: [TrainingTime, TestingTime, TrainingAccuracy, TestingAccuracy] = elm(TrainingData_File, TestingData_File, Elm_Type, NumberofHiddenNeurons, ActivationFunction)
%
% Input:
% TrainingData_File - Filename of training data set
% TestingData_File - Filename of testing data set
% Elm_Type - 0 for regression; 1 for (both binary and multi-classes) classification
% Regularization_coefficient - Regularization coefficient C
% Kernel_type - Type of Kernels:
% 'RBF_kernel' for RBF Kernel
% 'lin_kernel' for Linear Kernel
% 'poly_kernel' for Polynomial Kernel
% 'wav_kernel' for Wavelet Kernel
%Kernel_para - A number or vector of Kernel Parameters. eg. 1, [0.1,10]...
% Output:
% TrainingTime - Time (seconds) spent on training ELM
% TestingTime - Time (seconds) spent on predicting ALL testing data
% TrainingAccuracy - Training accuracy:
% RMSE for regression or correct classification rate for classification
% TestingAccuracy - Testing accuracy:
% RMSE for regression or correct classification rate for classification
%
% MULTI-CLASS CLASSIFICATION: NUMBER OF OUTPUT NEURONS WILL BE AUTOMATICALLY SET EQUAL TO NUMBER OF CLASSES
% FOR EXAMPLE, if there are 7 classes in all, there will be 7 output
% neurons; the neuron with the highest output gives the predicted class,
% e.g. if neuron 5 has the highest output the input belongs to the 5-th class
%
% Sample1 regression: [TrainingTime, TestingTime, TrainingAccuracy, TestingAccuracy] = elm_kernel('sinc_train', 'sinc_test', 0, 1, 'RBF_kernel', 100)
% Sample2 classification: elm_kernel('diabetes_train', 'diabetes_test', 1, 1, 'RBF_kernel',100)
%
%%%% Authors: MR HONG-MING ZHOU AND DR GUANG-BIN HUANG
%%%% NANYANG TECHNOLOGICAL UNIVERSITY, SINGAPORE
%%%% EMAIL: [email protected]; [email protected]
%%%% WEBSITE: http://www.ntu.edu.sg/eee/icis/cv/egbhuang.htm
%%%% DATE: MARCH 2012
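% Data layout (added note, inferred from the code below): each row of
% train_data / test_data is one sample, with the class label (or regression
% target) in column 1 and the features in columns 2:end.
% Example (sketch; Xtrain/Ytrain/Xtest/Ytest are assumed to exist):
% train = [Ytrain(:), Xtrain]; test = [Ytest(:), Xtest];
% [~, ~, accTrain, accTest] = elm_kernel(train, test, 1, 1, 'RBF_kernel', 100);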
%%%%%%%%%%% Macro definition
REGRESSION=0;
CLASSIFIER=1;
%%%%%%%%%%% Load training dataset
%train_data=load(TrainingData_File);
T=train_data(:,1)';
P=train_data(:,2:size(train_data,2))';
clear train_data; % Release raw training data array
%%%%%%%%%%% Load testing dataset
%test_data=load(TestingData_File);
TV.T=test_data(:,1)';
TV.P=test_data(:,2:size(test_data,2))';
clear test_data; % Release raw testing data array
ConfusMatrix = zeros(max(TV.T),max(TV.T));
C = Regularization_coefficient;
NumberofTrainingData=size(P,2);
NumberofTestingData=size(TV.P,2);
if Elm_Type~=REGRESSION
%%%%%%%%%%%% Preprocessing the data of classification
sorted_target=sort(cat(2,T,TV.T),2);
label=zeros(1,1); % Find and save in 'label' class label from training and testing data sets
label(1,1)=sorted_target(1,1);
j=1;
for i = 2:(NumberofTrainingData+NumberofTestingData)
if sorted_target(1,i) ~= label(1,j)
j=j+1;
label(1,j) = sorted_target(1,i);
end
end
number_class=j;
NumberofOutputNeurons=number_class;
%%%%%%%%%% Processing the targets of training
temp_T=zeros(NumberofOutputNeurons, NumberofTrainingData);
for i = 1:NumberofTrainingData
for j = 1:number_class
if label(1,j) == T(1,i)
break;
end
end
temp_T(j,i)=1;
end
T=temp_T*2-1;
%%%%%%%%%% Processing the targets of testing
temp_TV_T=zeros(NumberofOutputNeurons, NumberofTestingData);
for i = 1:NumberofTestingData
for j = 1:number_class
if label(1,j) == TV.T(1,i)
break;
end
end
temp_TV_T(j,i)=1;
end
TV.T=temp_TV_T*2-1;
% end if of Elm_Type
end
%%%%%%%%%%% Training Phase %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
tic;
n = size(T,2);
Omega_train = kernel_matrix(P',Kernel_type, Kernel_para);
OutputWeight=((Omega_train+speye(n)/C)\(T'));
TrainingTime=toc;
%%%%%%%%%%% Calculate the training output
Y=(Omega_train * OutputWeight)'; % Y: the actual output of the training data
%%%%%%%%%%% Calculate the output of testing input
tic;
Omega_test = kernel_matrix(P',Kernel_type, Kernel_para,TV.P');
TY=(Omega_test' * OutputWeight)'; % TY: the actual output of the testing data
TestingTime=toc;
%%%%%%%%%% Calculate training & testing classification accuracy
if Elm_Type == REGRESSION
%%%%%%%%%% Calculate training & testing accuracy (RMSE) for regression case
TrainingAccuracy=sqrt(mse(T - Y));
TestingAccuracy=sqrt(mse(TV.T - TY));
end
if Elm_Type == CLASSIFIER
%%%%%%%%%% Calculate training & testing classification accuracy
MissClassificationRate_Training=0;
MissClassificationRate_Testing=0;
for i = 1 : size(T, 2)
[x, label_index_expected]=max(T(:,i));
[x, label_index_actual]=max(Y(:,i));
if label_index_actual~=label_index_expected
MissClassificationRate_Training=MissClassificationRate_Training+1;
end
end
TrainingAccuracy=1-MissClassificationRate_Training/size(T,2);
for i = 1 : size(TV.T, 2)
[x, label_index_expected]=max(TV.T(:,i));
[x, label_index_actual]=max(TY(:,i));
if label_index_actual~=label_index_expected
MissClassificationRate_Testing=MissClassificationRate_Testing+1;
end
ConfusMatrix(label_index_actual,label_index_expected) = ...
ConfusMatrix(label_index_actual,label_index_expected) + 1;
end
TestingAccuracy=1-MissClassificationRate_Testing/size(TV.T,2);
end
%%%%%%%%%%%%%%%%%% Kernel Matrix %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function omega = kernel_matrix(Xtrain,kernel_type, kernel_pars,Xt)
nb_data = size(Xtrain,1);
if strcmp(kernel_type,'RBF_kernel'),
if nargin<4,
XXh = sum(Xtrain.^2,2)*ones(1,nb_data);
omega = XXh+XXh'-2*(Xtrain*Xtrain');
omega = exp(-omega./kernel_pars(1));
else
XXh1 = sum(Xtrain.^2,2)*ones(1,size(Xt,1));
XXh2 = sum(Xt.^2,2)*ones(1,nb_data);
omega = XXh1+XXh2' - 2*Xtrain*Xt';
omega = exp(-omega./kernel_pars(1));
end
elseif strcmp(kernel_type,'lin_kernel')
if nargin<4,
omega = Xtrain*Xtrain';
else
omega = Xtrain*Xt';
end
elseif strcmp(kernel_type,'poly_kernel')
if nargin<4,
omega = (Xtrain*Xtrain'+kernel_pars(1)).^kernel_pars(2);
else
omega = (Xtrain*Xt'+kernel_pars(1)).^kernel_pars(2);
end
elseif strcmp(kernel_type,'wav_kernel')
if nargin<4,
XXh = sum(Xtrain.^2,2)*ones(1,nb_data);
omega = XXh+XXh'-2*(Xtrain*Xtrain');
XXh1 = sum(Xtrain,2)*ones(1,nb_data);
omega1 = XXh1-XXh1';
omega = cos(kernel_pars(3)*omega1./kernel_pars(2)).*exp(-omega./kernel_pars(1));
else
XXh1 = sum(Xtrain.^2,2)*ones(1,size(Xt,1));
XXh2 = sum(Xt.^2,2)*ones(1,nb_data);
omega = XXh1+XXh2' - 2*(Xtrain*Xt');
XXh11 = sum(Xtrain,2)*ones(1,size(Xt,1));
XXh22 = sum(Xt,2)*ones(1,nb_data);
omega1 = XXh11-XXh22';
omega = cos(kernel_pars(3)*omega1./kernel_pars(2)).*exp(-omega./kernel_pars(1));
end
end
|
github
|
luciana-marques/orps-master
|
DataGeneration.m
|
.m
|
orps-master/DataGeneration.m
| 22,194 |
utf_8
|
f81037670752ee4a6c4925a030e28ca6
|
%=========================================================================
% Load Scheduling Problem - Data Generation
% Based on: Vasirani and Ossowski (2012) and Mohsenian-Rad et al. (2010)
% Institution: Federal University of Minas Gerais (UFMG)
% Department: Graduate Program in Electrical Engineering
% Author: Luciana Sant'Ana Marques
% Date: Feb 20th, 2018 at 16:42
%=========================================================================
function [countLoads, loads, w, pi, pc, b, S, alpha, beta, gamma] = ...
DataGeneration(N, last, delta, seed)
% Input:
% N: number of consumers
% last: time horizon
% delta: size of slot in hours
% seed: if -1, no seed;
% Action:
% Generate a random instance based on Vasirani and Ossowski (2012)
% Output:
% countLoads: (1 x N) vector of how many shiftable loads each consumer
% has
% loads: (1 x countLoads) structure with all data needed to solve
% the scheduling problem
% w: (N x last) vector with base load of each consumer
% pi: (1 x last) vector with prices (TOU tariffs) - for PP and IBR
% pc: peak price - for PP
% b: (1 x last) vector with second level of tariffs - for IBR
% alpha: (1 x last) vector with quadratic costs - for QC
% beta: (1 x last) vector with linear costs - for QC
% gamma: (1 x last) vector with fixed costs - for QC
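% Example (added sketch; parameter values are illustrative only):
% N = 10; % number of consumers
% last = 144; % 24 h divided into 10-minute slots
% delta = 1/6; % slot size in hours
% [countLoads, loads, w, prices, pc, b, S, alpha, beta, gamma] = ...
% DataGeneration(N, last, delta, 1);
% % ('prices' is used for the 4th output to avoid shadowing MATLAB's pi)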
% Initialize seed (if any)
if seed == -1
rng('shuffle')
else
rng('default');
rng(seed);
end
% Sets of loads
% ---------------------------------
% All of them are presented in the article. Not every consumer
% have all of them. This will be decided by probability
% Types of shiftable interruptible loads
IS = {'AC' 'EVN' 'EVM'};
% Types of shiftable uninterruptible loads
AS = {'WASHING' 'DRYER' 'DISHWASHER'};
% Types of shiftable loads
S = [AS,IS];
% Types of basic (non-shiftable) loads
NS = {'WATER' 'LIGHTING' 'KITCHEN' 'FRIDGE' 'FREEZER' 'OVEN'...
'MICROWAVE' 'TV' 'DESKTOP' 'LAPTOT' 'HEAT1' 'HEAT2' 'HEAT3'};
% ----
% Price of energy/capacity in the day-ahead market
% ---------------------------------
% We consider a hypothetical case (to be refined later). We define
% a base price and 6 periods in a day, each of them having one base price
% multiplier
basePrice = .16;
priceMultiplier = [1 1.1 1.3 1.2 2 1.2];
hourInterval = [0 6 11 13 17 22 24];
% Calculate price considering the time slot size
pi = zeros(1,last);
j = 1;
for i = 1:last
% Calculate hour
aux = i*delta;
% Calculate price
pi(1,i) = basePrice*priceMultiplier(1,j);
% Verify if it needs to change period
if (aux >= hourInterval(1,j+1))
j = j+1;
end
end
% Capacity price (slightly higher than in the article)
pc = 0.07;
% Inclining block rate price
ibrMultiplier = 1.5;
b = ibrMultiplier*pi;
% Quadratic costs (based on Mohsenian-Rad et al. (2010))
% alpha_t*(P_t)^2 + beta_t*P_t + gamma_t
% where gamma_t = 0 for simplicity
qcMultiplier = 1/80;
alpha = qcMultiplier*pi;
beta = pi;
gamma = zeros(1,last);
% ----
% Base Load
% ---------------------------------
% Calculate the load for each equipment and consumer and aggregate them
% Based entirely on article
% Total base load
w = zeros(N,last);
% Base load of each consumer and equipment
auxW = zeros(N,size(NS,2),last);
% Probability of having each equipment
prob = [0.2 1 0.53 1 0.23 0.77 0.9 1 0.52 0.41 0.41 0.41 0.41];
% Equipment power
power = [1.2 .176 .0165 .08 .07 1.2 1.3 .01 .1 .02 .02 .02 .02];
% Shiftable loads
% ---------------------------------
% Calculate the preferred times and durations for each equipment and
% consumer
% Based entirely on article
% Probability of having each equipment
probS = [.93 .28 .53 .49 .1];
% Starting time of loads
ts = zeros(N,size(S,2));
% Ending time of loads
tf = zeros(N,size(S,2));
% Duration of loads
d = zeros(N,size(S,2));
% Power rate of loads
L = zeros(N,size(S,2));
% Equipment power
powerS = [.19 1.24 .66 1 1.5 1.92];
% Structure of loads
loads = struct('n',{},'type',{},'alpha',{},'beta',{},'power',{},...
'duration',{},'isUn',{},'solution',{});
% Count how many loads each consumer have
countLoads = zeros(1,N);
% Counter
count = 0;
% For each consumer
for n = 1:N
% Electric Water Heater
i = 1;
p = rand;
% If consumer has the equipment
if (p <= prob(i))
% Initial time slot
j = randi([1 6]);
while (j <= last)
% Until 18 hrs, the water heater is turned on for
% 30 min every 2 hrs
while (j/6 < 18)
auxW(n,i,j) = power(1,i);
auxW(n,i,j+1) = power(1,i);
auxW(n,i,j+2) = power(1,i);
j = j+12;
end
% After 18 hrs, the water heater is turned on for
% 30 min every 1 hr
while (j/6 <= 24)
auxW(n,i,j) = power(1,i);
if ((j+1)/6 <= 24)
auxW(n,i,j+1) = power(1,i);
end
if ((j+2)/6 <= 24)
auxW(n,i,j+2) = power(1,i);
end
j = j+6;
end
end
end % Otherwise consumer n doesn't have a water heater (zeros)
% Lighting
i = 2;
p = rand;
% If consumer has the equipment
if (p <= prob(i))
% Average from spanish document http://www.ree.es/sites/default/files/downloadable/atlas_indel_ree.pdf
% p. 64: lighting represents 32% of power load at 22hrs -> 0,176 kW
mu = power(1,i);
% Variance equals 5% of average
sigma = sqrt(mu*.05);
% Consumption is normally distributed in every time slot
aux = max(normrnd(mu,sigma,1,last),0);
auxW(n,i,:) = aux;
end % Otherwise consumer n doesn't have lighting (zeros)
% Kitchen
i = 3;
p = rand;
% If consumer has the equipment
if (p <= prob(i))
% Average from spanish document
% http://www.ree.es/sites/default/files/downloadable/atlas_indel_ree.pdf
% p. 64: lighting represents 32% of power load at 22hrs -> 0,176 kW
mu = power(1,i);
% Variance equals 5% of average
sigma = sqrt(mu*.05);
% Consumption is normally distributed in every time slot
% Until 6 am consumer doesn't use kitchen
aux = max(normrnd(mu,sigma,1,last-1/delta*6),0);
auxW(n,i,1/delta*6+1:last) = aux;
end % Otherwise consumer n doesn't have a kitchen (zeros)
% Fridge
i = 4;
p = rand;
% If consumer has the equipment
if (p <= prob(i))
aux = repmat(power(1,i),1,last);
auxW(n,i,:) = aux;
end % Otherwise consumer n doesn't have a fridge (zeros)
% Freezer
i = 5;
p = rand;
% If consumer has the equipment
if (p <= prob(i))
aux = repmat(power(1,i),1,last);
auxW(n,i,:) = aux;
end % Otherwise consumer n doesn't have a freezer (zeros)
% Oven
i = 6;
p = rand;
% If consumer has the equipment
if (p <= prob(i))
% Oven is used 2 times a week; so probability of using it at
% a day is 2/7
% In a day
p1 = rand;
if (p1 <= 2/7)
% At lunch time is 0.8
p2 = rand;
if (p2 <= .8)
% Run between 20 min and 1hr - number of times slots
slots = randi([20/(delta*60) 60/(delta*60)]);
% Start time slot: 14 +- 1hr
startSlot = randi([13*(1/delta) 15*(1/delta)]);
% Power consumption
for j = startSlot:startSlot+slots-1
auxW(n,i,j) = power(1,i);
end
end
% At dinner time is 0.2
p2 = rand;
if (p2 <= .2)
% Run between 20 min and 1hr - number of times slots
slots = randi([20/(delta*60) 60/(delta*60)]);
% Start time slot: 21 +- 1hr
startSlot = randi([20*(1/delta) 22*(1/delta)]);
% Power consumption
for j = startSlot:startSlot+slots-1
auxW(n,i,j) = power(1,i);
end
end
end % Otherwise not used at this day
end % Otherwise consumer n doesn't have an oven (zeros)
% Microwave
i = 7;
p = rand;
% If consumer has the equipment
if (p <= prob(i))
% Possible time intervals they use it (for 10 minutes)
startSlot = [randi([8*(1/delta) 10*(1/delta)]),
randi([10*(1/delta) 12*(1/delta)]),
randi([14*(1/delta) 16*(1/delta)]),
randi([19*(1/delta) 21*(1/delta)])];
probMicrowave = [.12 .2 .25 .43];
for j = 1:4
p1 = rand;
if (p1 <= probMicrowave(j))
auxW(n,i,startSlot(j)) = power(1,i);
end
end
end % Otherwise consumer n doesn't have a microwave (zeros)
% TV, Desktop and computer
for i = 8:10
p = rand;
% If consumer has the equipment
if (p <= prob(i))
% It is used twice a day: 14 +- 1hr and 20 +- 1hrs
startSlot = [randi([13*(1/delta) 14*(1/delta)]),
randi([19*(1/delta) 21*(1/delta)])];
% Duration between 1 and 3 hrs
duration = [randi([1*(1/delta) 3*(1/delta)]),
randi([1*(1/delta) 3*(1/delta)])];
% Power consumption
for j = startSlot(1):startSlot(1)+duration(1)-1
auxW(n,i,j) = power(1,i);
end
% Power consumption
for j = startSlot(2):startSlot(2)+duration(2)-1
auxW(n,i,j) = power(1,i);
end
end % Otherwise consumer n doesn't have a TV, desktop or laptop (zeros)
end % TV, desktop and computer
% Heating
% must be on for 10 min in every hour between 8am and 8pm
% must be on for 30 min in every hour between 8pm and 11pm
% consumers can have between 1 and 3
% Position of load heat in vector of all loads
% Consumers can have 3
for i = 11:13
p = rand;
% If consumer has the equipment
if (p <= prob(i))
% For each hour
for tau = 8:22
% Starting time
startSlot = randi([1/delta*tau+1 1/delta*tau+1/delta]);
% Duration
if (tau <= 20)
slots = 1;
else
slots = 3;
end
% Power consumption
for j = startSlot:startSlot+slots-1
auxW(n,i,j) = power(1,i);
end
end % all heat loads calculated
end % Otherwise consumer n doesn't have heating (zeros)
end % Count for 3 heatings
% Uninterruptible loads
% Washing machine
% Used 3 times a week, in the morning (11+-1hr to 15+-1hr)
% with probability 78%, or at night (19+-1hr to 23+-1hr).
% Duration drawn from ~U[1,2] hours
i = 1;
p = rand;
% If consumer has the equipment
if (p <= probS(i))
% Used 3 times a week
p1 = rand;
if (p1 <= 3/7)
% Count loads to be shcedule and of consumer n
count = count + 1;
countLoads(1,n) = countLoads(1,n) + 1;
% At morning or at nigth
p2 = rand;
if (p2 <= .78) % at morning
% Starting time
js = randi([10*1/delta 12*1/delta]);
% Ending time
jf = randi([14*1/delta 16*1/delta]);
else % at night
% Starting time
js = randi([18*1/delta 20*1/delta]);
% Ending time
jf = randi([22*1/delta 24*1/delta]);
end
% Consumer
loads(count).n = n;
% Type
loads(count).type = 'WASHING';
% Starting time
loads(count).alpha = js;
ts(n,i) = js;
% Ending time
loads(count).beta = jf;
tf(n,i) = jf;
% Duration (1 to 2 hours)
duration = randi([1*1/delta 2*1/delta]);
loads(count).duration = duration;
d(n,i) = duration;
% Load
loads(count).power = powerS(i);
L(n,i) = powerS(i);
% It is uninterruptible?
loads(count).isUn = true;
% Initialize solution vector
loads(count).solution = zeros(1,jf-js+1);
end % Otherwise not used this day
end % Otherwise consumer n doesn't have washing machine (zeros)
% Dryer
% Used 3 times a week, between 17+-1hr and 21+-1hr
% Duration between 1 and 1.5 hours
i = 2;
p = rand;
% If consumer has the equipment
if (p <= probS(i))
% Used 3 times a week
p1 = rand;
if (p1 <= 3/7)
% Count loads to be scheduled, in total and for consumer n
count = count + 1;
countLoads(1,n) = countLoads(1,n) + 1;
% Consumer
loads(count).n = n;
% Type
loads(count).type = 'DRYER';
% Starting time
js = randi([16*1/delta 18*1/delta]);
loads(count).alpha = js;
ts(n,i) = js;
% Ending time
jf = randi([20*1/delta 22*1/delta]);
loads(count).beta = jf;
tf(n,i) = jf;
% Duration (1 to 1.5 hours)
duration = randi([1*1/delta 1.5*1/delta]);
loads(count).duration = duration;
d(n,i) = duration;
% Load
loads(count).power = powerS(i);
L(n,i) = powerS(i);
% It is uninterruptible?
loads(count).isUn = true;
% Initialize solution vector
loads(count).solution = zeros(1,jf-js+1);
end % Otherwise not used this day
end % Otherwise consumer n doesn't have dryer (zeros)
% Dishwasher
% Used 4 times a week, starting at 15+-1hr (50%) or at 19+-1hr
% Finishing at 19+-1hr or 23+-1hr
% Duration between 1 and 2 hours
i = 3;
p = rand;
% If consumer has the equipment
if (p <= probS(i))
% Used 4 times a week
p1 = rand;
if (p1 <= 4/7)
% Count loads to be scheduled, in total and for consumer n
count = count + 1;
countLoads(1,n) = countLoads(1,n) + 1;
% At afternoon or at night
p2 = rand;
if (p2 <= .50) % at afternoon
% Starting time
js = randi([14*1/delta 16*1/delta]);
% Ending time
jf = randi([18*1/delta 20*1/delta]);
else % at night
% Starting time
js = randi([18*1/delta 20*1/delta]);
% Ending time
jf = randi([22*1/delta 24*1/delta]);
end
% Consumer
loads(count).n = n;
% Type
loads(count).type = 'DISHWASHER';
% Starting time
loads(count).alpha = js;
ts(n,i) = js;
% Ending time
loads(count).beta = jf;
tf(n,i) = jf;
% Duration (1 to 2 hours)
duration = randi([1*1/delta 2*1/delta]);
loads(count).duration = duration;
d(n,i) = duration;
% Load
loads(count).power = powerS(i);
L(n,i) = powerS(i);
% It is uninterruptible?
loads(count).isUn = true;
% Initialize solution vector
loads(count).solution = zeros(1,jf-js+1);
end % Otherwise not used this day
end % Otherwise consumer n doesn't have dishwasher (zeros)
% Interruptible loads
% AC
% must be on between 1pm and 6 pm for a duration
% depending on the amount of energy consumed
i = 4;
p = rand;
% If consumer has the equipment
if (p <= probS(i))
% Count loads to be scheduled, in total and for consumer n
count = count + 1;
countLoads(1,n) = countLoads(1,n) + 1;
% Consumer
loads(count).n = n;
% Type
loads(count).type = 'AC';
% Starting time
js = 1/delta*13 + 1;
loads(count).alpha = js;
ts(n,i) = js;
% Ending time
jf = 1/delta*18 + 1/delta;
loads(count).beta = jf;
tf(n,i) = jf;
% Duration
% Amount of energy for AC (from 1.6 to 5.6 kWh uniformaly
% distributed)
energy = 1.6+(5.6-1.6).*rand;
duration = round(energy/(powerS(i)*delta));
loads(count).duration = duration;
d(n,i) = duration;
% Power
loads(count).power = powerS(i);
L(n,i) = powerS(i);
% It is uninterruptible?
loads(count).isUn = false;
% Initialize solution vector
loads(count).solution = zeros(1,jf-js+1);
end % Otherwise consumer n doesn't have AC (zeros)
% EV
% Owner arrives at 19+-1hr and has to get out to work at 8+-1hr
% So 2 shiftable loads
i = 5;
p = rand;
% If consumer has the equipment
if (p <= probS(i))
% Battery size (Nissan Leaf) = 24 kWh
bat = 24;
% SOC of the battery when the owner arrives
soc = .3+(.8-.3).*rand;
% For night period (when owner arrives)
% Count loads to be scheduled, in total and for consumer n
count = count + 1;
countLoads(1,n) = countLoads(1,n) + 1;
% Consumer
loads(count).n = n;
% Type
loads(count).type = 'EV';
% Starting time
js = randi([18*1/delta 20*1/delta]);
loads(count).alpha = js;
ts(n,i) = js;
% Ending time
jf = last;
loads(count).beta = jf;
tf(n,i) = jf;
% k1 for duration calculation
k1 = jf - js + 1;
% Power
loads(count).power = powerS(i);
L(n,i) = powerS(i);
% It is uninterruptible?
loads(count).isUn = false;
% Initialize solution vector
loads(count).solution = zeros(1,jf-js+1);
% For morning period (before owner has to go)
% Count loads to be scheduled, in total and for consumer n
count = count + 1;
countLoads(1,n) = countLoads(1,n) + 1;
% Consumer
loads(count).n = n;
% Type
loads(count).type = 'EV';
% Starting time
js = 1;
loads(count).alpha = js;
ts(n,i+1) = js;
% Ending time
jf = randi([7*1/delta 9*1/delta]);
loads(count).beta = jf;
tf(n,i+1) = jf;
% Duration
% k2 for duration calculation
k2 = jf;
energy = bat*(1-soc);
e1 = energy*k1/(k1+k2); %night charge
e2 = energy*k2/(k1+k2); %morning charge
duration = round(e1/(powerS(i)*delta)); %night duration
loads(count-1).duration = duration;
d(n,i) = duration;
duration = round(e2/(powerS(i)*delta)); %morning duration
loads(count).duration = duration;
d(n,i+1) = duration;
% Power
loads(count).power = powerS(i);
L(n,i+1) = powerS(i);
% It is uninterruptible?
loads(count).isUn = false;
% Initialize solution vector
loads(count).solution = zeros(1,jf-js+1);
end % Otherwise consumer n doesn't have an EV (zeros)
end % all consumers analyzed
w(:,:) = sum(auxW,2);
end
|
github
|
luciana-marques/orps-master
|
LocalSearch.m
|
.m
|
orps-master/LocalSearch.m
| 3,859 |
utf_8
|
a4994e40245585afd181436240ec5a32
|
%===============================================================
% Load Scheduling Problem - Local Search
% Institution: Federal University of Minas Gerais (UFMG)
% Department: Graduate Program in Electrical Engineering
% Course: Network Optimization
% Author: Luciana Sant'Ana Marques
% Date: Feb 23th, 2018 at 16:30
%===============================================================
function [loads, fbest, loadCurve] = LocalSearch(last, delta, loads, ...
pi, pc, ro, R, isPP, totalCost, loadCurve, timeLim)
% Input:
% last, delta: number of time slots and slot size in hours
% loads: structure array of shiftable loads with their current solutions
% pi, pc, ro, R, isPP: tariff data passed through to UpdateCost
% totalCost: cost of the incumbent schedule
% loadCurve: aggregate load curve of the incumbent schedule
% timeLim: time limit in seconds
% Action:
% Local search over the incumbent schedule: swap moves for interruptible
% loads and left/right shift moves for uninterruptible loads, keeping
% every move that does not increase the total cost.
% Output:
% loads: loads structure with the improved solutions
% fbest: cost of the best schedule found
% loadCurve: aggregate load curve of the best schedule
% Best cost
fbest = totalCost;
nLoads = size(loads,2);
a = 1;
stop = 0;
tic
while (stop ~= 1) && (a <= nLoads)
sol = loads(a).solution;
auxSol = sol;
% Check which type of appliance
% If interruptible load
if loads(a).isUn == 0
for i = 1:size(sol,2)
for j = (i+1):size(sol,2)
sol = auxSol;
if sol(i) ~= sol(j)
% Take the solution vector
aux = sol(i);
sol(i) = sol(j);
sol(j) = aux;
auxLoads = loads;
auxLoads(a).solution = sol;
[auxCost, auxTotalLoad] = UpdateCost(last, delta, auxLoads(a), ...
loads(a), pi, pc, ro, R, isPP, ...
loadCurve);
if auxCost <= fbest
fbest = auxCost;
loads(a).solution = sol;
loadCurve = auxTotalLoad;
end
end
end
end
% If uninterruptible load
else
d = loads(a).duration;
for i = 1:size(sol,2)
if sol(i) == 1
break
end
end
posIn = i;
posFin = i+d-1;
% Go left
for i = 1:(posIn-1)
% Take the solution vector
sol(posIn-i) = 1;
sol(posFin-i+1) = 0;
auxLoads = loads;
auxLoads(a).solution = sol;
[auxCost, auxTotalLoad] = UpdateCost(last, delta, auxLoads(a), ...
loads(a), pi, pc, ro, R, isPP, ...
loadCurve);
if auxCost <= fbest
fbest = auxCost;
loads(a).solution = sol;
loadCurve = auxTotalLoad;
end
end
% Size of soluction vector
auxSize = size(sol,2);
% Go right
sol = auxSol;
for i = (posFin+1):auxSize
sol(i) = 1;
sol(i-d) = 0;
auxLoads = loads;
auxLoads(a).solution = sol;
[auxCost, auxTotalLoad] = UpdateCost(last, delta, auxLoads(a), ...
loads(a), pi, pc, ro, R, isPP, ...
loadCurve);
if auxCost <= fbest
fbest = auxCost;
loads(a).solution = sol;
loadCurve = auxTotalLoad;
end
end
end
% Calculate time
time = toc;
% If time limit reached, stop
if (time > timeLim)
stop = 1;
break
end
a = a + 1;
end
end
|
github
|
luciana-marques/orps-master
|
SimulatedAnnealing.m
|
.m
|
orps-master/SimulatedAnnealing.m
| 5,346 |
utf_8
|
144c6ee658cf75cc304607c331e11b8e
|
%===============================================================
% Simulated Annealing for Scheduling Problem
% Institution: Federal University of Minas Gerais (UFMG)
% Department: Graduate Program in Electrical Engineering
% Author: Luciana Sant'Ana Marques
% Date: Feb 23rd, 2018 at 16:28
%===============================================================
function [optimal_load, optimal_cost, costs, optimal_load_curve] = SimulatedAnnealing(t0, alpha,...
Mk, last, delta, loads, pi, pc, ro, R, isPP,...
totalCost, wLocal, localAfterSA, loadCurve, timeLim)
% Input:
% loads: structure with the loads and their current solution
% loadCurve: current total load curve
% totalCost: cost of the current (initial) solution
% t0: initial temperature
% alpha: parameter for decreasing the temperature
% Mk: number of repetitions at each temperature
% wLocal: if true, run LocalSearch on each improving solution
% localAfterSA: if true, run LocalSearch on the final solution
% timeLim: time limit in seconds
% Action:
% Construct a solution using Simulated Annealing to
% minimize cost.
% Shake the loads, compute the current cost, and keep the new solution
% if it improves the cost or, otherwise, accept it with a random
% (Metropolis) probability. The best cost found is kept in "optimal_cost".
% Output:
% optimal_load: loads structure associated with the best solution found
% optimal_cost: total cost of the best solution found
% costs: history of current/previous/best costs over the iterations
% optimal_load_curve: total load curve of the best solution
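% A hedged usage sketch (the SA parameter values below are illustrative
% assumptions, not taken from the original experiments):
%   t0 = 100; alpha = 0.95; Mk = 50; timeLim = 120;
%   [bestLoads, bestCost, hist, bestCurve] = SimulatedAnnealing(t0, alpha, ...
%       Mk, last, delta, loads, pi, pc, ro, R, true, totalCost, ...
%       true, true, loadCurve, timeLim);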
% SA Parameters
k = 1; % annealing parameter
delta2 = 10^(-10);
K = round((log(delta2)-log(t0))/log(alpha)); % number of temperatures
stop = 0; % flag for stop criteria
temperature = t0;
% Starting with the Previous Cost
previous_cost = totalCost;
optimal_cost = totalCost; % Keep the best solution
current_cost = totalCost;
% Loads struct
previous_load = loads;
optimal_load = loads;
% Load curve
previous_load_curve = loadCurve;
optimal_load_curve = loadCurve;
% Counter of iterations without solution improvement
iterations = 0;
% Number of Temperatures without improvement
numIt = 5;
% Test Vector
costs = zeros(3,K*Mk);
% Iterations counter
i = 1;
% Time limit
tic
% While no stopping criterion is met and k <= K
while (stop ~= 1) && (k <= K)
m = 1;
iterations = iterations + 1;
while m <= Mk
% Store results
costs(1,i) = current_cost;
costs(2,i) = previous_cost;
costs(3,i) = optimal_cost;
i = i + 1;
% Shake the solution
[current_load, appIndex] = Neighborhood(previous_load,size(previous_load,2));
% Calculate the current cost:
[current_cost,current_load_curve] = UpdateCost(last, delta, current_load(appIndex), ...
previous_load(appIndex), pi, pc, ro, R, isPP, ...
previous_load_curve);
% Comparison to keep the best solution and load curve:
if current_cost <= optimal_cost
optimal_cost = current_cost;
optimal_load = current_load;
optimal_load_curve = current_load_curve;
% If SA with local search
if wLocal
% Perform local search to try to improve solution
[localSloads, localScost, localLoad_curve] = LocalSearch(last, delta, current_load, ...
pi, pc, ro, R, isPP, current_cost, current_load_curve, timeLim);
% If Local Search returns a strictly better solution,
% get it as optimal solution
if localScost < current_cost
optimal_cost = localScost;
optimal_load = localSloads;
optimal_load_curve = localLoad_curve;
end
end
end
% Calculate the difference
diff = current_cost - previous_cost;
if diff <= 0
previous_load = current_load;
previous_cost = current_cost;
previous_load_curve = current_load_curve;
% If solution was improved, iterations = 0.
if diff < 0
iterations = 0;
end
else
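% Metropolis acceptance: a worse solution is accepted with
% probability exp(-diff/temperature), which shrinks as the
% temperature is lowered.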
if rand(1) < exp(-diff/(temperature))
previous_load = current_load;
previous_cost = current_cost;
previous_load_curve = current_load_curve;
end
end
% Calculate time
time = toc;
% If solution is not improved until "numIt" iterations, stop algorithm
if (iterations > numIt) || (time > timeLim)
stop = 1;
break
end
m = m + 1;
end
% Update annealing counter k and decrease the temperature
k = k + 1;
temperature = alpha*temperature;
end
if (stop ~= 1) && (localAfterSA == true)
[optimal_load, optimal_cost, optimal_load_curve] = LocalSearch(last, delta, optimal_load, ...
pi, pc, ro, R, isPP, optimal_cost, optimal_load_curve, timeLim);
end
end
|
github
|
luciana-marques/orps-master
|
InstanceGeneration.m
|
.m
|
orps-master/InstanceGeneration.m
| 1,439 |
utf_8
|
0d71f8b8798c208034da7601cb1bb7b7
|
%===============================================================
% Load Scheduling Problem - Instance Generation
% Institution: Federal University of Minas Gerais (UFMG)
% Department: Graduate Program in Electrical Engineering
% Author: Luciana Sant'Ana Marques
% Date: Feb 20th, 2018 at 16:33
%===============================================================
function instance = InstanceGeneration(N, last, delta, seed)
% Input:
% N: vector with number of consumers
% last: time horizon
% delta: size of slot in hours
% seed: if -1, no seed;
% Action:
% Generate a set of random instances (.dat and struct)
% Output:
% instance: struct with the data
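% A hedged usage sketch (argument values are illustrative assumptions:
% 96 slots of 0.25 h cover a 24 h horizon):
%   inst = InstanceGeneration([10 50 100], 96, 0.25, 1);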
instance = struct('count',{},'loads',{},'w',{},'pi',{},'pc',{},...
'b',{},'alpha', {}, 'beta', {}, 'gamma', {}, ...
'S',{}, 'totalLoad', {});
for i = 1:size(N,2)
[countLoads, loadsOr, w, pi, pc, b, S, alpha, beta, gamma] = ...
DataGeneration(N(1,i),last, delta, seed);
instance(i).count = countLoads;
instance(i).loads = loadsOr;
instance(i).w = w;
instance(i).pi = pi;
instance(i).pc = pc;
instance(i).b = b;
instance(i).alpha = alpha;
instance(i).beta = beta;
instance(i).gamma = gamma;
instance(i).S = S;
instance(i).totalLoad = zeros(1,last);
end
end
|
github
|
luciana-marques/orps-master
|
UpdateCost.m
|
.m
|
orps-master/UpdateCost.m
| 2,422 |
utf_8
|
fbea553a182a9a10e872c40f7a1b2aec
|
%===============================================================
% Calculate Total Cost of a Solution Considering only the
% Modified load
% Institution: Federal University of Minas Gerais (UFMG)
% Department: Graduate Program in Electrical Engineering
% Author: Luciana Sant'Ana Marques
% Date: Feb 23rd, 2018 at 16:28
%===============================================================
function [totalCost,totalLoad] = UpdateCost(last, delta, loads, ...
previous_Load, pi, pc, ro, R, isPP, ...
previousTL)
% Input:
% last: number of time slots
% delta: size of time slot
% loads: structure of solution with loads information
% w: (1 x last) vector with base load of all consumers
% pi: (1 x last) vector with prices (R$/kWh)
% pc: peak charge
% ro: (1 x last) price multiplier for IBR
% R: (1 x last) load threshold
% isPP: true if peak pricing and false if inclining block rates
% Action:
% Calculates total cost of solution
% Output:
% totalCost: total cost of solution
% totalLoad: updated total load curve
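% The update is incremental: only the profile of the modified appliance
% is subtracted and re-added over its [alpha, beta] window, so the load
% curve is not rebuilt from scratch before the cost is re-evaluated.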
% Previous solution vector
pSol = previous_Load.solution;
% New solution vector
nSol = loads.solution;
% Previous total load
totalLoad = previousTL;
% Auxiliary variables
auxAlpha = loads.alpha;
auxBeta = loads.beta;
auxPower = loads.power;
% Subtract previous load of appliance changed
totalLoad(auxAlpha:auxBeta) = totalLoad(auxAlpha:auxBeta) - ...
pSol*auxPower;
% Add new solution
totalLoad(auxAlpha:auxBeta) = totalLoad(auxAlpha:auxBeta) + ...
nSol*auxPower;
% Calculate cost if peak pricing
if isPP
% Find peak
nPeak = max(totalLoad);
% Total cost (- previous peak + new peak)
totalCost = delta*totalLoad*pi' + nPeak*pc;
% Calculate cost if IBR
else
totalCost = 0;
% For each time slot verify if load exceeds threshold R
for i = 1:last
if totalLoad(i) <= R(i)
totalCost = totalCost + totalLoad(i)*pi(i);
else
totalCost = totalCost + R(i)*pi(i) + ...
(totalLoad(i)-R(i))*pi(i)*ro(i);
end
end
end
end
|
github
|
luciana-marques/orps-master
|
TotalCostF.m
|
.m
|
orps-master/TotalCostF.m
| 2,162 |
utf_8
|
e552444b4471acef52f53422169cc3eb
|
%===============================================================
% Calculate Total Cost of a Solution
% Institution: Federal University of Minas Gerais (UFMG)
% Department: Graduate Program in Electrical Engineering
% Author: Luciana Sant'Ana Marques
% Date: Feb 23rd, 2018 at 16:29
%===============================================================
function [totalCost,totalLoad] = TotalCostF(last, delta, loads, w, pi, pc, ro, R, isPP)
% Input:
% last: number of time slots
% delta: size of time slot
% loads: structure of solution with loads information
% w: (1 x last) vector with base load of all consumers
% pi: (1 x last) vector with prices (R$/kWh)
% pc: peak charge
% ro: (1 x last) price multiplier for IBR
% R: (1 x last) load threshold
% isPP: true if peak pricing and false if inclining block rates
% Action:
% Calculates total cost of solution
% Output:
% totalCost: total cost of solution
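% totalLoad: total load curve (scheduled loads plus base load w)
%
% Under peak pricing the cost computed below is
%   totalCost = delta*totalLoad*pi' + pc*max(totalLoad);
% under IBR, the part of the load above the threshold R(t) is charged
% at the higher rate pi(t)*ro(t).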
% Quantity of loads in structure
nApp = size(loads,2);
% Total load
totalLoad = zeros(1,last);
% Calculate total load
for i = 1:nApp
% Auxiliary variables
auxAlpha = loads(i).alpha;
auxBeta = loads(i).beta;
auxPower = loads(i).power;
totalLoad(auxAlpha:auxBeta) = totalLoad(auxAlpha:auxBeta) + ...
loads(i).solution*auxPower;
end
totalLoad = totalLoad + sum(w);
% Calculate cost if peak pricing
if isPP
% Find peak
peak = max(totalLoad);
% Total cost
totalCost = delta*totalLoad*pi' + peak*pc;
% Calculate cost if IBR
else
totalCost = 0;
% For each time slot verify if load exceeds threshold R
for i = 1:last
if totalLoad(i) <= R(i)
totalCost = totalCost + totalLoad(i)*pi(i);
else
totalCost = totalCost + R(i)*pi(i) + ...
(totalLoad(i)-R(i))*pi(i)*ro(i);
end
end
end
end
|
github
|
luciana-marques/orps-master
|
Neighborhood.m
|
.m
|
orps-master/Neighborhood.m
| 3,657 |
utf_8
|
9f1cb2f303c98193530522ba645ddfdc
|
%===============================================================
% Demand Response Problem
% Title: Neighborhood Structure Generation
% Institution: Federal University of Minas Gerais (UFMG)
% Department: Graduate Program in Electrical Engineering
% Author: Luciana Sant'Ana Marques Arnoux and Isabella
% Date: Jun 19th, 2017 at 15:19
%===============================================================
function [loadsNew, appIndex] = Neighborhood(loads, nLoads)
% Input:
% loads: (1 x nLoads) structure with a solution of the loads
% nLoads: (1 x 1)
% Action:
% 1- To interruptible:
% Generate another feasible solution by choosing a consumer n and an
% interruptible appliance a at random and changing the status of the
% on/off variables at two time slots
% 2- To uninterruptible:
% Generate another feasible solution by choosing a consumer n and an
% uninterruptible appliance a at random and shifting its whole
% contiguous on-block by one time slot (to the left or to the right)
% Output:
% loadsNew: (1 x countLoads) structure with a new solution
% appIndex: (1 x 1) index of the modified load
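% A hedged usage sketch (mirrors the call made in SimulatedAnnealing):
%   [newLoads, idx] = Neighborhood(loads, size(loads,2));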
%Begin
% New loads structure gets old loads structure
loadsNew = loads;
% Choose a load
a = randi(nLoads,1,1);
appIndex = a;
% Take the solution vector
sol = loads(a).solution;
% Check which type of appliance
% If interruptible load
if loads(a).isUn == 0
% Execute Nfirst
% Sort the solution vector
[B,I] = sort(sol);
% Count how many positions are 0
C = logical(B);
nZeros = size(C(not([C])),2);
% Count how many positions are 1
nOnes = loads(a).beta - loads(a).alpha + 1 - nZeros;
% If appliance has more possible time slots than its duration
if (nZeros ~= 0) && (nOnes ~= 0)
% Generate a random position for zeros
posZeros = randi(nZeros,1,1);
% Generate a random position for ones
posOnes = randi(nOnes,1,1);
% Change variable status for posZeros and posOne at solution vector
sol(I(posZeros)) = 1;
sol(I(nZeros+posOnes)) = 0;
end
% If uninterruptible
else
% Execute Nsecond
% Take the load duration
duration = loads(a).duration;
% Option chosen: change only ONE position to shift the solution
% Decision of direction
if sol(1) == 1
% Direction: right
sol(duration+1) = 1;
sol(1) = 0;
else
if sol(size(sol,2)) == 1
% Direction: left
sol(size(sol,2)-duration) = 1;
sol(size(sol,2)) = 0;
else
aux = rand();
if aux <= 0.5
% Direction right
i = 1;
while sol(i) ~= 1
i = i + 1;
end
sol(i + duration)= 1;
sol(i) = 0;
else
% Direction left
i = size(sol,2);
while sol(i) ~= 1
i = i - 1;
end
sol(i - duration)= 1;
sol(i) = 0;
end
end
end
end
% New loads solution for appliance a gets new solution sol
loadsNew(a).solution = sol;
end
|
github
|
luciana-marques/orps-master
|
RankingHeuristic.m
|
.m
|
orps-master/RankingHeuristic.m
| 3,836 |
utf_8
|
a4a0503f13019128111b426b16b2801b
|
%===============================================================
% Ranking Heuristic to Load Scheduling Problem
% Construction of an initial solution
% Based on: Wu et al. (2012) - with many modifications
% Institution: Federal University of Minas Gerais (UFMG)
% Department: Graduate Program in Electrical Engineering
% Course: Network Optimization
% Author: Luciana Sant'Ana Marques Arnoux and Isabella
% Date: Jun 12th, 2017 at 18:43
%===============================================================
function [loads] = RankingHeuristic(N, last, delta, loads, w, pi, aN)
% Input:
% N: number of consumers
% last: time horizon
% delta: size of slot in hours
% loads: structure with loads information
% w: (1 x last) vector with base load of all consumers
% pi: (1 x last) vector with prices (R$/kWh)
% aN: (1 x N) vector with how many loads each consumer has
% Action:
% Construct a random solution to scheduling loads problem trying to
% minimize peak-to-average ratio (PAR)
% Output:
% loads: structure with loads information
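% A hedged usage sketch (aN is the per-consumer load count, e.g. the
% countLoads vector produced by DataGeneration):
%   loads = RankingHeuristic(N, last, delta, loads, w, pi, countLoads);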
% Ranking process
costShift = 0;
loadCurve = zeros(N,last);
auxPi = pi;
maxPi = max(pi) + 1;
% Randomize loads
tLoads = sum(aN);
a = 1:tLoads;
lRand = a(randperm(length(a)));
% For each load
for i = 1:tLoads
% Consumer of this load
n = loads(lRand(i)).n;
% Auxiliary variables
auxAlpha = loads(lRand(i)).alpha;
auxBeta = loads(lRand(i)).beta;
auxDurat = loads(lRand(i)).duration;
auxPower = loads(lRand(i)).power;
% Get vector pi between starting and ending time
pInterval = auxPi(1,auxAlpha:auxBeta);
pIntervalaux = pi(1,auxAlpha:auxBeta);
% Sort pi
[B,I] = sort(pInterval);
% If interruptible load
if loads(lRand(i)).isUn == 0
% Fill solution vector of load with 1 up to its duration,
% following the sorted (cheapest-first) order
for j = 1:auxDurat
loads(lRand(i)).solution(I(j)) = 1;
end
% If uninterruptible
else
% Fill solution vector of load with 1 up to its duration,
% sequentially, starting at the first least expensive
% period that respects the latest starting time
% (beta - duration + 1)
k = 1;
% While the cheaper periods are greater than
% the last possible starting time for
% uninterruptible load
while I(k) > (auxBeta - auxAlpha - auxDurat + 2)
k = k + 1;
end
for j = I(k):(I(k)+auxDurat-1)
loads(lRand(i)).solution(j) = 1;
end
end
% Calculate cost (if the pi vector is changed over time, keep an
% auxiliary vector that stores the original prices)
costShift = costShift + pIntervalaux*...
(loads(lRand(i)).solution*auxPower*delta)';
% Add load to total load curve of consumer n
loadCurve(n,auxAlpha:auxBeta) = loadCurve(n,auxAlpha:auxBeta) +...
loads(lRand(i)).solution*auxPower;
% Calculate total load curve of consumers
loadCurveT = sum(loadCurve) + w;
% Minimization of PAR
% Find peak from load curve
[B I] = sort(loadCurveT,'descend');
% Get maxLoad from total load curve
maxLoad = loadCurveT(I(1));
% Reload pi in auxPi
auxPi = pi;
% For the positions where load == maxLoad
contPP = 1;
while loadCurveT(I(contPP)) == maxLoad
auxPi(1,I(contPP)) = maxPi;
contPP = contPP + 1;
end
end
end
|
github
|
remega/LEDOV-eye-tracking-database-master
|
pm_norm.m
|
.m
|
LEDOV-eye-tracking-database-master/metrics/pm_norm.m
| 114 |
utf_8
|
c065a431c1f58d5599e0824c785841dd
|
function map_new=pm_norm(map)
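% Min-max normalization: shift the map to be non-negative and rescale
% it so that its maximum value becomes 1.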
tempmin=min(map(:));
temp2=map-tempmin;
map_new=temp2./max(temp2(:));
end
|
github
|
remega/LEDOV-eye-tracking-database-master
|
NSS.m
|
.m
|
LEDOV-eye-tracking-database-master/metrics/NSS.m
| 530 |
utf_8
|
76acfa14c05ad2755d4c797e413301d9
|
% created: Zoya Bylinskii, Aug 2014
% This finds the normalized scanpath saliency between two different
% saliency maps as the mean value of the normalized saliency map at
% fixation locations.
function score = NSS(saliencyMap, fixationMap)
% saliencyMap is the saliency map
% fixationMap is the human fixation map (binary matrix)
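% The map is z-scored, map = (S - mean(S))/std(S), and the score is the
% mean of this normalized map over the fixated pixels.
% A hedged usage sketch: score = NSS(salMap, fixMap);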
map = imresize(saliencyMap,size(fixationMap));
% normalize saliency map
map = (map - mean(map(:)))/std(map(:));
% mean value at fixation locations
score = mean(map(logical(fixationMap)));
|
github
|
remega/LEDOV-eye-tracking-database-master
|
TestLEDOV.m
|
.m
|
LEDOV-eye-tracking-database-master/metrics/TestLEDOV.m
| 3,232 |
utf_8
|
670739a69a2d2a617081f2f199427c46
|
function res = TestLEDOV(saldir)
load('./LEDOV/VideoNameList.mat','VideoNameList')
load('./LEDOV/namelist.mat','testlist','trainlist','vaildlist')
videoDir='./LEDOV';
countvid=0;
for m=testlist'
tic
InputVideoName_short=VideoNameList{m};
InputVideoName=[InputVideoName_short '.mp4'];
load([videoDir '/' InputVideoName_short '/Data.mat'],'Data')
countvid=countvid+1;
num_frames=Data.VideoFrames;
frames_rate=Data.VideoFrameRate;
video_size=Data.VideoSize;
width=video_size(1);
height=video_size(2);
video_fixation=Data.fixdata;
distMatrix = getdistMatrix( height, width );
fixationPerFrame=cell(1,num_frames);
frame_durationMs=1000/frames_rate;
centermask=round(height/20);
beginflag=1;
for k=1:size(video_fixation,1)
k;
if k==1
beginflag=1;
elseif video_fixation(k,2)<video_fixation(k-1,2)
beginflag=1;
end
vx=video_fixation(k,4);
vy=video_fixation(k,5);
if beginflag
if (vx>(width/2+centermask)||vx<(width/2-centermask))&&(vy>(height/2+centermask)||vy<(height/2-centermask))&&(vx>0&&vx<width)&&(vy>0&&vy<height)
fixPosition=[video_fixation(k,4);video_fixation(k,5)];
beginflag=0;
else
continue
end
end
if(vx>0&&vx<width)&&(vy>0&&vy<height)
fixPosition=[video_fixation(k,4);video_fixation(k,5)];
else
continue
end
startFrame=ceil(video_fixation(k,2)/frame_durationMs);
endFrame=ceil((video_fixation(k,2)+video_fixation(k,3))/frame_durationMs);
if(startFrame==0)
startFrame=1;
end
if(endFrame>num_frames)
endFrame=num_frames;
end
for i=startFrame:endFrame
fixationPerFrame{i}=[fixationPerFrame{i} fixPosition];
end
end
Auc=[];
nss=[];
coc=[];
KL=[];
countvaild=0;
try
obj=VideoReader([saldir '/' InputVideoName_short '.avi']);
catch
obj=VideoReader([saldir '/' InputVideoName_short '_result.avi']);
end
vidFrames=read(obj);
color_map=colormap(jet(256));
for k=1:size(vidFrames,4)
tempframe=double(vidFrames(:,:,1,k));
if isempty(fixationPerFrame{k})
continue
else
countvaild=countvaild+1;
end
x=fixationPerFrame{k}(1,:);
y=fixationPerFrame{k}(2,:);
saliencymap = imresize(tempframe,[height, width]);
saliencymap = imfilter(saliencymap, fspecial('gaussian', round(height/7.5), round(height/30)));
saliencymap=saliencymap.*distMatrix;
saliencymap=pm_norm(saliencymap);
saliencymap(isnan(saliencymap))=0;
fixationmap=zeros(height, width);
for j=1:length(x)
if y(j)>0&&x(j)>0
fixationmap(y(j),x(j))=1;
end
end
fix_gaussian= make_gauss_masks4(x,y,[height width],100);
Auc(countvaild,1)=computeROC(uint8(round(saliencymap*255)),x,y,0);
nss(countvaild,1)=NSS(saliencymap,fixationmap);
coc(countvaild,1)=cce(saliencymap,fix_gaussian);
KL(countvaild,1) = kl_divergence(saliencymap, fix_gaussian);
end
vidFrames=[];
meanauc(countvid,:)=mean(Auc);
meannss(countvid,:)=mean(nss);
meancoc(countvid,:)=mean(coc);
meanKL(countvid,:)=mean(KL);
stdauc(countvid,:)=std(Auc);
stdnss(countvid,:)=std(nss);
stdcoc(countvid,:)=std(coc);
stdKL(countvid,:)=std(KL);
m
toc
end
res=[meanauc meannss meancoc meanKL stdauc stdnss stdcoc stdKL];
end
|
github
|
jun-zhang/WebRTC-VideoEngine-Demo-master
|
apmtest.m
|
.m
|
WebRTC-VideoEngine-Demo-master/webrtc_videoengine_demo/webrtc/modules/audio_processing/test/apmtest.m
| 9,470 |
utf_8
|
ad72111888b4bb4b7c4605d0bf79d572
|
function apmtest(task, testname, filepath, casenumber, legacy)
%APMTEST is a tool to process APM file sets and easily display the output.
% APMTEST(TASK, TESTNAME, CASENUMBER) performs one of several TASKs:
% 'test' Processes the files to produce test output.
% 'list' Prints a list of cases in the test set, preceded by their
% CASENUMBERs.
% 'show' Uses spclab to show the test case specified by the
% CASENUMBER parameter.
%
% using a set of test files determined by TESTNAME:
% 'all' All tests.
% 'apm' The standard APM test set (default).
% 'apmm' The mobile APM test set.
% 'aec' The AEC test set.
% 'aecm' The AECM test set.
% 'agc' The AGC test set.
% 'ns' The NS test set.
% 'vad' The VAD test set.
%
% FILEPATH specifies the path to the test data files.
%
% CASENUMBER can be used to select a single test case. Omit CASENUMBER,
% or set to zero, to use all test cases.
%
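% Examples (hedged; they assume the test files live under 'data/'):
% apmtest('list', 'apm', 'data/');
% apmtest('show', 'aec', 'data/', 3);
%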
if nargin < 5 || isempty(legacy)
% Set to true to run old VQE recordings.
legacy = false;
end
if nargin < 4 || isempty(casenumber)
casenumber = 0;
end
if nargin < 3 || isempty(filepath)
filepath = 'data/';
end
if nargin < 2 || isempty(testname)
testname = 'all';
end
if nargin < 1 || isempty(task)
task = 'test';
end
if ~strcmp(task, 'test') && ~strcmp(task, 'list') && ~strcmp(task, 'show')
error(['TASK ' task ' is not recognized']);
end
if casenumber == 0 && strcmp(task, 'show')
error(['CASENUMBER must be specified for TASK ' task]);
end
inpath = [filepath 'input/'];
outpath = [filepath 'output/'];
refpath = [filepath 'reference/'];
if strcmp(testname, 'all')
tests = {'apm','apmm','aec','aecm','agc','ns','vad'};
else
tests = {testname};
end
if legacy
progname = './test';
else
progname = './process_test';
end
global farFile;
global nearFile;
global eventFile;
global delayFile;
global driftFile;
if legacy
farFile = 'vqeFar.pcm';
nearFile = 'vqeNear.pcm';
eventFile = 'vqeEvent.dat';
delayFile = 'vqeBuf.dat';
driftFile = 'vqeDrift.dat';
else
farFile = 'apm_far.pcm';
nearFile = 'apm_near.pcm';
eventFile = 'apm_event.dat';
delayFile = 'apm_delay.dat';
driftFile = 'apm_drift.dat';
end
simulateMode = false;
nErr = 0;
nCases = 0;
for i=1:length(tests)
simulateMode = false;
if strcmp(tests{i}, 'apm')
testdir = ['apm/'];
outfile = ['out'];
if legacy
opt = ['-ec 1 -agc 2 -nc 2 -vad 3'];
else
opt = ['--no_progress -hpf' ...
' -aec --drift_compensation -agc --fixed_digital' ...
' -ns --ns_moderate -vad'];
end
elseif strcmp(tests{i}, 'apm-swb')
simulateMode = true;
testdir = ['apm-swb/'];
outfile = ['out'];
if legacy
opt = ['-fs 32000 -ec 1 -agc 2 -nc 2'];
else
opt = ['--no_progress -fs 32000 -hpf' ...
' -aec --drift_compensation -agc --adaptive_digital' ...
' -ns --ns_moderate -vad'];
end
elseif strcmp(tests{i}, 'apmm')
testdir = ['apmm/'];
outfile = ['out'];
opt = ['-aec --drift_compensation -agc --fixed_digital -hpf -ns ' ...
'--ns_moderate'];
else
error(['TESTNAME ' tests{i} ' is not recognized']);
end
inpathtest = [inpath testdir];
outpathtest = [outpath testdir];
refpathtest = [refpath testdir];
if ~exist(inpathtest,'dir')
error(['Input directory ' inpathtest ' does not exist']);
end
if ~exist(refpathtest,'dir')
warning(['Reference directory ' refpathtest ' does not exist']);
end
[status, errMsg] = mkdir(outpathtest);
if (status == 0)
error(errMsg);
end
[nErr, nCases] = recurseDir(inpathtest, outpathtest, refpathtest, outfile, ...
progname, opt, simulateMode, nErr, nCases, task, casenumber, legacy);
if strcmp(task, 'test') || strcmp(task, 'show')
system(['rm ' farFile]);
system(['rm ' nearFile]);
if simulateMode == false
system(['rm ' eventFile]);
system(['rm ' delayFile]);
system(['rm ' driftFile]);
end
end
end
if ~strcmp(task, 'list')
if nErr == 0
fprintf(1, '\nAll files are bit-exact to reference\n');
else
fprintf(1, '\n%d files are NOT bit-exact to reference\n', nErr);
end
end
function [nErrOut, nCases] = recurseDir(inpath, outpath, refpath, ...
outfile, progname, opt, simulateMode, nErr, nCases, task, casenumber, ...
legacy)
global farFile;
global nearFile;
global eventFile;
global delayFile;
global driftFile;
dirs = dir(inpath);
nDirs = 0;
nErrOut = nErr;
for i=3:length(dirs) % skip . and ..
nDirs = nDirs + dirs(i).isdir;
end
if nDirs == 0
nCases = nCases + 1;
if casenumber == nCases || casenumber == 0
if strcmp(task, 'list')
fprintf([num2str(nCases) '. ' outfile '\n'])
else
vadoutfile = ['vad_' outfile '.dat'];
outfile = [outfile '.pcm'];
% Check for VAD test
vadTest = 0;
if ~isempty(findstr(opt, '-vad'))
vadTest = 1;
if legacy
opt = [opt ' ' outpath vadoutfile];
else
opt = [opt ' --vad_out_file ' outpath vadoutfile];
end
end
if exist([inpath 'vqeFar.pcm'])
system(['ln -s -f ' inpath 'vqeFar.pcm ' farFile]);
elseif exist([inpath 'apm_far.pcm'])
system(['ln -s -f ' inpath 'apm_far.pcm ' farFile]);
end
if exist([inpath 'vqeNear.pcm'])
system(['ln -s -f ' inpath 'vqeNear.pcm ' nearFile]);
elseif exist([inpath 'apm_near.pcm'])
system(['ln -s -f ' inpath 'apm_near.pcm ' nearFile]);
end
if exist([inpath 'vqeEvent.dat'])
system(['ln -s -f ' inpath 'vqeEvent.dat ' eventFile]);
elseif exist([inpath 'apm_event.dat'])
system(['ln -s -f ' inpath 'apm_event.dat ' eventFile]);
end
if exist([inpath 'vqeBuf.dat'])
system(['ln -s -f ' inpath 'vqeBuf.dat ' delayFile]);
elseif exist([inpath 'apm_delay.dat'])
system(['ln -s -f ' inpath 'apm_delay.dat ' delayFile]);
end
if exist([inpath 'vqeSkew.dat'])
system(['ln -s -f ' inpath 'vqeSkew.dat ' driftFile]);
elseif exist([inpath 'vqeDrift.dat'])
system(['ln -s -f ' inpath 'vqeDrift.dat ' driftFile]);
elseif exist([inpath 'apm_drift.dat'])
system(['ln -s -f ' inpath 'apm_drift.dat ' driftFile]);
end
if simulateMode == false
command = [progname ' -o ' outpath outfile ' ' opt];
else
if legacy
inputCmd = [' -in ' nearFile];
else
inputCmd = [' -i ' nearFile];
end
if exist([farFile])
if legacy
inputCmd = [' -if ' farFile inputCmd];
else
inputCmd = [' -ir ' farFile inputCmd];
end
end
command = [progname inputCmd ' -o ' outpath outfile ' ' opt];
end
% This prevents MATLAB from using its own C libraries.
shellcmd = ['bash -c "unset LD_LIBRARY_PATH;'];
fprintf([command '\n']);
[status, result] = system([shellcmd command '"']);
fprintf(result);
fprintf(['Reference file: ' refpath outfile '\n']);
if vadTest == 1
equal_to_ref = are_files_equal([outpath vadoutfile], ...
[refpath vadoutfile], ...
'int8');
if ~equal_to_ref
nErr = nErr + 1;
end
end
[equal_to_ref, diffvector] = are_files_equal([outpath outfile], ...
[refpath outfile], ...
'int16');
if ~equal_to_ref
nErr = nErr + 1;
end
if strcmp(task, 'show')
% Assume the last init gives the sample rate of interest.
str_idx = strfind(result, 'Sample rate:');
fs = str2num(result(str_idx(end) + 13:str_idx(end) + 17));
fprintf('Using %d Hz\n', fs);
if exist([farFile])
spclab(fs, farFile, nearFile, [refpath outfile], ...
[outpath outfile], diffvector);
%spclab(fs, diffvector);
else
spclab(fs, nearFile, [refpath outfile], [outpath outfile], ...
diffvector);
%spclab(fs, diffvector);
end
end
end
end
else
for i=3:length(dirs)
if dirs(i).isdir
[nErr, nCases] = recurseDir([inpath dirs(i).name '/'], outpath, ...
refpath,[outfile '_' dirs(i).name], progname, opt, ...
simulateMode, nErr, nCases, task, casenumber, legacy);
end
end
end
nErrOut = nErr;
function [are_equal, diffvector] = ...
are_files_equal(newfile, reffile, precision, diffvector)
are_equal = false;
diffvector = 0;
if ~exist(newfile,'file')
warning(['Output file ' newfile ' does not exist']);
return
end
if ~exist(reffile,'file')
warning(['Reference file ' reffile ' does not exist']);
return
end
fid = fopen(newfile,'rb');
new = fread(fid,inf,precision);
fclose(fid);
fid = fopen(reffile,'rb');
ref = fread(fid,inf,precision);
fclose(fid);
if length(new) ~= length(ref)
warning('Reference is not the same length as output');
minlength = min(length(new), length(ref));
new = new(1:minlength);
ref = ref(1:minlength);
end
diffvector = new - ref;
if isequal(new, ref)
fprintf([newfile ' is bit-exact to reference\n']);
are_equal = true;
else
if isempty(new)
warning([newfile ' is empty']);
return
end
snr = snrseg(new,ref,80);
fprintf('\n');
are_equal = false;
end
|
github
|
jun-zhang/WebRTC-VideoEngine-Demo-master
|
exportfig.m
|
.m
|
WebRTC-VideoEngine-Demo-master/webrtc_videoengine_demo/webrtc/modules/video_coding/codecs/test_framework/exportfig.m
| 14,995 |
utf_8
|
d7427be6e56c37d4aec2f2c91c9a6341
|
function exportfig(varargin)
%EXPORTFIG Export a figure to Encapsulated Postscript.
% EXPORTFIG(H, FILENAME) writes the figure H to FILENAME. H is
% a figure handle and FILENAME is a string that specifies the
% name of the output file.
%
% EXPORTFIG(...,PARAM1,VAL1,PARAM2,VAL2,...) specifies
% parameters that control various characteristics of the output
% file.
%
% Format Parameter:
% 'Format' one of the strings 'eps','eps2','jpeg','png','preview'
% specifies the output format. Defaults to 'eps'.
% The output format 'preview' does not generate an output
% file but instead creates a new figure window with a
% preview of the exported figure. In this case the
% FILENAME parameter is ignored.
%
% 'Preview' one of the strings 'none', 'tiff'
% specifies a preview for EPS files. Defaults to 'none'.
%
% Size Parameters:
% 'Width' a positive scalar
% specifies the width in the figure's PaperUnits
% 'Height' a positive scalar
% specifies the height in the figure's PaperUnits
%
% Specifying only one dimension sets the other dimension
% so that the exported aspect ratio is the same as the
% figure's current aspect ratio.
% If neither dimension is specified the size defaults to
% the width and height from the figure's PaperPosition.
%
% Rendering Parameters:
% 'Color' one of the strings 'bw', 'gray', 'cmyk'
% 'bw' specifies that lines and text are exported in
% black and all other objects in grayscale
% 'gray' specifies that all objects are exported in grayscale
% 'cmyk' specifies that all objects are exported in color
% using the CMYK color space
% 'Renderer' one of the strings 'painters', 'zbuffer', 'opengl'
% specifies the renderer to use
% 'Resolution' a positive scalar
% specifies the resolution in dots-per-inch.
%
% The default color setting is 'bw'.
%
% Font Parameters:
% 'FontMode' one of the strings 'scaled', 'fixed'
% 'FontSize' a positive scalar
% in 'scaled' mode multiplies with the font size of each
% text object to obtain the exported font size
% in 'fixed' mode specifies the font size of all text
% objects in points
% 'FontEncoding' one of the strings 'latin1', 'adobe'
% specifies the character encoding of the font
%
% If FontMode is 'scaled' but FontSize is not specified then a
% scaling factor is computed from the ratio of the size of the
% exported figure to the size of the actual figure. The minimum
% font size allowed after scaling is 5 points.
% If FontMode is 'fixed' but FontSize is not specified then the
% exported font sizes of all text objects is 7 points.
%
% The default 'FontMode' setting is 'scaled'.
%
% Line Width Parameters:
% 'LineMode' one of the strings 'scaled', 'fixed'
% 'LineWidth' a positive scalar
% the semantics of LineMode and LineWidth are exactly the
% same as FontMode and FontSize, except that they apply
% to line widths instead of font sizes. The minimum line
% width allowed after scaling is 0.5 points.
% If LineMode is 'fixed' but LineWidth is not specified
% then the exported line width of all line objects is 1
% point.
%
% Examples:
% exportfig(gcf,'fig1.eps','height',3);
% Exports the current figure to the file named 'fig1.eps' with
% a height of 3 inches (assuming the figure's PaperUnits is
% inches) and an aspect ratio the same as the figure's aspect
% ratio on screen.
%
% exportfig(gcf, 'fig2.eps', 'FontMode', 'fixed',...
% 'FontSize', 10, 'color', 'cmyk' );
% Exports the current figure to 'fig2.eps' in color with all
% text in 10 point fonts. The size of the exported figure is
% the figure's PaperPosition width and height.
if (nargin < 2)
error('Too few input arguments');
end
% exportfig(H, filename, ...)
H = varargin{1};
if ~ishandle(H) | ~strcmp(get(H,'type'), 'figure')
error('First argument must be a handle to a figure.');
end
filename = varargin{2};
if ~ischar(filename)
error('Second argument must be a string.');
end
paramPairs = varargin(3:end);
% Do some validity checking on param-value pairs
if (rem(length(paramPairs),2) ~= 0)
error(['Invalid input syntax. Optional parameters and values' ...
' must be in pairs.']);
end
format = 'eps';
preview = 'none';
width = -1;
height = -1;
color = 'bw';
fontsize = -1;
fontmode='scaled';
linewidth = -1;
linemode=[];
fontencoding = 'latin1';
renderer = [];
resolution = [];
% Process param-value pairs
args = {};
for k = 1:2:length(paramPairs)
param = lower(paramPairs{k});
if (~ischar(param))
error('Optional parameter names must be strings');
end
value = paramPairs{k+1};
switch (param)
case 'format'
format = value;
if (~strcmp(format,{'eps','eps2','jpeg','png','preview'}))
error(['Format must be ''eps'', ''eps2'', ''jpeg'', ''png'' or' ...
' ''preview''.']);
end
case 'preview'
preview = value;
if (~strcmp(preview,{'none','tiff'}))
error('Preview must be ''none'' or ''tiff''.');
end
case 'width'
width = LocalToNum(value);
if(~LocalIsPositiveScalar(width))
error('Width must be a numeric scalar > 0');
end
case 'height'
height = LocalToNum(value);
if(~LocalIsPositiveScalar(height))
error('Height must be a numeric scalar > 0');
end
case 'color'
color = lower(value);
if (~strcmp(color,{'bw','gray','cmyk'}))
error('Color must be ''bw'', ''gray'' or ''cmyk''.');
end
case 'fontmode'
fontmode = lower(value);
if (~strcmp(fontmode,{'scaled','fixed'}))
error('FontMode must be ''scaled'' or ''fixed''.');
end
case 'fontsize'
fontsize = LocalToNum(value);
if(~LocalIsPositiveScalar(fontsize))
error('FontSize must be a numeric scalar > 0');
end
case 'fontencoding'
fontencoding = lower(value);
if (~strcmp(fontencoding,{'latin1','adobe'}))
error('FontEncoding must be ''latin1'' or ''adobe''.');
end
case 'linemode'
linemode = lower(value);
if (~strcmp(linemode,{'scaled','fixed'}))
error('LineMode must be ''scaled'' or ''fixed''.');
end
case 'linewidth'
linewidth = LocalToNum(value);
if(~LocalIsPositiveScalar(linewidth))
error('LineWidth must be a numeric scalar > 0');
end
case 'renderer'
renderer = lower(value);
if (~strcmp(renderer,{'painters','zbuffer','opengl'}))
error('Renderer must be ''painters'', ''zbuffer'' or ''opengl''.');
end
case 'resolution'
resolution = LocalToNum(value);
if ~(isnumeric(value) & (prod(size(value)) == 1) & (value >= 0));
error('Resolution must be a numeric scalar >= 0');
end
otherwise
error(['Unrecognized option ' param '.']);
end
end
allLines = findall(H, 'type', 'line');
allText = findall(H, 'type', 'text');
allAxes = findall(H, 'type', 'axes');
allImages = findall(H, 'type', 'image');
allLights = findall(H, 'type', 'light');
allPatch = findall(H, 'type', 'patch');
allSurf = findall(H, 'type', 'surface');
allRect = findall(H, 'type', 'rectangle');
allFont = [allText; allAxes];
allColor = [allLines; allText; allAxes; allLights];
allMarker = [allLines; allPatch; allSurf];
allEdge = [allPatch; allSurf];
allCData = [allImages; allPatch; allSurf];
old.objs = {};
old.prop = {};
old.values = {};
% Process format and preview parameter
showPreview = strcmp(format,'preview');
if showPreview
format = 'png';
filename = [tempName '.png'];
end
if strncmp(format,'eps',3) & ~strcmp(preview,'none')
args = {args{:}, ['-' preview]};
end
hadError = 0;
try
% Process size parameters
paperPos = get(H, 'PaperPosition');
old = LocalPushOldData(old, H, 'PaperPosition', paperPos);
figureUnits = get(H, 'Units');
set(H, 'Units', get(H,'PaperUnits'));
figurePos = get(H, 'Position');
aspectRatio = figurePos(3)/figurePos(4);
set(H, 'Units', figureUnits);
if (width == -1) & (height == -1)
width = paperPos(3);
height = paperPos(4);
elseif (width == -1)
width = height * aspectRatio;
elseif (height == -1)
height = width / aspectRatio;
end
set(H, 'PaperPosition', [0 0 width height]);
paperPosMode = get(H, 'PaperPositionMode');
old = LocalPushOldData(old, H, 'PaperPositionMode', paperPosMode);
set(H, 'PaperPositionMode', 'manual');
% Process rendering parameters
switch (color)
case {'bw', 'gray'}
if ~strcmp(color,'bw') & strncmp(format,'eps',3)
format = [format 'c'];
end
args = {args{:}, ['-d' format]};
%compute and set gray colormap
oldcmap = get(H,'Colormap');
newgrays = 0.30*oldcmap(:,1) + 0.59*oldcmap(:,2) + 0.11*oldcmap(:,3);
newcmap = [newgrays newgrays newgrays];
old = LocalPushOldData(old, H, 'Colormap', oldcmap);
set(H, 'Colormap', newcmap);
%compute and set ColorSpec and CData properties
old = LocalUpdateColors(allColor, 'color', old);
old = LocalUpdateColors(allAxes, 'xcolor', old);
old = LocalUpdateColors(allAxes, 'ycolor', old);
old = LocalUpdateColors(allAxes, 'zcolor', old);
old = LocalUpdateColors(allMarker, 'MarkerEdgeColor', old);
old = LocalUpdateColors(allMarker, 'MarkerFaceColor', old);
old = LocalUpdateColors(allEdge, 'EdgeColor', old);
old = LocalUpdateColors(allEdge, 'FaceColor', old);
old = LocalUpdateColors(allCData, 'CData', old);
case 'cmyk'
if strncmp(format,'eps',3)
format = [format 'c'];
args = {args{:}, ['-d' format], '-cmyk'};
else
args = {args{:}, ['-d' format]};
end
otherwise
error('Invalid Color parameter');
end
if (~isempty(renderer))
args = {args{:}, ['-' renderer]};
end
if (~isempty(resolution)) | ~strncmp(format,'eps',3)
if isempty(resolution)
resolution = 0;
end
args = {args{:}, ['-r' int2str(resolution)]};
end
% Process font parameters
if (~isempty(fontmode))
oldfonts = LocalGetAsCell(allFont,'FontSize');
switch (fontmode)
case 'fixed'
oldfontunits = LocalGetAsCell(allFont,'FontUnits');
old = LocalPushOldData(old, allFont, {'FontUnits'}, oldfontunits);
set(allFont,'FontUnits','points');
if (fontsize == -1)
set(allFont,'FontSize',7);
else
set(allFont,'FontSize',fontsize);
end
case 'scaled'
if (fontsize == -1)
wscale = width/figurePos(3);
hscale = height/figurePos(4);
scale = min(wscale, hscale);
else
scale = fontsize;
end
newfonts = LocalScale(oldfonts,scale,5);
set(allFont,{'FontSize'},newfonts);
otherwise
error('Invalid FontMode parameter');
end
% make sure we push the size after the units
old = LocalPushOldData(old, allFont, {'FontSize'}, oldfonts);
end
if strcmp(fontencoding,'adobe') & strncmp(format,'eps',3)
args = {args{:}, '-adobecset'};
end
% Process linewidth parameters
if (~isempty(linemode))
oldlines = LocalGetAsCell(allMarker,'LineWidth');
old = LocalPushOldData(old, allMarker, {'LineWidth'}, oldlines);
switch (linemode)
case 'fixed'
if (linewidth == -1)
set(allMarker,'LineWidth',1);
else
set(allMarker,'LineWidth',linewidth);
end
case 'scaled'
if (linewidth == -1)
wscale = width/figurePos(3);
hscale = height/figurePos(4);
scale = min(wscale, hscale);
else
scale = linewidth;
end
newlines = LocalScale(oldlines, scale, 0.5);
set(allMarker,{'LineWidth'},newlines);
otherwise
error('Invalid LineMode parameter');
end
end
% Export
print(H, filename, args{:});
catch
hadError = 1;
end
% Restore figure settings
for n=1:length(old.objs)
set(old.objs{n}, old.prop{n}, old.values{n});
end
if hadError
error(deblank(lasterr));
end
% Show preview if requested
if showPreview
X = imread(filename,'png');
delete(filename);
f = figure( 'Name', 'Preview', ...
'Menubar', 'none', ...
'NumberTitle', 'off', ...
'Visible', 'off');
image(X);
axis image;
ax = findobj(f, 'type', 'axes');
set(ax, 'Units', get(H,'PaperUnits'), ...
'Position', [0 0 width height], ...
'Visible', 'off');
set(ax, 'Units', 'pixels');
axesPos = get(ax,'Position');
figPos = get(f,'Position');
rootSize = get(0,'ScreenSize');
figPos(3:4) = axesPos(3:4);
if figPos(1) + figPos(3) > rootSize(3)
figPos(1) = rootSize(3) - figPos(3) - 50;
end
if figPos(2) + figPos(4) > rootSize(4)
figPos(2) = rootSize(4) - figPos(4) - 50;
end
set(f, 'Position',figPos, ...
'Visible', 'on');
end
%
% Local Functions
%
function outData = LocalPushOldData(inData, objs, prop, values)
outData.objs = {inData.objs{:}, objs};
outData.prop = {inData.prop{:}, prop};
outData.values = {inData.values{:}, values};
function cellArray = LocalGetAsCell(fig,prop);
cellArray = get(fig,prop);
if (~isempty(cellArray)) & (~iscell(cellArray))
cellArray = {cellArray};
end
function newArray = LocalScale(inArray, scale, minValue)
n = length(inArray);
newArray = cell(n,1);
for k=1:n
newArray{k} = max(minValue,scale*inArray{k}(1));
end
function newArray = LocalMapToGray(inArray);
n = length(inArray);
newArray = cell(n,1);
for k=1:n
color = inArray{k};
if (~isempty(color))
if ischar(color)
switch color(1)
case 'y'
color = [1 1 0];
case 'm'
color = [1 0 1];
case 'c'
color = [0 1 1];
case 'r'
color = [1 0 0];
case 'g'
color = [0 1 0];
case 'b'
color = [0 0 1];
case 'w'
color = [1 1 1];
case 'k'
color = [0 0 0];
otherwise
newArray{k} = color;
end
end
if ~ischar(color)
color = 0.30*color(1) + 0.59*color(2) + 0.11*color(3);
end
end
if isempty(color) | ischar(color)
newArray{k} = color;
else
newArray{k} = [color color color];
end
end
function newArray = LocalMapCData(inArray);
n = length(inArray);
newArray = cell(n,1);
for k=1:n
color = inArray{k};
if (ndims(color) == 3) & isa(color,'double')
gray = 0.30*color(:,:,1) + 0.59*color(:,:,2) + 0.11*color(:,:,3);
color(:,:,1) = gray;
color(:,:,2) = gray;
color(:,:,3) = gray;
end
newArray{k} = color;
end
function outData = LocalUpdateColors(inArray, prop, inData)
value = LocalGetAsCell(inArray,prop);
outData.objs = {inData.objs{:}, inArray};
outData.prop = {inData.prop{:}, {prop}};
outData.values = {inData.values{:}, value};
if (~isempty(value))
if strcmp(prop,'CData')
value = LocalMapCData(value);
else
value = LocalMapToGray(value);
end
set(inArray,{prop},value);
end
function bool = LocalIsPositiveScalar(value)
bool = isnumeric(value) & ...
prod(size(value)) == 1 & ...
value > 0;
function value = LocalToNum(value)
if ischar(value)
value = str2num(value);
end
|
github
|
jun-zhang/WebRTC-VideoEngine-Demo-master
|
plotBenchmark.m
|
.m
|
WebRTC-VideoEngine-Demo-master/webrtc_videoengine_demo/webrtc/modules/video_coding/codecs/test_framework/plotBenchmark.m
| 11,672 |
utf_8
|
a80ed712ca3895c1e7b6383d4cc07d38
|
function plotBenchmark(fileNames, export)
%PLOTBENCHMARK Plots and exports video codec benchmarking results.
% PLOTBENCHMARK(FILENAMES, EXPORT) parses the video codec benchmarking result
% files given by the cell array of strings FILENAME. It plots the results and
% optionally exports each plot to an appropriately named file.
%
% EXPORT parameter:
% 'none' No file exports.
% 'eps' Exports to eps files (default).
% 'pdf' Exports to eps files and uses the command-line utility
% epstopdf to obtain pdf files.
%
% Example:
% plotBenchmark({'H264Benchmark.txt' 'LSVXBenchmark.txt'}, 'pdf')
if (nargin < 1)
error('Too few input arguments');
elseif (nargin < 2)
export = 'eps';
end
if ~iscell(fileNames)
if ischar(fileNames)
% one single file name as a string is ok
if size(fileNames,1) > 1
% this is a char matrix, not ok
error('First argument must not be a char matrix');
end
% wrap in a cell array
fileNames = {fileNames};
else
error('First argument must be a cell array of strings');
end
end
if ~ischar(export)
error('Second argument must be a string');
end
outpath = 'BenchmarkPlots';
[status, errMsg] = mkdir(outpath);
if status == 0
error(errMsg);
end
nCases = 0;
testCases = [];
% Read each test result file
for fileIdx = 1:length(fileNames)
if ~isstr(fileNames{fileIdx})
error('First argument must be a cell array of strings');
end
fid = fopen(fileNames{fileIdx}, 'rt');
if fid == -1
error(['Unable to open ' fileNames{fileIdx}]);
end
version = '1.0';
if ~strcmp(fgetl(fid), ['#!benchmark' version])
fclose(fid);
error(['Requires benchmark file format version ' version]);
end
% Parse results file into testCases struct
codec = fgetl(fid);
tline = fgetl(fid);
while(tline ~= -1)
nCases = nCases + 1;
delim = strfind(tline, ',');
name = tline(1:delim(1)-1);
% Drop underscored suffix from name
underscore = strfind(name, '_');
if ~isempty(underscore)
name = name(1:underscore(1)-1);
end
resolution = tline(delim(1)+1:delim(2)-1);
frameRate = tline(delim(2)+1:end);
tline = fgetl(fid);
delim = strfind(tline, ',');
bitrateLabel = tline(1:delim(1)-1);
bitrate = sscanf(tline(delim(1):end),',%f');
tline = fgetl(fid);
delim = strfind(tline, ',');
psnrLabel = tline(1:delim(1)-1);
psnr = sscanf(tline(delim(1):end),',%f');
% Default data for the optional lines
speedLabel = 'Default';
speed = 0;
ssimLabel = 'Default';
ssim = 0;
tline = fgetl(fid);
delim = strfind(tline, ',');
while ~isempty(delim)
% More data
% Check type of data
if strncmp(lower(tline), 'speed', 5)
% Speed data included
speedLabel = tline(1:delim(1)-1);
speed = sscanf(tline(delim(1):end), ',%f');
tline = fgetl(fid);
elseif strncmp(lower(tline), 'encode time', 11)
% Encode and decode times included
% TODO: take care of the data
% pop two lines from file
tline = fgetl(fid);
tline = fgetl(fid);
elseif strncmp(tline, 'SSIM', 4)
% SSIM data included
ssimLabel = tline(1:delim(1)-1);
ssim = sscanf(tline(delim(1):end), ',%f');
tline = fgetl(fid);
end
delim = strfind(tline, ',');
end
testCases = [testCases struct('codec', codec, 'name', name, 'resolution', ...
resolution, 'frameRate', frameRate, 'bitrate', bitrate, 'psnr', psnr, ...
'speed', speed, 'bitrateLabel', bitrateLabel, 'psnrLabel', psnrLabel, ...
'speedLabel', speedLabel, ...
'ssim', ssim, 'ssimLabel', ssimLabel)];
tline = fgetl(fid);
end
fclose(fid);
end
i = 0;
casesPsnr = testCases;
while ~isempty(casesPsnr)
i = i + 1;
casesPsnr = plotOnePsnr(casesPsnr, i, export, outpath);
end
casesSSIM = testCases;
while ~isempty(casesSSIM)
i = i + 1;
casesSSIM = plotOneSSIM(casesSSIM, i, export, outpath);
end
casesSpeed = testCases;
while ~isempty(casesSpeed)
if casesSpeed(1).speed == 0
casesSpeed = casesSpeed(2:end);
else
i = i + 1;
casesSpeed = plotOneSpeed(casesSpeed, i, export, outpath);
end
end
%%%%%%%%%%%%%%%%%%
%% SUBFUNCTIONS %%
%%%%%%%%%%%%%%%%%%
function casesOut = plotOnePsnr(cases, num, export, outpath)
% Find matching specs
plotIdx = 1;
for i = 2:length(cases)
if strcmp(cases(1).resolution, cases(i).resolution) & ...
strcmp(cases(1).frameRate, cases(i).frameRate)
plotIdx = [plotIdx i];
end
end
% Return unplotted cases
casesOut = cases(setdiff(1:length(cases), plotIdx));
cases = cases(plotIdx);
% Prune similar results
for i = 1:length(cases)
simIndx = find(abs(cases(i).bitrate - [cases(i).bitrate(2:end) ; 0]) < 10);
while ~isempty(simIndx)
diffIndx = setdiff(1:length(cases(i).bitrate), simIndx);
cases(i).psnr = cases(i).psnr(diffIndx);
cases(i).bitrate = cases(i).bitrate(diffIndx);
simIndx = find(abs(cases(i).bitrate - [cases(i).bitrate(2:end) ; 0]) < 10);
end
end
% Prepare figure with axis labels and so on
hFig = figure(num);
clf;
hold on;
grid on;
axis([0 1100 20 50]);
set(gca, 'XTick', 0:200:1000);
set(gca, 'YTick', 20:10:60);
xlabel(cases(1).bitrateLabel);
ylabel(cases(1).psnrLabel);
res = cases(1).resolution;
frRate = cases(1).frameRate;
title([res ', ' frRate]);
hLines = [];
codecs = {};
sequences = {};
i = 0;
while ~isempty(cases)
i = i + 1;
[cases, hLine, codec, sequences] = plotOneCodec(cases, 'bitrate', 'psnr', i, sequences, 1);
% Stored to generate the legend
hLines = [hLines ; hLine];
codecs = {codecs{:} codec};
end
legend(hLines, codecs, 4);
hold off;
if ~strcmp(export, 'none')
% Export figure to an eps file
res = stripws(res);
frRate = stripws(frRate);
exportName = [outpath '/psnr-' res '-' frRate];
exportfig(hFig, exportName, 'Format', 'eps2', 'Color', 'cmyk');
end
if strcmp(export, 'pdf')
% Use the epstopdf utility to convert to pdf
system(['epstopdf ' exportName '.eps']);
end
function casesOut = plotOneSSIM(cases, num, export, outpath)
% Find matching specs
plotIdx = 1;
for i = 2:length(cases)
if strcmp(cases(1).resolution, cases(i).resolution) & ...
strcmp(cases(1).frameRate, cases(i).frameRate)
plotIdx = [plotIdx i];
end
end
% Return unplotted cases
casesOut = cases(setdiff(1:length(cases), plotIdx));
cases = cases(plotIdx);
% Prune similar results
for i = 1:length(cases)
simIndx = find(abs(cases(i).bitrate - [cases(i).bitrate(2:end) ; 0]) < 10);
while ~isempty(simIndx)
diffIndx = setdiff(1:length(cases(i).bitrate), simIndx);
cases(i).ssim = cases(i).ssim(diffIndx);
cases(i).bitrate = cases(i).bitrate(diffIndx);
simIndx = find(abs(cases(i).bitrate - [cases(i).bitrate(2:end) ; 0]) < 10);
end
end
% Prepare figure with axis labels and so on
hFig = figure(num);
clf;
hold on;
grid on;
axis([0 1100 0.5 1]); % y-limit are set to 'auto' below
set(gca, 'XTick', 0:200:1000);
%set(gca, 'YTick', 20:10:60);
xlabel(cases(1).bitrateLabel);
ylabel(cases(1).ssimLabel);
res = cases(1).resolution;
frRate = cases(1).frameRate;
title([res ', ' frRate]);
hLines = [];
codecs = {};
sequences = {};
i = 0;
while ~isempty(cases)
i = i + 1;
[cases, hLine, codec, sequences] = plotOneCodec(cases, 'bitrate', 'ssim', i, sequences, 1);
% Stored to generate the legend
hLines = [hLines ; hLine];
codecs = {codecs{:} codec};
end
%set(gca,'YLimMode','auto')
set(gca,'YLim',[0.5 1])
set(gca,'YScale','log')
legend(hLines, codecs, 4);
hold off;
if ~strcmp(export, 'none')
% Export figure to an eps file
res = stripws(res);
frRate = stripws(frRate);
exportName = [outpath '/ssim-' res '-' frRate];
exportfig(hFig, exportName, 'Format', 'eps2', 'Color', 'cmyk');
end
if strcmp(export, 'pdf')
% Use the epstopdf utility to convert to pdf
system(['epstopdf ' exportName '.eps']);
end
function casesOut = plotOneSpeed(cases, num, export, outpath)
% Find matching specs
plotIdx = 1;
for i = 2:length(cases)
if strcmp(cases(1).resolution, cases(i).resolution) & ...
strcmp(cases(1).frameRate, cases(i).frameRate) & ...
strcmp(cases(1).name, cases(i).name)
plotIdx = [plotIdx i];
end
end
% Return unplotted cases
casesOut = cases(setdiff(1:length(cases), plotIdx));
cases = cases(plotIdx);
% Prune similar results
for i = 1:length(cases)
simIndx = find(abs(cases(i).psnr - [cases(i).psnr(2:end) ; 0]) < 0.25);
while ~isempty(simIndx)
diffIndx = setdiff(1:length(cases(i).psnr), simIndx);
cases(i).psnr = cases(i).psnr(diffIndx);
cases(i).speed = cases(i).speed(diffIndx);
simIndx = find(abs(cases(i).psnr - [cases(i).psnr(2:end) ; 0]) < 0.25);
end
end
hFig = figure(num);
clf;
hold on;
%grid on;
xlabel(cases(1).psnrLabel);
ylabel(cases(1).speedLabel);
res = cases(1).resolution;
name = cases(1).name;
frRate = cases(1).frameRate;
title([name ', ' res ', ' frRate]);
hLines = [];
codecs = {};
sequences = {};
i = 0;
while ~isempty(cases)
i = i + 1;
[cases, hLine, codec, sequences] = plotOneCodec(cases, 'psnr', 'speed', i, sequences, 0);
% Stored to generate the legend
hLines = [hLines ; hLine];
codecs = {codecs{:} codec};
end
legend(hLines, codecs, 1);
hold off;
if ~strcmp(export, 'none')
% Export figure to an eps file
res = stripws(res);
frRate = stripws(frRate);
exportName = [outpath '/speed-' name '-' res '-' frRate];
exportfig(hFig, exportName, 'Format', 'eps2', 'Color', 'cmyk');
end
if strcmp(export, 'pdf')
% Use the epstopdf utility to convert to pdf
system(['epstopdf ' exportName '.eps']);
end
function [casesOut, hLine, codec, sequences] = plotOneCodec(cases, xfield, yfield, num, sequences, annotatePlot)
plotStr = {'gx-', 'bo-', 'r^-', 'kd-', 'cx-', 'go--', 'b^--'};
% Find matching codecs
plotIdx = 1;
for i = 2:length(cases)
if strcmp(cases(1).codec, cases(i).codec)
plotIdx = [plotIdx i];
end
end
% Return unplotted cases
casesOut = cases(setdiff(1:length(cases), plotIdx));
cases = cases(plotIdx);
for i = 1:length(cases)
% Plot a single case
hLine = plot(getfield(cases(i), xfield), getfield(cases(i), yfield), plotStr{num}, ...
'LineWidth', 1.1, 'MarkerSize', 6);
end
% hLine handle and codec are returned to construct the legend afterwards
codec = cases(1).codec;
if annotatePlot == 0
return;
end
for i = 1:length(cases)
% Print the codec name as a text label
% Ensure each codec is only printed once
sequencePlotted = 0;
for j = 1:length(sequences)
if strcmp(cases(i).name, sequences{j})
sequencePlotted = 1;
break;
end
end
if sequencePlotted == 0
text(getfield(cases(i), xfield, {1}), getfield(cases(i), yfield, {1}), ...
[' ' cases(i).name]);
sequences = {sequences{:} cases(i).name};
end
end
% Strip whitespace from string
function str = stripws(str)
if ~isstr(str)
error('String required');
end
str = str(setdiff(1:length(str), find(isspace(str) == 1)));
|
github
|
sudrag/Perception-and-Computer-Vision-in-MATLAB-master
|
dpsimplify.m
|
.m
|
Perception-and-Computer-Vision-in-MATLAB-master/AR Tag Detection/Scripts/dpsimplify.m
| 6,599 |
utf_8
|
ec1b680dd31937dca16da7df9996aeba
|
function [ps,ix] = dpsimplify(p,tol)
% Recursive Douglas-Peucker Polyline Simplification, Simplify
%
% [ps,ix] = dpsimplify(p,tol)
%
% dpsimplify uses the recursive Douglas-Peucker line simplification
% algorithm to reduce the number of vertices in a piecewise linear curve
% according to a specified tolerance. The algorithm is also know as
% Iterative Endpoint Fit. It works also for polylines and polygons
% in higher dimensions.
%
% In case of nans (missing vertex coordinates) dpsimplify assumes that
% nans separate polylines. As such, dpsimplify treats each line
% separately.
%
% For additional information on the algorithm follow this link
% http://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm
%
% Input arguments
%
% p polyline n*d matrix with n vertices in d
% dimensions.
% tol tolerance (maximal euclidean distance allowed
% between the new line and a vertex)
%
% Output arguments
%
% ps simplified line
% ix linear index of the vertices retained in p (ps = p(ix))
%
% Examples
%
% 1. Simplify line
%
% tol = 1;
% x = 1:0.1:8*pi;
% y = sin(x) + randn(size(x))*0.1;
% p = [x' y'];
% ps = dpsimplify(p,tol);
%
% plot(p(:,1),p(:,2),'k')
% hold on
% plot(ps(:,1),ps(:,2),'r','LineWidth',2);
% legend('original polyline','simplified')
%
% 2. Reduce polyline so that only knickpoints remain by
% choosing a very low tolerance
%
% p = [(1:10)' [1 2 3 2 4 6 7 8 5 2]'];
% p2 = dpsimplify(p,eps);
% plot(p(:,1),p(:,2),'k+--')
% hold on
% plot(p2(:,1),p2(:,2),'ro','MarkerSize',10);
% legend('original line','knickpoints')
%
% 3. Simplify a 3d-curve
%
% x = sin(1:0.01:20)';
% y = cos(1:0.01:20)';
% z = x.*y.*(1:0.01:20)';
% ps = dpsimplify([x y z],0.1);
% plot3(x,y,z);
% hold on
% plot3(ps(:,1),ps(:,2),ps(:,3),'k*-');
%
%
%
% Author: Wolfgang Schwanghart, 13. July, 2010.
% w.schwanghart[at]unibas.ch
if nargin == 0
help dpsimplify
return
end
narginchk(2, 2) % require exactly two input arguments (p and tol)
% error checking
if ~isscalar(tol) || tol<0;
error('tol must be a positive scalar')
end
% nr of dimensions
nrvertices = size(p,1);
dims = size(p,2);
% anonymous function for starting point and end point comparison
% using a relative tolerance test
compare = @(a,b) abs(a-b)/max(abs(a),abs(b)) <= eps;
% what happens, when there are NaNs?
% NaNs divide polylines.
Inan = any(isnan(p),2);
% any NaN at all?
Inanp = any(Inan);
% if there is only one vertex
if nrvertices == 1 || isempty(p);
ps = p;
ix = 1;
% if there are two
elseif nrvertices == 2 && ~Inanp;
% when the line has no vertices (except end and start point of the
% line) check if the distance between both is less than the tolerance.
% If so, return the center.
if dims == 2;
d = hypot(p(1,1)-p(2,1),p(1,2)-p(2,2));
else
d = sqrt(sum((p(1,:)-p(2,:)).^2));
end
if d <= tol;
ps = sum(p,1)/2;
ix = 1;
else
ps = p;
ix = [1;2];
end
elseif Inanp;
% case: there are nans in the p array
% --> find start and end indices of contiguous non-nan data
Inan = ~Inan;
sIX = strfind(Inan',[0 1])' + 1;
eIX = strfind(Inan',[1 0])';
if Inan(end)==true;
eIX = [eIX;nrvertices];
end
if Inan(1);
sIX = [1;sIX];
end
% calculate length of non-nan components
lIX = eIX-sIX+1;
% put each component into a single cell
c = mat2cell(p(Inan,:),lIX,dims);
% now call dpsimplify again inside cellfun.
if nargout == 2;
[ps,ix] = cellfun(@(x) dpsimplify(x,tol),c,'uniformoutput',false);
ix = cellfun(@(x,six) x+six-1,ix,num2cell(sIX),'uniformoutput',false);
else
ps = cellfun(@(x) dpsimplify(x,tol),c,'uniformoutput',false);
end
% write the data from a cell array back to a matrix
ps = cellfun(@(x) [x;nan(1,dims)],ps,'uniformoutput',false);
ps = cell2mat(ps);
ps(end,:) = [];
% ix wanted? write ix to a matrix, too.
if nargout == 2;
ix = cell2mat(ix);
end
else
% if there are no nans than start the recursive algorithm
ixe = size(p,1);
ixs = 1;
% logical vector for the vertices to be retained
I = true(ixe,1);
% call recursive function
p = simplifyrec(p,tol,ixs,ixe);
ps = p(I,:);
% if desired return the index of retained vertices
if nargout == 2;
ix = find(I);
end
end
% _________________________________________________________
function p = simplifyrec(p,tol,ixs,ixe)
% check if startpoint and endpoint are the same
% better comparison needed which included a tolerance eps
c1 = num2cell(p(ixs,:));
c2 = num2cell(p(ixe,:));
% same start and endpoint with tolerance
sameSE = all(cell2mat(cellfun(compare,c1(:),c2(:),'UniformOutput',false)));
if sameSE;
% calculate the shortest distance of all vertices between ixs and
% ixe to ixs only
if dims == 2;
d = hypot(p(ixs,1)-p(ixs+1:ixe-1,1),p(ixs,2)-p(ixs+1:ixe-1,2));
else
d = sqrt(sum(bsxfun(@minus,p(ixs,:),p(ixs+1:ixe-1,:)).^2,2));
end
else
% calculate shortest distance of all points to the line from ixs to ixe
% subtract starting point from other locations
pt = bsxfun(@minus,p(ixs+1:ixe,:),p(ixs,:));
% end point
a = pt(end,:)';
beta = (a' * pt')./(a'*a);
b = pt-bsxfun(@times,beta,a)';
if dims == 2;
% if line in 2D use the numerical more robust hypot function
d = hypot(b(:,1),b(:,2));
else
d = sqrt(sum(b.^2,2));
end
end
% identify maximum distance and get the linear index of its location
[dmax,ixc] = max(d);
ixc = ixs + ixc;
% if the maximum distance is smaller than the tolerance remove vertices
% between ixs and ixe
if dmax <= tol;
if ixs ~= ixe-1;
I(ixs+1:ixe-1) = false;
end
% if not, call simplifyrec for the segments between ixs and ixc (ixc
% and ixe)
else
p = simplifyrec(p,tol,ixs,ixc);
p = simplifyrec(p,tol,ixc,ixe);
end
end
end
|
github
|
sudrag/Perception-and-Computer-Vision-in-MATLAB-master
|
homography2d.m
|
.m
|
Perception-and-Computer-Vision-in-MATLAB-master/AR Tag Detection/Scripts/homography2d.m
| 2,957 |
utf_8
|
5d39781c0ed194cfeea3f44954b4ea80
|
% HOMOGRAPHY2D - computes 2D homography
%
% Usage: H = homography2d(x1, x2)
% H = homography2d(x)
%
% Arguments:
% x1 - 3xN set of homogeneous points
% x2 - 3xN set of homogeneous points such that x1<->x2
%
% x - If a single argument is supplied it is assumed that it
% is in the form x = [x1; x2]
% Returns:
% H - the 3x3 homography such that x2 = H*x1
%
% This code follows the normalised direct linear transformation
% algorithm given by Hartley and Zisserman "Multiple View Geometry in
% Computer Vision" p92.
% Copyright (c) 2003-2005 Peter Kovesi
% School of Computer Science & Software Engineering
% The University of Western Australia
% pk at csse uwa edu au
% http://www.csse.uwa.edu.au/~pk
%
% Permission is hereby granted, free of charge, to any person obtaining a copy
% of this software and associated documentation files (the "Software"), to deal
% in the Software without restriction, subject to the following conditions:
%
% The above copyright notice and this permission notice shall be included in
% all copies or substantial portions of the Software.
%
% The Software is provided "as is", without warranty of any kind.
% May 2003 - Original version.
% Feb 2004 - Single argument allowed for to enable use with RANSAC.
% Feb 2005 - SVD changed to 'Economy' decomposition (thanks to Paul O'Leary)
function H = homography2d(varargin)
[x1, x2] = checkargs(varargin(:));
% Attempt to normalise each set of points so that the origin
% is at centroid and mean distance from origin is sqrt(2).
[x1, T1] = normalise2dpts(x1);
[x2, T2] = normalise2dpts(x2);
% Note that it may have not been possible to normalise
% the points if one was at infinity so the following does not
% assume that scale parameter w = 1.
Npts = length(x1);
A = zeros(3*Npts,9);
O = [0 0 0];
for n = 1:Npts
X = x1(:,n)';
x = x2(1,n); y = x2(2,n); w = x2(3,n);
A(3*n-2,:) = [ O -w*X y*X];
A(3*n-1,:) = [ w*X O -x*X];
A(3*n ,:) = [-y*X x*X O ];
end
[U,D,V] = svd(A,0); % 'Economy' decomposition for speed
% Extract homography
H = reshape(V(:,9),3,3)';
% Denormalise
H = T2\H*T1;
%--------------------------------------------------------------------------
% Function to check argument values and set defaults
function [x1, x2] = checkargs(arg);
if length(arg) == 2
x1 = arg{1};
x2 = arg{2};
if ~all(size(x1)==size(x2))
error('x1 and x2 must have the same size');
elseif size(x1,1) ~= 3
error('x1 and x2 must be 3xN');
end
elseif length(arg) == 1
if size(arg{1},1) ~= 6
error('Single argument x must be 6xN');
else
x1 = arg{1}(1:3,:);
x2 = arg{1}(4:6,:);
end
else
error('Wrong number of arguments supplied');
end
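% --- Usage sketch (added for illustration; the point sets below are made up) ---
% Estimate the homography mapping four matched points and verify it:
%   x1 = [0 1 1 0; 0 0 1 1; 1 1 1 1];            % unit square corners (homogeneous)
%   x2 = [0 2 2 0; 0 0 3 3; 1 1 1 1];            % same corners scaled by [2 3]
%   H  = homography2d(x1, x2);
%   x2hat = H*x1;  x2hat = x2hat./x2hat(3,:);    % reproduces x2 up to scale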
|
github
|
sudrag/Perception-and-Computer-Vision-in-MATLAB-master
|
normalise2dpts.m
|
.m
|
Perception-and-Computer-Vision-in-MATLAB-master/AR Tag Detection/Scripts/normalise2dpts.m
| 2,430 |
utf_8
|
f0428a0b70a640f4503e444e7e286535
|
% NORMALISE2DPTS - normalises 2D homogeneous points
%
% Function translates and normalises a set of 2D homogeneous points
% so that their centroid is at the origin and their mean distance from
% the origin is sqrt(2). This process typically improves the
% conditioning of any equations used to solve homographies, fundamental
% matrices etc.
%
% Usage: [newpts, T] = normalise2dpts(pts)
%
% Argument:
% pts - 3xN array of 2D homogeneous coordinates
%
% Returns:
% newpts - 3xN array of transformed 2D homogeneous coordinates. The
% scaling parameter is normalised to 1 unless the point is at
% infinity.
% T - The 3x3 transformation matrix, newpts = T*pts
%
% If there are some points at infinity the normalisation transform
% is calculated using just the finite points. Being a scaling and
% translating transform this will not affect the points at infinity.
% Peter Kovesi
% School of Computer Science & Software Engineering
% The University of Western Australia
% pk at csse uwa edu au
% http://www.csse.uwa.edu.au/~pk
%
% May 2003 - Original version
% February 2004 - Modified to deal with points at infinity.
% December 2008 - meandist calculation modified to work with Octave 3.0.1
% (thanks to Ron Parr)
function [newpts, T] = normalise2dpts(pts)
if size(pts,1) ~= 3
error('pts must be 3xN');
end
% Find the indices of the points that are not at infinity
finiteind = find(abs(pts(3,:)) > eps);
if length(finiteind) ~= size(pts,2)
warning('Some points are at infinity');
end
% For the finite points ensure homogeneous coords have scale of 1
pts(1,finiteind) = pts(1,finiteind)./pts(3,finiteind);
pts(2,finiteind) = pts(2,finiteind)./pts(3,finiteind);
pts(3,finiteind) = 1;
c = mean(pts(1:2,finiteind)')'; % Centroid of finite points
newp(1,finiteind) = pts(1,finiteind)-c(1); % Shift origin to centroid.
newp(2,finiteind) = pts(2,finiteind)-c(2);
dist = sqrt(newp(1,finiteind).^2 + newp(2,finiteind).^2);
meandist = mean(dist(:)); % Ensure dist is a column vector for Octave 3.0.1
scale = sqrt(2)/meandist;
T = [scale 0 -scale*c(1)
0 scale -scale*c(2)
0 0 1 ];
newpts = T*pts;
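% --- Usage sketch (added for illustration) ---
%   pts = [10*rand(2,20); ones(1,20)];           % 20 finite points, w = 1
%   [npts, T] = normalise2dpts(pts);
%   mean(sqrt(sum(npts(1:2,:).^2)))              % approximately sqrt(2) by construction
%   max(max(abs(T\npts - pts)))                  % approximately 0: the transform is invertible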
|
github
|
sudrag/Perception-and-Computer-Vision-in-MATLAB-master
|
ransacfitfundmatrix.m
|
.m
|
Perception-and-Computer-Vision-in-MATLAB-master/Visual Odometry/code/ransacfitfundmatrix.m
| 5,973 |
utf_8
|
ade5620bd3b104b73b18d18a647de42c
|
% RANSACFITFUNDMATRIX - fits fundamental matrix using RANSAC
%
% Usage: [F, inliers] = ransacfitfundmatrix(x1, x2, t)
%
% Arguments:
% x1 - 2xN or 3xN set of homogeneous points. If the data is
% 2xN it is assumed the homogeneous scale factor is 1.
% x2 - 2xN or 3xN set of homogeneous points such that x1<->x2.
% t - The distance threshold between data point and the model
% used to decide whether a point is an inlier or not.
% Note that point coordinates are normalised so that their
% mean distance from the origin is sqrt(2). The value of
% t should be set relative to this, say in the range
% 0.001 - 0.01
%
% Note that it is assumed that the matching of x1 and x2 are putative and it
% is expected that a percentage of matches will be wrong.
%
% Returns:
% F - The 3x3 fundamental matrix such that x2'Fx1 = 0.
% inliers - An array of indices of the elements of x1, x2 that were
% the inliers for the best model.
%
% See Also: RANSAC, FUNDMATRIX
% Copyright (c) 2004-2005 Peter Kovesi
% School of Computer Science & Software Engineering
% The University of Western Australia
% http://www.csse.uwa.edu.au/
%
% Permission is hereby granted, free of charge, to any person obtaining a copy
% of this software and associated documentation files (the "Software"), to deal
% in the Software without restriction, subject to the following conditions:
%
% The above copyright notice and this permission notice shall be included in
% all copies or substantial portions of the Software.
%
% The Software is provided "as is", without warranty of any kind.
% February 2004 Original version
% August 2005 Distance error function changed to match changes in RANSAC
% February 2016 Catch case when ransac fails so that we skip trying a final
% least squares solution
function [F, inliers] = ransacfitfundmatrix(x1, x2, t, feedback)
if ~all(size(x1)==size(x2))
error('Data sets x1 and x2 must have the same dimension');
end
if nargin == 3
feedback = 0;
end
[rows,npts] = size(x1);
if ~(rows==2 || rows==3)
error('x1 and x2 must have 2 or 3 rows');
end
if rows == 2 % Pad data with homogeneous scale factor of 1
x1 = [x1; ones(1,npts)];
x2 = [x2; ones(1,npts)];
end
% Normalise each set of points so that the origin is at centroid and
% mean distance from origin is sqrt(2). normalise2dpts also ensures the
% scale parameter is 1. Note that 'fundmatrix' will also call
% 'normalise2dpts' but the code in 'ransac' that calls the distance
% function will not - so it is best that we normalise beforehand.
[x1, T1] = normalise2dpts(x1);
[x2, T2] = normalise2dpts(x2);
s = 8; % Number of points needed to fit a fundamental matrix. Note that
% only 7 are needed but the function 'fundmatrix' only
% implements the 8-point solution.
fittingfn = @fundmatrix;
distfn = @funddist;
degenfn = @isdegenerate;
% x1 and x2 are 'stacked' to create a 6xN array for ransac
[F, inliers] = ransac([x1; x2], fittingfn, distfn, degenfn, s, t, feedback);
if isempty(F) % ransac failed to find a solution.
return; % Do not attempt to do a final least squares fit
end
% Now do a final least squares fit on the data points considered to
% be inliers.
F = fundmatrix(x1(:,inliers), x2(:,inliers));
% Denormalise
F = T2'*F*T1;
%--------------------------------------------------------------------------
% Function to evaluate the first order approximation of the geometric error
% (Sampson distance) of the fit of a fundamental matrix with respect to a
% set of matched points as needed by RANSAC. See: Hartley and Zisserman,
% 'Multiple View Geometry in Computer Vision', 2nd Ed. page 287.
%
% Note that this code allows for F being a cell array of fundamental matrices of
% which we have to pick the best one. (A 7 point solution can return up to 3
% solutions)
function [bestInliers, bestF] = funddist(F, x, t);
x1 = x(1:3,:); % Extract x1 and x2 from x
x2 = x(4:6,:);
if iscell(F) % We have several solutions each of which must be tested
nF = length(F); % Number of solutions to test
bestF = F{1}; % Initial allocation of best solution
ninliers = 0; % Number of inliers
for k = 1:nF
x2tFx1 = zeros(1,length(x1));
for n = 1:length(x1)
x2tFx1(n) = x2(:,n)'*F{k}*x1(:,n);
end
Fx1 = F{k}*x1;
Ftx2 = F{k}'*x2;
% Evaluate distances
d = x2tFx1.^2 ./ ...
(Fx1(1,:).^2 + Fx1(2,:).^2 + Ftx2(1,:).^2 + Ftx2(2,:).^2);
inliers = find(abs(d) < t); % Indices of inlying points
if length(inliers) > ninliers % Record best solution
ninliers = length(inliers);
bestF = F{k};
bestInliers = inliers;
end
end
else % We just have one solution
x2tFx1 = zeros(1,length(x1));
for n = 1:length(x1)
x2tFx1(n) = x2(:,n)'*F*x1(:,n);
end
Fx1 = F*x1;
Ftx2 = F'*x2;
% Evaluate distances
d = x2tFx1.^2 ./ ...
(Fx1(1,:).^2 + Fx1(2,:).^2 + Ftx2(1,:).^2 + Ftx2(2,:).^2);
bestInliers = find(abs(d) < t); % Indices of inlying points
bestF = F; % Copy F directly to bestF
end
%----------------------------------------------------------------------
% (Degenerate!) function to determine if a set of matched points will result
% in a degeneracy in the calculation of a fundamental matrix as needed by
% RANSAC. This function assumes this cannot happen...
function r = isdegenerate(x)
r = 0;
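% --- Usage sketch (added for illustration; x1m and x2m are assumed 3xN putative matches) ---
%   t = 0.002;                                    % threshold in normalised coordinates
%   [F, inliers] = ransacfitfundmatrix(x1m, x2m, t);
%   residuals = diag(x2m(:,inliers)'*F*x1m(:,inliers));  % x2'*F*x1 should be near zero for inliers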
|
github
|
sudrag/Perception-and-Computer-Vision-in-MATLAB-master
|
ransac.m
|
.m
|
Perception-and-Computer-Vision-in-MATLAB-master/Visual Odometry/code/ransac.m
| 10,480 |
utf_8
|
c5e0917ad9d7194d2ab04f6741400fac
|
% RANSAC - Robustly fits a model to data with the RANSAC algorithm
%
% Usage:
%
% [M, inliers] = ransac(x, fittingfn, distfn, degenfn s, t, feedback, ...
% maxDataTrials, maxTrials)
%
% Arguments:
% x - Data sets to which we are seeking to fit a model M
% It is assumed that x is of size [d x Npts]
% where d is the dimensionality of the data and Npts is
% the number of data points.
%
% fittingfn - Handle to a function that fits a model to s
% data from x. It is assumed that the function is of the
% form:
% M = fittingfn(x)
% Note it is possible that the fitting function can return
% multiple models (for example up to 3 fundamental matrices
% can be fitted to 7 matched points). In this case it is
% assumed that the fitting function returns a cell array of
% models.
% If this function cannot fit a model it should return M as
% an empty matrix.
%
% distfn - Handle to a function that evaluates the
% distances from the model to data x.
% It is assumed that the function is of the form:
% [inliers, M] = distfn(M, x, t)
% This function must evaluate the distances between points
% and the model returning the indices of elements in x that
% are inliers, that is, the points that are within distance
% 't' of the model. Additionally, if M is a cell array of
% possible models 'distfn' will return the model that has the
% most inliers. If there is only one model this function
% must still copy the model to the output. After this call M
% will be a non-cell object representing only one model.
%
% degenfn - Handle to a function that determines whether a
% set of datapoints will produce a degenerate model.
% This is used to discard random samples that do not
% result in useful models.
% It is assumed that degenfn is a boolean function of
% the form:
% r = degenfn(x)
% It may be that you cannot devise a test for degeneracy in
% which case you should write a dummy function that always
% returns a value of 1 (true) and rely on 'fittingfn' to return
% an empty model should the data set be degenerate.
%
% s - The minimum number of samples from x required by
% fittingfn to fit a model.
%
% t - The distance threshold between a data point and the model
% used to decide whether the point is an inlier or not.
%
% feedback - An optional flag 0/1. If set to one the trial count and the
% estimated total number of trials required is printed out at
% each step. Defaults to 0.
%
% maxDataTrials - Maximum number of attempts to select a non-degenerate
% data set. This parameter is optional and defaults to 100.
%
% maxTrials - Maximum number of iterations. This parameter is optional and
% defaults to 1000.
%
% Returns:
% M - The model having the greatest number of inliers.
% inliers - An array of indices of the elements of x that were
% the inliers for the best model.
%
% If no solution could be found M and inliers are both returned as empty
% matrices and a warning reported.
%
% Note that the desired probability of choosing at least one sample free from
% outliers is set at 0.99. You will need to edit the code should you wish to
% change this (it should probably be a parameter)
%
% For an example of the use of this function see RANSACFITHOMOGRAPHY or
% RANSACFITPLANE
% References:
% M.A. Fischler and R.C. Bolles. "Random Sample Consensus: A paradigm
% for model fitting with applications to image analysis and automated
% cartography". Comm. Assoc. Comp. Mach., Vol 24, No 6, pp 381-395, 1981
%
% Richard Hartley and Andrew Zisserman. "Multiple View Geometry in
% Computer Vision". pp 101-113. Cambridge University Press, 2001
% Copyright (c) 2003-2013 Peter Kovesi
% Centre for Exploration Targeting
% The University of Western Australia
% peter.kovesi at uwa edu au
% http://www.csse.uwa.edu.au/~pk
%
% Permission is hereby granted, free of charge, to any person obtaining a copy
% of this software and associated documentation files (the "Software"), to deal
% in the Software without restriction, subject to the following conditions:
%
% The above copyright notice and this permission notice shall be included in
% all copies or substantial portions of the Software.
%
% The Software is provided "as is", without warranty of any kind.
%
% May 2003 - Original version
% February 2004 - Tidied up.
% August 2005 - Specification of distfn changed to allow model fitter to
% return multiple models from which the best must be selected
% Sept 2006 - Random selection of data points changed to ensure duplicate
% points are not selected.
% February 2007 - Jordi Ferrer: Arranged warning printout.
% Allow maximum trials as optional parameters.
% Patch the problem when non-generated data
% set is not given in the first iteration.
% August 2008 - 'feedback' parameter restored to argument list and other
% breaks in code introduced in last update fixed.
% December 2008 - Octave compatibility mods
% June 2009 - Argument 'MaxTrials' corrected to 'maxTrials'!
% January 2013 - Separate code path for Octave no longer needed
function [M, inliers] = ransac(x, fittingfn, distfn, degenfn, s, t, feedback, ...
maxDataTrials, maxTrials)
% Test number of parameters
error ( nargchk ( 6, 9, nargin ) );
if nargin < 9; maxTrials = 1000; end;
if nargin < 8; maxDataTrials = 100; end;
if nargin < 7; feedback = 0; end;
[rows, npts] = size(x);
p = 0.99; % Desired probability of choosing at least one sample
% free from outliers (probably should be a parameter)
bestM = NaN; % Sentinel value allowing detection of solution failure.
trialcount = 0;
bestscore = 0;
N = 1; % Dummy initialisation for number of trials.
while N > trialcount
% Select at random s datapoints to form a trial model, M.
% In selecting these points we have to check that they are not in
% a degenerate configuration.
degenerate = 1;
count = 1;
while degenerate
% Generate s random indices in the range 1..npts
% (If you do not have the statistics toolbox with randsample(),
% use the function RANDOMSAMPLE from my webpage)
if ~exist('randsample', 'file')
ind = randomsample(npts, s);
else
ind = randsample(npts, s);
end
% Test that these points are not a degenerate configuration.
degenerate = feval(degenfn, x(:,ind));
if ~degenerate
% Fit model to this random selection of data points.
% Note that M may represent a set of models that fit the data in
% this case M will be a cell array of models
M = feval(fittingfn, x(:,ind));
% Depending on your problem it might be that the only way you
% can determine whether a data set is degenerate or not is to
% try to fit a model and see if it succeeds. If it fails we
% reset degenerate to true.
if isempty(M)
degenerate = 1;
end
end
% Safeguard against being stuck in this loop forever
count = count + 1;
if count > maxDataTrials
warning('Unable to select a nondegenerate data set');
break
end
end
% Once we are out here we should have some kind of model...
% Evaluate distances between points and model returning the indices
% of elements in x that are inliers. Additionally, if M is a cell
% array of possible models 'distfn' will return the model that has
% the most inliers. After this call M will be a non-cell object
% representing only one model.
[inliers, M] = feval(distfn, M, x, t);
% Find the number of inliers to this model.
ninliers = length(inliers);
if ninliers > bestscore % Largest set of inliers so far...
bestscore = ninliers; % Record data for this model
bestinliers = inliers;
bestM = M;
% Update estimate of N, the number of trials to ensure we pick,
% with probability p, a data set with no outliers.
fracinliers = ninliers/npts;
pNoOutliers = 1 - fracinliers^s;
pNoOutliers = max(eps, pNoOutliers); % Avoid division by -Inf
pNoOutliers = min(1-eps, pNoOutliers);% Avoid division by 0.
N = log(1-p)/log(pNoOutliers);
end
trialcount = trialcount+1;
if feedback
fprintf('trial %d out of %d \r',trialcount, ceil(N));
end
% Safeguard against being stuck in this loop forever
if trialcount > maxTrials
warning( ...
sprintf('ransac reached the maximum number of %d trials',...
maxTrials));
break
end
end
if feedback, fprintf('\n'); end
if ~isnan(bestM) % We got a solution
M = bestM;
inliers = bestinliers;
else
M = [];
inliers = [];
warning('ransac was unable to find a useful solution');
end
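% --- Usage sketch (added for illustration; linedist is a hypothetical helper, not part of this file) ---
% Robustly fit a 2D line y = m*x + c to points stored columnwise in xy (2xN):
%   fittingfn = @(xy) polyfit(xy(1,:), xy(2,:), 1);   % model M = [m c]
%   degenfn   = @(xy) 0;                              % no degeneracy test
%   distfn    = @linedist;
%   [M, inliers] = ransac(xy, fittingfn, distfn, degenfn, 2, 0.05);
%
% where linedist would follow the required signature [inliers, M] = distfn(M, x, t):
%   function [inliers, M] = linedist(M, xy, t)
%       d = abs(M(1)*xy(1,:) - xy(2,:) + M(2)) / sqrt(M(1)^2 + 1);  % point-to-line distance
%       inliers = find(d < t);
%   end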
|
github
|
sudrag/Perception-and-Computer-Vision-in-MATLAB-master
|
normalise2dpts.m
|
.m
|
Perception-and-Computer-Vision-in-MATLAB-master/Visual Odometry/code/normalise2dpts.m
| 2,501 |
utf_8
|
00a5d4cb7272f147a49e349c2772fe72
|
% NORMALISE2DPTS - normalises 2D homogeneous points
%
% Function translates and normalises a set of 2D homogeneous points
% so that their centroid is at the origin and their mean distance from
% the origin is sqrt(2). This process typically improves the
% conditioning of any equations used to solve homographies, fundamental
% matrices etc.
%
% Usage: [newpts, T] = normalise2dpts(pts)
%
% Argument:
% pts - 3xN array of 2D homogeneous coordinates
%
% Returns:
% newpts - 3xN array of transformed 2D homogeneous coordinates. The
% scaling parameter is normalised to 1 unless the point is at
% infinity.
% T - The 3x3 transformation matrix, newpts = T*pts
%
% If there are some points at infinity the normalisation transform
% is calculated using just the finite points. Being a scaling and
% translating transform this will not affect the points at infinity.
% Peter Kovesi
% School of Computer Science & Software Engineering
% The University of Western Australia
% pk at csse uwa edu au
% http://www.csse.uwa.edu.au/~pk
%
% May 2003 - Original version
% February 2004 - Modified to deal with points at infinity.
% December 2008 - meandist calculation modified to work with Octave 3.0.1
% (thanks to Ron Parr)
% December 2016 - Disabled warning messgae for points at infinity.
function [newpts, T] = normalise2dpts(pts)
if size(pts,1) ~= 3
error('pts must be 3xN');
end
% Find the indices of the points that are not at infinity
finiteind = find(abs(pts(3,:)) > eps);
% if length(finiteind) ~= size(pts,2)
% warning('Some points are at infinity');
% end
% For the finite points ensure homogeneous coords have scale of 1
pts(1,finiteind) = pts(1,finiteind)./pts(3,finiteind);
pts(2,finiteind) = pts(2,finiteind)./pts(3,finiteind);
pts(3,finiteind) = 1;
c = mean(pts(1:2,finiteind)')'; % Centroid of finite points
newp(1,finiteind) = pts(1,finiteind)-c(1); % Shift origin to centroid.
newp(2,finiteind) = pts(2,finiteind)-c(2);
dist = sqrt(newp(1,finiteind).^2 + newp(2,finiteind).^2);
meandist = mean(dist(:)); % Ensure dist is a column vector for Octave 3.0.1
scale = sqrt(2)/meandist;
T = [scale 0 -scale*c(1)
0 scale -scale*c(2)
0 0 1 ];
newpts = T*pts;
|
github
|
johnfgibson/whyjulia-master
|
ksbenchmark.m
|
.m
|
whyjulia-master/codes/ksbenchmark.m
| 2,820 |
utf_8
|
823086545f955482f5a264767f751251
|
function ksbenchmark(Nx, printnorms)
% ksbenchmark: run a Kuramoto-Sivashinky simulation, benchmark, and plot
% Nx = number of gridpoints
% printnorms = 1 => print norm(u0) and norm(uT), 0 => don't
Lx = Nx/16*pi; % spatial domain [0, L] periodic
dt = 1/16; % discrete time step
T = 200; % integrate from t=0 to t=T
Nt = floor(T/dt); % total number of timesteps
if nargin < 2
printnorms = 0;
end
x = (Lx/Nx)*(0:Nx-1);
u0 = cos(x) + 0.1*sin(x/8) + 0.01*cos((2*pi/Lx)*x);
Nruns = 1; % number of timed runs (must be > skip for avgtime to be meaningful)
skip = 1; % warm-up runs excluded from the average
avgtime = 0;
for r=1:Nruns;
tic();
u = ksintegrate(u0, Lx, dt, Nt);
cputime = toc()
if r > skip
avgtime = avgtime + cputime;
end
end
if printnorms == 1
u0norm = ksnorm(u0)
uTnorm = ksnorm(u)
end
avgtime = avgtime/(Nruns-skip)
end
function n = ksnorm(u)
% ksnorm: compute the 2-norm of u(x) = sqrt(1/Lx int_0^Lx |u|^2 dx)
n = sqrt((u * u') /length(u));
end
function u = ksintegrate(u, Lx, dt, Nt)
% ksintegrate: integrate kuramoto-sivashinsky equation
% u_t = -u*u_x - u_xx - u_xxxx, domain x in [0,Lx], periodic BCs
%
% inputs
% u = initial condition (vector of u(x,0) values on uniform gridpoints))
% Lx = domain length
% dt = time step
% Nt = number of integration timesteps
%
% outputs
%
% u = final state (vector of u(x,T) values on uniform gridpoints))
Nx = length(u); % number of gridpoints
kx = [0:Nx/2-1 0 -Nx/2+1:-1]; % integer wavenumbers: exp(2*pi*kx*x/L)
alpha = 2*pi*kx/Lx; % real wavenumbers: exp(alpha*x)
D = i*alpha; % D = d/dx operator in Fourier space
L = alpha.^2 - alpha.^4; % linear operator -D^2 - D^4 in Fourier space
G = -0.5*D; % -1/2 D operator in Fourier space
% Express PDE as u_t = Lu + N(u), L is linear part, N nonlinear part.
% Then Crank-Nicolson Adams-Bashforth discretization is
%
% (I - dt/2 L) u^{n+1} = (I + dt/2 L) u^n + 3dt/2 N^n - dt/2 N^{n-1}
%
% let A = (I - dt/2 L)
% B = (I + dt/2 L), then the CNAB timestep formula is
%
% u^{n+1} = A^{-1} (B u^n + 3dt/2 N^n - dt/2 N^{n-1})
% some convenience variables
dt2 = dt/2;
dt32 = 3*dt/2;
A_inv = (ones(1,Nx) - dt2*L).^(-1);
B = ones(1,Nx) + dt2*L;
Nn = G.*fft(u.*u); % compute -1/2 d/dx u^2 (spectral), notation Nn = N^n = N(u(n dt))
Nn1 = Nn; % notation Nn1 = N^{n-1} = N(u((n-1) dt))
u = fft(u); % transform u (spectral)
% timestepping loop
for n = 1:Nt
Nn1 = Nn; % shift N(u) in time: N^{n-1} <- N^n
Nn = G.*fft(real(ifft(u)).^2); % compute Nn = N(u) = -1/2 d/dx u^2
u = A_inv .* (B .* u + dt32*Nn - dt2*Nn1);
end
u = real(ifft(u));
end
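% --- Usage sketch (added for illustration) ---
%   ksbenchmark(128);        % benchmark on 128 gridpoints, timings only
%   ksbenchmark(512, 1);     % larger grid; also print norm(u0) and norm(uT)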
|
github
|
sajeed786/Earthquake-Prediction-master
|
MFO.m
|
.m
|
Earthquake-Prediction-master/MFO.m
| 5,867 |
utf_8
|
ded7613a78e724ccac5ece6882985164
|
%______________________________________________________________________________________________
% Moth-Flame Optimization Algorithm (MFO)
% Source codes demo version 1.0
%
% Developed in MATLAB R2011b(7.13)
%
% Author and programmer: Seyedali Mirjalili
%
% e-Mail: [email protected]
% [email protected]
%
% Homepage: http://www.alimirjalili.com
%
% Main paper:
% S. Mirjalili, Moth-Flame Optimization Algorithm: A Novel Nature-inspired Heuristic Paradigm,
% Knowledge-Based Systems, DOI: http://dx.doi.org/10.1016/j.knosys.2015.07.006
%_______________________________________________________________________________________________
% You can simply define your cost in a separate file and load its handle to fobj
% The initial parameters that you need are:
%__________________________________________
% fobj = @YourCostFunction
% dim = number of your variables
% Max_iteration = maximum number of generations
% SearchAgents_no = number of search agents
% lb=[lb1,lb2,...,lbn] where lbn is the lower bound of variable n
% ub=[ub1,ub2,...,ubn] where ubn is the upper bound of variable n
% If all the variables have equal lower bound you can just
% define lb and ub as two single numbers
% To run MFO: [Best_score,Best_pos,cg_curve]=MFO(SearchAgents_no,Max_iteration,lb,ub,dim,fobj)
%______________________________________________________________________________________________
function [Best_flame_score,Best_flame_pos,Convergence_curve]=MFO(N,Max_iteration,lb,ub,dim,fobj,ty,x)
disp('MFO is optimizing your problem');
%Initialize the positions of moths
Moth_pos=initialization(N,dim,ub,lb);
Convergence_curve=zeros(1,Max_iteration);
Iteration=1;
% Main loop
while Iteration<Max_iteration+1
% Number of flames Eq. (3.14) in the paper
Flame_no=round(N-Iteration*((N-1)/Max_iteration));
for i=1:size(Moth_pos,1)
% Check if moths go out of the search space and bring them back
Flag4ub=Moth_pos(i,:)>ub;
Flag4lb=Moth_pos(i,:)<lb;
Moth_pos(i,:)=(Moth_pos(i,:).*(~(Flag4ub+Flag4lb)))+ub.*Flag4ub+lb.*Flag4lb;
% Calculate the fitness of moths
Moth_fitness(1,i)=fobj(ty,x,Moth_pos(i,:));
end
if Iteration==1
% Sort the first population of moths
[fitness_sorted, I]=sort(Moth_fitness);
sorted_population=Moth_pos(I,:);
% Update the flames
best_flames=sorted_population;
best_flame_fitness=fitness_sorted;
else
% Sort the moths
double_population=[previous_population;best_flames];
double_fitness=[previous_fitness best_flame_fitness];
[double_fitness_sorted, I]=sort(double_fitness);
double_sorted_population=double_population(I,:);
fitness_sorted=double_fitness_sorted(1:N);
sorted_population=double_sorted_population(1:N,:);
% Update the flames
best_flames=sorted_population;
best_flame_fitness=fitness_sorted;
end
% Update the position of the best flame obtained so far
Best_flame_score=fitness_sorted(1);
Best_flame_pos=sorted_population(1,:);
previous_population=Moth_pos;
previous_fitness=Moth_fitness;
% a linearly decreases from -1 to -2 to calculate t in Eq. (3.12)
a=-1+Iteration*((-1)/Max_iteration);
for i=1:size(Moth_pos,1)
for j=1:size(Moth_pos,2)
if i<=Flame_no % Update the position of the moth with respect to its corresponding flame
% D in Eq. (3.13)
distance_to_flame=abs(sorted_population(i,j)-Moth_pos(i,j));
b=1;
t=(a-1)*rand+1;
% Eq. (3.12)
Moth_pos(i,j)=distance_to_flame*exp(b.*t).*cos(t.*2*pi)+sorted_population(i,j);
end
if i>Flame_no % Update the position of the moth with respect to one flame
% Eq. (3.13)
distance_to_flame=abs(sorted_population(i,j)-Moth_pos(i,j));
b=1;
t=(a-1)*rand+1;
% Eq. (3.12)
Moth_pos(i,j)=distance_to_flame*exp(b.*t).*cos(t.*2*pi)+sorted_population(Flame_no,j);
end
end
end
Convergence_curve(Iteration)=Best_flame_score;
% Display the iteration and best optimum obtained so far
if mod(Iteration,50)==0
disp(['At iteration ', num2str(Iteration), ' the best fitness is ', num2str(Best_flame_score)]);
end
Iteration=Iteration+1;
end
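% --- Usage sketch for this repository's variant (added for illustration) ---
% This version forwards two extra arguments (ty, x) to the cost function, matching
% obj.m / get_cost_mse(y, x, w); ty and x are assumed to be the target vector and
% the 437-row feature matrix prepared elsewhere in the project:
%   [lb, ub, dim, fobj] = obj();
%   [best_mse, best_w, curve] = MFO(30, 500, lb, ub, dim, fobj, ty, x);
%   plot(curve); xlabel('Iteration'); ylabel('Best MSE');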
|
github
|
sajeed786/Earthquake-Prediction-master
|
obj.m
|
.m
|
Earthquake-Prediction-master/obj.m
| 406 |
utf_8
|
81454c92b7086ffa726f5fd97b409dca
|
function [lb,ub,dim,fobj] = obj()
lb=-1;
ub=1;
dim=40;
fobj = @get_cost_mse;
end
function mse = get_cost_mse(y,x,w)
%fprintf('inside cost_mse function ');
pred_random = x * w';
O = ones(437,1);
act_op = O ./ (O + exp(-pred_random));
maxm = max(y);
minm = min(y);
denorm_op = ((act_op - 0.1) / 0.8) * (maxm - minm) + minm;
error = y - denorm_op;
error = error .^ 2;
mse = mean(error);
end
|
github
|
sajeed786/Earthquake-Prediction-master
|
initialization.m
|
.m
|
Earthquake-Prediction-master/initialization.m
| 2,272 |
utf_8
|
092dd93bd670c6c2b240828b47f0ef0a
|
%______________________________________________________________________________________________
% Moth-Flame Optimization Algorithm (MFO)
% Source codes demo version 1.0
%
% Developed in MATLAB R2011b(7.13)
%
% Author and programmer: Seyedali Mirjalili
%
% e-Mail: [email protected]
% [email protected]
%
% Homepage: http://www.alimirjalili.com
%
% Main paper:
% S. Mirjalili, Moth-Flame Optimization Algorithm: A Novel Nature-inspired Heuristic Paradigm,
% Knowledge-Based Systems, DOI: http://dx.doi.org/10.1016/j.knosys.2015.07.006
%_______________________________________________________________________________________________
% This function creates the first random population of moths
function X=initialization(SearchAgents_no,dim,ub,lb)
Boundary_no= size(ub,2); % number of boundaries
% If the boundaries of all variables are equal and the user enters a single
% number for both ub and lb
if Boundary_no==1
X=rand(SearchAgents_no,dim).*(ub-lb)+lb;
end
% If each variable has a different lb and ub
if Boundary_no>1
for i=1:dim
ub_i=ub(i);
lb_i=lb(i);
X(:,i)=rand(SearchAgents_no,1).*(ub_i-lb_i)+lb_i;
end
end
end
|
github
|
uncledickHe/basic_beamforming-master
|
espritBeamforming.m
|
.m
|
basic_beamforming-master/esprit/espritBeamforming.m
| 25,178 |
utf_8
|
f069d37c8ba26ff11e24f8ce3fc1cb1f
|
function espritBeamforming()
% ------------------------------------------------
% ESPRIT BEAMFORMING DEMO
% Simulation of several sources around the array
%
% Jose Ignacio Dominguez Simon
%
% Array Signal Processing
% Aalborg University - 2015
% ------------------------------------------------
%
% Edit the setup parameters in this file before
% running it.
%
% Joe.
% ---------------------- SIMULATION PARAMETERS -----------------------
AMOUNT_OF_SENSORS = 35;
SENSORS_POSITION_ORIGIN = [0, 0];
SENSORS_DISPLACEMENT_VECTOR = [0.05, 0]; % <-- if the separation between sensors is too big, you get spatial aliasing unless you reduce the frequency of the sources
SENSOR_NOISE = -60;
PROPAGATION_SPEED = 340;
SAMPLE_RATE = 44100;
AMOUNT_OF_SOURCES = 3;
inputSignal_length = 20000;
sourcesAngles_origin = [0 45 90];
sourcesAngles_speed = [1 1 1];
SHOW_SOURCE_SIGNALS = 0;
% freq = (PROPAGATION_SPEED/(2 * norm(SENSORS_DISPLACEMENT_VECTOR))) /1.7;
freq = 1133.3333;
inputSignal_freqs = [freq, freq/1.47, freq/1.99];
inputSignal_amplitudes = [1, 1, 1];
% --------------------------- MAIN PROGRAM ---------------------------
inputSignals = zeros(AMOUNT_OF_SOURCES, inputSignal_length);
sensorCoordinates = zeros(AMOUNT_OF_SENSORS, 2);
disp(sprintf(' ------------------------------------------------\n ESPRIT BEAMFORMING DEMO\n\n Jose Ignacio Dominguez Simon\n Tobias Van Baarsel\n\n Array Signal Processing\n Aalborg University - 2015\n ------------------------------------------------\n'));
figureHandler = figure('name', 'ESPRIT algorithm interactive demo - I. D. Simon, T. V. Baarsel.', 'NumberTitle','off', 'menubar', 'none');
frameCounter = 1;
updateSourceSignals();
updateSensorsPlacement();
currentAngles = sourcesAngles_origin(1 : AMOUNT_OF_SOURCES);
% Find amount of needed iterations (based on the sources' initial positions)
amountOfIterations = min((180 - currentAngles(1 : AMOUNT_OF_SOURCES)) ./ sourcesAngles_speed(1 : AMOUNT_OF_SOURCES));
% Initialize all data variables
estimatedAngles = zeros(AMOUNT_OF_SOURCES, amountOfIterations);
estimationErrors = zeros(2, 1, AMOUNT_OF_SOURCES);
eigenvectors = zeros(AMOUNT_OF_SENSORS, AMOUNT_OF_SOURCES, amountOfIterations);
C0s = zeros(AMOUNT_OF_SOURCES, AMOUNT_OF_SOURCES, amountOfIterations);
covariances = zeros(AMOUNT_OF_SENSORS, AMOUNT_OF_SENSORS, amountOfIterations);
% Initialize structure for all origin data
dataToProcess = struct('AMOUNT_OF_SENSORS', AMOUNT_OF_SENSORS, ...
'SENSORS_POSITION_ORIGIN', SENSORS_POSITION_ORIGIN, ...
'SENSORS_DISPLACEMENT_VECTOR', SENSORS_DISPLACEMENT_VECTOR, ...
'SENSOR_NOISE', SENSOR_NOISE, ...
'PROPAGATION_SPEED', PROPAGATION_SPEED, ...
'SAMPLE_RATE', SAMPLE_RATE, ...
'AMOUNT_OF_SOURCES', AMOUNT_OF_SOURCES, ...
'SOURCE_ANGLES', currentAngles(1 : AMOUNT_OF_SOURCES), ...
'inputSignals', inputSignals(1 : AMOUNT_OF_SOURCES, :), ...
'inputSignal_freqs', inputSignal_freqs(1 : AMOUNT_OF_SOURCES));
disp(sprintf('Processing data from setup. Please, wait ...'));
for currentIteration = 1 : amountOfIterations
% Update new angle(s) to process
currentAngles = currentAngles + sourcesAngles_speed(1 : AMOUNT_OF_SOURCES);
dataToProcess.SOURCE_ANGLES = currentAngles(1 : AMOUNT_OF_SOURCES);
% Process the current angle(s)
processedData = processData(dataToProcess);
% Accumulate results from processing current angle(s)
estimatedAngles(:, currentIteration) = processedData.estimatedAngles;
estimationErrors(1, currentIteration, :) = currentAngles;
estimationErrors(2, currentIteration, :) = processedData.currentErrors;
eigenvectors(:, :, currentIteration) = processedData.U;
C0s(:, :, currentIteration) = processedData.C0;
covariances(:, :, currentIteration) = processedData.covarianceMatrix;
% Adds additional fields to the structure, needed to plot the data
processedData.sensorCoordinates = sensorCoordinates;
processedData.AMOUNT_OF_SOURCES = AMOUNT_OF_SOURCES;
processedData.AMOUNT_OF_SENSORS = AMOUNT_OF_SENSORS;
processedData.SOURCE_ANGLES = currentAngles;
processedData.SENSORS_DISPLACEMENT_VECTOR = SENSORS_DISPLACEMENT_VECTOR;
processedData.SENSOR_NOISE = SENSOR_NOISE;
processedData.inputSignal_freqs = inputSignal_freqs;
processedData.estimationError = estimationErrors;
processedData.U1 = processedData.U(1 : size(processedData.U, 1) - 1, :);
processedData.U2 = processedData.U(2 : size(processedData.U, 1), :);
processedData.inputSignals = inputSignals;
processedData.SAMPLE_RATE = SAMPLE_RATE;
processedData.PROPAGATION_SPEED = PROPAGATION_SPEED;
processedData.plotLimit = currentIteration;
processedData.sourcesAngles_speed = sourcesAngles_speed;
% Show current iteration plots
if ~updatePlot(figureHandler, processedData)
return
end
% Save current frame for the animation
currentFrame = getframe(figureHandler);
if frameCounter == 1
[imageToSave, colorsMap] = rgb2ind(currentFrame.cdata, 256, 'nodither');
else
[imageToSave(:, :, 1, frameCounter), colorsMap] = rgb2ind(currentFrame.cdata, colorsMap, 'nodither');
end
frameCounter = frameCounter + 1;
end
% Saves animation to file
disp('Saving GIF ...')
fileNumber = 1;
filenameToSave = sprintf('simulation_animation_%d.gif', fileNumber);
while exist(filenameToSave, 'file')
fileNumber = fileNumber + 1;
filenameToSave = sprintf('simulation_animation_%d.gif', fileNumber);
end
imwrite(imageToSave, colorsMap, filenameToSave, 'DelayTime', 0, 'LoopCount', 0);
disp(sprintf('Saved to "%s"', filenameToSave));
disp(sprintf('Initial processing done! Use left and right arrows to navigate ...\n'))
% Wires the callback for the keyboard presses
if ~isempty(figureHandler) && ~ishandle(figureHandler)
disp('[main function] Closed window. Terminating.')
return
end
set(figureHandler,'KeyPressFcn',{@lineCallback});
function lineCallback(~, second)
shouldUpdatePlot = 0;
if strcmp(second.Key, 'leftarrow')
if processedData.plotLimit > 1
processedData.plotLimit = processedData.plotLimit -1;
shouldUpdatePlot = 1;
end
end
if strcmp(second.Key, 'rightarrow')
if processedData.plotLimit < amountOfIterations
processedData.plotLimit = processedData.plotLimit +1;
shouldUpdatePlot = 1;
end
end
if shouldUpdatePlot
% Update data for the plot
processedData.SOURCE_ANGLES = sourcesAngles_origin(1 : AMOUNT_OF_SOURCES) + processedData.plotLimit * sourcesAngles_speed(1 : AMOUNT_OF_SOURCES);
processedData.estimatedAngles = estimatedAngles(:, processedData.plotLimit);
processedData.covarianceMatrix = covariances(:, :, processedData.plotLimit);
processedData.U = eigenvectors(:, :, processedData.plotLimit);
processedData.U1 = processedData.U(1 : size(processedData.U, 1) - 1, :);
processedData.U2 = processedData.U(2 : size(processedData.U, 1), :);
processedData.C0 = C0s(:, :, processedData.plotLimit);
disp(sprintf('Loading data from iteration %d of %d ...', processedData.plotLimit, amountOfIterations));
% Refresh plot window
updatePlot(figureHandler, processedData);
end
end
% ----------------------- INTERNAL FUNCTIONS -----------------------
function updateSourceSignals()
inputSignals = zeros(AMOUNT_OF_SOURCES, inputSignal_length);
% Generate a sine signal with a hamming window
rng(0);
inputSignals(1,:) = sin(2 * pi * ...
inputSignal_freqs(1) * [1 : inputSignal_length] / SAMPLE_RATE) ...
.* (hamming(inputSignal_length)'.^2) ...
* inputSignal_amplitudes(1);
rng(0);
inputSignals(2,:) = sin(2 * pi * ...
inputSignal_freqs(2) * [1 : inputSignal_length] / SAMPLE_RATE) ...
.* (hamming(inputSignal_length)'.^2) ...
* inputSignal_amplitudes(2);
rng(0);
inputSignals(3,:) = sin(2 * pi * ...
inputSignal_freqs(3) * [1 : inputSignal_length] / SAMPLE_RATE) ...
.* (hamming(inputSignal_length)'.^2) ...
* inputSignal_amplitudes(3);
% ------- PLOT (AND SAVE IMAGE) OF SOURCE SIGNALS WITH NOISE -------
if SHOW_SOURCE_SIGNALS
SENSOR_NOISE = 3;
figureHandler = figure();
set(figureHandler, 'Position', [500 100 1000 800])
timeAxis = [1 : length(inputSignals(1,:))] / SAMPLE_RATE;
for i = 1 : 3
subplot(3,2,(2*(i-1) + 1))
plot(timeAxis, inputSignals(i,:) + randn(1, inputSignal_length) * 10^(SENSOR_NOISE/20));
ylim([-3 3])
xlim([0 max(timeAxis)])
grid
xlabel('Time [s]', 'FontSize', 14);
ylabel('Amplitude [.]', 'FontSize', 14);
set(gca, 'FontSize', 14);
legend({sprintf('Signal %d: %.1f [Hz]', i, inputSignal_freqs(i))})
end
subplot(3,2,1)
title(sprintf('Source signals.\nAdditive white noise at %.1f [dB]', SENSOR_NOISE), 'FontSize', 14)
timeRange = [2500 : 4000];
timeAxis = [1 : length(inputSignals(1,:))] / SAMPLE_RATE;
for i = 1 : 3
subplot(3, 2, (2 * (i)))
plot(timeAxis(timeRange), inputSignals(i, timeRange) + randn(1, length(timeRange)) * 10^(SENSOR_NOISE/20));
ylim([-3 3])
xlim([min(timeRange) max(timeRange)]/SAMPLE_RATE)
grid
xlabel('Time [s]', 'FontSize', 14);
ylabel('Amplitude [.]', 'FontSize', 14);
set(gca, 'FontSize', 14);
legend({sprintf('Signal %d: %.1f [Hz]', i, inputSignal_freqs(i))})
end
subplot(3,2,2)
title(sprintf('Source signals (zoom).\nAdditive white noise at %.1f [dB]', SENSOR_NOISE), 'FontSize', 14)
currentFrame = getframe(figureHandler);
[imageToSave, colorsMap] = rgb2ind(currentFrame.cdata, 256, 'nodither');
imwrite(imageToSave, colorsMap, sprintf('source_signals_noise_%d.gif', SENSOR_NOISE), 'DelayTime', 0, 'LoopCount', 0);
end
end
% ------------------------ SENSOR PLACEMENT ------------------------
function updateSensorsPlacement()
% Places the sensors according the array geometry
for index = 1 : AMOUNT_OF_SENSORS
% Uniform linear array
currentSensorCoordinates = SENSORS_POSITION_ORIGIN + ...
(index - 1) * SENSORS_DISPLACEMENT_VECTOR;
sensorCoordinates(index, :) = currentSensorCoordinates;
end
end
end
function returnedResult = updatePlot(figureHandler, processedData)
VERTICAL_ERROR_RANGE = [-100 100];
signalColors = [0.5 0.1 0.2
0.1 0.2 0.5
0.2 0.5 0.1];
% Checks if the plot window still exists
if ~isempty(figureHandler) && ~ishandle(figureHandler)
disp('[updatePlot() function] Closed window. Terminating.')
returnedResult = 0;
return
end
% Cleans window and sets size
clf
windowPosition = get(figureHandler, 'Position');
set(figureHandler, 'Position', [windowPosition(1) windowPosition(2) 1400 600])
% --------------- SOURCES POSITION AND ESTIMATIONS ---------------
subplot(2,3,1)
% Trick to display the legend as desired
plot([10 11], [10 11], '--', 'LineWidth', 4, 'Color', signalColors(1, :))
hold on
plot([10 11], [10 11], 'LineWidth', 12, 'Color', signalColors(1, :) + 0.5)
legend({'Estimated direction', 'Real direction'}, 'Location', 'southeast', 'FontSize', 12)
% Plot the half circumference
circleFunction = [cos([0 : 180] / 180 * pi);
sin([0 : 180] / 180 * pi)];
plot(circleFunction(1,:), circleFunction(2,:), 'LineWidth', 2);
xlim([-1.3 1.3])
ylim([-0.4 1.2])
grid
% Plot the array sensors
for i = 1 : processedData.AMOUNT_OF_SENSORS
plot(processedData.sensorCoordinates(i,1) / 2, processedData.sensorCoordinates(i,2) / 2, 'o', 'Color', [0.2 0.5 0.8]);
end
% Plot the source(s) line(s)
for i = 1 : processedData.AMOUNT_OF_SOURCES
x_actual = cos(processedData.SOURCE_ANGLES(i) / 180 * pi) * 0.95;
y_actual = sin(processedData.SOURCE_ANGLES(i) / 180 * pi) * 0.95;
plot([0 x_actual], [0 y_actual], 'LineWidth', 12, 'Color', signalColors(i, :) + 0.5)
plot([x_actual / 0.95], [y_actual / 0.95], 'x', 'MarkerSize', 12, 'LineWidth', 8, 'Color', [0.2 .2 .8])
plot([x_actual / 0.95], [y_actual / 0.95], 'x', 'MarkerSize', 8, 'LineWidth', 3, 'Color', [0.8 .2 .3])
text(x_actual * 1.2, y_actual * 1.2, sprintf('%.0f', processedData.SOURCE_ANGLES(i)), 'FontSize', 14, 'HorizontalAlign', 'center');
end
% Plot the estimated line(s)
for i = 1 : processedData.AMOUNT_OF_SOURCES
if imag(processedData.estimatedAngles(i)) == 0
x_estimated = cos(processedData.estimatedAngles(i)) * 0.95;
y_estimated = sin(processedData.estimatedAngles(i)) * 0.95;
plot([0 x_estimated], [0 y_estimated], '--', 'LineWidth', 4, 'Color', signalColors(i, :))
else
x_estimated = cos(real(processedData.estimatedAngles(i))) * 0.95;
y_estimated = sin(real(processedData.estimatedAngles(i))) * 0.95;
plot([0 x_estimated], [0 y_estimated], '--', 'LineWidth', 4, 'Color', signalColors(i, :))
text(x_estimated * 0.5 + 0.05, y_estimated * 0.5 + 0.05, ' Wrong!', 'FontSize', 14, 'Color', [0.8 .2 .3])
% pause(1)
end
end
% text(1.04, 1.1, sprintf('Amount of sensors: %d', processedData.AMOUNT_OF_SENSORS), 'FontSize', 12)
% text(1.04, 1, sprintf('Noise: %.1f [dB]', processedData.SENSOR_NOISE), 'FontSize', 12)
% for i = 1 : processedData.AMOUNT_OF_SOURCES
% text(1.04, 1-(i *0.1), sprintf('Freq. #%d: %.1f [Hz]', i, processedData.inputSignal_freqs(i)), 'FontSize', 12);
% end
set(gca, 'FontSize', 11)
set(gca,'XTickLabel', {});
set(gca,'YTickLabel', {});
title(sprintf('Rotating source detected angle'), 'FontSize', 14)
% -------------------- ANGLE ESTIMATION ERROR --------------------
subplot(2,3,4)
for i = 1 : processedData.AMOUNT_OF_SOURCES
plot(processedData.estimationError(1, :, i), real(processedData.estimationError(2, :, i)), 'LineWidth', 2, 'Color', signalColors(i, :));
hold on
end
for i = 1 : processedData.AMOUNT_OF_SOURCES
plot(processedData.estimationError(1, processedData.plotLimit, i), real(processedData.estimationError(2, processedData.plotLimit, i)), 'x', 'MarkerSize', 10, 'LineWidth', 2, 'Color', signalColors(i, :));
end
xlim([0 180])
ylim(VERTICAL_ERROR_RANGE)
set(gca, 'FontSize', 11)
xlabel('Actual source angle [degrees]', 'FontSize', 14)
ylabel('Estimation error [degrees]', 'FontSize', 14)
grid
title('Angle estimation error', 'FontSize', 14)
% ------------------- ARRAY COVARIANCE MATRIX --------------------
subplot(2,3,2)
realPart = real(processedData.covarianceMatrix);
imagPart = imag(processedData.covarianceMatrix);
anglePart = angle(processedData.covarianceMatrix);
realPart = (realPart - min(realPart(:)));
realPart = realPart / max(realPart(:));
imagPart = (imagPart - min(imagPart(:)));
imagPart = imagPart / max(imagPart(:));
anglePart = (anglePart - min(anglePart(:)));
anglePart = anglePart / max(anglePart(:));
imagesc([realPart imagPart anglePart ])
set(gca,'YDir','normal')
axisValues = [[1 : processedData.AMOUNT_OF_SENSORS] [1 : processedData.AMOUNT_OF_SENSORS] [1 : processedData.AMOUNT_OF_SENSORS]];
valuesXaxis = num2cell(axisValues);
ax = gca;
ax.XTickLabel = valuesXaxis(ax.XTick);
hold on
plot([0 processedData.AMOUNT_OF_SENSORS], [0 processedData.AMOUNT_OF_SENSORS], 'LineWidth', 1.5, 'Color', [1 0 0])
plot([processedData.AMOUNT_OF_SENSORS processedData.AMOUNT_OF_SENSORS*2], [0 processedData.AMOUNT_OF_SENSORS], 'LineWidth', 1.5, 'Color', [1 0 0])
plot([processedData.AMOUNT_OF_SENSORS*2 processedData.AMOUNT_OF_SENSORS*3], [0 processedData.AMOUNT_OF_SENSORS], 'LineWidth', 1.5, 'Color', [1 0 0])
% Plot separation lines
plot([processedData.AMOUNT_OF_SENSORS processedData.AMOUNT_OF_SENSORS], [0 processedData.AMOUNT_OF_SENSORS], 'LineWidth', 2, 'Color', [1 1 1]);
plot([processedData.AMOUNT_OF_SENSORS processedData.AMOUNT_OF_SENSORS] * 2, [0 processedData.AMOUNT_OF_SENSORS], 'LineWidth', 2, 'Color', [1 1 1]);
set(gca, 'FontSize', 11)
xlabel('Sensor number', 'FontSize', 14)
ylabel('Sensor number', 'FontSize', 14)
title(sprintf('Array covariance matrix\nReal part Imag part Angle'), 'FontSize', 14)
% -------------- EIGENVECTORS OF COVARIANCE MATRIX ---------------
subplot(2,3,5)
eigenvectorsMatrix = angle([processedData.U2, fliplr(processedData.U1 * processedData.C0)]);
imagesc(eigenvectorsMatrix)
set(gca,'YDir','normal')
hold on
cells = get(gca,'XTickLabel');
axisNumbers = zeros(length(cells), 1);
for i = 1 : length(cells)
axisNumbers(i) = str2double(cell2mat(cells(i)));
end
center_x_pos = (max(axisNumbers) + min(axisNumbers))/2;
plot([center_x_pos center_x_pos], [0 size(eigenvectorsMatrix, 1)], 'Color', [1 1 1]);
set(gca, 'FontSize', 11)
ylabel('Eigenvector length = Sources count', 'FontSize', 14)
set(gca,'XTickLabel', {});
title(sprintf('Computed eigenvectors checking\n |<------------------ U -----------------> | <----------------- U'' ----------------->| '), 'FontSize', 14)
% ----------------------- SOURCE SIGNALS -------------------------
subplot(2,3,6)
timeAxis = [1 : length(processedData.inputSignals(1,:))] / processedData.SAMPLE_RATE;
for i = 1 : processedData.AMOUNT_OF_SOURCES
rng(0);
plot(timeAxis, processedData.inputSignals(i,:) + randn(1, length(processedData.inputSignals)) * 10^(processedData.SENSOR_NOISE/20) - 2*(i - 1));
hold on
end
ylim([-1.5-2*(i-1) 1.5])
xlim([0 max(timeAxis)])
grid
xlabel('Time [s]', 'FontSize', 14);
ylabel('Amplitude [.]', 'FontSize', 14);
set(gca, 'FontSize', 11);
legendCells = cell(processedData.AMOUNT_OF_SOURCES, 1);
for i = 1 : processedData.AMOUNT_OF_SOURCES
legendCells{i} = sprintf('Signal %d: %.1f [Hz]', i, processedData.inputSignal_freqs(i));
end
legend(legendCells, 'FontSize', 12)
% Places plots in their desired position
subplot(2,3,1)
set(gca,'position', [0.03 0.55 0.3 0.4])
subplot(2,3,2)
set(gca,'position', [0.37 0.55 0.3 0.37])
subplot(2,3,4)
set(gca,'position', [0.04 0.06 0.29 0.42])
subplot(2,3,5)
set(gca,'position', [0.37 0.06 0.3 0.355])
subplot(2,3,6)
set(gca,'position', [0.71 0.06 0.28 0.355])
% Displays text with details of setup
textToShow = sprintf(' Current setup details:');
textToShow = strcat(textToShow, sprintf('\n\n Amount of sources: %d', processedData.AMOUNT_OF_SOURCES));
textToShow = sprintf('%s\n Amount of sensors: %d\n', textToShow, processedData.AMOUNT_OF_SENSORS);
for i = 1 : processedData.AMOUNT_OF_SOURCES
textToShow = sprintf('%s\n Source #%d angle: %d [deg]', textToShow, i, processedData.SOURCE_ANGLES(i));
textToShow = sprintf('%s\n Estimated angle #%d: %.0f [deg]', textToShow, i, processedData.estimatedAngles(i) * 180 / pi);
end
textToShow = sprintf('%s\n\n Sensors distance: %.2f [m]', textToShow, norm(processedData.SENSORS_DISPLACEMENT_VECTOR));
textToShow = strcat(textToShow, sprintf('\n Critical frequency: %.1f [Hz]', processedData.PROPAGATION_SPEED / norm(processedData.SENSORS_DISPLACEMENT_VECTOR) /2));
textToShow = strcat(textToShow, sprintf('\n Propagation speed: %.f [m/s]', processedData.PROPAGATION_SPEED));
textToShow = strcat(textToShow, sprintf('\n Sample rate: %.f [S/s]', processedData.SAMPLE_RATE));
textToShow = sprintf('%s\n Additive noise level: %.0f [dB]\n', textToShow, processedData.SENSOR_NOISE);
% textToShow = sprintf('%s\n Covariance matrix rank: %d', textToShow, rank(processedData.covarianceMatrix));
for i = 1 : processedData.AMOUNT_OF_SOURCES
textToShow = sprintf('%s\n Source %d ang. speed: %.0f [deg/iteration]', textToShow, i, processedData.sourcesAngles_speed(i));
end
textToShow = sprintf('%s\n', textToShow);
for i = 1 : processedData.AMOUNT_OF_SOURCES
textToShow = sprintf('%s\n Signal %d frequency: %.1f [Hz]', textToShow, i, processedData.inputSignal_freqs(i));
end
uicontrol('Style','text', 'Position',[975 280 400 300], 'String', textToShow, 'FontSize', 13, 'HorizontalAlignment', 'left');
% Forces drawing of window
drawnow
returnedResult = 1;
end
function returnedData = processData(dataToProcess)
% Create sensors' "recordings"
sensorSignals = zeros(dataToProcess.AMOUNT_OF_SENSORS, length(dataToProcess.inputSignals));
% Recreate the Vandermonde vector with associated delays
for currentSource = 1 : dataToProcess.AMOUNT_OF_SOURCES
% Construct the new Vandermonde vector
if dataToProcess.SOURCE_ANGLES(currentSource) == 90
theta = 0;
else
theta = dataToProcess.SOURCE_ANGLES(currentSource) / 180 * pi;
end
lambda = dataToProcess.PROPAGATION_SPEED / dataToProcess.inputSignal_freqs(currentSource);
xsi = norm(dataToProcess.SENSORS_DISPLACEMENT_VECTOR) / lambda * cos(theta);
vandermonde = exp(1i * 2 * pi * xsi * [0 : dataToProcess.AMOUNT_OF_SENSORS-1]');
for currentSensor = 1 : dataToProcess.AMOUNT_OF_SENSORS
sensorSignals(currentSensor, :) = sensorSignals(currentSensor, :) + ...
dataToProcess.inputSignals(currentSource, :) * vandermonde(currentSensor) + ...
randn(1, length(dataToProcess.inputSignals)) * 10^(dataToProcess.SENSOR_NOISE/20);
end
end
% Estimation of covariance matrix from sensed signals
covarianceMatrix = zeros(dataToProcess.AMOUNT_OF_SENSORS);
timeRange = [1, length(dataToProcess.inputSignals)];
for t = timeRange(1) : timeRange(2)
covarianceMatrix = covarianceMatrix + ...
sensorSignals(:, t) * sensorSignals(:, t)';
end
covarianceMatrix = covarianceMatrix / (timeRange(2) - timeRange(1) + 1);
% Obtain eigenvectors from the covariance matrix -> U
[eigenvectors, ~, flags] = eigs(rot90(covarianceMatrix), dataToProcess.AMOUNT_OF_SOURCES);
if flags
disp('[Warning] Not all eigenvectors of the covariance matrix converged!');
end
% Form U1 and U2 from U
U = eigenvectors;
U1 = U(1:size(U,1)-1, :);
U2 = U(2:size(U,1), :);
% Solve C for U2 = U1 * C
C0 = inv(corr(U1)) * corr(U1, U2); % <-- this seems to be the right order for the: corr(U1, U2)
if ~any(isnan(C0(:))) && ~any(isinf(C0(:)))
% Compute eta, the eigenvalues of C (as many as sources)
[~, eigenvalues] = eigs(C0);
eta = diag(eigenvalues);
% Find the estimated angles
xsi = angle(eta) / (2 * pi);
lambdas = dataToProcess.PROPAGATION_SPEED ./ dataToProcess.inputSignal_freqs(1 : dataToProcess.AMOUNT_OF_SOURCES)';
estimatedAngles = sort(acos(lambdas ./ norm(dataToProcess.SENSORS_DISPLACEMENT_VECTOR) .* xsi));
% Compute the error for the current source(s) angle(s)
currentErrors = (estimatedAngles - (dataToProcess.SOURCE_ANGLES(1 : dataToProcess.AMOUNT_OF_SOURCES) / 180 * pi)') / pi * 180;
else
disp('TODO: finish this here ...');
end
returnedData = struct('estimatedAngles', estimatedAngles, ...
'currentErrors', currentErrors, ...
'U', U, ...
'C0', C0, ...
'covarianceMatrix', covarianceMatrix);
end
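% --- Usage note (added for illustration) ---
% Edit the SIMULATION PARAMETERS block at the top of this file, then run:
%   espritBeamforming();   % opens the interactive figure and saves simulation_animation_N.gif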
|
github
|
yluthu/fpga-nn-experiment-master
|
hexdump.m
|
.m
|
fpga-nn-experiment-master/matlab/hexdump.m
| 1,013 |
utf_8
|
0a4bda98e31a5a278a5b4b452bc39400
|
% dump data as single precision numbers into Intel HEX format
function [] = hexdump(data, filename)
data = single(data);
line = [];
twos = [];
result = [];
for i = 1:length(data)
% size address type data
line = [':04', dec2hex(i - 1, 4), '00', upper(num2hex(data(i)))];
% calculate checksum
ch = 0;
for oo = 1:(length(line) - 1) / 2
ch = ch + hex2dec(line((2 * oo):(2 * oo + 1)));
end
sh = dec2bin(ch, 8);
k = 1;
for j = length(sh) - 7:length(sh)
if sh(j) == '1'
twos(k) = '0';
else
twos(k) = '1';
end
k = k + 1;
end
s = dec2hex((bin2dec(char(twos)) + 1), 2);
result = [result, line, s(end - 1:end), char(13), char(10)];
end
result = [result, ':00000001FF', char(13), char(10)];
f = fopen(filename, 'w');
fprintf(f, '%c', result);
fclose(f);
end
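% --- Usage sketch (added for illustration; the filename is hypothetical) ---
% Write three single-precision values as Intel HEX records, one word per address:
%   w = [0.5, -1.25, 3.0];
%   hexdump(w, 'weights.hex');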
|
github
|
eslamtharwat/2-IRIS-Detection-and-Recognition-master
|
face.m
|
.m
|
2-IRIS-Detection-and-Recognition-master/face.m
| 955 |
utf_8
|
95628a1095dc996f9ace708adc32baff
|
% function [face,skin_region]=face(I);
%
% skin_region=skin(I);
%
% se = strel('disk',3);
% dil = imdilate(skin_region,se); % morphologic dilation
% d2 = imfill(dil, 'holes'); % morphologic fill
% face = bwdist(~d2); % computing minimal euclidean distance to non-white pixel
% figure;imshow(face,[]);
function [face_a,skin_region]=face(I);
skin_region=skin(I);
se = strel('disk',5);
se2 = strel('disk',3);
er = imerode(skin_region,se2);
cl = imclose(er,se);
dil = imdilate(cl,se); % morphologic dilation
dil = imdilate(dil,se);
cl2 = imclose(dil,se);
d2 = imfill(cl2, 'holes'); % morphologic fill
facearea = bwdist(~d2); % computing minimal euclidean distance to non-white pixel
% figure;imshow(facearea,[]);
% imshow(d2);
face(:,:,1)=double(I(:,:,1)).*d2;
face(:,:,2)=double(I(:,:,2)).*d2;
face(:,:,3)=double(I(:,:,3)).*d2;
face_a=uint8(face);
% figure;imshow(face_a);
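% --- Usage sketch (added for illustration; requires this repository's skin.m) ---
%   I = imread('portrait.jpg');            % hypothetical RGB input image
%   [face_a, skin_region] = face(I);
%   figure; imshow(face_a); title('Masked face region');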
|
github
|
danielemarinazzo/multiscaleGrangerCausality-master
|
egc_SetLag.m
|
.m
|
multiscaleGrangerCausality-master/egc_SetLag.m
| 795 |
utf_8
|
05c6dc2b94538906b60bf5c932dddab6
|
%% Sets the vector of indexes for series and lags to be used in Conditional Entropy estimation
% inputs:
% p: vector of embedding dimensions (one for each series; if 0, the series is excluded)
% tau: vector of embedding delays (one for each series)
% u: vector of propagation times (one for each series)
% zerolag: for each series, 1 if zerolag effect is wanted, 0 if not
function [V]=egc_SetLag(p,tau,u,zerolag)
%% for internal test (leave commented)
% clear; close all; clc;
% % % iX=1;
% % % iY=2;
% p=[0 4 0 0]';
% u=[3 1 7 1]';
% tau=[1 1 7 1]';
% zerolag=[0 0 0 0]';
%% 2) Set time series and lags
M=length(p);
V=[];
for m=1:M
if zerolag(m)==1
V=[V; [m 0]];
end
for k=1:p(m)
V=[V; [m u(m)+tau(m)*(k-1)]];
end
end
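% --- Usage sketch (added for illustration) ---
% Two series, embedding dimension 2, unit lag and delay, no zero-lag terms:
%   p = [2 2]'; tau = [1 1]'; u = [1 1]'; zerolag = [0 0]';
%   V = egc_SetLag(p, tau, u, zerolag)
%   % V = [1 1; 1 2; 2 1; 2 2] -> lags 1 and 2 of series 1 and of series 2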
|
github
|
danielemarinazzo/multiscaleGrangerCausality-master
|
iss_varma2iss.m
|
.m
|
multiscaleGrangerCausality-master/iss_varma2iss.m
| 1,210 |
utf_8
|
a0ce27bd483b148861a8982d8af50c31
|
%% VARMA with B0 term to (Innovations form) State Space parameters
% computes innovations form parameters for a state space model from VARMA
% parameters using Aoki's method - this version allows for zero-lag MA coefficients
function [A,C,K,R,lambda0] = iss_varma2iss(Am,Bm,V,B0)
% INPUT: VARMA parameters Am, Bm, V=cov(U)
% OUTPUT: innovations form SS parameters A, C, K, R
%%%%% internal test
%variables to be passed are Am, Bm, B0, V=Su
% clear; close all; clc;
% Am=[0.9 0 0 0.5; 0 0.6 0.2 0];
% Bm=[0.5 0; 0 0.5]; B0=Bm./5;
% V=eye(2);
%
M = size(Am,1); %dimension of observed process
p=floor(size(Am,2)/M); %number of AR lags
q=floor(size(Bm,2)/M); %number of MA lags
L=M*(p+q); % dimension of state process (SS order)
C=[Am Bm];
R=B0*V*B0';
Ip=eye(M*p);
Iq=eye(M*q);
A11=[Am;Ip(1:end-M,:)];
if q==0
A=A11;
K=[eye(M); zeros(M*(p-1),M)];
else
A12=[Bm;zeros(M*(p-1),M*q)];
A21=zeros(M*q,M*p);
A22=[zeros(M,M*q); Iq(1:end-M,:)];
A=[A11 A12; A21 A22];
K=[eye(M); zeros(M*(p-1),M); inv(B0); zeros(M*(q-1),M)];
end
% determine the variance of the process lambda0=E[Yn Yn']
O=dlyap(A,K*R*K');
lambda0=C*O*C'+R;
end
|
github
|
danielemarinazzo/multiscaleGrangerCausality-master
|
egc_LinReg_Ftest.m
|
.m
|
multiscaleGrangerCausality-master/egc_LinReg_Ftest.m
| 1,710 |
utf_8
|
c67017c67f4810a95b07f8d1dc4c6bbf
|
%% Statistics of difference in conditional entropies estimated through linear regression
% Upu: residuals of the unrestricted regression
% Upr: residuals of the restricted regression
% Nu: number of coefficients for unrestricted regression
% Nr: number of coefficients for restricted regression
function [p_value] = egc_LinReg_Ftest(Upu,Upr,Nu,Nr)
% F-statistic for significance
RSSu=sum(Upu.^2);
RSSr=sum(Upr.^2);
n1=Nu-Nr; %number of restrictions
% n2=(length(Upu)-size(Vu,1)); % n. of observations - n. tot of coeffs
n2=length(Upr)-Nu; % n. of observations - n. tot of coeffs
f_value=((RSSr-RSSu)/n1)/(RSSu/n2);
p_value=1-cdff_fun(f_value,n1,n2); % function of Seth Toolbox
end
%% function of Seth Toolbox: CDF computes the cumulative 'F' distribution
function p = cdff_fun(x,v1,v2)
p = 0;
t = (v1 <= 0 | v2 <= 0 | isnan(x) | isnan(v1) | isnan(v2));
p(t) = NaN;
s = (x==Inf) & ~t;
if any(s)
p(s) = 1;
t = t | s;
end
% Compute P when X > 0.
k = find(x > 0 & ~t & isfinite(v1) & isfinite(v2));
if any(k),
xx = x(k)./(x(k) + v2(k)./v1(k));
p(k) = betainc(xx, v1(k)/2, v2(k)/2);
end
if any(~isfinite(v1(:)) | ~isfinite(v2(:)))
k = find(x > 0 & ~t & isfinite(v1) & ~isfinite(v2) & v2>0);
if any(k)
p(k) = chi2cdf(v1(k).*x(k),v1(k));
end
k = find(x > 0 & ~t & ~isfinite(v1) & v1>0 & isfinite(v2));
if any(k)
p(k) = 1 - chi2cdf(v2(k)./x(k),v2(k));
end
k = find(x > 0 & ~t & ~isfinite(v1) & v1>0 & ~isfinite(v2) & v2>0);
if any(k)
p(k) = (x(k)>=1);
end
end
end
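%% Usage sketch (hypothetical data, leave commented): F-test on two nested linear models.
% x=randn(200,1); y=0.8*x+randn(200,1);
% X=[x ones(200,1)];
% Upu=y-X*(X\y); % unrestricted: slope + intercept (Nu=2 coefficients)
% Upr=y-mean(y); % restricted: intercept only (Nr=1 coefficient)
% p_ex=egc_LinReg_Ftest(Upu,Upr,2,1); % a small p-value is expected here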
|
github
|
danielemarinazzo/multiscaleGrangerCausality-master
|
eMVAR_MVARfilter.m
|
.m
|
multiscaleGrangerCausality-master/eMVAR_MVARfilter.m
| 620 |
utf_8
|
83b62a141b725ae83f035fc783ccfd21
|
%% FILTER A VECTOR NOISE WITH A SPECIFIED STRICTLY CAUSAL MVAR MODEL: Y(n)=A(1)Y(n-1)+...+A(p)Y(n-p)+U(n)
%%% INPUT
% A=[A(1)...A(p)]: M*pM matrix of the MVAR model coefficients (strictly causal model)
% U: M*N matrix of innovations
%%% OUTPUT
% Y: M*N matrix of simulated time series
function [Y]=eMVAR_MVARfilter(A,U)
N=length(U);
M=size(A,1);
p=size(A,2)/M;
% Y(n)=A(1)Y(n-1)+...+A(p)Y(n-p)+U(n)
Y=zeros(M,N);
for n=1:N
for k=1:p
if n-k<=0, break; end; % if n<=p, stop when k>=n
Y(:,n)=Y(:,n) + ( A(:,(k-1)*M+(1:M)) * Y(:,n-k) );
end
Y(:,n)=Y(:,n)+U(:,n);
end
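%% Usage sketch (hypothetical bivariate VAR(1), leave commented):
% A_ex=[0.9 0; 0.4 0.5]; % M=2, p=1 coefficient matrix [A(1)]
% U_ex=randn(2,500); % white innovations
% Y_ex=eMVAR_MVARfilter(A_ex,U_ex); % 2*500 simulated series with 1->2 coupling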
|
github
|
danielemarinazzo/multiscaleGrangerCausality-master
|
surrshuf.m
|
.m
|
multiscaleGrangerCausality-master/surrshuf.m
| 194 |
utf_8
|
3f27b575a884ee11981562fdb7eb740d
|
% generates a surrogate of x by sample shuffling - also destroys the autospectra
function xs=surrshuf(x)
sx=size(x);
p=randperm(sx(1));
xs=zeros(sx(1),1);
for k = 1:sx(1)
xs(k)=x(p(k));
end
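% Usage sketch (leave commented): xs=surrshuf(randn(100,1)); the surrogate keeps the
% amplitude distribution of the input column vector but destroys its temporal order.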
|
github
|
danielemarinazzo/multiscaleGrangerCausality-master
|
egc_buildvectors.m
|
.m
|
multiscaleGrangerCausality-master/egc_buildvectors.m
| 1,046 |
utf_8
|
c0fabb97db424e857b26211aa6ff0c39
|
%% form embedding matrix (for entropy computation)
% Y: (quantized) input multiple time series, dimension M*N
% V: list of candidates, dimension Nc*2, Nc is number of candidates; 1st column: index of the signal; 2nd column: index of the lag
% A: output matrix of the vectors specified from the signals Y according to the list V
% B: complete matrix with added the current samples as first column
function B=egc_buildvectors(Y,j,V)
% clear;close all;clc;
% tmp=[5 0 0 0 3 5 4 3 4 2 1 2 4 0 5 5]'; Y =[tmp tmp tmp];
% V=[1 1; 1 3; 2 4; 3 1; 2 1]; % V=[y1(n-1),y1(n-3),y2(n-4),y3(n-1),y2(n-1)]
if isempty(V) % if no conditioning, simply returns the j-th column of data
B=Y(:,j);
else
[N,M]=size(Y);
Nc=size(V,1); % number of candidates
Lmax=max(V(:,2)); %maximum lag (across all signals)
A=NaN*ones(N-Lmax,Nc);
for n=Lmax+1:N
for i=1:Nc % fill the i-th row of A
A(n-Lmax,i)=Y(n-V(i,2),V(i,1));
end
end
B=[Y((Lmax+1:N)',j) A]; % add current value
end
|
github
|
danielemarinazzo/multiscaleGrangerCausality-master
|
eMVAR_idMVAR.m
|
.m
|
multiscaleGrangerCausality-master/eMVAR_idMVAR.m
| 1,452 |
utf_8
|
d1cedfd57662c15c9969b53a64ab836b
|
%% IDENTIFICATION OF STRICTLY CAUSAL MVAR MODEL: Y(n)=A(1)Y(n-1)+...+A(p)Y(n-p)+U(n)
% makes use of autocovariance method (vector least squares)
%%% input:
% Y, M*N matrix of time series (each time series is in a row)
% p, model order
% Mode, determines estimation algorithm (0:builtin least squares, else other methods [see mvar.m from biosig package])
%%% output:
% Am=[A(1)...A(p)], M*pM matrix of the estimated MVAR model coefficients
% S, estimated M*M input covariance matrix
% Yp, estimated time series
% Up, estimated residuals
% Z, observation matrix (often optional, useful e.g. for resampling)
function [Am,S,Yp,Up,Z,Yb]=eMVAR_idMVAR(Y,p,Mode)
% error(nargchk(1,3,nargin));
% if nargin < 3, Mode=0; end % default use least squares estimate
% if nargin < 2, p=10; end % default model order
[M,N]=size(Y);
%% IDENTIFICATION
Z=NaN*ones(p*M,N-p); % observation matrix
for j=1:p
for i=1:M
Z((j-1)*M+i,1:N-p)=Y(i, p+1-j:N-j);
end
end
if Mode==0
Yb=NaN*ones(M,N-p); % Ybar
for i=1:M
Yb(i,1:N-p)=Y(i,p+1:N);
end
Am=Yb/Z; % least squares!
% fprintf('using least squares\n');
else
Am = mvar(Y', p, Mode); % estimates from biosig code
% fprintf(['using biosig ' int2str(Mode) ' mode\n']);
end
Yp=Am*Z;
Yp=[NaN*ones(M,p) Yp]; % Vector of predicted data
Up=Y-Yp; Up=Up(:,p+1:N); % residuals of strictly causal model
S=cov(Up');
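%% Usage sketch (hypothetical data, leave commented), assuming the companion
% simulator eMVAR_MVARfilter from this repository is on the path:
% Atrue=[0.5 0 0.2 0; 0.3 0.4 0 0.1]; % M=2, p=2 true coefficients
% Y_ex=eMVAR_MVARfilter(Atrue,randn(2,1000));
% [Am_ex,S_ex]=eMVAR_idMVAR(Y_ex,2,0); % least squares; Am_ex should approach Atrue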
|
github
|
danielemarinazzo/multiscaleGrangerCausality-master
|
eMVAR_InstModelfilter.m
|
.m
|
multiscaleGrangerCausality-master/eMVAR_InstModelfilter.m
| 2,355 |
utf_8
|
a77cbc2e982dff26e398b5f61504ed3e
|
%% realization of the instantaneous model : U = L*W
%%% OUTPUT
% U: M*N matrix of filtered noises
% INPUT
% N data length
% C: input covariance matrix (may be interpreted as Su or Sw, see above)
% B0: M*M matrix of instantaneous effects (when relevant)
% when flag='StrictlyCausal':
% given Su, applies Cholesky decomposition to find L and Sw
% then generates U = L*W, for a realization of gaussian W of variance Sw
% when flag='ExtendedGauss':
% given Sw and B(0), computes L=[I-B(0)]^(-1)
% then generates U = L*W, for a realization of gaussian W of variance Sw
% when flag='ExtendedNonGauss':
% given Sw and B(0), computes L=[I-B(0)]^(-1)
% then generates U = L*W, for a realization of nongaussian W of variance Sw
function U=eMVAR_InstModelfilter(N,C,flag,B0)
error(nargchk(3,4,nargin));%min and max input arguments
M=size(C,1);
switch flag
case {'StrictlyCausal'} % C is Su
[L,Sw]=eMVAR_choldiag(C);
W = randn(M,N); % W independent and gaussian
for m=1:M % This normalizes W to have the appropriate variance (and zero mean)
W(m,:)=sqrt(Sw(m,m))*(W(m,:)-mean(W(m,:)))/std(W(m,:));
end
U=L*W;
case {'ExtendedGauss'} % C is Sw
invL=eye(M)-B0;
if det(invL)==0, error('B0 is not invertible, ill-conditioned problem!'), end;
L=inv(invL);
W = randn(M,N); % W independent and gaussian
for m=1:M % This normalizes W to have the appropriate variance (and zero mean)
W(m,:)=sqrt(C(m,m))*(W(m,:)-mean(W(m,:)))/std(W(m,:));
end
U=L*W;
case {'ExtendedNonGauss'} % C is Sw
invL=eye(M)-B0;
if det(invL)==0, error('B0 is not invertible, ill-conditioned problem!'), end;
L=inv(invL);
%note: here we generate W independent but non-Gaussian
% Nonlinearity exponent, selected to lie in [0.5, 0.8] or [1.2, 2.0]. (<1 gives subgaussian, >1 gives supergaussian)
q = rand(M,1)*1.1+0.5;
ind = find(q>0.8);
q(ind) = q(ind)+0.4;
% This generates the disturbance variables, which are mutually independent, and non-gaussian
W = randn(M,N);
W = sign(W).*(abs(W).^(q*ones(1,N)));
% This normalizes the disturbance variables to have the appropriate scales
W = W./( ( sqrt(mean((W').^2)') ./ sqrt(diag(C)) )*ones(1,N) );
U=L*W;
end
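%% Usage sketch (hypothetical values, leave commented): gaussian innovations with an
% instantaneous effect from series 2 to series 1.
% B0_ex=[0 0.7; 0 0]; Sw_ex=eye(2);
% U_ex=eMVAR_InstModelfilter(1000,Sw_ex,'ExtendedGauss',B0_ex); % 2*1000 correlated noises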
|
github
|
danielemarinazzo/multiscaleGrangerCausality-master
|
egc_LinReg.m
|
.m
|
multiscaleGrangerCausality-master/egc_LinReg.m
| 738 |
utf_8
|
4044f07d3346650cddc464cec66273c7
|
%% LINEAR REGRESSION
%%% INPUTS:
% data: N*M matrix of the M signals each having length N
% j: index (column) of the series considered as output, the one we want to describe
% V: two column vector of series (col 1) and lag (col 2) indexes
function [S,Up,Am]=egc_LinReg(data,j,V)
if isempty(V) %if no conditioning, ce will be the Entropy of B
S=var(data(:,j));
Up=data(:,j)-mean(data(:,j)); Am=[];
else % compute Conditional entropy
B=egc_buildvectors(data,j,V); %% form the observation matrix
% Linear Regression
Yb=B(:,1)'; % inversion works with data organized in rows
A=B; A(:,1)=[]; Z=A';
Am=Yb/Z; % least squares!
Yp=Am*Z;
Up=Yb-Yp;
S=cov(Up');
end
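%% Usage sketch (hypothetical data, leave commented), assuming egc_SetLag and
% egc_buildvectors from this repository are on the path:
% data_ex=randn(500,2);
% V_ex=egc_SetLag([2 2],[1 1],[1 1],[0 0]); % 2 lags of both series
% [S1,Up1,Am1]=egc_LinReg(data_ex,1,V_ex); % S1: residual variance of the full regression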
|
github
|
danielemarinazzo/multiscaleGrangerCausality-master
|
msgc.m
|
.m
|
multiscaleGrangerCausality-master/msgc.m
| 2,184 |
utf_8
|
08c1d9bb93a08811c532e7ee5ba4e736
|
%% MULTISCALE GC computation
%%% inputs
% Am, Su: VAR parameters (Am: M x pM coeff matrix; Su: M x M innovation covariance matrix)
% tau: scale factor
% ncoeff: number of coefficients of FIR filter (if no averaging)
% whichfilter: 'F' for FIR (default), 'A' for averaging
%%% outputs
% GCdws: GC at scale tau - MxM matrix with GC i->j in position (j,i)
% GCflt: GC at scale tau after first step (filtering)
% b: filter coeffs
function [GCdws,GCflt,b] = msgc(Am,Su,tau,ncoeff,whichfilter)
if nargin<5, whichfilter='F'; end;
M=size(Am,1);
GCflt=nan*ones(M,M);
GCdws=nan*ones(M,M);
% MA parameters resulting from the change of scale
switch whichfilter
case 'A'%%% AVERAGING FILTER
epsi=0.001; ntau0=1+epsi*(tau-1); ntau=1-epsi; %have to put an epsilon to guarantee stability
B0=ntau0/tau*eye(M);
Bm=repmat(ntau/tau*eye(M),1,tau-1);
b=(1/tau)*ones(1,tau);
case 'F' %%% FIR FILTER
if tau==1
q=0; b=1;
else
q=ncoeff; % number of filter coeffs
ft=1/(2*tau); %cutoff frequency
Wn=2*ft; %normalized cutoff frequency (fNyquist=1)
b=fir1(q,Wn,'noscale'); %Hamming window, linear phase (symmetry of b coeffs)
end
Bk=zeros(M,M,q+1);
for l=1:q+1
Bk(:,:,l)=b(l)*eye(M);
end
Bm=[];
for kk=1:q+1
Bm=[Bm Bk(:,:,kk)];
end
B0=Bm(1:M,1:M);
Bm=Bm(1:M,M+1:end);
end
% ISS parameters
[A,C,K,V,Vy] = iss_varma2iss(Am,Bm,Su,B0); % max(abs(eig(A-K*C)))
% FILTERING
ret_flt = iss_PV(A,C,K,V);
for jj=1:M
for ii=1:M
if ii~=jj
GCflt(jj,ii)=log(ret_flt.Sigmaj_j(jj)/ret_flt.Sigmaj_ij(jj,ii));
end
end
end
%%% DOWNSAMPLING
[Ad,Kd,Vd] = iss_ds(A,C,K,V,tau);
Cd=C;
ret_dws = iss_PV(Ad,Cd,Kd,Vd);
for jj=1:M
for ii=1:M
if ii~=jj
GCdws(jj,ii)=log(ret_dws.Sigmaj_jk(jj,ii)/ret_dws.Sigmaj_ijk(jj));
end
end
end
end
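%% Usage sketch (hypothetical VAR(1), leave commented), assuming the companion
% routines iss_varma2iss, iss_PV and iss_ds from this repository are on the path
% (iss_varma2iss also needs dlyap from the Control System Toolbox):
% Am_ex=[0.5 0; 0.4 0.5]; Su_ex=eye(2);
% [GC2,GCflt2]=msgc(Am_ex,Su_ex,2,6); % pairwise GC at time scale tau=2, 6-coefficient FIR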
|
github
|
danielemarinazzo/multiscaleGrangerCausality-master
|
egc_gcMVAR.m
|
.m
|
multiscaleGrangerCausality-master/egc_gcMVAR.m
| 1,845 |
utf_8
|
cbbb1c17fc5a26db10138e66abb9af3f
|
%% GRANGER CAUSALITY FROM STRICTLY CAUSAL MVAR MODEL: Y(n)=A(1)Y(n-1)+...+A(p)Y(n-p)+U(n)
% estimates Granger Causality in multiple time series from MVAR model fitted on data
% performs also row-by-row MVAR identification, equivalent to idMVAR (with vector least squares)
%%% input:
% Y, M*N matrix of time series (each time series is in a row)
% p, model order
%%% output:
% GC - M*M Granger Causality matrix - ij element is the GC from j to i
% p_val, matrix of p values of F test associated to GC causality
% Am=[A(1)...A(p)], M*pM matrix of the estimated MVAR model coefficients
% Su, estimated M*M input covariance matrix
% SigmaR, M*M matrix of restricted regression residual variances
% Ures, residuals of unrestricted model, dimension M*N
function [GC,p_val,Am,Su,SigmaR,Ures]=egc_gcMVAR(Y,p)
M=size(Y,1);
tau=ones(1,M);
u=ones(1,M);
zerolag=zeros(1,M);
p_vett=p*ones(1,M);
[Vu]=egc_SetLag(p_vett,tau,u,zerolag);
Nu=size(Vu,1); %number of coeff for unrestricted regression
% Unrestricted regressions
Am=NaN*ones(M,p*M);
SigmaU=NaN*ones(M,M);
for jj=1:M
[Sigma,Upu,coeff]=egc_LinReg(Y',jj,Vu); % execute linear regression
SigmaU(jj,:)=Sigma;
Upred(:,jj)=Upu; % residuals of the unrestricted regression
for k=1:size(Vu,1)
Am(jj,M*(Vu(k,2)-1)+Vu(k,1))=coeff(k);
end
end
Su=cov(Upred);
Ures=Upred';
% restricted regressions
SigmaR=NaN*ones(M,M); p_val=NaN*ones(M,M);
for jj=1:M
for ii=1:M
p_r=p_vett; p_r(ii)=0;
Vr=egc_SetLag(p_r,tau,u,zerolag);
[Sigma,Upr,~]=egc_LinReg(Y',jj,Vr);
SigmaR(jj,ii)=Sigma;
% Ftest for significance
Nr=size(Vr,1);%number of coeff for restricted regression
p_val(jj,ii) = egc_LinReg_Ftest(Upred(:,jj),Upr',Nu,Nr);
end
end
GC=log(SigmaR./SigmaU);
end
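%% Usage sketch (hypothetical data, leave commented), assuming eMVAR_MVARfilter
% from this repository is on the path:
% Atrue=[0.5 0 0 0; 0.4 0.5 0 0]; % VAR(2) with coupling 1->2 only
% Y_ex=eMVAR_MVARfilter(Atrue,randn(2,2000));
% [GC_ex,pval_ex]=egc_gcMVAR(Y_ex,2); % expect GC_ex(2,1) clearly larger than GC_ex(1,2)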
|
github
|
danielemarinazzo/multiscaleGrangerCausality-master
|
surriaafft.m
|
.m
|
multiscaleGrangerCausality-master/surriaafft.m
| 1,502 |
utf_8
|
f60f7c2acff33418e0cc6efa5895b5ab
|
% generates iterative amplitude adjusted Fourier transform surrogates
% algorithm of Schreiber and Schmitz - Physical Review Letters 1996
% y: series to be surrogated
% nit: desired number of iterations (default 7)
% stop: 'spe' returns with the spectrum preserved, 'dis' returns with the distribution preserved
function ys=surriaafft(y,nit,stop)
error(nargchk(1,3,nargin));%min and max number of input arguments
if nargin < 3, stop='spe'; end %default: match the spectrum
if nargin < 2, nit=7; end %default: 7 iterations
% clear;close all;
% percorso='D:\johnny\lavoro\integrate_nlpred\elaborati_loo_si\';% path of the data to analyze
% nomefile='b-ca.prn';
% rs=load([percorso nomefile]);
% y=rs(:,1);
% y=(y-mean(y))/std(y);
%%
[ysorted,yindice]=sort(y);
my=abs(fft(y));
% ys=surrfft(y); %initialization
ys=surrshuf(y); %initialization
%% iteration loop
for i=1:nit
% step 1: impose the spectrum
faseys=angle(fft(ys));
fys=my.*(cos(faseys)+j*sin(faseys));
ys=ifft(fys);ys=real(ys);
ys=ys-mean(ys);
% step 2: impose the distribution
[yssorted,ysindice]=sort(ys);
ypermuted=zeros(length(y),1);
for kk=1:length(y) % separate counter, so the outer iteration index is not reused
ypermuted(ysindice(kk))=ysorted(kk);
end
ys=ypermuted;
end
% if the spectrum is to be preserved, do one extra half-iteration imposing only the spectrum
if stop=='spe'
faseys=angle(fft(ys));
fys=my.*(cos(faseys)+j*sin(faseys));
ys=ifft(fys);ys=real(ys);
ys=ys-mean(ys);
end
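% Usage sketch (leave commented): ys=surriaafft(randn(256,1),7,'spe'); returns a
% surrogate of the input column vector with the same amplitude spectrum and an
% (approximately) preserved amplitude distribution.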
|
github
|
avinashk94/CVIP-master
|
harris.m
|
.m
|
CVIP-master/Assignment/hw2/50248877_hw2/code/harris.m
| 3,097 |
utf_8
|
c3a11b6fc77fa908635b8f9b14ad52a2
|
% HARRIS - Harris corner detector
%
% Usage: [cim, r, c] = harris(im, sigma, thresh, radius, disp)
%
% Arguments:
% im - image to be processed.
% sigma - standard deviation of smoothing Gaussian. Typical
% values to use might be 1-3.
% thresh - threshold (optional). Try a value ~1000.
% radius - radius of region considered in non-maximal
% suppression (optional). Typical values to use might
% be 1-3.
% disp - optional flag (0 or 1) indicating whether you want
% to display corners overlaid on the original
% image. This can be useful for parameter tuning.
%
% Returns:
% cim - binary image marking corners.
% r - row coordinates of corner points.
% c - column coordinates of corner points.
%
% If thresh and radius are omitted from the argument list 'cim' is returned
% as a raw corner strength image and r and c are returned empty.
% Reference:
% C.G. Harris and M.J. Stephens. "A combined corner and edge detector",
% Proceedings Fourth Alvey Vision Conference, Manchester.
% pp 147-151, 1988.
%
% Author:
% Peter Kovesi
% Department of Computer Science & Software Engineering
% The University of Western Australia
% [email protected] www.cs.uwa.edu.au/~pk
%
% March 2002
function [cim, r, c] = harris(im, sigma, thresh, radius, disp)
error(nargchk(2,5,nargin));
dx = [-1 0 1; -1 0 1; -1 0 1]; % Derivative masks
dy = dx';
Ix = conv2(im, dx, 'same'); % Image derivatives
Iy = conv2(im, dy, 'same');
% Generate Gaussian filter of size 6*sigma (+/- 3sigma) and of
% minimum size 1x1.
g = fspecial('gaussian',max(1,fix(6*sigma)), sigma);
Ix2 = conv2(Ix.^2, g, 'same'); % Smoothed squared image derivatives
Iy2 = conv2(Iy.^2, g, 'same');
Ixy = conv2(Ix.*Iy, g, 'same');
cim = (Ix2.*Iy2 - Ixy.^2)./(Ix2 + Iy2 + eps); % Harris corner measure
% Alternate Harris corner measure used by some. Suggested that
% k=0.04 - I find this a bit arbitrary and unsatisfactory.
% cim = (Ix2.*Iy2 - Ixy.^2) - k*(Ix2 + Iy2).^2;
if nargin > 2 % We should perform nonmaximal suppression and threshold
% Extract local maxima by performing a grey scale morphological
% dilation and then finding points in the corner strength image that
% match the dilated image and are also greater than the threshold.
sze = 2*radius+1; % Size of mask.
mx = ordfilt2(cim,sze^2,ones(sze)); % Grey-scale dilate.
cim = (cim==mx)&(cim>thresh); % Find maxima.
[r,c] = find(cim); % Find row,col coords.
if nargin==5 & disp % overlay corners on original image
figure, imagesc(im), axis image, colormap(gray), hold on
plot(c,r,'ys'), title('corners detected');
end
else % leave cim as a corner strength image and make r and c empty.
r = []; c = [];
end
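% Usage sketch (hypothetical image file, leave commented); the threshold and radius
% typically need tuning per image:
% im = double(rgb2gray(imread('building.jpg'))); % 'building.jpg' is a placeholder
% [cim, r, c] = harris(im, 2, 1000, 2, 1); % detect corners and overlay them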
|
github
|
avinashk94/CVIP-master
|
harris.m
|
.m
|
CVIP-master/Assignment/hw2/hw2/code/harris.m
| 3,097 |
utf_8
|
c3a11b6fc77fa908635b8f9b14ad52a2
|
% HARRIS - Harris corner detector
%
% Usage: [cim, r, c] = harris(im, sigma, thresh, radius, disp)
%
% Arguments:
% im - image to be processed.
% sigma - standard deviation of smoothing Gaussian. Typical
% values to use might be 1-3.
% thresh - threshold (optional). Try a value ~1000.
% radius - radius of region considered in non-maximal
% suppression (optional). Typical values to use might
% be 1-3.
% disp - optional flag (0 or 1) indicating whether you want
% to display corners overlaid on the original
% image. This can be useful for parameter tuning.
%
% Returns:
% cim - binary image marking corners.
% r - row coordinates of corner points.
% c - column coordinates of corner points.
%
% If thresh and radius are omitted from the argument list 'cim' is returned
% as a raw corner strength image and r and c are returned empty.
% Reference:
% C.G. Harris and M.J. Stephens. "A combined corner and edge detector",
% Proceedings Fourth Alvey Vision Conference, Manchester.
% pp 147-151, 1988.
%
% Author:
% Peter Kovesi
% Department of Computer Science & Software Engineering
% The University of Western Australia
% [email protected] www.cs.uwa.edu.au/~pk
%
% March 2002
function [cim, r, c] = harris(im, sigma, thresh, radius, disp)
error(nargchk(2,5,nargin));
dx = [-1 0 1; -1 0 1; -1 0 1]; % Derivative masks
dy = dx';
Ix = conv2(im, dx, 'same'); % Image derivatives
Iy = conv2(im, dy, 'same');
% Generate Gaussian filter of size 6*sigma (+/- 3sigma) and of
% minimum size 1x1.
g = fspecial('gaussian',max(1,fix(6*sigma)), sigma);
Ix2 = conv2(Ix.^2, g, 'same'); % Smoothed squared image derivatives
Iy2 = conv2(Iy.^2, g, 'same');
Ixy = conv2(Ix.*Iy, g, 'same');
cim = (Ix2.*Iy2 - Ixy.^2)./(Ix2 + Iy2 + eps); % Harris corner measure
% Alternate Harris corner measure used by some. Suggested that
% k=0.04 - I find this a bit arbitrary and unsatisfactory.
% cim = (Ix2.*Iy2 - Ixy.^2) - k*(Ix2 + Iy2).^2;
if nargin > 2 % We should perform nonmaximal suppression and threshold
% Extract local maxima by performing a grey scale morphological
% dilation and then finding points in the corner strength image that
% match the dilated image and are also greater than the threshold.
sze = 2*radius+1; % Size of mask.
mx = ordfilt2(cim,sze^2,ones(sze)); % Grey-scale dilate.
cim = (cim==mx)&(cim>thresh); % Find maxima.
[r,c] = find(cim); % Find row,col coords.
if nargin==5 & disp % overlay corners on original image
figure, imagesc(im), axis image, colormap(gray), hold on
plot(c,r,'ys'), title('corners detected');
end
else % leave cim as a corner strength image and make r and c empty.
r = []; c = [];
end
|
github
|
avinashk94/CVIP-master
|
find_sift.m
|
.m
|
CVIP-master/Assignment/hw3/hw3/code/find_sift.m
| 5,054 |
utf_8
|
bd661341ed3535975182b3f451a3c152
|
function sift_arr = find_sift(I, circles, enlarge_factor)
%%
%% Compute non-rotation-invariant SIFT descriptors of a set of circles
%% I is the image
%% circles is an Nx3 array where N is the number of circles, where the
%% first column is the x-coordinate, the second column is the y-coordinate,
%% and the third column is the radius
%% enlarge_factor is by how much to enlarge the radius of the circle before
%% computing the descriptor (a factor of 1.5 or larger is usually necessary
%% for best performance)
%% The output is an Nx128 array of SIFT descriptors
%%
%% Note that this code is not rotation-invariant, i.e., it does not attempt
%% to normalize the patches by rotating them so that the horizontal direction
%% is aligned with the dominant gradient orientation of the patch.
%%
%% (c) Lana Lazebnik
%%
if ndims(I) == 3
I = im2double(rgb2gray(I));
else
I = im2double(I);
end
fprintf('Running find_sift\n');
% parameters (default SIFT size)
num_angles = 8;
num_bins = 4;
num_samples = num_bins * num_bins;
alpha = 9; % smoothing for orientation histogram
if nargin < 3
enlarge_factor = 1.5;
end
angle_step = 2 * pi / num_angles;
angles = 0:angle_step:2*pi;
angles(num_angles+1) = []; % bin centers
[hgt wid] = size(I);
num_pts = size(circles,1);
sift_arr = zeros(num_pts, num_samples * num_angles);
% edge image
sigma_edge = 1;
[G_X,G_Y]=gen_dgauss(sigma_edge);
I_X = filter2(G_X, I, 'same'); % vertical edges
I_Y = filter2(G_Y, I, 'same'); % horizontal edges
I_mag = sqrt(I_X.^2 + I_Y.^2); % gradient magnitude
I_theta = atan2(I_Y,I_X);
I_theta(isnan(I_theta)) = 0; % necessary????
% make default grid of samples (centered at zero, width 2)
interval = 2/num_bins:2/num_bins:2;
interval = interval - (1/num_bins + 1);
[grid_x grid_y] = meshgrid(interval, interval);
grid_x = reshape(grid_x, [1 num_samples]);
grid_y = reshape(grid_y, [1 num_samples]);
% make orientation images
I_orientation = zeros(hgt, wid, num_angles);
% for each histogram angle
for a=1:num_angles
% compute each orientation channel
tmp = cos(I_theta - angles(a)).^alpha;
tmp = tmp .* (tmp > 0);
% weight by magnitude
I_orientation(:,:,a) = tmp .* I_mag;
end
% for all circles
for i=1:num_pts
cx = circles(i,1);
cy = circles(i,2);
r = circles(i,3) * enlarge_factor;
% find coordinates of sample points (bin centers)
grid_x_t = grid_x * r + cx;
grid_y_t = grid_y * r + cy;
grid_res = grid_y_t(2) - grid_y_t(1);
% find window of pixels that contributes to this descriptor
x_lo = floor(max(cx - r - grid_res/2, 1));
x_hi = ceil(min(cx + r + grid_res/2, wid));
y_lo = floor(max(cy - r - grid_res/2, 1));
y_hi = ceil(min(cy + r + grid_res/2, hgt));
% find coordinates of pixels
[grid_px, grid_py] = meshgrid(x_lo:x_hi,y_lo:y_hi);
num_pix = numel(grid_px);
grid_px = reshape(grid_px, [num_pix 1]);
grid_py = reshape(grid_py, [num_pix 1]);
% find (horiz, vert) distance between each pixel and each grid sample
dist_px = abs(repmat(grid_px, [1 num_samples]) - repmat(grid_x_t, [num_pix 1]));
dist_py = abs(repmat(grid_py, [1 num_samples]) - repmat(grid_y_t, [num_pix 1]));
% find weight of contribution of each pixel to each bin
weights_x = dist_px/grid_res;
weights_x = (1 - weights_x) .* (weights_x <= 1);
weights_y = dist_py/grid_res;
weights_y = (1 - weights_y) .* (weights_y <= 1);
weights = weights_x .* weights_y;
% make sift descriptor
curr_sift = zeros(num_angles, num_samples);
for a = 1:num_angles
tmp = reshape(I_orientation(y_lo:y_hi,x_lo:x_hi,a),[num_pix 1]);
tmp = repmat(tmp, [1 num_samples]);
curr_sift(a,:) = sum(tmp .* weights);
end
sift_arr(i,:) = reshape(curr_sift, [1 num_samples * num_angles]);
% % visualization
% if sigma_edge >= 3
% subplot(1,2,1);
% rescale_and_imshow(I(y_lo:y_hi,x_lo:x_hi) .* reshape(sum(weights,2), [y_hi-y_lo+1,x_hi-x_lo+1]));
% subplot(1,2,2);
% rescale_and_imshow(curr_sift);
% pause;
% end
end
%%
%% normalize the SIFT descriptors more or less as described in Lowe (2004)
%%
tmp = sqrt(sum(sift_arr.^2, 2));
normalize_ind = find(tmp > 1);
sift_arr_norm = sift_arr(normalize_ind,:);
sift_arr_norm = sift_arr_norm ./ repmat(tmp(normalize_ind,:), [1 size(sift_arr,2)]);
% suppress large gradients
sift_arr_norm(find(sift_arr_norm > 0.2)) = 0.2;
% finally, renormalize to unit length
tmp = sqrt(sum(sift_arr_norm.^2, 2));
sift_arr_norm = sift_arr_norm ./ repmat(tmp, [1 size(sift_arr,2)]);
sift_arr(normalize_ind,:) = sift_arr_norm;
function [GX,GY]=gen_dgauss(sigma)
f_wid = 4 * floor(sigma);
G = normpdf(-f_wid:f_wid,0,sigma);
G = G' * G;
[GX,GY] = gradient(G);
GX = GX * 2 ./ sum(sum(abs(GX)));
GY = GY * 2 ./ sum(sum(abs(GY)));
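% Usage sketch (hypothetical inputs, leave commented); the keypoint coordinates are
% placeholders and must lie inside the image:
% I_ex = imread('scene.jpg'); % hypothetical image file
% circ_ex = [100 120 8; 240 60 12]; % one [x y radius] row per keypoint
% d_ex = find_sift(I_ex, circ_ex, 1.5); % 2x128 descriptor array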
|
github
|
avinashk94/CVIP-master
|
harris.m
|
.m
|
CVIP-master/Assignment/hw3/hw3/code/harris.m
| 3,097 |
utf_8
|
c3a11b6fc77fa908635b8f9b14ad52a2
|
% HARRIS - Harris corner detector
%
% Usage: [cim, r, c] = harris(im, sigma, thresh, radius, disp)
%
% Arguments:
% im - image to be processed.
% sigma - standard deviation of smoothing Gaussian. Typical
% values to use might be 1-3.
% thresh - threshold (optional). Try a value ~1000.
% radius - radius of region considered in non-maximal
% suppression (optional). Typical values to use might
% be 1-3.
% disp - optional flag (0 or 1) indicating whether you want
% to display corners overlaid on the original
% image. This can be useful for parameter tuning.
%
% Returns:
% cim - binary image marking corners.
% r - row coordinates of corner points.
% c - column coordinates of corner points.
%
% If thresh and radius are omitted from the argument list 'cim' is returned
% as a raw corner strength image and r and c are returned empty.
% Reference:
% C.G. Harris and M.J. Stephens. "A combined corner and edge detector",
% Proceedings Fourth Alvey Vision Conference, Manchester.
% pp 147-151, 1988.
%
% Author:
% Peter Kovesi
% Department of Computer Science & Software Engineering
% The University of Western Australia
% [email protected] www.cs.uwa.edu.au/~pk
%
% March 2002
function [cim, r, c] = harris(im, sigma, thresh, radius, disp)
error(nargchk(2,5,nargin));
dx = [-1 0 1; -1 0 1; -1 0 1]; % Derivative masks
dy = dx';
Ix = conv2(im, dx, 'same'); % Image derivatives
Iy = conv2(im, dy, 'same');
% Generate Gaussian filter of size 6*sigma (+/- 3sigma) and of
% minimum size 1x1.
g = fspecial('gaussian',max(1,fix(6*sigma)), sigma);
Ix2 = conv2(Ix.^2, g, 'same'); % Smoothed squared image derivatives
Iy2 = conv2(Iy.^2, g, 'same');
Ixy = conv2(Ix.*Iy, g, 'same');
cim = (Ix2.*Iy2 - Ixy.^2)./(Ix2 + Iy2 + eps); % Harris corner measure
% Alternate Harris corner measure used by some. Suggested that
% k=0.04 - I find this a bit arbitrary and unsatisfactory.
% cim = (Ix2.*Iy2 - Ixy.^2) - k*(Ix2 + Iy2).^2;
if nargin > 2 % We should perform nonmaximal suppression and threshold
% Extract local maxima by performing a grey scale morphological
% dilation and then finding points in the corner strength image that
% match the dilated image and are also greater than the threshold.
sze = 2*radius+1; % Size of mask.
mx = ordfilt2(cim,sze^2,ones(sze)); % Grey-scale dilate.
cim = (cim==mx)&(cim>thresh); % Find maxima.
[r,c] = find(cim); % Find row,col coords.
if nargin==5 & disp % overlay corners on original image
figure, imagesc(im), axis image, colormap(gray), hold on
plot(c,r,'ys'), title('corners detected');
end
else % leave cim as a corner strength image and make r and c empty.
r = []; c = [];
end
|
github
|
avinashk94/CVIP-master
|
distanceToSet.m
|
.m
|
CVIP-master/Assignment/hw1/release/custom/distanceToSet.m
| 1,570 |
utf_8
|
e468b68972d12528f3824237c308520c
|
function histInter = distanceToSet(wordHist, histograms)
% Sum of minimums (Baseline implementation)
%intersections = bsxfun(@min, histograms, repmat(wordHist, 1, size(histograms, 2)));
%histInter = sum(intersections);
% Bhattacharyya coefficient
%intersections = bsxfun(@times, sqrt(histograms), repmat(sqrt(wordHist), 1, size(histograms, 2)));
%histInter = sum(intersections);
% Sum of products
%intersections = bsxfun(@times, histograms, repmat(wordHist, 1, size(histograms, 2)));
%histInter = sum(intersections);
% Sum of squared differences (Euclidean distance)
%intersections = bsxfun(@minus, histograms, repmat(wordHist, 1, size(histograms, 2)));
%histInter = sum(intersections.^2);
% Standard Euclidean
%histInter = -pdist2(histograms', wordHist', 'seuclidean');
% Chebychev distance
%histInter = -pdist2(histograms', wordHist', 'chebychev');
% Correlation
%histInter = -pdist2(histograms', wordHist', 'correlation');
% Chi-squared
%histInter = -pdist2(histograms', wordHist', @distChiSq);
% Spearman's rank correlation
histInter = -pdist2(histograms', wordHist', 'spearman');
end
%% Chi-squared function
% Downloaded from:
% http://www.mathworks.com/matlabcentral/fileexchange/29004-feature-points-in-image--keypoint-extraction/content/FPS_in_image/FPS%20in%20image/Help%20Functions/SearchingMatches/pdist2.m
function D = distChiSq( X, Y )
m = size(X,1); n = size(Y,1);
mOnes = ones(1,m); D = zeros(m,n);
for i=1:n
yi = Y(i,:); yiRep = yi( mOnes, : );
s = yiRep + X; d = yiRep - X;
D(:,i) = sum( d.^2 ./ (s+eps), 2 );
end
D = D/2;
end
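% Usage sketch (hypothetical histograms, leave commented); pdist2 with the
% 'spearman' option requires the Statistics Toolbox:
% H_ex = rand(200,50); H_ex = H_ex./repmat(sum(H_ex),200,1); % 50 histograms over 200 words
% h_ex = rand(200,1); h_ex = h_ex/sum(h_ex);
% sim_ex = distanceToSet(h_ex, H_ex); % 50x1, larger = more similar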
|
github
|
avinashk94/CVIP-master
|
computeDictionary.m
|
.m
|
CVIP-master/Assignment/hw1/release/matlab/computeDictionary.m
| 356 |
utf_8
|
7baa8b2034d3469217474b5c13857ac2
|
% Computes filter bank and dictionary, and saves it in dictionary.mat
function computeDictionary()
load('../data/traintest.mat');
interval= 1;
train_imagenames = train_imagenames(1:interval:end);
[filterBank,dictionary] = getFilterBankAndDictionary(strcat(['../data/'],train_imagenames));
save('dictionary.mat','filterBank','dictionary');
end
|
github
|
numpde/as-master
|
sample_network3.m
|
.m
|
as-master/p/network/20171002-RatPathways-EPFL/B-subgraphs/sample_network3.m
| 4,012 |
utf_8
|
e569b5292d7cf526f9ed50bf6bd28064
|
% Subsampling from a large network.
% Pick a seed according to the degree distribution.
% Carry out random walks in the network.
% Stop when the number of subsampled nodes reaches the pre-specified number.
% Difference from sample_network2: attempt to control both node and edge sizes.
% Select the seed according to the node degree distribution.
% Incorporate new nodes by sampling neighbors of the current network according to the distribution of the density of the augmented network.
% f(x;rho)=|x-rho|. g(x;rho)=max(f(x;rho))-f(x;rho)+delta. p(x;rho)=g(x;rho)/sum_{x}g(x;rho).
function subG = sample_network3(G, nnodes, nsubnodes, nsubedges)
[a,b]=find(G>0); subset=find(a<b); a=a(subset); b=b(subset);
degs=zeros(1,nnodes);
for i=1:length(a)
degs(a(i))=degs(a(i))+1; degs(b(i))=degs(b(i))+1;
end
found=0;
while (found==0)
%vals=find(degs>=10);
vals=find(degs>=1);
% For small networks, ensure the candidates are in large enough components.
if (nnodes<=10000)
[ncomps,comps]=find_conn_comps2(nnodes,G);
sel=zeros(1,length(vals));
for i=1:length(vals)
j=vals(i); k=1;
while ((sel(i)==0)&(comps{k}.n>=nsubnodes)&(k<=ncomps))
if (ismember(j,comps{k}.comps)==1)
sel(i)=1;
end
k=k+1;
end
end
vals=vals(find(sel==1));
end
ps=degs(vals); ps=ps/sum(ps);
seed=sample_discrete_rv(vals,ps);
% Perform random walks from the current network.
% Sample nodes according to the deviation from the desired density.
targetdensity=nsubedges/nsubnodes;
subinds=seed; flag=1;
while (flag==1)
[a,b]=find(G(subinds,:)>0); b=unique(b); neighbors=setdiff(b,subinds);
if (length(neighbors)==0)
flag=0;
else
ps=zeros(1,length(neighbors));
for i=1:length(neighbors)
tmpinds=[subinds neighbors(i)];
tmpG=G(tmpinds,tmpinds);
[a,b]=find(tmpG>0); subset=find(a<b);
val=length(subset)/(length(subinds)+1);
ps(i)=abs(val-targetdensity);
end
val=max(ps); ps=val-ps+0.001; ps=ps/sum(ps);
newind=sample_discrete_rv(neighbors,ps);
subinds=union(subinds,newind);
if (length(subinds)>=nsubnodes)
flag=0;
end
end
% Debug
[a,b]=find(G(subinds,subinds)>0); subset=find(a<b);
%fprintf('nsubnodes=%d, nsubedges=%d, density=%.4f, targetdensity=%.4f\n',length(subinds),length(subset),length(subset)/length(subinds),targetdensity);
end
% If the density is much smaller than expected, then resample the subgraph by starting with a seed with higher degrees.
density=length(subset)/length(subinds);
if (density<(0.8*targetdensity))
vals=find(degs>=10);
% For small networks, ensure the candidates are in large enough components.
if (nnodes<=10000)
[ncomps,comps]=find_conn_comps2(nnodes,G);
sel=zeros(1,length(vals));
for i=1:length(vals)
j=vals(i); k=1;
while ((sel(i)==0)&(comps{k}.n>=nsubnodes)&(k<=ncomps))
if (ismember(j,comps{k}.comps)==1)
sel(i)=1;
end
k=k+1;
end
end
vals=vals(find(sel==1));
end
ps=degs(vals); ps=ps/sum(ps);
seed=sample_discrete_rv(vals,ps);
% Perform random walks from the current network.
% Sample nodes according to the deviation from the desired density.
targetdensity=nsubedges/nsubnodes;
subinds=seed; flag=1;
while (flag==1)
[a,b]=find(G(subinds,:)>0); b=unique(b); neighbors=setdiff(b,subinds);
if (length(neighbors)==0)
flag=0;
else
ps=zeros(1,length(neighbors));
for i=1:length(neighbors)
tmpinds=[subinds neighbors(i)];
tmpG=G(tmpinds,tmpinds);
[a,b]=find(tmpG>0); subset=find(a<b);
val=length(subset)/(length(subinds)+1);
ps(i)=abs(val-targetdensity);
end
val=max(ps); ps=val-ps+0.001; ps=ps/sum(ps);
newind=sample_discrete_rv(neighbors,ps);
subinds=union(subinds,newind);
if (length(subinds)>=nsubnodes)
flag=0;
end
end
% Debug
[a,b]=find(G(subinds,subinds)>0); subset=find(a<b);
%fprintf('nsubnodes=%d, nsubedges=%d, density=%.4f, targetdensity=%.4f\n',length(subinds),length(subset),length(subset)/length(subinds),targetdensity);
end
end
% Extract the subgraph spanned by subinds.
subG=G(subinds,subinds);
% If the number of edges is too large, then continue sampling the network.
[a,b]=find(subG>0); subset=find(a<b); k=length(subset);
if (k<=(nsubedges*2))
found=1;
end
end
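% Usage sketch (hypothetical random graph, leave commented), assuming
% find_conn_comps2 and sample_discrete_rv from this folder are on the path:
% G_ex = double(rand(200) < 0.05); G_ex = triu(G_ex,1); G_ex = G_ex + G_ex';
% subG_ex = sample_network3(G_ex, 200, 30, 60); % ~30 nodes, target ~2 edges per node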
|
github
|
numpde/as-master
|
construct_clique_complex.m
|
.m
|
as-master/p/network/20171002-RatPathways-EPFL/B-subgraphs/construct_clique_complex.m
| 2,403 |
utf_8
|
fd8bd622b6598d8ae7862faa4dd7c5f5
|
% Construct the clique complex from an undirected graph.
% Apply only to small graphs.
function [nsimplices, simplices] = construct_clique_complex(G)
nnodes = length(G(1, :));
% Exhaust all cliques.
nsimplices = 0; simplices = {};
% Nodes.
for n = 1:nnodes
simplices{n} = [n];
end
nsimplices = nnodes; maxdim = 1;
% Edges.
[a, b] = find(G > 0);
subset = find(a < b);
edges = [a(subset)';b(subset)'];
nedges = length(edges(1, :));
if (nedges > 0)
for n = 1:nedges
simplices{nsimplices + n} = transpose(edges(:, n));
end
nsimplices = nsimplices + nedges;
maxdim = 2;
end
% Higher-order cliques.
% Stop when there are no cliques of a given order.
maxdim = maxdim + 1; flag = 1;
while (flag == 1)
% Build cliques on top of lower-order cliques.
% Only allow the permutations of an increasing order.
ncliques = 0; cliques = [];
for n = 1:nsimplices
if (length(simplices{n}) == (maxdim - 1))
lowerclique = simplices{n};
vec = ones(1, nnodes);
for i = 1:length(lowerclique)
vec = vec .* G(lowerclique(i), :);
end
subset = find(vec > 0); subset = setdiff(subset, 1:max(lowerclique));
for i = 1:length(subset)
ncliques = ncliques + 1; cliques(ncliques, :) = [lowerclique subset(i)];
end
% for i=1:nnodes
% if ((ismember(i,lowerclique)==0)&(sum(G(i,lowerclique)>0)==(maxdim-1))&(i>max(lowerclique)))
% subset=[lowerclique i];
% ncliques=ncliques+1; cliques(ncliques,:)=subset;
% end
% end
end
end
% Write cliques to the simplices.
for n = 1:ncliques
simplices{nsimplices + n} = cliques(n, :);
end
nsimplices = nsimplices + ncliques;
% Debug
% fprintf('maxdim=%d, ncliques=%d, nsimplices=%d\n',maxdim,ncliques,nsimplices);
% Stop if ncliques=0.
if (ncliques <= 0)
flag = 0; % maxdim=maxdim-1;
% Otherwise increment maxdim by one and proceed.
else
maxdim = maxdim + 1;
end
end
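% Usage sketch (leave commented): a filled triangle {1,2,3} with a pendant node 4.
% G_ex = [0 1 1 0; 1 0 1 0; 1 1 0 1; 0 0 1 0];
% [ns_ex, s_ex] = construct_clique_complex(G_ex); % ns_ex = 9: 4 nodes, 4 edges, 1 triangle
% % with the companion evaluate_complex_homology from this folder:
% % bs_ex = evaluate_complex_homology(ns_ex, s_ex) gives Betti numbers [1 0]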
|
github
|
numpde/as-master
|
atof2.m
|
.m
|
as-master/p/network/20171002-RatPathways-EPFL/B-subgraphs/atof2.m
| 911 |
utf_8
|
a0aa9721ee4c6eda6667d14146264604
|
% Convert a string into a real number.
% Also works for exponential representation.
function val = atof2(s)
% If s=NA then return NaN.
if (strcmp(s,'NA')==1)
val=NaN;
else
val=0; sgn=1; cnt=1; afterpoint=0; afterexp=0;
expval=0; expsign=1; power=1.0;
while (cnt<=length(s))
ch=s(cnt);
if ((cnt==1)&(ch=='-'))
sgn=-1;
elseif ((ch=='-')&(afterexp==1))
expsign=-1;
elseif (ch=='.')
afterpoint=1; power=0.1;
elseif ((ch=='e')|(ch=='E'))
afterexp=1; afterpoint=0;
elseif ((ch>='0')&(ch<='9')&(afterpoint==0))
if (afterexp==0)
val=val*10+(ch-'0');
else
expval=expval*10+(ch-'0');
end
elseif ((ch>='0')&(ch<='9')&(afterpoint==1))
if (afterexp==0)
val=val+(ch-'0')*power;
else
expval=expval+(ch-'0')*power;
end
power=power*0.1;
end
cnt=cnt+1;
end
if (sgn==-1)
val=val*(-1);
end
if (expsign==-1)
expval=expval*(-1);
end
val=val*10^expval;
end
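% Usage sketch (leave commented):
% atof2('-1.5e2') % returns -150
% atof2('NA') % returns NaN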
|
github
|
numpde/as-master
|
find_conn_comps2.m
|
.m
|
as-master/p/network/20171002-RatPathways-EPFL/B-subgraphs/find_conn_comps2.m
| 1,813 |
utf_8
|
389f33c6086c7446c4d2627197f3666b
|
% Find all connected components of a graph.
% Difference from find_conn_comps.m: do not incur recursive functions.
function [nconncomps, conncomps] = find_conn_comps2(nnodes, G)
% Label nodes in the graph until all nodes are labeled.
labeled=zeros(1,nnodes); flag=1;
% Label all the singletons.
tmpG=G; tmpG=tmpG-diag(diag(tmpG));
%for i=1:nnodes
%tmpG(i,i)=0;
%end
degs=sum(tmpG>0);
subset=find(degs==0); k=0;
for i=1:length(subset)
k=k+1; labeled(subset(i))=k;
end
while (flag==1)
% Check if all nodes are labeled.
n=sum(labeled==0);
% Debug
%if (mod(n,1000)==0)
%n
%end
% If yes then return.
if (n==0)
flag=0;
% Otherwise pick up an unlabeled node and make its neighbors all have identical labels.
% Pick up the remaining node with the highest degree.
else
subset=find(labeled==0);
ind=subset(find(degs(subset)>=max(degs(subset))));
ind=ind(1);
% Ensure all direct and indirect neighbors have identical labels.
subset=union(find(tmpG(ind,:)>0),find(tmpG(:,ind)>0));
neighborlabels=unique(labeled(subset));
if (neighborlabels==0)
k=k+1; labeled(ind)=k; labeled(subset)=k;
else
curlabel=setdiff(neighborlabels,0); curlabel=curlabel(1);
labeled(ind)=curlabel; labeled(subset)=curlabel;
for i=1:length(neighborlabels)
j=neighborlabels(i);
if ((j~=0)&(j~=curlabel))
labeled(find(labeled==j))=curlabel;
end
end
end
end
end
% Extract unique labels. Each unique label constitutes one component.
uniquelabels=unique(labeled);
nconncomps=length(uniquelabels);
for i=1:nconncomps
j=uniquelabels(i);
subset=find(labeled==j);
conncomps{i}.n=length(subset);
conncomps{i}.comps=subset;
end
% Sort the connected components by size.
tmp=zeros(1,nconncomps);
for i=1:nconncomps
tmp(i)=conncomps{i}.n;
end
[Y,I]=sort(tmp,'descend'); clear tmp2;
for i=1:nconncomps
tmp2{i}=conncomps{I(i)};
end
conncomps=tmp2;
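% Usage sketch (leave commented): two components, sizes 3 and 2.
% G_ex = zeros(5); G_ex(1,2)=1; G_ex(2,3)=1; G_ex(4,5)=1; G_ex = G_ex + G_ex';
% [nc_ex, cc_ex] = find_conn_comps2(5, G_ex); % nc_ex = 2, cc_ex{1}.comps = [1 2 3]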
|
github
|
numpde/as-master
|
getitemval4.m
|
.m
|
as-master/p/network/20171002-RatPathways-EPFL/B-subgraphs/getitemval4.m
| 703 |
utf_8
|
f54f8a064512756737e0a6eda7b4ae6d
|
% Get the selected item in a string.
% Difference from getitemval3: fix the bug that the last entry contains \n.
function item = getitemval4(s, ind, sepch)
tabcnt=0; curind=1;
while ((tabcnt<ind)&(curind<=length(s)))
ch=s(curind);
if (ch==sepch)
tabcnt=tabcnt+1;
elseif ((ch=='\n')&(tabcnt<ind))
tabcnt=ind+1;
end
curind=curind+1;
end
if (tabcnt==ind)
cnt=1; template='';
while ((curind<=length(s))&(s(curind)~=sepch))
%while ((curind<=length(s))&(s(curind)~=sepch)&(s(curind)~='\n'))
template(cnt)=s(curind); curind=curind+1; cnt=cnt+1;
end
if (cnt>1)
item=template;
else
item='';
end
else
item='';
end
if (length(item)>0)
if (isspace(item(length(item)))==1)
item=item(1:(length(item)-1));
end
end
|
github
|
numpde/as-master
|
evaluate_complex_homology.m
|
.m
|
as-master/p/network/20171002-RatPathways-EPFL/B-subgraphs/evaluate_complex_homology.m
| 1,821 |
utf_8
|
a603270bb0a52f38cb60830f5c7e75af
|
% Evaluate the Betti numbers of a simplicial complex.
function bs = evaluate_complex_homology(nsimplices, simplices)
nnodes=0;
for n=1:nsimplices
nnodes=max(nnodes,max(simplices{n}));
end
% Construct the boundary maps.
% rho_{k}.
maxdim=length(simplices{nsimplices})-1;
clear bds rs;
% Debug
%for k=1:min(3,maxdim)
for k=1:maxdim
nlows=0; lows=[]; nhighs=0; highs=[];
for n=1:nsimplices
if (length(simplices{n})==k)
nlows=nlows+1; lows(nlows,:)=simplices{n};
elseif (length(simplices{n})==(k+1))
nhighs=nhighs+1; highs(nhighs,:)=simplices{n};
end
end
%bd=zeros(nhighs,nlows);
%for n=1:nhighs
%for m=1:nlows
%if (sum(ismember(lows(m,:),highs(n,:))==1)==k)
%i=setdiff(highs(n,:),lows(m,:)); i=find(highs(n,:)==i);
%c=(-1).^(i-1);
%bd(n,m)=c;
%end
%end
%end
%bds{k}=bd; rs(k)=nlows;
bd=zeros(nhighs,nlows);
tmp1=zeros(nhighs,nnodes);
tmp2=zeros(nlows,nnodes);
for n=1:nhighs
tmp1(n,highs(n,:))=1;
end
for n=1:nlows
tmp2(n,lows(n,:))=1;
end
bd=tmp1*transpose(tmp2);
bd(find(bd<k))=0; bd(find(bd==k))=1;
[a,b]=find(bd==1);
for j=1:length(a)
n=a(j); m=b(j);
i=setdiff(highs(n,:),lows(m,:)); i=find(highs(n,:)==i);
c=(-1).^(i-1);
bd(n,m)=c;
end
bds{k}=bd; rs(k)=nlows;
% Debug
%fprintf('%d %d %d\n',k,length(bds{k}(:,1)),length(bds{k}(1,:)));
end
% Evaluate Betti numbers.
bs=zeros(1,maxdim);
% Debug
%for k=0:min(2,maxdim-1)
for k=0:(maxdim-1)
% Debug
% Do not calculate the k-dimensional Betti number if the prior two consecutive Betti numbers are 0.
if ((k>=2)&(bs(k)==0)&(bs(k-1)==0))
noaction=1;
else
noaction=0;
end
if (noaction==0)
if (k==0)
d=rs(k+1);
else
%d=rs(k)-rank(transpose(bds{k}));
tmp=null(transpose(bds{k}));
d=length(tmp(1,:));
end
bs(k+1)=d-rank(transpose(bds{k+1}));
% Debug
%fprintf('bs(%d)=%d\n',k,bs(k+1));
end
end
%for k=1:(maxdim-1)
%bs(k)=rs(k)-rank(bds{k+1})-rank(bds{k});
%end
|
github
|
numpde/as-master
|
sample_discrete_rv.m
|
.m
|
as-master/p/network/20171002-RatPathways-EPFL/B-subgraphs/sample_discrete_rv.m
| 764 |
utf_8
|
a589464d25a383adb18d965b86a7c5b4
|
% Sample a value from a discrete distribution.
% RA: discretize(rand(1, n) * sum(p), [0; cumsum(p)])
% https://www.mathworks.com/matlabcentral/fileexchange/21912-sampling-from-a-discrete-distribution
function randval = sample_discrete_rv(vals, ps)
nstates=length(vals);
[Y,I]=sort(ps,'descend');
bds=zeros(1,nstates);
bds(1)=Y(1);
for n=2:nstates
bds(n)=bds(n-1)+Y(n);
end
bds=bds/sum(Y);
val=rand(1); lwd=1; upp=nstates; randval=0;
if (val<=bds(lwd))
randval=I(lwd);
elseif (val>=bds(upp))
randval=I(upp);
end
while (randval==0)
mid=ceil((lwd+upp)/2);
if ((val>bds(mid-1))&(val<=bds(mid)))
randval=I(mid);
elseif ((val>bds(mid))&(val<=bds(mid+1)))
randval=I(mid+1); % val falls in the bin just above mid
elseif (val>bds(mid))
lwd=mid;
elseif (val<bds(mid))
upp=mid;
end
end
randval=vals(randval);
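% Usage sketch (leave commented): draw one value from {10,20,30} with
% probabilities 0.2, 0.3 and 0.5.
% v_ex = sample_discrete_rv([10 20 30], [0.2 0.3 0.5]);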
|
github
|
numpde/as-master
|
find_conn_comps.m
|
.m
|
as-master/p/network/20171002-RatPathways-EPFL/B-subgraphs/find_conn_comps.m
| 1,685 |
utf_8
|
22ce1726dfe96d45a20696926c0702fd
|
% Find all connected components of a graph.
function [nconncomps, conncomps] = find_conn_comps(nnodes, G)
% Recursively label nodes in the graph until all nodes are labeled.
labeled=zeros(1,nnodes); flag=1;
% Label all the singletons.
%k=0;
%for i=1:nnodes
%tmp=find(G(i,:)>0);
%if (length(tmp)==0)
%k=k+1; labeled(i)=k;
%end
%end
% Label all the singletons.
tmpG=G; tmpG=tmpG-diag(diag(tmpG));
%for i=1:nnodes
%tmpG(i,i)=0;
%end
vec=sum(tmpG>0);
subset=find(vec==0); k=0;
for i=1:length(subset)
k=k+1; labeled(subset(i))=k;
end
while (flag==1)
% Check if all nodes are labeled.
n=0;
for i=1:nnodes
if (labeled(i)==0)
n=n+1;
end
end
% If yes, then return.
if (n==0)
flag=0;
% Otherwise pick up an unlabeled node and recursively label its neighbors.
else
i=1; j=0;
while ((i<=nnodes)&(j==0))
if (labeled(i)==0)
j=i;
end
i=i+1;
end
k=k+1;
labeled=recurse_label2(j,nnodes,G,labeled,k);
end
end
% Find the connected components from the labels.
nconncomps=k; clear conncomps;
for i=1:nconncomps
conncomps{i}.n=0;
conncomps{i}.comps=[];
end
for i=1:nnodes
j=labeled(i);
conncomps{j}.n=conncomps{j}.n+1;
conncomps{j}.comps(conncomps{j}.n)=i;
end
% Sort the connected components by size.
tmp=zeros(1,nconncomps);
for i=1:nconncomps
tmp(i)=conncomps{i}.n;
end
[Y,I]=sort(tmp,'descend'); clear tmp2;
for i=1:nconncomps
tmp2{i}=conncomps{I(i)};
end
conncomps=tmp2;
% Discard empty connected components.
discard=zeros(1,nconncomps);
for i=1:nconncomps
if (conncomps{i}.n<=0)
discard(i)=1;
end
end
tmp=find(discard==0);
for i=1:length(tmp)
tmp2{i}=conncomps{tmp(i)};
end
nconncomps=length(tmp); conncomps=tmp2;
clear tmp tmp2 Y I discard;
|
github
|
numpde/as-master
|
betti.m
|
.m
|
as-master/p/network/20171002-RatPathways-EPFL/D-topology/correctness/betti.m
| 5,084 |
utf_8
|
29d444ea4384c7afc49469c62236f239
|
function b = betti(file)
G = getfield(load(file), 'G');
[nsimplices, simplices] = construct_clique_complex(G);
b = evaluate_complex_homology(nsimplices,simplices);
save(file, 'G', 'b');
end
%%% CODE BY C-H YEANG %%%
% Construct the clique complex from an undirected graph.
% Apply only to small graphs.
function [nsimplices, simplices] = construct_clique_complex(G)
nnodes = length(G(1, :));
% Exhaust all cliques.
nsimplices = 0; simplices = {};
% Nodes.
for n = 1:nnodes
simplices{n} = [n];
end
nsimplices = nnodes; maxdim = 1;
% Edges.
[a, b] = find(G > 0);
subset = find(a < b);
edges = [a(subset)';b(subset)'];
nedges = length(edges(1, :));
if (nedges > 0)
for n = 1:nedges
simplices{nsimplices + n} = transpose(edges(:, n));
end
nsimplices = nsimplices + nedges;
maxdim = 2;
end
% Higher-order cliques.
% Stop when there are no cliques of a given order.
maxdim = maxdim + 1; flag = 1;
while (flag == 1)
% Build cliques on top of lower-order cliques.
% Only allow the permutations of an increasing order.
ncliques = 0; cliques = [];
for n = 1:nsimplices
if (length(simplices{n}) == (maxdim - 1))
lowerclique = simplices{n};
vec = ones(1, nnodes);
for i = 1:length(lowerclique)
vec = vec .* G(lowerclique(i), :);
end
subset = find(vec > 0); subset = setdiff(subset, 1:max(lowerclique));
for i = 1:length(subset)
ncliques = ncliques + 1; cliques(ncliques, :) = [lowerclique subset(i)];
end
% for i=1:nnodes
% if ((ismember(i,lowerclique)==0)&(sum(G(i,lowerclique)>0)==(maxdim-1))&(i>max(lowerclique)))
% subset=[lowerclique i];
% ncliques=ncliques+1; cliques(ncliques,:)=subset;
% end
% end
end
end
% Write cliques to the simplices.
for n = 1:ncliques
simplices{nsimplices + n} = cliques(n, :);
end
nsimplices = nsimplices + ncliques;
% Debug
% fprintf('maxdim=%d, ncliques=%d, nsimplices=%d\n',maxdim,ncliques,nsimplices);
% Stop if ncliques=0.
if (ncliques <= 0)
flag = 0; % maxdim=maxdim-1;
% Otherwise increment maxdim by one and proceed.
else
maxdim = maxdim + 1;
end
end
end
% Evaluate the Betti numbers of a simplicial complex.
function bs = evaluate_complex_homology(nsimplices, simplices)
nnodes=0;
for n=1:nsimplices
nnodes=max(nnodes,max(simplices{n}));
end
% Construct the boundary maps.
% rho_{k}.
maxdim=length(simplices{nsimplices})-1;
clear bds rs;
% Debug
%for k=1:min(3,maxdim)
for k=1:maxdim
nlows=0; lows=[]; nhighs=0; highs=[];
for n=1:nsimplices
if (length(simplices{n})==k)
nlows=nlows+1; lows(nlows,:)=simplices{n};
elseif (length(simplices{n})==(k+1))
nhighs=nhighs+1; highs(nhighs,:)=simplices{n};
end
end
%bd=zeros(nhighs,nlows);
%for n=1:nhighs
%for m=1:nlows
%if (sum(ismember(lows(m,:),highs(n,:))==1)==k)
%i=setdiff(highs(n,:),lows(m,:)); i=find(highs(n,:)==i);
%c=(-1).^(i-1);
%bd(n,m)=c;
%end
%end
%end
%bds{k}=bd; rs(k)=nlows;
bd=zeros(nhighs,nlows);
tmp1=zeros(nhighs,nnodes);
tmp2=zeros(nlows,nnodes);
for n=1:nhighs
tmp1(n,highs(n,:))=1;
end
for n=1:nlows
tmp2(n,lows(n,:))=1;
end
bd=tmp1*transpose(tmp2);
bd(find(bd<k))=0; bd(find(bd==k))=1;
[a,b]=find(bd==1);
for j=1:length(a)
n=a(j); m=b(j);
i=setdiff(highs(n,:),lows(m,:)); i=find(highs(n,:)==i);
c=(-1).^(i-1);
bd(n,m)=c;
end
bds{k}=bd; rs(k)=nlows;
% Debug
%fprintf('%d %d %d\n',k,length(bds{k}(:,1)),length(bds{k}(1,:)));
end
% Evaluate Betti numbers.
bs=zeros(1,maxdim);
% Debug
%for k=0:min(2,maxdim-1)
for k=0:(maxdim-1)
% Debug
% Do not calculate the k-dimensional Betti number if the prior two consecutive Betti numbers are 0.
if ((k>=2)&(bs(k)==0)&(bs(k-1)==0))
noaction=1;
else
noaction=0;
end
if (noaction==0)
if (k==0)
d=rs(k+1);
else
%d=rs(k)-rank(transpose(bds{k}));
tmp=null(transpose(bds{k}));
d=length(tmp(1,:));
end
bs(k+1)=d-rank(transpose(bds{k+1}));
% Debug
%fprintf('bs(%d)=%d\n',k,bs(k+1));
end
end
%for k=1:(maxdim-1)
%bs(k)=rs(k)-rank(bds{k+1})-rank(bds{k});
%end
end
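% Usage sketch (hypothetical file name, leave commented): store an adjacency matrix G
% in a MAT-file, then compute and append its Betti numbers:
% G = [0 1 1; 1 0 1; 1 1 0]; save('graph.mat','G'); % a filled triangle
% b = betti('graph.mat'); % b = [1 0]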
|
github
|
Lancelot899/ICRA2018-master
|
rukfUpdate.m
|
.m
|
ICRA2018-master/filters/rukfUpdate.m
| 1,788 |
utf_8
|
24325b68cd25a2d02a2b35e7dd34f88d
|
function [chi,omega_b,a_b,S] = rukfUpdate(chi,omega_b,a_b,...
S,y,param,R,ParamFilter)
param.Pi = ParamFilter.Pi;
param.chiC = ParamFilter.chiC;
k = length(y);
q = length(S);
N_aug = q+k;
Rc = chol(kron(eye(k/2),R));
S_aug = blkdiag(S,Rc);
% scaled unsented transform
W0 = 1-N_aug/3;
Wj = (1-W0)/(2*N_aug);
gamma = sqrt(N_aug/(1-W0));
alpha = 1;
beta = 2;
% Compute transformed measurement
X = gamma*[zeros(N_aug,1) S_aug' -S_aug'];% sigma-points
Y = zeros(k,2*N_aug+1);
Y(:,1) = h(chi,zeros(q-6,1),param,zeros(N_aug-q,1));
for j = 2:2*N_aug+1
xi_j = X([1:9 16:q],j);
v_j = X(q+1:N_aug,j);
Y(:,j) = h(chi,xi_j,param,v_j);
end
ybar = W0*Y(:,1) + Wj*sum(Y(:,2:end),2);% Measurement mean
Y(:,1) = sqrt(abs(W0+(1-alpha^2+beta)))*(Y(:,1)-ybar);
YY = sqrt(Wj)*(Y(:,2:2*N_aug+1)-ybar*ones(1,2*N_aug));
[~,Rs] = qr(YY');
Ss = Rs(1:k,1:k);
[Sy,~] = cholupdate(Ss,Y(:,1),'-'); % Sy'*Sy = Pyy
Pxy = zeros(q,k);
for j = 2:2*N_aug+1
Pxy = Pxy + Wj*X(1:q,j)*(Y(:,j)-ybar)';
end
K = Pxy*Sy^-1*Sy'^-1; % Gain
xibar = K*(y-ybar);
omega_b = omega_b + xibar(10:12);
a_b = a_b + xibar(13:15);
xibar = xibar([1:9 16:q]);
% Covariance update
A = K*Sy';
for n = 1:k
S = cholupdate(S,A(:,n),'-');
end
% Update mean state
chi = exp_multiSE3(xibar)*chi;
J = xi2calJl(xibar);
S = S*J;
end
%--------------------------------------------------------------------------
function y = h(chi,xi,param,v)
Pi = param.Pi;
chiC = param.chiC;
RotC = chiC(1:3,1:3);
xC = chiC(1:3,4);
yAmers = param.yAmers;
NbAmers = length(yAmers);
chi_j = exp_multiSE3(xi)*chi;
Rot = chi_j(1:3,1:3);
x = chi_j(1:3,5);
PosAmers = chi_j(1:3,6:end);
posAmers = PosAmers(:,yAmers);
z = Pi*( (Rot*RotC)'*(posAmers-kron(x,ones(1,NbAmers))) ...
- kron(xC,ones(1,NbAmers)));
y = z(1:2,:)./z(3,:);
y = y(:) + v;
end
|
github
|
Lancelot899/ICRA2018-master
|
EsimatePosAmers.m
|
.m
|
ICRA2018-master/filters/EsimatePosAmers.m
| 2,093 |
utf_8
|
98a1882400a96d76ab9789928887664e
|
function [points3d, errors] = EsimatePosAmers(pointTracks, ...
camPoses, cameraParams)
numTracks = numel(pointTracks);
points3d = zeros(numTracks, 3);
numCameras = size(camPoses, 2);
cameraMatrices = containers.Map('KeyType', 'uint32', 'ValueType', 'any');
for i = 1:numCameras
id = camPoses(i).ViewId;
R = camPoses(i).Orientation;
t = camPoses(i).Location;
size_t = size(t);
if size_t(1) == 3
t = t';
end
cameraMatrices(id) = cameraMatrix(cameraParams, R', -t*R');
end
for i = 1:numTracks
track = pointTracks(i);
points3d(i, :) = triangulateOnePoint(track, cameraMatrices);
end
if nargout > 1
[~, errors] = reprojectionErrors(points3d, cameraMatrices, pointTracks);
end
%--------------------------------------------------------------------------
function point3d = triangulateOnePoint(track, cameraMatrices)
% do the triangulation
numViews = numel(track.ViewIds);
A = zeros(numViews * 2, 4);
for i = 1:numViews
id = track.ViewIds(i);
P = cameraMatrices(id)';
A(2*i - 1, :) = track.Points(i, 1) * P(3,:) - P(1,:);
A(2*i , :) = track.Points(i, 2) * P(3,:) - P(2,:);
end
[~,~,V] = svd(A);
X = V(:, end);
X = X/X(end);
point3d = X(1:3)';
%--------------------------------------------------------------------------
function [errors, meanErrorsPerTrack] = reprojectionErrors(points3d, ...
cameraMatrices, tracks)
numPoints = size(points3d, 1);
points3dh = [points3d, ones(numPoints, 1)];
meanErrorsPerTrack = zeros(numPoints, 1);
errors = [];
for i = 1:numPoints
p3d = points3dh(i, :);
reprojPoints2d = reprojectPoint(p3d, tracks(i).ViewIds, cameraMatrices);
e = sqrt(sum((tracks(i).Points - reprojPoints2d).^2, 2));
meanErrorsPerTrack(i) = mean(e);
errors = [errors; e];
end
%--------------------------------------------------------------------------
function points2d = reprojectPoint(p3dh, viewIds, cameraMatrices)
numPoints = numel(viewIds);
points2d = zeros(numPoints, 2);
for i = 1:numPoints
p2dh = p3dh * cameraMatrices(viewIds(i));
points2d(i, :) = p2dh(1:2) ./ p2dh(3);
end
|
github
|
Lancelot899/ICRA2018-master
|
manageAmers.m
|
.m
|
ICRA2018-master/filters/manageAmers.m
| 5,456 |
utf_8
|
bf85aa3a6e9c58d710fe3434db202d1f
|
function [S,PosAmers,ParamFilter,trackerBis,myTracks,PosAmersNew,...
IdxAmersNew,trackCov,pointsMain,validityMain] = manageAmers(S,...
PosAmers,ParamFilter,ParamGlobal,trackerBis,trajFilter,I,...
pointsMain,validityMain,IdxImage,myTracks,pointsBis)
PosAmersNew = [];
IdxAmersNew = [];
trackCov = [];
MaxAmersNew = 10;
if sum(validityMain) < ParamFilter.NbAmersMin && I>120
P = S'*S; % not computationally efficient
NbAmersNew = min(sum(validityMain == 0),MaxAmersNew);
[PosAmersNew,trackPoints,trackerBis,myTracks,trackCov] = ObserveAmersNew(ParamFilter,ParamGlobal,...
trajFilter,I,IdxImage,trackerBis,myTracks,NbAmersNew,pointsMain,pointsBis,S);
j = 1;
IdxAmersNew = zeros(length(trackCov),1);
IdxAmersOld = find(validityMain == 0);
%if number of new landmarks is small
IdxAmersOld = IdxAmersOld(1:length(trackCov));
for ii = 1:length(IdxAmersOld)
idxAmersOld = IdxAmersOld(ii);
idxP = 15+(3*idxAmersOld-2:3*idxAmersOld);
P(:,idxP) = 0;
P(idxP,:) = 0;
P(idxP,idxP) = trackCov{j};
PosAmers(:,idxAmersOld) = PosAmersNew(j,:)';
pointsMain(idxAmersOld,:) = trackPoints(:,j);
validityMain(idxAmersOld) = 1;
IdxAmersNew(j) = idxAmersOld;
j = j+1;
end
if sum(pointsMain(:) < 0) > 0
validityMain(pointsMain(:,1)<0) = 0;
validityMain(pointsMain(:,2)<0) = 0;
pointsMain(pointsMain(:)<0) = 1;
end
S = chol(P); % not computationally efficient
end
end
%--------------------------------------------------------------------------
function [posAmersNew,trackPoints,trackerBis,myTracks,trackCov] = ...
ObserveAmersNew(ParamFilter,ParamGlobal,trajFilter,...
I,IdxImage,trackerBis,myTracks,NbAmersNew,pointsMain,pointsBis,S)
% compute estimated locations for new landmarks
cameraParams = ParamFilter.cameraParams;
dirImage = ParamGlobal.dirImage;
fileImages = ParamGlobal.fileImages;
image = strcat(dirImage,int2str(fileImages(IdxImage)),'.png');
image = undistortImage(imread(image),cameraParams);
Newpoints = detectMinEigenFeatures(image);
Newpoints = selectUniform(Newpoints,NbAmersNew+150,size(image));
%number of views of candidate points
nbViews = zeros(1,length(myTracks));
for i = 1:length(myTracks)
nbViews(i) = length(myTracks(i).ViewIds);
end
% tracking new points
posAmersNew = zeros(NbAmersNew,3);
trackPoints = ones(2,NbAmersNew);
trackCov = cell(NbAmersNew,1);
i = 1;
nbViewsMin = 7;
errorMax = 0.5;
PixelMin = 30;
while i <= NbAmersNew
ok = 0;
% find possible new point
while ok == 0
nbViews2 = find(nbViews>nbViewsMin);
if isempty(nbViews2)
if(nbViewsMin > 3)
nbViewsMin = nbViewsMin - 1;
end
[trackerBis,myTracks] = razTrackerBis(ParamGlobal,ParamFilter,I,IdxImage,myTracks);
for ii = 1:length(myTracks)
nbViews(ii) = length(myTracks(ii).ViewIds);
end
errorMax = errorMax+1;
nbViews2 = find(nbViews>nbViewsMin);
end
idx = randsample(nbViews2,1);
ok = 1;
pNew = myTracks(idx).Points(end,:);
idxViewNew = myTracks(idx).ViewIds(end);
for ii = 1:ParamFilter.NbAmers
if norm(pNew-pointsMain(ii,:)) < PixelMin || idxViewNew < I
ok = 0;
break
end
end
nbIdx = nbViews(idx);
nbViews(idx) = 0;
end
% estimate location
iReal = myTracks(idx).ViewIds(1);
Rot = squeeze(trajFilter.Rot(:,:,iReal));
x = trajFilter.x(:,iReal);
camPoses = struct('ViewId',myTracks(idx).ViewIds(1),...
'Orientation',Rot,'Location',x,'S',S);
try
for ii = 2:nbViewsMin% to be more time efficient (else use 2:nbIdx)
iReal = myTracks(idx).ViewIds(round(ii/nbViewsMin*nbIdx));
Rot = squeeze(trajFilter.Rot(:,:,iReal));
x = trajFilter.x(:,iReal);
camPoses(ii).ViewId = iReal;
camPoses(ii).Orientation = Rot;
camPoses(ii).Location = x;
camPoses(ii).S = S;
end
myTracks(idx).ViewIds = myTracks(idx).ViewIds([1 round((2:nbViewsMin)*nbIdx/nbViewsMin)]);
myTracks(idx).Points = myTracks(idx).Points([1 round((2:nbViewsMin)*nbIdx/nbViewsMin)],:);
[xyzPoint,covariance] = myEsimatePosAmers(myTracks(idx),camPoses,cameraParams,ParamFilter);
errors = sum(diag(covariance(end-2:end,end-2:end)));
catch
xyzPoint = ones(1,3);
errorMax = errorMax*2;
errors = errorMax+1;
for iii = 1:length(myTracks)
nbViews(iii) = length(myTracks(iii).ViewIds);
end
PixelMin = PixelMin/2;
end
% add if the error is sufficiently small
if isempty(Newpoints)
Newpoints = detectMinEigenFeatures(image);
Newpoints = selectUniform(Newpoints,NbAmersNew+150,size(image));
end
idxNew = randi(length(Newpoints),1);
if (errors < errorMax)
posAmersNew(i,:) = xyzPoint';
trackPoints(:,i) = myTracks(idx).Points(end,:)';
trackCov{i} = 3*10^-3*eye(3);%covariance(end-2:end,end-2:end);%2*10^-3*eye(3);
myTracks(idx).ViewIds = I;
myTracks(idx).Points = Newpoints(idxNew).Location;
pointsBis(idx,:) = Newpoints(idxNew).Location;
Newpoints(idxNew) = [];
i = i+1;
end
end
pointsBis(pointsBis(:)<=0) = 1;
trackerBis.setPoints(pointsBis);
end
|
github
|
Lancelot899/ICRA2018-master
|
ukfRefUpdate.m
|
.m
|
ICRA2018-master/filters/ukfRefUpdate.m
| 2,508 |
utf_8
|
0ffab5e57239a98e26302370a16c3f8c
|
function [chi,v,PosAmers,omega_b,a_b,S,xidot] = ukfRefUpdate(chi,v,omega_b,a_b,...
S,y,param,R,ParamFilter,PosAmers,xidot)
param.Pi = ParamFilter.Pi;
param.chiC = ParamFilter.chiC;
k = length(y);
q = length(S);
N_aug = q+k;
Rc = chol(kron(eye(k/2),R));
S_aug = blkdiag(S,Rc);
% scaled unscented transform
W0 = 1-N_aug/3;
Wj = (1-W0)/(2*N_aug);
gamma = sqrt(N_aug/(1-W0));
alpha = 1;
beta = 2;
% Compute transformed measurement
X = gamma*[zeros(N_aug,1) S_aug' -S_aug'];% sigma-points
Y = zeros(k,2*N_aug+1);
Y(:,1) = h(chi,zeros(q-9,1),param,zeros(N_aug-q,1));
for j = 2:2*N_aug+1
xi_j = X([1:3 7:9 16:q],j);
v_j = X(q+1:N_aug,j);
Y(:,j) = h(chi,xi_j,param,v_j);
end
ybar = W0*Y(:,1) + Wj*sum(Y(:,2:end),2);% Measurement mean
Y(:,1) = sqrt(abs(W0+(1-alpha^2+beta)))*(Y(:,1)-ybar);
YY = sqrt(Wj)*(Y(:,2:2*N_aug+1)-ybar*ones(1,2*N_aug));
[~,Rs] = qr(YY');
Ss = Rs(1:k,1:k);
[Sy,~] = cholupdate(Ss,Y(:,1),'-'); % Sy'*Sy = Pyy
Pxy = zeros(q,k);
for j = 2:2*N_aug+1
Pxy = Pxy + Wj*X(1:q,j)*(Y(:,j)-ybar)';
end
K = Pxy*Sy^-1*Sy'^-1; % Gain
xibar = K*(y-ybar);
omega_b = omega_b + xibar(10:12);
a_b = a_b + xibar(13:15);
xibar = xibar([1:9 16:q]);
% Covariance update
A = K*Sy';
for n = 1:k
[S,~] = cholupdate(S,A(:,n),'-');
end
PosAmers = PosAmers + reshape(xibar(10:end),[3 length(xibar(10:end))/3]);
% Update mean state
v = v + xibar(4:6);
chi = [chi(1:3,1:3) chi(1:3,5);0 0 0 1]*expSE3(xibar([1:3 7:9]));
% Parallel transport
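% the attitude/position covariance blocks (indices [1:3 7:9]) are
% transported along the group increment xidot accumulated during
% propagation: expA is applied to those blocks of P = S'*S, P is
% re-factorised into S, and xidot is reset.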
B = vecto(xidot(1:3));
C = vecto(xidot(4:6));
alphaB = norm(xidot(1:3));
alphaC = 1/2*norm(xidot(4:6));
B = (eye(3) +1/(alphaC^2)*(1-cos(alphaB))*C + 1/(alphaC^3)*C^2)*B;
C = eye(3) + 1/(alphaC^2)*(1-cos(alphaC))*C^2 + sin(alphaB)/alphaC*C;
expA = [C zeros(3);
B eye(3)];
P = S'*S;
P([1:3 7:9],[1:3 7:9]) = expA*P([1:3 7:9],[1:3 7:9])*expA';
P([1:3 7:9],[4:6,10:end]) = expA*P([1:3 7:9],[4:6,10:end]);
P([4:6,10:end],[1:3 7:9]) = P([1:3 7:9],[4:6,10:end])';
S = chol(P);
xidot = zeros(6,1);
end
%--------------------------------------------------------------------------
function y = h(chi,xi,param,v)
Pi = param.Pi;
chiC = param.chiC;
RotC = chiC(1:3,1:3);
xC = chiC(1:3,4);
yAmers = param.yAmers;
NbAmers = length(yAmers);
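% measurement model: perturb the current pose and landmarks by xi, express
% the selected landmarks in the camera frame through the body pose and the
% camera extrinsics (RotC, xC), project with Pi and normalise; v stacks the
% pixel noise on all observations.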
chi_j = chi([1:3 5],[1:3 5])*expSE3(xi(1:6));
Rot = chi_j(1:3,1:3);
x = chi_j(1:3,4);
PosAmers = chi(1:3,6:end) + reshape(xi(7:end),[3 length(xi(7:end))/3]);
posAmers = PosAmers(:,yAmers);
z = Pi*( (Rot*RotC)'*(posAmers-kron(x,ones(1,NbAmers))) ...
- kron(xC,ones(1,NbAmers)));
y = z(1:2,:)./z(3,:);
y = y(:) + v;
end
|
github
|
Lancelot899/ICRA2018-master
|
ukfUpdate.m
|
.m
|
ICRA2018-master/filters/ukfUpdate.m
| 1,933 |
utf_8
|
0c034d87cb979ce37c640d5fdf9a74b4
|
function [Rot,v,x,PosAmers,omega_b,a_b,S] = ukfUpdate(Rot,v,x,omega_b,a_b,...
S,y,param,R,ParamFilter,PosAmers)
param.Pi = ParamFilter.Pi;
param.chiC = ParamFilter.chiC;
k = length(y);
q = length(S);
N_aug = q+k;
Rc = chol(kron(eye(k/2),R));
S_aug = blkdiag(S,Rc);
% scaled unscented transform
W0 = 1-N_aug/3;
Wj = (1-W0)/(2*N_aug);
gamma = sqrt(N_aug/(1-W0));
alpha = 1;
beta = 2;
% Compute transformed measurement
X = gamma*[zeros(N_aug,1) S_aug' -S_aug'];% sigma-points
Y = zeros(k,2*N_aug+1);
Y(:,1) = h(Rot,x,zeros(q-9,1),param,zeros(N_aug-q,1));
for j = 2:2*N_aug+1
xi_j = X([1:3 7:9 16:q],j);
v_j = X(q+1:N_aug,j);
Y(:,j) = h(Rot,x,xi_j,param,v_j);
end
ybar = W0*Y(:,1) + Wj*sum(Y(:,2:end),2);% Measurement mean
Y(:,1) = sqrt(abs(W0+(1-alpha^2+beta)))*(Y(:,1)-ybar);
YY = sqrt(Wj)*(Y(:,2:2*N_aug+1)-ybar*ones(1,2*N_aug));
[~,Rs] = qr(YY');
Ss = Rs(1:k,1:k);
[Sy,~] = cholupdate(Ss,Y(:,1),'-'); % Sy'*Sy = Pyy
Pxy = zeros(q,k);
for j = 2:2*N_aug+1
Pxy = Pxy + Wj*X(1:q,j)*(Y(:,j)-ybar)';
end
K = Pxy*Sy^-1*Sy'^-1; % Gain
xibar = K*(y-ybar);
omega_b = omega_b + xibar(10:12);
a_b = a_b + xibar(13:15);
xibar = xibar([1:9 16:q]);
% Covariance update
A = K*Sy';
for n = 1:k
[S,~] = cholupdate(S,A(:,n),'-');
end
% Update mean state
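% this variant keeps velocity, position and landmarks additive and only
% updates the attitude multiplicatively through expSO3, in contrast with
% the SE(3)-type retractions used in ukfRefUpdate and lukfUpdate.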
Rot = Rot*expSO3(xibar(1:3));
v = v + xibar(4:6);
x = x + xibar(7:9);
PosAmers = PosAmers + reshape(xibar(10:end),[3 length(xibar(10:end))/3]);
end
%--------------------------------------------------------------------------
function y = h(Rot,x,xi,param,v)
Pi = param.Pi;
chiC = param.chiC;
RotC = chiC(1:3,1:3);
xC = chiC(1:3,4);
yAmers = param.yAmers;
NbAmers = length(yAmers);
Rot = Rot*expSO3(xi(1:3));
x = x + xi(4:6);
PosAmers = param.PosAmers + reshape(xi(7:end),[3 length(xi(7:end))/3]);
posAmers = PosAmers(:,yAmers);
z = Pi*( (Rot*RotC)'*(posAmers-kron(x,ones(1,NbAmers))) ...
- kron(xC,ones(1,NbAmers)));
y = z(1:2,:)./z(3,:);
y = y(:) + v;
end
|
github
|
Lancelot899/ICRA2018-master
|
lukfUpdate.m
|
.m
|
ICRA2018-master/filters/lukfUpdate.m
| 1,788 |
utf_8
|
9b390dc82185a8f43fa24167dcce1816
|
function [chi,omega_b,a_b,S] = lukfUpdate(chi,omega_b,a_b,...
S,y,param,R,ParamFilter)
param.Pi = ParamFilter.Pi;
param.chiC = ParamFilter.chiC;
k = length(y);
q = length(S);
N_aug = q+k;
Rc = chol(kron(eye(k/2),R));
S_aug = blkdiag(S,Rc);
% scaled unscented transform
W0 = 1-N_aug/3;
Wj = (1-W0)/(2*N_aug);
gamma = sqrt(N_aug/(1-W0));
alpha = 1;
beta = 2;
% Compute transformed measurement
X = gamma*[zeros(N_aug,1) S_aug' -S_aug'];% sigma-points
Y = zeros(k,2*N_aug+1);
Y(:,1) = h(chi,zeros(q-6,1),param,zeros(N_aug-q,1));
for j = 2:2*N_aug+1
xi_j = X([1:9 16:q],j);
v_j = X(q+1:N_aug,j);
Y(:,j) = h(chi,xi_j,param,v_j);
end
ybar = W0*Y(:,1) + Wj*sum(Y(:,2:end),2);% Measurement mean
Y(:,1) = sqrt(abs(W0+(1-alpha^2+beta)))*(Y(:,1)-ybar);
YY = sqrt(Wj)*(Y(:,2:2*N_aug+1)-ybar*ones(1,2*N_aug));
[~,Rs] = qr(YY');
Ss = Rs(1:k,1:k);
[Sy,~] = cholupdate(Ss,Y(:,1),'-'); % Sy'*Sy = Pyy
Pxy = zeros(q,k);
for j = 2:2*N_aug+1
Pxy = Pxy + Wj*X(1:q,j)*(Y(:,j)-ybar)';
end
K = Pxy*Sy^-1*Sy'^-1; % Gain
xibar = K*(y-ybar);
omega_b = omega_b + xibar(10:12);
a_b = a_b + xibar(13:15);
xibar = xibar([1:9 16:q]);
% Covariance update
A = K*Sy';
for n = 1:k
S = cholupdate(S,A(:,n),'-');
end
% Update mean state
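% the whole state (attitude, velocity, position and landmarks stacked in a
% single matrix Lie-group element) is retracted with one right group
% exponential; the Jacobian returned by xi2calJr (presumably the right
% Jacobian of that exponential) then maps the square-root covariance into
% the tangent space at the updated estimate.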
chi = chi*exp_multiSE3(xibar);
J = xi2calJr(xibar);
S = S*J;
end
%--------------------------------------------------------------------------
function y = h(chi,xi,param,v)
Pi = param.Pi;
chiC = param.chiC;
RotC = chiC(1:3,1:3);
xC = chiC(1:3,4);
yAmers = param.yAmers;
NbAmers = length(yAmers);
chi_j = chi*exp_multiSE3(xi);
Rot = chi_j(1:3,1:3);
x = chi_j(1:3,5);
PosAmers = chi_j(1:3,6:end);
posAmers = PosAmers(:,yAmers);
z = Pi*( (Rot*RotC)'*(posAmers-kron(x,ones(1,NbAmers))) ...
- kron(xC,ones(1,NbAmers)));
y = z(1:2,:)./z(3,:);
y = y(:) + v;
end
|