plateform
stringclasses 1
value | repo_name
stringlengths 13
113
| name
stringlengths 3
74
| ext
stringclasses 1
value | path
stringlengths 12
229
| size
int64 23
843k
| source_encoding
stringclasses 9
values | md5
stringlengths 32
32
| text
stringlengths 23
843k
|
---|---|---|---|---|---|---|---|---|
github
|
zzlyw/machine-learning-exercises-master
|
saveubjson.m
|
.m
|
machine-learning-exercises-master/machine-learning-ex3/ex3/lib/jsonlab/saveubjson.m
| 16,123 |
utf_8
|
61d4f51010aedbf97753396f5d2d9ec0
|
function json=saveubjson(rootname,obj,varargin)
%
% json=saveubjson(rootname,obj,filename)
% or
% json=saveubjson(rootname,obj,opt)
% json=saveubjson(rootname,obj,'param1',value1,'param2',value2,...)
%
% convert a MATLAB object (cell, struct or array) into a Universal
% Binary JSON (UBJSON) binary string
%
% author: Qianqian Fang (fangq<at> nmr.mgh.harvard.edu)
% created on 2013/08/17
%
% $Id: saveubjson.m 460 2015-01-03 00:30:45Z fangq $
%
% input:
% rootname: the name of the root-object, when set to '', the root name
% is ignored, however, when opt.ForceRootName is set to 1 (see below),
% the MATLAB variable name will be used as the root name.
% obj: a MATLAB object (array, cell, cell array, struct, struct array)
% filename: a string for the file name to save the output UBJSON data
% opt: a struct for additional options, ignore to use default values.
% opt can have the following fields (first in [.|.] is the default)
%
% opt.FileName [''|string]: a file name to save the output JSON data
% opt.ArrayToStruct[0|1]: when set to 0, saveubjson outputs 1D/2D
% array in JSON array format; if sets to 1, an
% array will be shown as a struct with fields
% "_ArrayType_", "_ArraySize_" and "_ArrayData_"; for
% sparse arrays, the non-zero elements will be
% saved to _ArrayData_ field in triplet-format i.e.
% (ix,iy,val) and "_ArrayIsSparse_" will be added
% with a value of 1; for a complex array, the
% _ArrayData_ array will include two columns
% (4 for sparse) to record the real and imaginary
% parts, and also "_ArrayIsComplex_":1 is added.
% opt.ParseLogical [1|0]: if this is set to 1, logical array elem
% will use true/false rather than 1/0.
% opt.NoRowBracket [1|0]: if this is set to 1, arrays with a single
% numerical element will be shown without a square
% bracket, unless it is the root object; if 0, square
% brackets are forced for any numerical arrays.
% opt.ForceRootName [0|1]: when set to 1 and rootname is empty, saveubjson
% will use the name of the passed obj variable as the
% root object name; if obj is an expression and
% does not have a name, 'root' will be used; if this
% is set to 0 and rootname is empty, the root level
% will be merged down to the lower level.
% opt.JSONP [''|string]: to generate a JSONP output (JSON with padding),
% for example, if opt.JSON='foo', the JSON data is
% wrapped inside a function call as 'foo(...);'
% opt.UnpackHex [1|0]: conver the 0x[hex code] output by loadjson
% back to the string form
%
% opt can be replaced by a list of ('param',value) pairs. The param
% string is equivallent to a field in opt and is case sensitive.
% output:
% json: a binary string in the UBJSON format (see http://ubjson.org)
%
% examples:
% jsonmesh=struct('MeshNode',[0 0 0;1 0 0;0 1 0;1 1 0;0 0 1;1 0 1;0 1 1;1 1 1],...
% 'MeshTetra',[1 2 4 8;1 3 4 8;1 2 6 8;1 5 6 8;1 5 7 8;1 3 7 8],...
% 'MeshTri',[1 2 4;1 2 6;1 3 4;1 3 7;1 5 6;1 5 7;...
% 2 8 4;2 8 6;3 8 4;3 8 7;5 8 6;5 8 7],...
% 'MeshCreator','FangQ','MeshTitle','T6 Cube',...
% 'SpecialData',[nan, inf, -inf]);
% saveubjson('jsonmesh',jsonmesh)
% saveubjson('jsonmesh',jsonmesh,'meshdata.ubj')
%
% license:
% BSD, see LICENSE_BSD.txt files for details
%
% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)
%
if(nargin==1)
% single-argument form: the sole input is the object; recover a root
% name from the caller's variable name, falling back to 'root'
varname=inputname(1);
obj=rootname;
if(isempty(varname))
varname='root';
end
rootname=varname;
else
varname=inputname(2);
end
if(length(varargin)==1 && ischar(varargin{1}))
% a single string option is shorthand for the output file name
opt=struct('FileName',varargin{1});
else
opt=varargin2struct(varargin{:});
end
% remember whether we run under Octave (affects regexp handling downstream)
opt.IsOctave=exist('OCTAVE_VERSION','builtin');
rootisarray=0;
rootlevel=1;
forceroot=jsonopt('ForceRootName',0,opt);
% an unnamed serializable root is emitted without an enclosing object
if((isnumeric(obj) || islogical(obj) || ischar(obj) || isstruct(obj) || iscell(obj)) && isempty(rootname) && forceroot==0)
rootisarray=1;
rootlevel=0;
else
if(isempty(rootname))
rootname=varname;
end
end
if((isstruct(obj) || iscell(obj))&& isempty(rootname) && forceroot)
rootname='root';
end
json=obj2ubjson(rootname,obj,rootlevel,opt);
if(~rootisarray)
% wrap named output in a top-level UBJSON object
json=['{' json '}'];
end
jsonp=jsonopt('JSONP','',opt);
if(~isempty(jsonp))
json=[jsonp '(' json ')'];
end
% save to a file if FileName is set, suggested by Patrick Rapin
if(~isempty(jsonopt('FileName','',opt)))
fid = fopen(opt.FileName, 'wb');
fwrite(fid,json);
fclose(fid);
end
%%-------------------------------------------------------------------------
function txt=obj2ubjson(name,item,level,varargin)
%
% dispatch a MATLAB value to the serializer matching its type;
% numeric and logical arrays fall through to mat2ubjson
%
if(iscell(item))
    serialize=@cell2ubjson;
elseif(isstruct(item))
    serialize=@struct2ubjson;
elseif(ischar(item))
    serialize=@str2ubjson;
else
    serialize=@mat2ubjson;
end
txt=serialize(name,item,level,varargin{:});
%%-------------------------------------------------------------------------
function txt=cell2ubjson(name,item,level,varargin)
%
% serialize a cell array as a UBJSON array; 'name' (when non-empty)
% becomes the key of the array, and 'level' tracks nesting depth for
% the recursive calls
%
txt='';
if(~iscell(item))
error('input is not a cell');
end
dim=size(item);
if(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now
item=reshape(item,dim(1),numel(item)/dim(1));
dim=size(item);
end
len=numel(item); % let's handle 1D cell first
if(len>1)
if(~isempty(name))
txt=[S_(checkname(name,varargin{:})) '[']; name='';
else
txt='[';
end
elseif(len==0)
% an empty cell maps to the UBJSON null marker 'Z'
if(~isempty(name))
txt=[S_(checkname(name,varargin{:})) 'Z']; name='';
else
txt='Z';
end
end
for j=1:dim(2)
if(dim(1)>1) txt=[txt '[']; end % each multi-row column becomes a nested array
for i=1:dim(1)
txt=[txt obj2ubjson(name,item{i,j},level+(len>1),varargin{:})];
end
if(dim(1)>1) txt=[txt ']']; end
end
if(len>1) txt=[txt ']']; end
%%-------------------------------------------------------------------------
function txt=struct2ubjson(name,item,level,varargin)
%
% serialize a struct (or struct array); each element becomes a UBJSON
% object whose keys are the struct field names
%
txt='';
if(~isstruct(item))
error('input is not a struct');
end
dim=size(item);
if(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now
item=reshape(item,dim(1),numel(item)/dim(1));
dim=size(item);
end
len=numel(item);
if(~isempty(name))
if(len>1) txt=[S_(checkname(name,varargin{:})) '[']; end
else
if(len>1) txt='['; end
end
for j=1:dim(2)
if(dim(1)>1) txt=[txt '[']; end
for i=1:dim(1)
names = fieldnames(item(i,j));
% a scalar named struct keeps its key; array elements are anonymous
if(~isempty(name) && len==1)
txt=[txt S_(checkname(name,varargin{:})) '{'];
else
txt=[txt '{'];
end
if(~isempty(names))
% recurse into every field; the level adjustment accounts for the
% extra array nesting introduced by struct arrays
for e=1:length(names)
txt=[txt obj2ubjson(names{e},getfield(item(i,j),...
names{e}),level+(dim(1)>1)+1+(len>1),varargin{:})];
end
end
txt=[txt '}'];
end
if(dim(1)>1) txt=[txt ']']; end
end
if(len>1) txt=[txt ']']; end
%%-------------------------------------------------------------------------
function txt=str2ubjson(name,item,level,varargin)
%
% serialize a char array; each row of a multi-row char array becomes
% one element of a UBJSON array
%
txt='';
if(~ischar(item))
error('input is not a string');
end
item=reshape(item, max(size(item),[1 0])); % guarantee at least one row
len=size(item,1);
if(~isempty(name))
if(len>1) txt=[S_(checkname(name,varargin{:})) '[']; end
else
if(len>1) txt='['; end
end
isoct=jsonopt('IsOctave',0,varargin{:}); % NOTE(review): fetched but unused here
for e=1:len
val=item(e,:);
if(len==1)
% the interleaved '' literals are no-ops inherited from the text-JSON
% version; the result is the key (if any) followed by the encoded value
obj=['' S_(checkname(name,varargin{:})) '' '',S_(val),''];
if(isempty(name)) obj=['',S_(val),'']; end
txt=[txt,'',obj];
else
txt=[txt,'',['',S_(val),'']];
end
end
if(len>1) txt=[txt ']']; end
%%-------------------------------------------------------------------------
function txt=mat2ubjson(name,item,level,varargin)
%
% serialize a numeric/logical array; dense real 1D/2D arrays use the
% plain UBJSON array form, while sparse, complex, N-D or empty arrays
% (or any array when opt.ArrayToStruct=1) use the annotated object
% form with _ArrayType_/_ArraySize_/_ArrayData_ fields
%
if(~isnumeric(item) && ~islogical(item))
error('input is not an array');
end
if(length(size(item))>2 || issparse(item) || ~isreal(item) || ...
isempty(item) || jsonopt('ArrayToStruct',0,varargin{:}))
% annotated form: open an object recording the class and dimensions
cid=I_(uint32(max(size(item))));
if(isempty(name))
txt=['{' S_('_ArrayType_'),S_(class(item)),S_('_ArraySize_'),I_a(size(item),cid(1)) ];
else
if(isempty(item))
% a named empty array collapses to the null marker 'Z'
txt=[S_(checkname(name,varargin{:})),'Z'];
return;
else
txt=[S_(checkname(name,varargin{:})),'{',S_('_ArrayType_'),S_(class(item)),S_('_ArraySize_'),I_a(size(item),cid(1))];
end
end
else
% plain form: delegate to matdata2ubjson and return immediately
if(isempty(name))
txt=matdata2ubjson(item,level+1,varargin{:});
else
if(numel(item)==1 && jsonopt('NoRowBracket',1,varargin{:})==1)
% a named scalar drops the surrounding brackets unless disabled
numtxt=regexprep(regexprep(matdata2ubjson(item,level+1,varargin{:}),'^\[',''),']','');
txt=[S_(checkname(name,varargin{:})) numtxt];
else
txt=[S_(checkname(name,varargin{:})),matdata2ubjson(item,level+1,varargin{:})];
end
end
return;
end
if(issparse(item))
[ix,iy]=find(item);
data=full(item(find(item)));
if(~isreal(item))
data=[real(data(:)),imag(data(:))];
if(size(item,1)==1)
% Kludge to have data's 'transposedness' match item's.
% (Necessary for complex row vector handling below.)
data=data';
end
txt=[txt,S_('_ArrayIsComplex_'),'T'];
end
txt=[txt,S_('_ArrayIsSparse_'),'T'];
if(size(item,1)==1)
% Row vector, store only column indices.
txt=[txt,S_('_ArrayData_'),...
matdata2ubjson([iy(:),data'],level+2,varargin{:})];
elseif(size(item,2)==1)
% Column vector, store only row indices.
txt=[txt,S_('_ArrayData_'),...
matdata2ubjson([ix,data],level+2,varargin{:})];
else
% General case, store row and column indices.
txt=[txt,S_('_ArrayData_'),...
matdata2ubjson([ix,iy,data],level+2,varargin{:})];
end
else
if(isreal(item))
txt=[txt,S_('_ArrayData_'),...
matdata2ubjson(item(:)',level+2,varargin{:})];
else
% complex dense array: real and imaginary parts as two columns
txt=[txt,S_('_ArrayIsComplex_'),'T'];
txt=[txt,S_('_ArrayData_'),...
matdata2ubjson([real(item(:)) imag(item(:))],level+2,varargin{:})];
end
end
txt=[txt,'}'];
%%-------------------------------------------------------------------------
function txt=matdata2ubjson(mat,level,varargin)
%
% encode the raw payload of a numeric or logical matrix in UBJSON,
% picking the smallest integer type able to represent every element
%
% input:
%    mat: a numeric or logical MATLAB array (not struct/cell)
%    level: nesting depth (kept for symmetry with the savejson API)
%
if(isempty(mat))
    txt='Z';
    return;
end
if(size(mat,1)==1)
    level=level-1;
end
type='';
% fix: the original computed (mat<0) and tested isempty() on it, which
% is false for any non-empty input, leaving the unsigned 'U' branch dead
hasnegative=any(mat(:)<0);
if(isa(mat,'integer') || isinteger(mat) || (isfloat(mat) && all(mod(mat(:),1) == 0)))
    if(~hasnegative)
        % fix: uint8 holds 0..255; the original bound (<=2^8) admitted
        % 256, which would saturate when cast to uint8 in I_a
        if(max(mat(:))<=2^8-1)
            type='U';
        end
    end
    if(isempty(type))
        % todo - need to consider negative ones separately
        % fix: bound by the largest magnitude, not abs(max): for an
        % all-negative array max() is the smallest-magnitude element
        id= histc(max(abs(mat(:))),[0 2^7 2^15 2^31 2^63]);
        if(isempty(find(id)))
            error('high-precision data is not yet supported');
        end
        key='iIlL';
        type=key(find(id));
    end
    txt=[I_a(mat(:),type,size(mat))];
elseif(islogical(mat))
    logicalval='FT';
    if(numel(mat)==1)
        txt=logicalval(mat+1);
    else
        txt=['[$U#' I_a(size(mat),'l') typecast(swapbytes(uint8(mat(:)')),'uint8')];
    end
else
    if(numel(mat)==1)
        txt=['[' D_(mat) ']'];
    else
        txt=D_a(mat(:),'D',size(mat));
    end
end
% substitute Inf/NaN byte patterns with the user-configurable tokens
if(any(isinf(mat(:))))
    txt=regexprep(txt,'([-+]*)Inf',jsonopt('Inf','"$1_Inf_"',varargin{:}));
end
if(any(isnan(mat(:))))
    txt=regexprep(txt,'NaN',jsonopt('NaN','"_NaN_"',varargin{:}));
end
%%-------------------------------------------------------------------------
function newname=checkname(name,varargin)
%
% convert field names encoded by loadjson as 0x[hex]_ escape sequences
% back to their original characters when UnpackHex is enabled
%
isunpack=jsonopt('UnpackHex',1,varargin{:});
newname=name;
if(isempty(regexp(name,'0x([0-9a-fA-F]+)_','once')))
return % nothing to unpack
end
if(isunpack)
isoct=jsonopt('IsOctave',0,varargin{:});
if(~isoct)
% MATLAB supports dynamic expressions in the replacement string
newname=regexprep(name,'(^x|_){1}0x([0-9a-fA-F]+)_','${native2unicode(hex2dec($2))}');
else
% Octave lacks that feature: locate each escape and rebuild manually
pos=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','start');
pend=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','end');
if(isempty(pos)) return; end
str0=name;
pos0=[0 pend(:)' length(name)];
newname='';
for i=1:length(pos)
newname=[newname str0(pos0(i)+1:pos(i)-1) char(hex2dec(str0(pos(i)+3:pend(i)-1)))];
end
if(pos(end)~=length(name))
% append the trailing segment after the last escape
newname=[newname str0(pos0(end-1)+1:pos0(end))];
end
end
end
%%-------------------------------------------------------------------------
function val=S_(str)
% Encode a text value in UBJSON: a single character uses the 'C'
% marker; longer strings use 'S' followed by an encoded length.
if(length(str)~=1)
    val=['S' I_(int32(length(str))) str];
else
    val=['C' str];
end
%%-------------------------------------------------------------------------
function val=I_(num)
%
% encode a scalar integer using the smallest UBJSON integer type:
% 'U' uint8, 'i' int8, 'I' int16, 'l' int32, 'L' int64, all big-endian
%
if(~isinteger(num))
    error('input is not an integer');
end
% fix: the original used num<255, which wastefully encoded 255 as a
% 2-byte int16; 255 fits in an unsigned byte
if(num>=0 && num<=255)
    val=['U' data2byte(swapbytes(cast(num,'uint8')),'uint8')];
    return;
end
key='iIlL';
cid={'int8','int16','int32','int64'};
for i=1:4
    % pick the first signed width whose range covers num
    if((num>0 && num<2^(i*8-1)) || (num<0 && num>=-2^(i*8-1)))
        val=[key(i) data2byte(swapbytes(cast(num,cid{i})),'uint8')];
        return;
    end
end
error('unsupported integer');
%%-------------------------------------------------------------------------
function val=D_(num)
% Encode a scalar float: 'd' marker for single precision, 'D' for
% double, followed by the raw IEEE-754 bytes.
if(~isfloat(num))
    error('input is not a float');
end
if(isa(num,'single'))
    marker='d';
else
    marker='D';
end
val=[marker data2byte(num,'uint8')];
%%-------------------------------------------------------------------------
function data=I_a(num,type,dim,format)
%
% encode an integer array with the given UBJSON type marker; arrays of
% more than one element use the optimized container format
% ('$' type '#' count), which by spec omits the closing ']'
%
id=find(ismember('iUIlL',type));
% NOTE(review): find() yields [] (not 0) for an unknown type, so this
% guard never fires; an unknown marker would fail later instead
if(id==0)
error('unsupported integer array');
end
% based on UBJSON specs, all integer types are stored in big endian format
if(id==1)
data=data2byte(swapbytes(int8(num)),'uint8');
blen=1;
elseif(id==2)
data=data2byte(swapbytes(uint8(num)),'uint8');
blen=1;
elseif(id==3)
data=data2byte(swapbytes(int16(num)),'uint8');
blen=2;
elseif(id==4)
data=data2byte(swapbytes(int32(num)),'uint8');
blen=4;
elseif(id==5)
data=data2byte(swapbytes(int64(num)),'uint8');
blen=8;
end
% a true 2D array (not a single row) always uses the optimized form
if(nargin>=3 && length(dim)>=2 && prod(dim)~=dim(2))
format='opt';
end
if((nargin<4 || strcmp(format,'opt')) && numel(num)>1)
if(nargin>=3 && (length(dim)==1 || (length(dim)>=2 && prod(dim)~=dim(2))))
% 2D array: embed the dimension vector in place of a scalar count
cid=I_(uint32(max(dim)));
data=['$' type '#' I_a(dim,cid(1)) data(:)'];
else
data=['$' type '#' I_(int32(numel(data)/blen)) data(:)'];
end
data=['[' data(:)']; % count-prefixed containers carry no closing ']'
else
% unoptimized form: interleave the type marker before every element
data=reshape(data,blen,numel(data)/blen);
data(2:blen+1,:)=data;
data(1,:)=type;
data=data(:)';
data=['[' data(:)' ']'];
end
%%-------------------------------------------------------------------------
function data=D_a(num,type,dim,format)
%
% encode a floating-point array with marker 'd' (single, 4 bytes per
% element) or 'D' (double, 8 bytes); multi-element arrays use the
% optimized count-prefixed container form, which has no closing ']'
%
id=find(ismember('dD',type));
% NOTE(review): find() yields [] (not 0) for an unknown type, so this
% guard never fires; an unknown marker would fail later instead
if(id==0)
error('unsupported float array');
end
if(id==1)
data=data2byte(single(num),'uint8');
elseif(id==2)
data=data2byte(double(num),'uint8');
end
% a true 2D array (not a single row) always uses the optimized form
if(nargin>=3 && length(dim)>=2 && prod(dim)~=dim(2))
format='opt';
end
if((nargin<4 || strcmp(format,'opt')) && numel(num)>1)
if(nargin>=3 && (length(dim)==1 || (length(dim)>=2 && prod(dim)~=dim(2))))
cid=I_(uint32(max(dim)));
data=['$' type '#' I_a(dim,cid(1)) data(:)'];
else
% element count = byte count / element width (id*4 bytes each)
data=['$' type '#' I_(int32(numel(data)/(id*4))) data(:)'];
end
data=['[' data];
else
% unoptimized form: interleave the type marker before each element
data=reshape(data,(id*4),length(data)/(id*4));
data(2:(id*4+1),:)=data;
data(1,:)=type;
data=data(:)';
data=['[' data(:)' ']'];
end
%%-------------------------------------------------------------------------
function bytes=data2byte(varargin)
% Reinterpret the input's raw memory as the requested type (typically
% 'uint8') and flatten the result to a single row vector.
raw=typecast(varargin{:});
bytes=reshape(raw,1,numel(raw));
|
github
|
zzlyw/machine-learning-exercises-master
|
submit.m
|
.m
|
machine-learning-exercises-master/machine-learning-ex8/ex8/submit.m
| 2,135 |
utf_8
|
eebb8c0a1db5a4df20b4c858603efad6
|
function submit()
% Entry point for submitting the ex8 solutions to the Coursera grader.
% Builds the per-part configuration and delegates the actual network
% submission to submitWithConfiguration.
addpath('./lib');
conf.assignmentSlug = 'anomaly-detection-and-recommender-systems';
conf.itemName = 'Anomaly Detection and Recommender Systems';
% each entry: { part id, source files graded, human-readable name }
conf.partArrays = { ...
{ ...
'1', ...
{ 'estimateGaussian.m' }, ...
'Estimate Gaussian Parameters', ...
}, ...
{ ...
'2', ...
{ 'selectThreshold.m' }, ...
'Select Threshold', ...
}, ...
{ ...
'3', ...
{ 'cofiCostFunc.m' }, ...
'Collaborative Filtering Cost', ...
}, ...
{ ...
'4', ...
{ 'cofiCostFunc.m' }, ...
'Collaborative Filtering Gradient', ...
}, ...
{ ...
'5', ...
{ 'cofiCostFunc.m' }, ...
'Regularized Cost', ...
}, ...
{ ...
'6', ...
{ 'cofiCostFunc.m' }, ...
'Regularized Gradient', ...
}, ...
};
% callback invoked by the submission library to compute each part's output
conf.output = @output;
submitWithConfiguration(conf);
end
function out = output(partId, auxstring)
% Compute the output string sent to the grader for one part.
% partId is a string ('1'..'6'); auxstring is unused but kept so the
% callback signature matches the grader interface.
% Random Test Cases
n_u = 3; n_m = 4; n = 5;
X = reshape(sin(1:n_m*n), n_m, n);
Theta = reshape(cos(1:n_u*n), n_u, n);
Y = reshape(sin(1:2:2*n_m*n_u), n_m, n_u);
R = Y > 0.5;
pval = [abs(Y(:)) ; 0.001; 1];
Y = (Y .* double(R)); % set 'Y' values to 0 for movies not reviewed
yval = [R(:) ; 1; 0];
params = [X(:); Theta(:)];
% fix: compare ids with strcmp; the original used partId == '1', which
% raises a dimension-mismatch error for any id longer than one character
if strcmp(partId, '1')
  [mu sigma2] = estimateGaussian(X);
  out = sprintf('%0.5f ', [mu(:); sigma2(:)]);
elseif strcmp(partId, '2')
  [bestEpsilon bestF1] = selectThreshold(yval, pval);
  out = sprintf('%0.5f ', [bestEpsilon(:); bestF1(:)]);
elseif strcmp(partId, '3')
  [J] = cofiCostFunc(params, Y, R, n_u, n_m, ...
                     n, 0);
  out = sprintf('%0.5f ', J(:));
elseif strcmp(partId, '4')
  [J, grad] = cofiCostFunc(params, Y, R, n_u, n_m, ...
                           n, 0);
  out = sprintf('%0.5f ', grad(:));
elseif strcmp(partId, '5')
  [J] = cofiCostFunc(params, Y, R, n_u, n_m, ...
                     n, 1.5);
  out = sprintf('%0.5f ', J(:));
elseif strcmp(partId, '6')
  [J, grad] = cofiCostFunc(params, Y, R, n_u, n_m, ...
                           n, 1.5);
  out = sprintf('%0.5f ', grad(:));
end
end
|
github
|
zzlyw/machine-learning-exercises-master
|
submitWithConfiguration.m
|
.m
|
machine-learning-exercises-master/machine-learning-ex8/ex8/lib/submitWithConfiguration.m
| 5,562 |
utf_8
|
4ac719ea6570ac228ea6c7a9c919e3f5
|
function submitWithConfiguration(conf)
% Drive one grader submission: obtain credentials, compute the part
% outputs, POST them to the grader, and report the returned scores.
addpath('./lib/jsonlab');
parts = parts(conf); % the result intentionally shadows the local function name
fprintf('== Submitting solutions | %s...\n', conf.itemName);
tokenFile = 'token.mat';
if exist(tokenFile, 'file')
% token.mat is expected to define variables 'email' and 'token'
load(tokenFile);
[email token] = promptToken(email, token, tokenFile);
else
[email token] = promptToken('', '', tokenFile);
end
if isempty(token)
fprintf('!! Submission Cancelled\n');
return
end
try
response = submitParts(conf, email, token, parts);
catch
% lasterror() is used for Octave compatibility
e = lasterror();
fprintf('\n!! Submission failed: %s\n', e.message);
fprintf('\n\nFunction: %s\nFileName: %s\nLineNumber: %d\n', ...
e.stack(1,1).name, e.stack(1,1).file, e.stack(1,1).line);
fprintf('\nPlease correct your code and resubmit.\n');
return
end
if isfield(response, 'errorMessage')
fprintf('!! Submission failed: %s\n', response.errorMessage);
elseif isfield(response, 'errorCode')
fprintf('!! Submission failed: %s\n', response.message);
else
showFeedback(parts, response);
% remember working credentials for the next submission
save(tokenFile, 'email', 'token');
end
end
function [email token] = promptToken(email, existingToken, tokenFile)
% Offer to reuse saved credentials; on decline (or when none are
% saved) remove the stale token file and prompt for fresh ones.
haveSaved = ~isempty(email) && ~isempty(existingToken);
if haveSaved
  prompt = sprintf( ...
      'Use token from last successful submission (%s)? (Y/n): ', ...
      email);
  answer = input(prompt, 's');
  if isempty(answer) || answer(1) == 'Y' || answer(1) == 'y'
    token = existingToken;
    return;
  end
  delete(tokenFile);
end
email = input('Login (email address): ', 's');
token = input('Token: ', 's');
end
function isValid = isValidPartOptionIndex(partOptions, i)
% True when i is a non-empty index within 1..numel(partOptions).
isValid = ~isempty(i) && i >= 1 && numel(partOptions) >= i;
end
function response = submitParts(conf, email, token, parts)
% POST the assembled submission body to the grader and decode the
% JSON response into a struct.
body = makePostBody(conf, email, token, parts);
submissionUrl = submissionUrl(); % variable deliberately shadows the function after this call
responseBody = getResponse(submissionUrl, body);
jsonResponse = validateResponse(responseBody);
response = loadjson(jsonResponse);
end
function body = makePostBody(conf, email, token, parts)
% Assemble the submission payload and serialize it as compact JSON.
bodyStruct = struct();
bodyStruct.assignmentSlug = conf.assignmentSlug;
bodyStruct.submitterEmail = email;
bodyStruct.secret = token;
bodyStruct.parts = makePartsStruct(conf, parts);
jsonOpt.Compact = 1;
body = savejson('', bodyStruct, jsonOpt);
end
function partsStruct = makePartsStruct(conf, parts)
% Build a struct keyed by sanitized part id, each entry holding the
% computed output string for that part.
for part = parts
partId = part{:}.id;
fieldName = makeValidFieldName(partId);
% NOTE(review): conf.output is invoked with one argument although the
% handler declares two (partId, auxstring); this is fine only as long
% as the handler never reads auxstring -- confirm
outputStruct.output = conf.output(partId);
partsStruct.(fieldName) = outputStruct;
end
end
function [parts] = parts(conf)
% Convert conf.partArrays ({id, files, name} cells) into a cell array
% of structs with fields id, sourceFiles and name.  Note: the caller
% assigns the result over this function's own name.
parts = {};
for partArray = conf.partArrays
part.id = partArray{:}{1};
part.sourceFiles = partArray{:}{2};
part.name = partArray{:}{3};
parts{end + 1} = part;
end
end
function showFeedback(parts, response)
% Print a per-part score/feedback table followed by the total score.
% 'response' is the decoded grader reply with partFeedbacks,
% partEvaluations and evaluation fields keyed by sanitized part id.
fprintf('== \n');
fprintf('== %43s | %9s | %-s\n', 'Part Name', 'Score', 'Feedback');
fprintf('== %43s | %9s | %-s\n', '---------', '-----', '--------');
for part = parts
  % compute the field name once instead of twice, and drop the dead
  % score=''/partFeedback='' initializations of the original
  fieldName = makeValidFieldName(part{:}.id);
  partFeedback = response.partFeedbacks.(fieldName);
  partEvaluation = response.partEvaluations.(fieldName);
  score = sprintf('%d / %3d', partEvaluation.score, partEvaluation.maxScore);
  fprintf('== %43s | %9s | %-s\n', part{:}.name, score, partFeedback);
end
evaluation = response.evaluation;
totalScore = sprintf('%d / %d', evaluation.score, evaluation.maxScore);
fprintf('== --------------------------------\n');
fprintf('== %43s | %9s | %-s\n', '', totalScore, '');
fprintf('== \n');
end
% use urlread or curl to send submit results to the grader and get a response
function response = getResponse(url, body)
% POST the JSON body to the grader, preferring urlread() and falling
% back to a curl subprocess when that fails (e.g. certificate issues).
% try using urlread() and a secure connection
params = {'jsonBody', body};
[response, success] = urlread(url, 'post', params);
if (success == 0)
% urlread didn't work, try curl & the peer certificate patch
% NOTE(review): 'body' is interpolated into a shell command line
% unquoted/unescaped -- a shell-injection hazard if it ever contains
% shell metacharacters; consider piping via a temp file instead
if ispc
% testing note: use 'jsonBody =' for a test case
json_command = sprintf('echo jsonBody=%s | curl -k -X POST -d @- %s', body, url);
else
% it's linux/OS X, so use the other form
json_command = sprintf('echo ''jsonBody=%s'' | curl -k -X POST -d @- %s', body, url);
end
% get the response body for the peer certificate patch method
[code, response] = system(json_command);
% test the success code
if (code ~= 0)
fprintf('[error] submission with curl() was not successful\n');
end
end
end
% validate the grader's response
function response = validateResponse(resp)
% Return the response untouched when it is JSON; print and raise when
% it is an HTML error page; raise on anything else.
% test if the response is json or an HTML page
isJson = ~isempty(resp) && resp(1) == '{';
% fix: findstr is deprecated/removed in newer MATLAB; strfind is the
% drop-in replacement with identical semantics here
isHtml = strfind(lower(resp), '<html');
if (isJson)
  response = resp;
elseif (isHtml)
  % the response is html, so it's probably an error message
  printHTMLContents(resp);
  error('Grader response is an HTML message');
else
  error('Grader sent no response');
end
end
% parse a HTML response and print its contents
function printHTMLContents(response)
% Strip HTML tags, collapse runs of spaces/tabs, and print the text.
strippedResponse = regexprep(response, '<[^>]+>', ' ');
strippedResponse = regexprep(strippedResponse, '[\t ]+', ' ');
% fix: print through '%s' -- the original passed page text as the
% fprintf format string, so any literal '%' in the page broke output
fprintf('%s', strippedResponse);
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Service configuration
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function submissionUrl = submissionUrl()
% Grader endpoint for on-demand programming form submissions.
submissionUrl = ['https://www-origin.coursera.org' ...
                 '/api/onDemandProgrammingImmediateFormSubmissions.v1'];
end
|
github
|
zzlyw/machine-learning-exercises-master
|
savejson.m
|
.m
|
machine-learning-exercises-master/machine-learning-ex8/ex8/lib/jsonlab/savejson.m
| 17,462 |
utf_8
|
861b534fc35ffe982b53ca3ca83143bf
|
function json=savejson(rootname,obj,varargin)
%
% json=savejson(rootname,obj,filename)
% or
% json=savejson(rootname,obj,opt)
% json=savejson(rootname,obj,'param1',value1,'param2',value2,...)
%
% convert a MATLAB object (cell, struct or array) into a JSON (JavaScript
% Object Notation) string
%
% author: Qianqian Fang (fangq<at> nmr.mgh.harvard.edu)
% created on 2011/09/09
%
% $Id: savejson.m 460 2015-01-03 00:30:45Z fangq $
%
% input:
% rootname: the name of the root-object, when set to '', the root name
% is ignored, however, when opt.ForceRootName is set to 1 (see below),
% the MATLAB variable name will be used as the root name.
% obj: a MATLAB object (array, cell, cell array, struct, struct array).
% filename: a string for the file name to save the output JSON data.
% opt: a struct for additional options, ignore to use default values.
% opt can have the following fields (first in [.|.] is the default)
%
% opt.FileName [''|string]: a file name to save the output JSON data
% opt.FloatFormat ['%.10g'|string]: format to show each numeric element
% of a 1D/2D array;
% opt.ArrayIndent [1|0]: if 1, output explicit data array with
% precedent indentation; if 0, no indentation
% opt.ArrayToStruct[0|1]: when set to 0, savejson outputs 1D/2D
% array in JSON array format; if sets to 1, an
% array will be shown as a struct with fields
% "_ArrayType_", "_ArraySize_" and "_ArrayData_"; for
% sparse arrays, the non-zero elements will be
% saved to _ArrayData_ field in triplet-format i.e.
% (ix,iy,val) and "_ArrayIsSparse_" will be added
% with a value of 1; for a complex array, the
% _ArrayData_ array will include two columns
% (4 for sparse) to record the real and imaginary
% parts, and also "_ArrayIsComplex_":1 is added.
% opt.ParseLogical [0|1]: if this is set to 1, logical array elem
% will use true/false rather than 1/0.
% opt.NoRowBracket [1|0]: if this is set to 1, arrays with a single
% numerical element will be shown without a square
% bracket, unless it is the root object; if 0, square
% brackets are forced for any numerical arrays.
% opt.ForceRootName [0|1]: when set to 1 and rootname is empty, savejson
% will use the name of the passed obj variable as the
% root object name; if obj is an expression and
% does not have a name, 'root' will be used; if this
% is set to 0 and rootname is empty, the root level
% will be merged down to the lower level.
% opt.Inf ['"$1_Inf_"'|string]: a customized regular expression pattern
% to represent +/-Inf. The matched pattern is '([-+]*)Inf'
% and $1 represents the sign. For those who want to use
% 1e999 to represent Inf, they can set opt.Inf to '$11e999'
% opt.NaN ['"_NaN_"'|string]: a customized regular expression pattern
% to represent NaN
% opt.JSONP [''|string]: to generate a JSONP output (JSON with padding),
% for example, if opt.JSONP='foo', the JSON data is
% wrapped inside a function call as 'foo(...);'
% opt.UnpackHex [1|0]: conver the 0x[hex code] output by loadjson
% back to the string form
% opt.SaveBinary [0|1]: 1 - save the JSON file in binary mode; 0 - text mode.
% opt.Compact [0|1]: 1- out compact JSON format (remove all newlines and tabs)
%
% opt can be replaced by a list of ('param',value) pairs. The param
% string is equivallent to a field in opt and is case sensitive.
% output:
% json: a string in the JSON format (see http://json.org)
%
% examples:
% jsonmesh=struct('MeshNode',[0 0 0;1 0 0;0 1 0;1 1 0;0 0 1;1 0 1;0 1 1;1 1 1],...
% 'MeshTetra',[1 2 4 8;1 3 4 8;1 2 6 8;1 5 6 8;1 5 7 8;1 3 7 8],...
% 'MeshTri',[1 2 4;1 2 6;1 3 4;1 3 7;1 5 6;1 5 7;...
% 2 8 4;2 8 6;3 8 4;3 8 7;5 8 6;5 8 7],...
% 'MeshCreator','FangQ','MeshTitle','T6 Cube',...
% 'SpecialData',[nan, inf, -inf]);
% savejson('jmesh',jsonmesh)
% savejson('',jsonmesh,'ArrayIndent',0,'FloatFormat','\t%.5g')
%
% license:
% BSD, see LICENSE_BSD.txt files for details
%
% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)
%
if(nargin==1)
% single-argument form: the sole input is the object; recover a root
% name from the caller's variable name, falling back to 'root'
varname=inputname(1);
obj=rootname;
if(isempty(varname))
varname='root';
end
rootname=varname;
else
varname=inputname(2);
end
if(length(varargin)==1 && ischar(varargin{1}))
% a single string option is shorthand for the output file name
opt=struct('FileName',varargin{1});
else
opt=varargin2struct(varargin{:});
end
% remember whether we run under Octave (affects regexp handling downstream)
opt.IsOctave=exist('OCTAVE_VERSION','builtin');
rootisarray=0;
rootlevel=1;
forceroot=jsonopt('ForceRootName',0,opt);
% an unnamed serializable root is emitted without an enclosing object
if((isnumeric(obj) || islogical(obj) || ischar(obj) || isstruct(obj) || iscell(obj)) && isempty(rootname) && forceroot==0)
rootisarray=1;
rootlevel=0;
else
if(isempty(rootname))
rootname=varname;
end
end
if((isstruct(obj) || iscell(obj))&& isempty(rootname) && forceroot)
rootname='root';
end
% pretty-print by default; Compact mode strips all tabs and newlines
whitespaces=struct('tab',sprintf('\t'),'newline',sprintf('\n'),'sep',sprintf(',\n'));
if(jsonopt('Compact',0,opt)==1)
whitespaces=struct('tab','','newline','','sep',',');
end
if(~isfield(opt,'whitespaces_'))
opt.whitespaces_=whitespaces;
end
nl=whitespaces.newline;
json=obj2json(rootname,obj,rootlevel,opt);
if(rootisarray)
json=sprintf('%s%s',json,nl);
else
json=sprintf('{%s%s%s}\n',nl,json,nl);
end
jsonp=jsonopt('JSONP','',opt);
if(~isempty(jsonp))
json=sprintf('%s(%s);%s',jsonp,json,nl);
end
% save to a file if FileName is set, suggested by Patrick Rapin
if(~isempty(jsonopt('FileName','',opt)))
if(jsonopt('SaveBinary',0,opt)==1)
fid = fopen(opt.FileName, 'wb');
fwrite(fid,json);
else
fid = fopen(opt.FileName, 'wt');
fwrite(fid,json,'char');
end
fclose(fid);
end
%%-------------------------------------------------------------------------
function txt=obj2json(name,item,level,varargin)
%
% dispatch a MATLAB value to the JSON serializer matching its type;
% numeric and logical arrays fall through to mat2json
%
if(iscell(item))
    serialize=@cell2json;
elseif(isstruct(item))
    serialize=@struct2json;
elseif(ischar(item))
    serialize=@str2json;
else
    serialize=@mat2json;
end
txt=serialize(name,item,level,varargin{:});
%%-------------------------------------------------------------------------
function txt=cell2json(name,item,level,varargin)
%
% serialize a cell array as a JSON array; each column of a 2D cell
% array becomes a nested array, and pretty-printing follows the
% whitespace settings carried in opt.whitespaces_
%
txt='';
if(~iscell(item))
error('input is not a cell');
end
dim=size(item);
if(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now
item=reshape(item,dim(1),numel(item)/dim(1));
dim=size(item);
end
len=numel(item);
ws=jsonopt('whitespaces_',struct('tab',sprintf('\t'),'newline',sprintf('\n'),'sep',sprintf(',\n')),varargin{:});
padding0=repmat(ws.tab,1,level);
padding2=repmat(ws.tab,1,level+1);
nl=ws.newline;
if(len>1)
if(~isempty(name))
txt=sprintf('%s"%s": [%s',padding0, checkname(name,varargin{:}),nl); name='';
else
txt=sprintf('%s[%s',padding0,nl);
end
elseif(len==0)
% an empty cell maps to an empty JSON array
if(~isempty(name))
txt=sprintf('%s"%s": []',padding0, checkname(name,varargin{:})); name='';
else
txt=sprintf('%s[]',padding0);
end
end
for j=1:dim(2)
if(dim(1)>1) txt=sprintf('%s%s[%s',txt,padding2,nl); end
for i=1:dim(1)
txt=sprintf('%s%s',txt,obj2json(name,item{i,j},level+(dim(1)>1)+1,varargin{:}));
if(i<dim(1)) txt=sprintf('%s%s',txt,sprintf(',%s',nl)); end
end
if(dim(1)>1) txt=sprintf('%s%s%s]',txt,nl,padding2); end
if(j<dim(2)) txt=sprintf('%s%s',txt,sprintf(',%s',nl)); end
%if(j==dim(2)) txt=sprintf('%s%s',txt,sprintf(',%s',nl)); end
end
if(len>1) txt=sprintf('%s%s%s]',txt,nl,padding0); end
%%-------------------------------------------------------------------------
function txt=struct2json(name,item,level,varargin)
%
% serialize a struct (or struct array) as a JSON object (or array of
% objects), with indentation driven by opt.whitespaces_
%
txt='';
if(~isstruct(item))
error('input is not a struct');
end
dim=size(item);
if(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now
item=reshape(item,dim(1),numel(item)/dim(1));
dim=size(item);
end
len=numel(item);
ws=struct('tab',sprintf('\t'),'newline',sprintf('\n'));
ws=jsonopt('whitespaces_',ws,varargin{:});
padding0=repmat(ws.tab,1,level);
padding2=repmat(ws.tab,1,level+1);
padding1=repmat(ws.tab,1,level+(dim(1)>1)+(len>1));
nl=ws.newline;
if(~isempty(name))
if(len>1) txt=sprintf('%s"%s": [%s',padding0,checkname(name,varargin{:}),nl); end
else
if(len>1) txt=sprintf('%s[%s',padding0,nl); end
end
for j=1:dim(2)
if(dim(1)>1) txt=sprintf('%s%s[%s',txt,padding2,nl); end
for i=1:dim(1)
names = fieldnames(item(i,j));
% a scalar named struct keeps its key; array elements are anonymous
if(~isempty(name) && len==1)
txt=sprintf('%s%s"%s": {%s',txt,padding1, checkname(name,varargin{:}),nl);
else
txt=sprintf('%s%s{%s',txt,padding1,nl);
end
if(~isempty(names))
% recurse into every field, adjusting depth for struct-array nesting
for e=1:length(names)
txt=sprintf('%s%s',txt,obj2json(names{e},getfield(item(i,j),...
names{e}),level+(dim(1)>1)+1+(len>1),varargin{:}));
if(e<length(names)) txt=sprintf('%s%s',txt,','); end
txt=sprintf('%s%s',txt,nl);
end
end
txt=sprintf('%s%s}',txt,padding1);
if(i<dim(1)) txt=sprintf('%s%s',txt,sprintf(',%s',nl)); end
end
if(dim(1)>1) txt=sprintf('%s%s%s]',txt,nl,padding2); end
if(j<dim(2)) txt=sprintf('%s%s',txt,sprintf(',%s',nl)); end
end
if(len>1) txt=sprintf('%s%s%s]',txt,nl,padding0); end
%%-------------------------------------------------------------------------
function txt=str2json(name,item,level,varargin)
%
% serialize a char array as a JSON string; each row of a multi-row
% char array becomes one element of a JSON array
%
txt='';
if(~ischar(item))
error('input is not a string');
end
item=reshape(item, max(size(item),[1 0])); % guarantee at least one row
len=size(item,1);
ws=struct('tab',sprintf('\t'),'newline',sprintf('\n'),'sep',sprintf(',\n'));
ws=jsonopt('whitespaces_',ws,varargin{:});
padding1=repmat(ws.tab,1,level);
padding0=repmat(ws.tab,1,level+1);
nl=ws.newline;
sep=ws.sep;
if(~isempty(name))
if(len>1) txt=sprintf('%s"%s": [%s',padding1,checkname(name,varargin{:}),nl); end
else
if(len>1) txt=sprintf('%s[%s',padding1,nl); end
end
isoct=jsonopt('IsOctave',0,varargin{:});
for e=1:len
if(isoct)
% Octave path: escape backslashes and double quotes
% NOTE(review): regexprep(...,'\\','\\') replaces a backslash with
% itself (a no-op) and the next two rules overlap; the escaping here
% looks incomplete -- verify against loadjson round-trips
val=regexprep(item(e,:),'\\','\\');
val=regexprep(val,'"','\"');
val=regexprep(val,'^"','\"');
else
% MATLAB path: double each backslash, then escape double quotes
val=regexprep(item(e,:),'\\','\\\\');
val=regexprep(val,'"','\\"');
val=regexprep(val,'^"','\\"');
end
val=escapejsonstring(val);
if(len==1)
obj=['"' checkname(name,varargin{:}) '": ' '"',val,'"'];
if(isempty(name)) obj=['"',val,'"']; end
txt=sprintf('%s%s%s%s',txt,padding1,obj);
else
txt=sprintf('%s%s%s%s',txt,padding0,['"',val,'"']);
end
if(e==len) sep=''; end % no trailing separator after the last row
txt=sprintf('%s%s',txt,sep);
end
if(len>1) txt=sprintf('%s%s%s%s',txt,nl,padding1,']'); end
%%-------------------------------------------------------------------------
function txt=mat2json(name,item,level,varargin)
% Serialize a numeric or logical array into JSON text.
% A dense, real, non-empty 1D/2D array becomes a plain JSON array;
% anything else (ndims>2, sparse, complex, empty, or ArrayToStruct set)
% is written as an annotated object carrying "_ArrayType_",
% "_ArraySize_" and "_ArrayData_" (plus "_ArrayIsComplex_" /
% "_ArrayIsSparse_" markers where applicable).
if(~isnumeric(item) && ~islogical(item))
    error('input is not an array');
end
ws=struct('tab',sprintf('\t'),'newline',sprintf('\n'),'sep',sprintf(',\n'));
ws=jsonopt('whitespaces_',ws,varargin{:});
padding1=repmat(ws.tab,1,level);
padding0=repmat(ws.tab,1,level+1);
nl=ws.newline;
sep=ws.sep;
if(length(size(item))>2 || issparse(item) || ~isreal(item) || ...
   isempty(item) ||jsonopt('ArrayToStruct',0,varargin{:}))
    % annotated-struct form: open the object and record type/size
    if(isempty(name))
        txt=sprintf('%s{%s%s"_ArrayType_": "%s",%s%s"_ArraySize_": %s,%s',...
              padding1,nl,padding0,class(item),nl,padding0,regexprep(mat2str(size(item)),'\s+',','),nl);
    else
        txt=sprintf('%s"%s": {%s%s"_ArrayType_": "%s",%s%s"_ArraySize_": %s,%s',...
              padding1,checkname(name,varargin{:}),nl,padding0,class(item),nl,padding0,regexprep(mat2str(size(item)),'\s+',','),nl);
    end
else
    % plain JSON array form; a scalar may drop its brackets (NoRowBracket)
    if(numel(item)==1 && jsonopt('NoRowBracket',1,varargin{:})==1 && level>0)
        numtxt=regexprep(regexprep(matdata2json(item,level+1,varargin{:}),'^\[',''),']','');
    else
        numtxt=matdata2json(item,level+1,varargin{:});
    end
    if(isempty(name))
        txt=sprintf('%s%s',padding1,numtxt);
    else
        if(numel(item)==1 && jsonopt('NoRowBracket',1,varargin{:})==1)
            txt=sprintf('%s"%s": %s',padding1,checkname(name,varargin{:}),numtxt);
        else
            txt=sprintf('%s"%s": %s',padding1,checkname(name,varargin{:}),numtxt);
        end
    end
    return;
end
dataformat='%s%s%s%s%s';
if(issparse(item))
    % sparse arrays are stored as (index..., value) triplets/pairs
    [ix,iy]=find(item);
    data=full(item(find(item)));
    if(~isreal(item))
        data=[real(data(:)),imag(data(:))];
        if(size(item,1)==1)
            % Kludge to have data's 'transposedness' match item's.
            % (Necessary for complex row vector handling below.)
            data=data';
        end
        txt=sprintf(dataformat,txt,padding0,'"_ArrayIsComplex_": ','1', sep);
    end
    txt=sprintf(dataformat,txt,padding0,'"_ArrayIsSparse_": ','1', sep);
    if(size(item,1)==1)
        % Row vector, store only column indices.
        txt=sprintf(dataformat,txt,padding0,'"_ArrayData_": ',...
           matdata2json([iy(:),data'],level+2,varargin{:}), nl);
    elseif(size(item,2)==1)
        % Column vector, store only row indices.
        txt=sprintf(dataformat,txt,padding0,'"_ArrayData_": ',...
           matdata2json([ix,data],level+2,varargin{:}), nl);
    else
        % General case, store row and column indices.
        txt=sprintf(dataformat,txt,padding0,'"_ArrayData_": ',...
           matdata2json([ix,iy,data],level+2,varargin{:}), nl);
    end
else
    if(isreal(item))
        txt=sprintf(dataformat,txt,padding0,'"_ArrayData_": ',...
            matdata2json(item(:)',level+2,varargin{:}), nl);
    else
        % complex dense array: two columns, real then imaginary part
        txt=sprintf(dataformat,txt,padding0,'"_ArrayIsComplex_": ','1', sep);
        txt=sprintf(dataformat,txt,padding0,'"_ArrayData_": ',...
            matdata2json([real(item(:)) imag(item(:))],level+2,varargin{:}), nl);
    end
end
txt=sprintf('%s%s%s',txt,padding1,'}');
%%-------------------------------------------------------------------------
function txt=matdata2json(mat,level,varargin)
% Format a dense 2D numeric matrix as JSON array text.
% A single row is emitted as one bracketed list; multiple rows become a
% nested array of row arrays. Inf/NaN are rewritten into their JSONLab
% string placeholders ("_Inf_", "-_Inf_", "_NaN_") unless overridden.
ws=struct('tab',sprintf('\t'),'newline',sprintf('\n'),'sep',sprintf(',\n'));
ws=jsonopt('whitespaces_',ws,varargin{:});
tab=ws.tab;
nl=ws.newline;
if(size(mat,1)==1)
    pre='';
    post='';
    level=level-1;
else
    pre=sprintf('[%s',nl);
    post=sprintf('%s%s]',nl,repmat(tab,1,level-1));
end
if(isempty(mat))
    txt='null';
    return;
end
floatformat=jsonopt('FloatFormat','%.10g',varargin{:});
%if(numel(mat)>1)
    formatstr=['[' repmat([floatformat ','],1,size(mat,2)-1) [floatformat sprintf('],%s',nl)]];
%else
%    formatstr=[repmat([floatformat ','],1,size(mat,2)-1) [floatformat sprintf(',\n')]];
%end
if(nargin>=2 && size(mat,1)>1 && jsonopt('ArrayIndent',1,varargin{:})==1)
    formatstr=[repmat(tab,1,level) formatstr];
end
% sprintf walks mat' column-wise, i.e. one source row per format pass
txt=sprintf(formatstr,mat');
txt(end-length(nl):end)=[]; % drop the trailing '],\n' remnants
if(islogical(mat) && jsonopt('ParseLogical',0,varargin{:})==1)
    txt=regexprep(txt,'1','true');
    txt=regexprep(txt,'0','false');
end
%txt=regexprep(mat2str(mat),'\s+',',');
%txt=regexprep(txt,';',sprintf('],\n['));
% if(nargin>=2 && size(mat,1)>1)
%     txt=regexprep(txt,'\[',[repmat(sprintf('\t'),1,level) '[']);
% end
txt=[pre txt post];
if(any(isinf(mat(:))))
    txt=regexprep(txt,'([-+]*)Inf',jsonopt('Inf','"$1_Inf_"',varargin{:}));
end
if(any(isnan(mat(:))))
    txt=regexprep(txt,'NaN',jsonopt('NaN','"_NaN_"',varargin{:}));
end
%%-------------------------------------------------------------------------
function newname=checkname(name,varargin)
% Reverse the "0x[hex]_" field-name encoding produced when JSON keys
% contain characters that are illegal in MATLAB struct field names.
% When UnpackHex is enabled (default), each "x0x..._"/"_0x..._" token
% is decoded back into its original character.
isunpack=jsonopt('UnpackHex',1,varargin{:});
newname=name;
if(isempty(regexp(name,'0x([0-9a-fA-F]+)_','once')))
    return % nothing encoded in this name
end
if(isunpack)
    isoct=jsonopt('IsOctave',0,varargin{:});
    if(~isoct)
        % MATLAB supports dynamic expressions in regexprep replacements
        newname=regexprep(name,'(^x|_){1}0x([0-9a-fA-F]+)_','${native2unicode(hex2dec($2))}');
    else
        % Octave path: decode each hex token manually
        pos=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','start');
        pend=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','end');
        if(isempty(pos)) return; end
        str0=name;
        pos0=[0 pend(:)' length(name)];
        newname='';
        for i=1:length(pos)
            % copy the literal run before the token, then the decoded char
            newname=[newname str0(pos0(i)+1:pos(i)-1) char(hex2dec(str0(pos(i)+3:pend(i)-1)))];
        end
        if(pos(end)~=length(name))
            newname=[newname str0(pos0(end-1)+1:pos0(end))]; % trailing literal run
        end
    end
end
%%-------------------------------------------------------------------------
function newstr=escapejsonstring(str)
% Escape control-character sequences in a string destined for JSON output.
% Behavior is interpreter-dependent: on Octave >= 3.8 the MATLAB branch
% is used.
newstr=str;
isoct=exist('OCTAVE_VERSION','builtin');
if(isoct)
   vv=sscanf(OCTAVE_VERSION,'%f');
   if(vv(1)>=3.8) isoct=0; end % newer Octave behaves like MATLAB here
end
if(isoct)
  % NOTE(review): this branch replaces each escape sequence with itself,
  % which looks like a no-op — presumably intentional for old Octave
  % regexprep semantics, but worth confirming.
  escapechars={'\a','\f','\n','\r','\t','\v'};
  for i=1:length(escapechars);
    newstr=regexprep(newstr,escapechars{i},escapechars{i});
  end
else
  % double the backslash in the replacement so '\n' stays literal
  escapechars={'\a','\b','\f','\n','\r','\t','\v'};
  for i=1:length(escapechars);
    newstr=regexprep(newstr,escapechars{i},regexprep(escapechars{i},'\\','\\\\'));
  end
end
|
github
|
zzlyw/machine-learning-exercises-master
|
loadjson.m
|
.m
|
machine-learning-exercises-master/machine-learning-ex8/ex8/lib/jsonlab/loadjson.m
| 18,732 |
ibm852
|
ab98cf173af2d50bbe8da4d6db252a20
|
function data = loadjson(fname,varargin)
%
% data=loadjson(fname,opt)
% or
% data=loadjson(fname,'param1',value1,'param2',value2,...)
%
% parse a JSON (JavaScript Object Notation) file or string
%
% authors:Qianqian Fang (fangq<at> nmr.mgh.harvard.edu)
% created on 2011/09/09, including previous works from
%
% Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713
% created on 2009/11/02
% François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393
% created on 2009/03/22
% Joel Feenstra:
% http://www.mathworks.com/matlabcentral/fileexchange/20565
% created on 2008/07/03
%
% $Id: loadjson.m 460 2015-01-03 00:30:45Z fangq $
%
% input:
% fname: input file name, if fname contains "{}" or "[]", fname
% will be interpreted as a JSON string
% opt: a struct to store parsing options, opt can be replaced by
% a list of ('param',value) pairs - the param string is equivallent
% to a field in opt. opt can have the following
% fields (first in [.|.] is the default)
%
% opt.SimplifyCell [0|1]: if set to 1, loadjson will call cell2mat
% for each element of the JSON data, and group
% arrays based on the cell2mat rules.
% opt.FastArrayParser [1|0 or integer]: if set to 1, use a
% speed-optimized array parser when loading an
% array object. The fast array parser may
% collapse block arrays into a single large
% array similar to rules defined in cell2mat; 0 to
% use a legacy parser; if set to a larger-than-1
% value, this option will specify the minimum
% dimension to enable the fast array parser. For
% example, if the input is a 3D array, setting
% FastArrayParser to 1 will return a 3D array;
% setting to 2 will return a cell array of 2D
% arrays; setting to 3 will return to a 2D cell
% array of 1D vectors; setting to 4 will return a
% 3D cell array.
% opt.ShowProgress [0|1]: if set to 1, loadjson displays a progress bar.
%
% output:
% dat: a cell array, where {...} blocks are converted into cell arrays,
% and [...] are converted to arrays
%
% examples:
% dat=loadjson('{"obj":{"string":"value","array":[1,2,3]}}')
% dat=loadjson(['examples' filesep 'example1.json'])
% dat=loadjson(['examples' filesep 'example1.json'],'SimplifyCell',1)
%
% license:
% BSD, see LICENSE_BSD.txt files for details
%
% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)
%
% The parser state (cursor, input buffer, escape indices) is shared with
% the sub-parsers through globals.
global pos inStr len esc index_esc len_esc isoct arraytoken
% fname containing a brace/bracket is treated as inline JSON text
if(regexp(fname,'[\{\}\]\[]','once'))
    string=fname;
elseif(exist(fname,'file'))
    fid = fopen(fname,'rb');
    string = fread(fid,inf,'uint8=>char')';
    fclose(fid);
else
    error('input file does not exist');
end
pos = 1; len = length(string); inStr = string;
isoct=exist('OCTAVE_VERSION','builtin');
% pre-index all bracket/quote tokens; escaped quotes are added so
% matching_bracket can skip string contents correctly
arraytoken=find(inStr=='[' | inStr==']' | inStr=='"');
jstr=regexprep(inStr,'\\\\',' ');
escquote=regexp(jstr,'\\"');
arraytoken=sort([arraytoken escquote]);
% String delimiters and escape chars identified to improve speed:
esc = find(inStr=='"' | inStr=='\' ); % comparable to: regexp(inStr, '["\\]');
index_esc = 1; len_esc = length(esc);
opt=varargin2struct(varargin{:});
if(jsonopt('ShowProgress',0,opt)==1)
    opt.progressbar_=waitbar(0,'loading ...');
end
% a file may contain several concatenated top-level JSON values
jsoncount=1;
while pos <= len
    switch(next_char)
        case '{'
            data{jsoncount} = parse_object(opt);
        case '['
            data{jsoncount} = parse_array(opt);
        otherwise
            error_pos('Outer level structure must be an object or an array');
    end
    jsoncount=jsoncount+1;
end % while
jsoncount=length(data);
if(jsoncount==1 && iscell(data))
    data=data{1}; % unwrap a single top-level value
end
if(~isempty(data))
    if(isstruct(data)) % data can be a struct array
        data=jstruct2array(data);
    elseif(iscell(data))
        data=jcell2array(data);
    end
end
if(isfield(opt,'progressbar_'))
    close(opt.progressbar_);
end
%%
function newdata=jcell2array(data)
% Recursively walk a parsed cell array and convert any annotated-array
% structs (via jstruct2array) back into MATLAB arrays.
len=length(data);
newdata=data;
for i=1:len
    if(isstruct(data{i}))
        newdata{i}=jstruct2array(data{i});
    elseif(iscell(data{i}))
        newdata{i}=jcell2array(data{i}); % recurse into nested cells
    end
end
%%-------------------------------------------------------------------------
function newdata=jstruct2array(data)
% Convert structs that carry JSONLab array annotations
% (x0x5F_ArrayType_/x0x5F_ArrayData_/...) back into native MATLAB
% arrays, reconstructing sparse and complex layouts. Plain structs are
% returned with their struct-valued fields converted recursively.
fn=fieldnames(data);
newdata=data;
len=length(data);
for i=1:length(fn) % depth-first
    for j=1:len
        if(isstruct(getfield(data(j),fn{i})))
            newdata(j)=setfield(newdata(j),fn{i},jstruct2array(getfield(data(j),fn{i})));
        end
    end
end
if(~isempty(strmatch('x0x5F_ArrayType_',fn)) && ~isempty(strmatch('x0x5F_ArrayData_',fn)))
    newdata=cell(len,1);
    for j=1:len
        % cast the flat payload back to the recorded class
        ndata=cast(data(j).x0x5F_ArrayData_,data(j).x0x5F_ArrayType_);
        iscpx=0;
        if(~isempty(strmatch('x0x5F_ArrayIsComplex_',fn)))
            if(data(j).x0x5F_ArrayIsComplex_)
                iscpx=1;
            end
        end
        if(~isempty(strmatch('x0x5F_ArrayIsSparse_',fn)))
            if(data(j).x0x5F_ArrayIsSparse_)
                if(~isempty(strmatch('x0x5F_ArraySize_',fn)))
                    dim=data(j).x0x5F_ArraySize_;
                    if(iscpx && size(ndata,2)==4-any(dim==1))
                        ndata(:,end-1)=complex(ndata(:,end-1),ndata(:,end));
                    end
                    if isempty(ndata)
                        % All-zeros sparse
                        ndata=sparse(dim(1),prod(dim(2:end)));
                    elseif dim(1)==1
                        % Sparse row vector
                        ndata=sparse(1,ndata(:,1),ndata(:,2),dim(1),prod(dim(2:end)));
                    elseif dim(2)==1
                        % Sparse column vector
                        ndata=sparse(ndata(:,1),1,ndata(:,2),dim(1),prod(dim(2:end)));
                    else
                        % Generic sparse array.
                        ndata=sparse(ndata(:,1),ndata(:,2),ndata(:,3),dim(1),prod(dim(2:end)));
                    end
                else
                    % no recorded size: let sparse() infer the dimensions
                    if(iscpx && size(ndata,2)==4)
                        ndata(:,3)=complex(ndata(:,3),ndata(:,4));
                    end
                    ndata=sparse(ndata(:,1),ndata(:,2),ndata(:,3));
                end
            end
        elseif(~isempty(strmatch('x0x5F_ArraySize_',fn)))
            % dense case: pair columns into complex values, then reshape
            if(iscpx && size(ndata,2)==2)
                ndata=complex(ndata(:,1),ndata(:,2));
            end
            ndata=reshape(ndata(:),data(j).x0x5F_ArraySize_);
        end
        newdata{j}=ndata;
    end
    if(len==1)
        newdata=newdata{1};
    end
end
%%-------------------------------------------------------------------------
function object = parse_object(varargin)
% Parse a JSON object ('{...}') at the global cursor into a struct.
% Field names are sanitized via valid_field before assignment.
parse_char('{');
object = [];
if next_char ~= '}'
    while 1
        str = parseStr(varargin{:});
        if isempty(str)
            error_pos('Name of value at position %d cannot be empty');
        end
        parse_char(':');
        val = parse_value(varargin{:});
        % dynamic assignment through eval since field name is computed
        eval( sprintf( 'object.%s  = val;', valid_field(str) ) );
        if next_char == '}'
            break;
        end
        parse_char(',');
    end
end
parse_char('}');
%%-------------------------------------------------------------------------
function object = parse_array(varargin) % JSON array is written in row-major order
% Parse a JSON array ('[...]') at the global cursor.
% A fast path hands purely-numeric (sub)arrays to sscanf/eval; on any
% failure it falls back to the element-by-element recursive parser.
global pos inStr isoct
parse_char('[');
object = cell(0, 1);
dim2=[];
arraydepth=jsonopt('JSONLAB_ArrayDepth_',1,varargin{:});
pbar=jsonopt('progressbar_',-1,varargin{:});
if next_char ~= ']'
    if(jsonopt('FastArrayParser',1,varargin{:})>=1 && arraydepth>=jsonopt('FastArrayParser',1,varargin{:}))
        % fast path: slice out the whole bracketed region first
        [endpos, e1l, e1r, maxlevel]=matching_bracket(inStr,pos);
        arraystr=['[' inStr(pos:endpos)];
        arraystr=regexprep(arraystr,'"_NaN_"','NaN');
        arraystr=regexprep(arraystr,'"([-+]*)_Inf_"','$1Inf');
        arraystr(arraystr==sprintf('\n'))=[];
        arraystr(arraystr==sprintf('\r'))=[];
        %arraystr=regexprep(arraystr,'\s*,',','); % this is slow,sometimes needed
        if(~isempty(e1l) && ~isempty(e1r)) % the array is in 2D or higher D
            astr=inStr((e1l+1):(e1r-1));
            astr=regexprep(astr,'"_NaN_"','NaN');
            astr=regexprep(astr,'"([-+]*)_Inf_"','$1Inf');
            astr(astr==sprintf('\n'))=[];
            astr(astr==sprintf('\r'))=[];
            astr(astr==' ')='';
            if(isempty(find(astr=='[', 1))) % array is 2D
                dim2=length(sscanf(astr,'%f,',[1 inf]));
            end
        else % array is 1D
            astr=arraystr(2:end-1);
            astr(astr==' ')='';
            [obj, count, errmsg, nextidx]=sscanf(astr,'%f,',[1,inf]);
            if(nextidx>=length(astr)-1)
                % fully consumed as numbers: done with this array
                object=obj;
                pos=endpos;
                parse_char(']');
                return;
            end
        end
        if(~isempty(dim2))
            % strip brackets and scan the 2D payload in one pass
            astr=arraystr;
            astr(astr=='[')='';
            astr(astr==']')='';
            astr(astr==' ')='';
            [obj, count, errmsg, nextidx]=sscanf(astr,'%f,',inf);
            if(nextidx>=length(astr)-1)
                object=reshape(obj,dim2,numel(obj)/dim2)';
                pos=endpos;
                parse_char(']');
                if(pbar>0)
                    waitbar(pos/length(inStr),pbar,'loading ...');
                end
                return;
            end
        end
        arraystr=regexprep(arraystr,'\]\s*,','];');
    else
        arraystr='[';
    end
    try
        if(isoct && regexp(arraystr,'"','once'))
            error('Octave eval can produce empty cells for JSON-like input');
        end
        object=eval(arraystr);
        pos=endpos;
    catch
        % fallback: parse each element recursively at increased depth
        while 1
            newopt=varargin2struct(varargin{:},'JSONLAB_ArrayDepth_',arraydepth+1);
            val = parse_value(newopt);
            object{end+1} = val;
            if next_char == ']'
                break;
            end
            parse_char(',');
        end
    end
end
if(jsonopt('SimplifyCell',0,varargin{:})==1)
    try
        % try to collapse a homogeneous cell array into a matrix
        oldobj=object;
        object=cell2mat(object')';
        if(iscell(oldobj) && isstruct(object) && numel(object)>1 && jsonopt('SimplifyCellArray',1,varargin{:})==0)
            object=oldobj;
        elseif(size(object,1)>1 && ndims(object)==2)
            object=object';
        end
    catch
    end
end
parse_char(']');
if(pbar>0)
    waitbar(pos/length(inStr),pbar,'loading ...');
end
%%-------------------------------------------------------------------------
function parse_char(c)
% Consume the expected character c at the global cursor (skipping
% surrounding whitespace); raise a positioned error otherwise.
global pos inStr len
skip_whitespace;
if pos > len || inStr(pos) ~= c
    error_pos(sprintf('Expected %c at position %%d', c));
else
    pos = pos + 1;
    skip_whitespace;
end
%%-------------------------------------------------------------------------
function c = next_char
% Peek at the next non-whitespace character without consuming it;
% returns [] at end of input.
global pos inStr len
skip_whitespace;
if pos > len
    c = [];
else
    c = inStr(pos);
end
%%-------------------------------------------------------------------------
function skip_whitespace
% Advance the global cursor past any whitespace characters.
global pos inStr len
while pos <= len && isspace(inStr(pos))
    pos = pos + 1;
end
%%-------------------------------------------------------------------------
function str = parseStr(varargin)
% Parse a double-quoted JSON string at the global cursor, resolving
% backslash escapes. The special placeholders "_Inf_", "-_Inf_" and
% "_NaN_" are decoded into their numeric values.
global pos inStr len esc index_esc len_esc
 % len, ns = length(inStr), keyboard
if inStr(pos) ~= '"'
    error_pos('String starting with " expected at position %d');
else
    pos = pos + 1;
end
str = '';
while pos <= len
    % jump directly to the next pre-indexed quote/backslash
    while index_esc <= len_esc && esc(index_esc) < pos
        index_esc = index_esc + 1;
    end
    if index_esc > len_esc
        str = [str inStr(pos:len)];
        pos = len + 1;
        break;
    else
        str = [str inStr(pos:esc(index_esc)-1)];
        pos = esc(index_esc);
    end
    nstr = length(str); switch inStr(pos)
        case '"'
            pos = pos + 1;
            if(~isempty(str))
                % decode JSONLab's non-finite placeholders
                if(strcmp(str,'_Inf_'))
                    str=Inf;
                elseif(strcmp(str,'-_Inf_'))
                    str=-Inf;
                elseif(strcmp(str,'_NaN_'))
                    str=NaN;
                end
            end
            return;
        case '\'
            if pos+1 > len
                error_pos('End of file reached right after escape character');
            end
            pos = pos + 1;
            switch inStr(pos)
                case {'"' '\' '/'}
                    str(nstr+1) = inStr(pos);
                    pos = pos + 1;
                case {'b' 'f' 'n' 'r' 't'}
                    str(nstr+1) = sprintf(['\' inStr(pos)]);
                    pos = pos + 1;
                case 'u'
                    if pos+4 > len
                        error_pos('End of file reached in escaped unicode character');
                    end
                    % keep \uXXXX escapes verbatim (not decoded here)
                    str(nstr+(1:6)) = inStr(pos-1:pos+4);
                    pos = pos + 5;
            end
        otherwise % should never happen
            str(nstr+1) = inStr(pos), keyboard
            pos = pos + 1;
    end
end
error_pos('End of file while expecting end of inStr');
%%-------------------------------------------------------------------------
function num = parse_number(varargin)
% Parse a JSON number at the global cursor and advance past it.
% Octave's sscanf does not report the consumed length the way MATLAB
% does, so the number's extent is measured with a regexp first.
global pos inStr len isoct
currstr=inStr(pos:end);
numstr=0;
if(isoct~=0)
    numstr=regexp(currstr,'^\s*-?(?:0|[1-9]\d*)(?:\.\d+)?(?:[eE][+\-]?\d+)?','end');
    [num, one] = sscanf(currstr, '%f', 1);
    delta=numstr+1;
else
    [num, one, err, delta] = sscanf(currstr, '%f', 1);
    if ~isempty(err)
        error_pos('Error reading number at position %d');
    end
end
pos = pos + delta-1;
%%-------------------------------------------------------------------------
function val = parse_value(varargin)
% Dispatch on the next character to parse any JSON value: string,
% array, object, number, true/false, or null (returned as []).
global pos inStr len
true = 1; false = 0;
pbar=jsonopt('progressbar_',-1,varargin{:});
if(pbar>0)
    waitbar(pos/len,pbar,'loading ...');
end
switch(inStr(pos))
    case '"'
        val = parseStr(varargin{:});
        return;
    case '['
        val = parse_array(varargin{:});
        return;
    case '{'
        val = parse_object(varargin{:});
        if isstruct(val)
            % convert annotated array structs back to native arrays
            if(~isempty(strmatch('x0x5F_ArrayType_',fieldnames(val), 'exact')))
                val=jstruct2array(val);
            end
        elseif isempty(val)
            val = struct;
        end
        return;
    case {'-','0','1','2','3','4','5','6','7','8','9'}
        val = parse_number(varargin{:});
        return;
    case 't'
        if pos+3 <= len && strcmpi(inStr(pos:pos+3), 'true')
            val = true;
            pos = pos + 4;
            return;
        end
    case 'f'
        if pos+4 <= len && strcmpi(inStr(pos:pos+4), 'false')
            val = false;
            pos = pos + 5;
            return;
        end
    case 'n'
        if pos+3 <= len && strcmpi(inStr(pos:pos+3), 'null')
            val = [];
            pos = pos + 4;
            return;
        end
end
error_pos('Value expected at position %d');
%%-------------------------------------------------------------------------
function error_pos(msg)
% Raise a parse error, substituting the current cursor position into
% msg and showing a short context window around the failure point.
global pos inStr len
poShow = max(min([pos-15 pos-1 pos pos+20],len),1);
if poShow(3) == poShow(2)
    poShow(3:4) = poShow(2)+[0 -1]; % display nothing after
end
msg = [sprintf(msg, pos) ': ' ...
inStr(poShow(1):poShow(2)) '<error>' inStr(poShow(3):poShow(4)) ];
error( ['JSONparser:invalidFormat: ' msg] );
%%-------------------------------------------------------------------------
function str = valid_field(str)
% Sanitize a JSON key into a legal MATLAB struct field name.
% From MATLAB doc: field names must begin with a letter, which may be
% followed by any combination of letters, digits, and underscores.
% Invalid characters will be converted to underscores, and the prefix
% "x0x[Hex code]_" will be added if the first character is not a letter.
global isoct
pos=regexp(str,'^[^A-Za-z]','once');
if(~isempty(pos))
    if(~isoct)
        str=regexprep(str,'^([^A-Za-z])','x0x${sprintf(''%X'',unicode2native($1))}_','once');
    else
        str=sprintf('x0x%X_%s',char(str(1)),str(2:end));
    end
end
if(isempty(regexp(str,'[^0-9A-Za-z_]', 'once' ))) return; end
if(~isoct)
    str=regexprep(str,'([^0-9A-Za-z_])','_0x${sprintf(''%X'',unicode2native($1))}_');
else
    % Octave path: rebuild the name manually, hex-encoding each
    % invalid character as _0xNN_
    pos=regexp(str,'[^0-9A-Za-z_]');
    if(isempty(pos)) return; end
    str0=str;
    pos0=[0 pos(:)' length(str)];
    str='';
    for i=1:length(pos)
        str=[str str0(pos0(i)+1:pos(i)-1) sprintf('_0x%X_',str0(pos(i)))];
    end
    % BUG FIX: compare against the ORIGINAL string's length (str0), not
    % the rebuilt str — str has a different length after encoding, and a
    % coincidental match used to silently drop the trailing characters.
    if(pos(end)~=length(str0))
        str=[str str0(pos0(end-1)+1:pos0(end))];
    end
end
%str(~isletter(str) & ~('0' <= str & str <= '9')) = '_';
%%-------------------------------------------------------------------------
function endpos = matching_quote(str,pos)
% Return the index of the next unescaped double quote in str, searching
% from position pos; errors if no closing quote exists.
len=length(str);
% BUG FIX: use pos<=len so a closing quote in the FINAL position of str
% is found; the old pos<len never examined the last character and raised
% a spurious "unmatched quotation mark".
while(pos<=len)
    if(str(pos)=='"')
        if(~(pos>1 && str(pos-1)=='\')) % skip escaped quotes (\")
            endpos=pos;
            return;
        end
    end
    pos=pos+1;
end
error('unmatched quotation mark');
%%-------------------------------------------------------------------------
function [endpos, e1l, e1r, maxlevel] = matching_bracket(str,pos)
% Find the ']' matching the '[' just before position pos, scanning only
% the pre-indexed bracket/quote tokens (global arraytoken) for speed.
%   endpos   - index in str of the matching ']'
%   e1l/e1r  - first nested '[' / first ']' seen (empty for 1D arrays)
%   maxlevel - maximum nesting depth encountered
global arraytoken
level=1;
maxlevel=level;
endpos=0;
bpos=arraytoken(arraytoken>=pos); % tokens at/after the search start
tokens=str(bpos);
len=length(tokens);
pos=1;
e1l=[];
e1r=[];
while(pos<=len)
    c=tokens(pos);
    if(c==']')
        level=level-1;
        if(isempty(e1r)) e1r=bpos(pos); end
        if(level==0)
            endpos=bpos(pos);
            return
        end
    end
    if(c=='[')
        if(isempty(e1l)) e1l=bpos(pos); end
        level=level+1;
        maxlevel=max(maxlevel,level);
    end
    if(c=='"')
        % skip over string contents so brackets inside strings are ignored
        pos=matching_quote(tokens,pos+1);
    end
    pos=pos+1;
end
if(endpos==0)
    error('unmatched "]"');
end
|
github
|
zzlyw/machine-learning-exercises-master
|
loadubjson.m
|
.m
|
machine-learning-exercises-master/machine-learning-ex8/ex8/lib/jsonlab/loadubjson.m
| 15,574 |
utf_8
|
5974e78e71b81b1e0f76123784b951a4
|
function data = loadubjson(fname,varargin)
%
% data=loadubjson(fname,opt)
% or
% data=loadubjson(fname,'param1',value1,'param2',value2,...)
%
% parse a JSON (JavaScript Object Notation) file or string
%
% authors:Qianqian Fang (fangq<at> nmr.mgh.harvard.edu)
% created on 2013/08/01
%
% $Id: loadubjson.m 460 2015-01-03 00:30:45Z fangq $
%
% input:
% fname: input file name, if fname contains "{}" or "[]", fname
% will be interpreted as a UBJSON string
% opt: a struct to store parsing options, opt can be replaced by
% a list of ('param',value) pairs - the param string is equivallent
% to a field in opt. opt can have the following
% fields (first in [.|.] is the default)
%
% opt.SimplifyCell [0|1]: if set to 1, loadubjson will call cell2mat
% for each element of the JSON data, and group
% arrays based on the cell2mat rules.
% opt.IntEndian [B|L]: specify the endianness of the integer fields
% in the UBJSON input data. B - Big-Endian format for
% integers (as required in the UBJSON specification);
% L - input integer fields are in Little-Endian order.
%
% output:
% dat: a cell array, where {...} blocks are converted into cell arrays,
% and [...] are converted to arrays
%
% examples:
% obj=struct('string','value','array',[1 2 3]);
% ubjdata=saveubjson('obj',obj);
% dat=loadubjson(ubjdata)
% dat=loadubjson(['examples' filesep 'example1.ubj'])
% dat=loadubjson(['examples' filesep 'example1.ubj'],'SimplifyCell',1)
%
% license:
% BSD, see LICENSE_BSD.txt files for details
%
% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)
%
% Parser state is shared with sub-parsers via globals; fileendian /
% systemendian control integer byte swapping in parse_number/parse_block.
global pos inStr len esc index_esc len_esc isoct arraytoken fileendian systemendian
% fname containing a brace/bracket is treated as inline UBJSON text
if(regexp(fname,'[\{\}\]\[]','once'))
    string=fname;
elseif(exist(fname,'file'))
    fid = fopen(fname,'rb');
    string = fread(fid,inf,'uint8=>char')';
    fclose(fid);
else
    error('input file does not exist');
end
pos = 1; len = length(string); inStr = string;
isoct=exist('OCTAVE_VERSION','builtin');
arraytoken=find(inStr=='[' | inStr==']' | inStr=='"');
jstr=regexprep(inStr,'\\\\',' ');
escquote=regexp(jstr,'\\"');
arraytoken=sort([arraytoken escquote]);
% String delimiters and escape chars identified to improve speed:
esc = find(inStr=='"' | inStr=='\' ); % comparable to: regexp(inStr, '["\\]');
index_esc = 1; len_esc = length(esc);
opt=varargin2struct(varargin{:});
fileendian=upper(jsonopt('IntEndian','B',opt));
[os,maxelem,systemendian]=computer; % 3rd output: 'B' or 'L' for this host
% a file may contain several concatenated top-level UBJSON values
jsoncount=1;
while pos <= len
    switch(next_char)
        case '{'
            data{jsoncount} = parse_object(opt);
        case '['
            data{jsoncount} = parse_array(opt);
        otherwise
            error_pos('Outer level structure must be an object or an array');
    end
    jsoncount=jsoncount+1;
end % while
jsoncount=length(data);
if(jsoncount==1 && iscell(data))
    data=data{1}; % unwrap a single top-level value
end
if(~isempty(data))
    if(isstruct(data)) % data can be a struct array
        data=jstruct2array(data);
    elseif(iscell(data))
        data=jcell2array(data);
    end
end
%%
function newdata=parse_collection(id,data,obj)
% NOTE(review): this subfunction appears to be dead code — it is not
% called anywhere in this file, and `jsoncount` is not defined in this
% scope (it is a local of the main function, not a global), so the
% condition would error if invoked. Kept as-is pending confirmation.
if(jsoncount>0 && exist('data','var'))
    if(~iscell(data))
        newdata=cell(1);
        newdata{1}=data;
        data=newdata;
    end
end
%%
function newdata=jcell2array(data)
% Recursively walk a parsed cell array and convert any annotated-array
% structs (via jstruct2array) back into MATLAB arrays.
len=length(data);
newdata=data;
for i=1:len
    if(isstruct(data{i}))
        newdata{i}=jstruct2array(data{i});
    elseif(iscell(data{i}))
        newdata{i}=jcell2array(data{i}); % recurse into nested cells
    end
end
%%-------------------------------------------------------------------------
function newdata=jstruct2array(data)
% Convert structs that carry JSONLab array annotations
% (x0x5F_ArrayType_/x0x5F_ArrayData_/...) back into native MATLAB
% arrays, reconstructing sparse and complex layouts. Plain structs are
% returned with their struct-valued fields converted recursively.
fn=fieldnames(data);
newdata=data;
len=length(data);
for i=1:length(fn) % depth-first
    for j=1:len
        if(isstruct(getfield(data(j),fn{i})))
            newdata(j)=setfield(newdata(j),fn{i},jstruct2array(getfield(data(j),fn{i})));
        end
    end
end
if(~isempty(strmatch('x0x5F_ArrayType_',fn)) && ~isempty(strmatch('x0x5F_ArrayData_',fn)))
    newdata=cell(len,1);
    for j=1:len
        % cast the flat payload back to the recorded class
        ndata=cast(data(j).x0x5F_ArrayData_,data(j).x0x5F_ArrayType_);
        iscpx=0;
        if(~isempty(strmatch('x0x5F_ArrayIsComplex_',fn)))
            if(data(j).x0x5F_ArrayIsComplex_)
                iscpx=1;
            end
        end
        if(~isempty(strmatch('x0x5F_ArrayIsSparse_',fn)))
            if(data(j).x0x5F_ArrayIsSparse_)
                if(~isempty(strmatch('x0x5F_ArraySize_',fn)))
                    % double() because UBJSON sizes arrive as integers
                    dim=double(data(j).x0x5F_ArraySize_);
                    if(iscpx && size(ndata,2)==4-any(dim==1))
                        ndata(:,end-1)=complex(ndata(:,end-1),ndata(:,end));
                    end
                    if isempty(ndata)
                        % All-zeros sparse
                        ndata=sparse(dim(1),prod(dim(2:end)));
                    elseif dim(1)==1
                        % Sparse row vector
                        ndata=sparse(1,ndata(:,1),ndata(:,2),dim(1),prod(dim(2:end)));
                    elseif dim(2)==1
                        % Sparse column vector
                        ndata=sparse(ndata(:,1),1,ndata(:,2),dim(1),prod(dim(2:end)));
                    else
                        % Generic sparse array.
                        ndata=sparse(ndata(:,1),ndata(:,2),ndata(:,3),dim(1),prod(dim(2:end)));
                    end
                else
                    % no recorded size: let sparse() infer the dimensions
                    if(iscpx && size(ndata,2)==4)
                        ndata(:,3)=complex(ndata(:,3),ndata(:,4));
                    end
                    ndata=sparse(ndata(:,1),ndata(:,2),ndata(:,3));
                end
            end
        elseif(~isempty(strmatch('x0x5F_ArraySize_',fn)))
            % dense case: pair columns into complex values, then reshape
            if(iscpx && size(ndata,2)==2)
                ndata=complex(ndata(:,1),ndata(:,2));
            end
            ndata=reshape(ndata(:),data(j).x0x5F_ArraySize_);
        end
        newdata{j}=ndata;
    end
    if(len==1)
        newdata=newdata{1};
    end
end
%%-------------------------------------------------------------------------
function object = parse_object(varargin)
% Parse a UBJSON object ('{...}') at the global cursor into a struct.
% Supports the optimized-container headers: '$' declares a uniform
% value type (currently only consumed, see TODO) and '#' a fixed count,
% in which case there is no closing '}' to consume.
parse_char('{');
object = [];
type='';
count=-1;
if(next_char == '$')
    type=inStr(pos+1); % TODO
    pos=pos+2;
end
if(next_char == '#')
    pos=pos+1;
    count=double(parse_number());
end
if next_char ~= '}'
    num=0;
    while 1
        str = parseStr(varargin{:});
        if isempty(str)
            error_pos('Name of value at position %d cannot be empty');
        end
        %parse_char(':');  % UBJSON has no name/value separator
        val = parse_value(varargin{:});
        num=num+1;
        % dynamic assignment through eval since field name is computed
        eval( sprintf( 'object.%s  = val;', valid_field(str) ) );
        if next_char == '}' || (count>=0 && num>=count)
            break;
        end
        %parse_char(',');  % UBJSON has no element separator
    end
end
if(count==-1)
    parse_char('}'); % counted containers carry no closing brace
end
%%-------------------------------------------------------------------------
function [cid,len]=elem_info(type)
% Map a UBJSON numeric type marker to its MATLAB class and byte width.
%   type - one of the markers 'i','U','I','l','L','d','D'
%   cid  - MATLAB class name string (e.g. 'int16')
%   len  - element size in bytes
markers='iUIlLdD';
classes={'int8','uint8','int16','int32','int64','single','double'};
widths=[1 1 2 4 8 4 8];
idx=strfind(markers,type);
if(isempty(idx))
    error_pos('unsupported type at position %d');
end
cid=classes{idx};
len=widths(idx);
%%-------------------------------------------------------------------------
function [data adv]=parse_block(type,count,varargin)
% Decode `count` packed binary elements of UBJSON type marker `type`
% starting at the global cursor.
%   data - decoded numeric vector (class given by elem_info)
%   adv  - number of input bytes consumed (caller advances pos)
global pos inStr isoct fileendian systemendian
[cid,len]=elem_info(type);
datastr=inStr(pos:pos+len*count-1);
if(isoct)
    newdata=int8(datastr);
else
    newdata=uint8(datastr);
end
id=strfind('iUIlLdD',type);
% integer types (ids 1-5) honor the declared file endianness
if(id<=5 && fileendian~=systemendian)
    newdata=swapbytes(typecast(newdata,cid));
end
data=typecast(newdata,cid);
adv=double(len*count);
%%-------------------------------------------------------------------------
function object = parse_array(varargin) % JSON array is written in row-major order
% Parse a UBJSON array ('[...]') at the global cursor.
% Optimized containers ('$' type + '#' count) are decoded in one block
% via parse_block; otherwise elements are parsed one by one into a cell.
global pos inStr isoct
parse_char('[');
object = cell(0, 1);
dim=[];
type='';
count=-1;
if(next_char == '$')
    type=inStr(pos+1); % uniform element type marker
    pos=pos+2;
end
if(next_char == '#')
    pos=pos+1;
    if(next_char=='[')
        % N-D container: the count is itself an array of dimensions
        dim=parse_array(varargin{:});
        count=prod(double(dim));
    else
        count=double(parse_number());
    end
end
if(~isempty(type))
    if(count>=0)
        % typed AND counted: decode the whole payload in one shot
        [object adv]=parse_block(type,count,varargin{:});
        if(~isempty(dim))
            object=reshape(object,dim);
        end
        pos=pos+adv;
        return;
    else
        % typed but uncounted: infer the count from the closing bracket
        endpos=matching_bracket(inStr,pos);
        [cid,len]=elem_info(type);
        count=(endpos-pos)/len;
        [object adv]=parse_block(type,count,varargin{:});
        pos=pos+adv;
        parse_char(']');
        return;
    end
end
if next_char ~= ']'
    while 1
        val = parse_value(varargin{:});
        object{end+1} = val;
        if next_char == ']'
            break;
        end
        %parse_char(',');  % UBJSON has no element separator
    end
end
if(jsonopt('SimplifyCell',0,varargin{:})==1)
    try
        % try to collapse a homogeneous cell array into a matrix
        oldobj=object;
        object=cell2mat(object')';
        if(iscell(oldobj) && isstruct(object) && numel(object)>1 && jsonopt('SimplifyCellArray',1,varargin{:})==0)
            object=oldobj;
        elseif(size(object,1)>1 && ndims(object)==2)
            object=object';
        end
    catch
    end
end
if(count==-1)
    parse_char(']'); % counted containers carry no closing bracket
end
%%-------------------------------------------------------------------------
function parse_char(c)
% Consume the expected character c at the global cursor (skipping
% surrounding whitespace); raise a positioned error otherwise.
global pos inStr len
skip_whitespace;
if pos > len || inStr(pos) ~= c
    error_pos(sprintf('Expected %c at position %%d', c));
else
    pos = pos + 1;
    skip_whitespace;
end
%%-------------------------------------------------------------------------
function c = next_char
% Peek at the next non-whitespace character without consuming it;
% returns [] at end of input.
global pos inStr len
skip_whitespace;
if pos > len
    c = [];
else
    c = inStr(pos);
end
%%-------------------------------------------------------------------------
function skip_whitespace
% Advance the global cursor past any whitespace characters.
global pos inStr len
while pos <= len && isspace(inStr(pos))
    pos = pos + 1;
end
%%-------------------------------------------------------------------------
function str = parseStr(varargin)
% Parse a UBJSON string at the global cursor. Markers: 'S' (length-
% prefixed string), 'H' (high-precision number, stored as its text),
% or 'C' (a single character with no length prefix).
global pos inStr esc index_esc len_esc
 % len, ns = length(inStr), keyboard
type=inStr(pos);
if type ~= 'S' && type ~= 'C' && type ~= 'H'
    error_pos('String starting with S expected at position %d');
else
    pos = pos + 1;
end
if(type == 'C')
    str=inStr(pos);
    pos=pos+1;
    return;
end
% 'S'/'H' carry a numeric byte-length prefix before the payload
bytelen=double(parse_number());
if(length(inStr)>=pos+bytelen-1)
    str=inStr(pos:pos+bytelen-1);
    pos=pos+bytelen;
else
    error_pos('End of file while expecting end of inStr');
end
%%-------------------------------------------------------------------------
function num = parse_number(varargin)
% Parse one binary UBJSON number at the global cursor: a one-byte type
% marker ('i','U','I','l','L','d','D') followed by its payload bytes.
% Integer types are byte-swapped when file and host endianness differ.
global pos inStr len isoct fileendian systemendian
id=strfind('iUIlLdD',inStr(pos));
if(isempty(id))
    error_pos('expecting a number at position %d');
end
type={'int8','uint8','int16','int32','int64','single','double'};
bytelen=[1,1,2,4,8,4,8];
datastr=inStr(pos+1:pos+bytelen(id));
if(isoct)
    newdata=int8(datastr);
else
    newdata=uint8(datastr);
end
% integer types (ids 1-5) honor the declared file endianness
if(id<=5 && fileendian~=systemendian)
    newdata=swapbytes(typecast(newdata,type{id}));
end
num=typecast(newdata,type{id});
pos = pos + bytelen(id)+1; % skip marker byte plus payload
%%-------------------------------------------------------------------------
function val = parse_value(varargin)
% Dispatch on the UBJSON marker at the global cursor to parse any
% value: string ('S'/'C'/'H'), array, object, number, T/F booleans,
% or Z/N (null / no-op), which are returned as [].
global pos inStr len
true = 1; false = 0;
switch(inStr(pos))
    case {'S','C','H'}
        val = parseStr(varargin{:});
        return;
    case '['
        val = parse_array(varargin{:});
        return;
    case '{'
        val = parse_object(varargin{:});
        if isstruct(val)
            % convert annotated array structs back to native arrays
            if(~isempty(strmatch('x0x5F_ArrayType_',fieldnames(val), 'exact')))
                val=jstruct2array(val);
            end
        elseif isempty(val)
            val = struct;
        end
        return;
    case {'i','U','I','l','L','d','D'}
        val = parse_number(varargin{:});
        return;
    case 'T'
        val = true;
        pos = pos + 1;
        return;
    case 'F'
        val = false;
        pos = pos + 1;
        return;
    case {'Z','N'}
        val = [];
        pos = pos + 1;
        return;
end
error_pos('Value expected at position %d');
%%-------------------------------------------------------------------------
function error_pos(msg)
% Raise a parse error, substituting the current cursor position into
% msg and showing a short context window around the failure point.
global pos inStr len
poShow = max(min([pos-15 pos-1 pos pos+20],len),1);
if poShow(3) == poShow(2)
    poShow(3:4) = poShow(2)+[0 -1]; % display nothing after
end
msg = [sprintf(msg, pos) ': ' ...
inStr(poShow(1):poShow(2)) '<error>' inStr(poShow(3):poShow(4)) ];
error( ['JSONparser:invalidFormat: ' msg] );
%%-------------------------------------------------------------------------
function str = valid_field(str)
% Sanitize a JSON key into a legal MATLAB struct field name.
% From MATLAB doc: field names must begin with a letter, which may be
% followed by any combination of letters, digits, and underscores.
% Invalid characters will be converted to underscores, and the prefix
% "x0x[Hex code]_" will be added if the first character is not a letter.
global isoct
pos=regexp(str,'^[^A-Za-z]','once');
if(~isempty(pos))
    if(~isoct)
        str=regexprep(str,'^([^A-Za-z])','x0x${sprintf(''%X'',unicode2native($1))}_','once');
    else
        str=sprintf('x0x%X_%s',char(str(1)),str(2:end));
    end
end
if(isempty(regexp(str,'[^0-9A-Za-z_]', 'once' ))) return; end
if(~isoct)
    str=regexprep(str,'([^0-9A-Za-z_])','_0x${sprintf(''%X'',unicode2native($1))}_');
else
    % Octave path: rebuild the name manually, hex-encoding each
    % invalid character as _0xNN_
    pos=regexp(str,'[^0-9A-Za-z_]');
    if(isempty(pos)) return; end
    str0=str;
    pos0=[0 pos(:)' length(str)];
    str='';
    for i=1:length(pos)
        str=[str str0(pos0(i)+1:pos(i)-1) sprintf('_0x%X_',str0(pos(i)))];
    end
    % BUG FIX: compare against the ORIGINAL string's length (str0), not
    % the rebuilt str — str has a different length after encoding, and a
    % coincidental match used to silently drop the trailing characters.
    if(pos(end)~=length(str0))
        str=[str str0(pos0(end-1)+1:pos0(end))];
    end
end
%str(~isletter(str) & ~('0' <= str & str <= '9')) = '_';
%%-------------------------------------------------------------------------
function endpos = matching_quote(str,pos)
% Scan forward from index pos for the next double quote that is not
% escaped by a preceding backslash; return its index or raise an error.
len=length(str);
while(pos<len)
    isquote = (str(pos)=='"');
    isescaped = (pos>1 && str(pos-1)=='\');
    if(isquote && ~isescaped)
        endpos=pos;
        return;
    end
    pos=pos+1;
end
error('unmatched quotation mark');
%%-------------------------------------------------------------------------
function [endpos e1l e1r maxlevel] = matching_bracket(str,pos)
% Find the ']' matching the '[' at/after position pos, scanning only the
% precomputed bracket/quote positions in the global arraytoken index.
% Returns: endpos - matching ']' position; e1l/e1r - first inner '['/']'
% positions; maxlevel - deepest nesting level encountered.
global arraytoken
level=1;
maxlevel=level;
endpos=0;
% restrict the scan to candidate token positions at/after pos
bpos=arraytoken(arraytoken>=pos);
tokens=str(bpos);
len=length(tokens);
pos=1;
e1l=[];
e1r=[];
while(pos<=len)
    c=tokens(pos);
    if(c==']')
        level=level-1;
        if(isempty(e1r)) e1r=bpos(pos); end
        if(level==0)
            endpos=bpos(pos);
            return
        end
    end
    if(c=='[')
        if(isempty(e1l)) e1l=bpos(pos); end
        level=level+1;
        maxlevel=max(maxlevel,level);
    end
    if(c=='"')
        % skip over quoted strings so brackets inside them are ignored
        pos=matching_quote(tokens,pos+1);
    end
    pos=pos+1;
end
if(endpos==0)
    error('unmatched "]"');
end
|
github
|
zzlyw/machine-learning-exercises-master
|
saveubjson.m
|
.m
|
machine-learning-exercises-master/machine-learning-ex8/ex8/lib/jsonlab/saveubjson.m
| 16,123 |
utf_8
|
61d4f51010aedbf97753396f5d2d9ec0
|
function json=saveubjson(rootname,obj,varargin)
%
% json=saveubjson(rootname,obj,filename)
% or
% json=saveubjson(rootname,obj,opt)
% json=saveubjson(rootname,obj,'param1',value1,'param2',value2,...)
%
% convert a MATLAB object (cell, struct or array) into a Universal
% Binary JSON (UBJSON) binary string
%
% author: Qianqian Fang (fangq<at> nmr.mgh.harvard.edu)
% created on 2013/08/17
%
% input:
% rootname: the name of the root-object, when set to '', the root name
% is ignored, however, when opt.ForceRootName is set to 1 (see below),
% the MATLAB variable name will be used as the root name.
% obj: a MATLAB object (array, cell, cell array, struct, struct array)
% filename: a string for the file name to save the output UBJSON data
% opt: a struct for additional options, ignore to use default values.
% opt can have the following fields (first in [.|.] is the default)
%
% opt.FileName [''|string]: a file name to save the output JSON data
% opt.ArrayToStruct[0|1]: when set to 0, saveubjson outputs 1D/2D
% array in JSON array format; if sets to 1, an
% array will be shown as a struct with fields
% "_ArrayType_", "_ArraySize_" and "_ArrayData_"; for
% sparse arrays, the non-zero elements will be
% saved to _ArrayData_ field in triplet-format i.e.
% (ix,iy,val) and "_ArrayIsSparse_" will be added
% with a value of 1; for a complex array, the
% _ArrayData_ array will include two columns
% (4 for sparse) to record the real and imaginary
% parts, and also "_ArrayIsComplex_":1 is added.
% opt.ParseLogical [1|0]: if this is set to 1, logical array elem
% will use true/false rather than 1/0.
% opt.NoRowBracket [1|0]: if this is set to 1, arrays with a single
% numerical element will be shown without a square
% bracket, unless it is the root object; if 0, square
% brackets are forced for any numerical arrays.
% opt.ForceRootName [0|1]: when set to 1 and rootname is empty, saveubjson
% will use the name of the passed obj variable as the
% root object name; if obj is an expression and
% does not have a name, 'root' will be used; if this
% is set to 0 and rootname is empty, the root level
% will be merged down to the lower level.
% opt.JSONP [''|string]: to generate a JSONP output (JSON with padding),
% for example, if opt.JSON='foo', the JSON data is
% wrapped inside a function call as 'foo(...);'
% opt.UnpackHex [1|0]: conver the 0x[hex code] output by loadjson
% back to the string form
%
% opt can be replaced by a list of ('param',value) pairs. The param
% string is equivallent to a field in opt and is case sensitive.
% output:
% json: a binary string in the UBJSON format (see http://ubjson.org)
%
% examples:
% jsonmesh=struct('MeshNode',[0 0 0;1 0 0;0 1 0;1 1 0;0 0 1;1 0 1;0 1 1;1 1 1],...
% 'MeshTetra',[1 2 4 8;1 3 4 8;1 2 6 8;1 5 6 8;1 5 7 8;1 3 7 8],...
% 'MeshTri',[1 2 4;1 2 6;1 3 4;1 3 7;1 5 6;1 5 7;...
% 2 8 4;2 8 6;3 8 4;3 8 7;5 8 6;5 8 7],...
% 'MeshCreator','FangQ','MeshTitle','T6 Cube',...
% 'SpecialData',[nan, inf, -inf]);
% saveubjson('jsonmesh',jsonmesh)
% saveubjson('jsonmesh',jsonmesh,'meshdata.ubj')
%
% license:
% BSD, see LICENSE_BSD.txt files for details
%
% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)
%
% single-argument call: saveubjson(obj) -- derive the root name from the
% caller's variable name, falling back to 'root'
if(nargin==1)
    varname=inputname(1);
    obj=rootname;
    if(isempty(varname))
        varname='root';
    end
    rootname=varname;
else
    varname=inputname(2);
end
% a single string third argument is shorthand for the output file name
if(length(varargin)==1 && ischar(varargin{1}))
    opt=struct('FileName',varargin{1});
else
    opt=varargin2struct(varargin{:});
end
opt.IsOctave=exist('OCTAVE_VERSION','builtin');
rootisarray=0;
rootlevel=1;
forceroot=jsonopt('ForceRootName',0,opt);
if((isnumeric(obj) || islogical(obj) || ischar(obj) || isstruct(obj) || iscell(obj)) && isempty(rootname) && forceroot==0)
    rootisarray=1;
    rootlevel=0;
else
    if(isempty(rootname))
        rootname=varname;
    end
end
if((isstruct(obj) || iscell(obj))&& isempty(rootname) && forceroot)
    rootname='root';
end
json=obj2ubjson(rootname,obj,rootlevel,opt);
if(~rootisarray)
    json=['{' json '}'];
end
jsonp=jsonopt('JSONP','',opt);
if(~isempty(jsonp))
    json=[jsonp '(' json ')'];
end
% save to a file if FileName is set, suggested by Patrick Rapin
if(~isempty(jsonopt('FileName','',opt)))
    fid = fopen(opt.FileName, 'wb');
    % robustness fix: fopen returns -1 on failure; report it explicitly
    % instead of letting fwrite fail with a cryptic error
    if(fid==-1)
        error('saveubjson: could not open file %s for writing',opt.FileName);
    end
    fwrite(fid,json);
    fclose(fid);
end
%%-------------------------------------------------------------------------
function txt=obj2ubjson(name,item,level,varargin)
% Dispatch a MATLAB value to the type-specific UBJSON serializer
% (cell, struct, char, or numeric/logical array).
if(iscell(item))
    txt=cell2ubjson(name,item,level,varargin{:});
elseif(isstruct(item))
    txt=struct2ubjson(name,item,level,varargin{:});
elseif(ischar(item))
    txt=str2ubjson(name,item,level,varargin{:});
else
    txt=mat2ubjson(name,item,level,varargin{:});
end
%%-------------------------------------------------------------------------
function txt=cell2ubjson(name,item,level,varargin)
% Serialize a (possibly named) cell array as a UBJSON array; each cell
% element is converted recursively by obj2ubjson.
txt='';
if(~iscell(item))
    error('input is not a cell');
end
dim=size(item);
if(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now
    item=reshape(item,dim(1),numel(item)/dim(1));
    dim=size(item);
end
len=numel(item); % let's handle 1D cell first
if(len>1)
    if(~isempty(name))
        txt=[S_(checkname(name,varargin{:})) '[']; name='';
    else
        txt='[';
    end
elseif(len==0)
    % an empty cell maps to UBJSON null ('Z')
    if(~isempty(name))
        txt=[S_(checkname(name,varargin{:})) 'Z']; name='';
    else
        txt='Z';
    end
end
for j=1:dim(2)
    if(dim(1)>1) txt=[txt '[']; end % nested array per column when 2D
    for i=1:dim(1)
        txt=[txt obj2ubjson(name,item{i,j},level+(len>1),varargin{:})];
    end
    if(dim(1)>1) txt=[txt ']']; end
end
if(len>1) txt=[txt ']']; end
%%-------------------------------------------------------------------------
function txt=struct2ubjson(name,item,level,varargin)
% Serialize a (possibly named) struct or struct array as a UBJSON object
% or array of objects; each field value is converted via obj2ubjson.
txt='';
if(~isstruct(item))
    error('input is not a struct');
end
dim=size(item);
if(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now
    item=reshape(item,dim(1),numel(item)/dim(1));
    dim=size(item);
end
len=numel(item);
if(~isempty(name))
    if(len>1) txt=[S_(checkname(name,varargin{:})) '[']; end
else
    if(len>1) txt='['; end
end
for j=1:dim(2)
    if(dim(1)>1) txt=[txt '[']; end % nested array per column when 2D
    for i=1:dim(1)
        names = fieldnames(item(i,j));
        % the name prefix is emitted only for a scalar struct; arrays of
        % structs carry the name on the enclosing '[' above
        if(~isempty(name) && len==1)
            txt=[txt S_(checkname(name,varargin{:})) '{'];
        else
            txt=[txt '{'];
        end
        if(~isempty(names))
            for e=1:length(names)
                txt=[txt obj2ubjson(names{e},getfield(item(i,j),...
                names{e}),level+(dim(1)>1)+1+(len>1),varargin{:})];
            end
        end
        txt=[txt '}'];
    end
    if(dim(1)>1) txt=[txt ']']; end
end
if(len>1) txt=[txt ']']; end
%%-------------------------------------------------------------------------
function txt=str2ubjson(name,item,level,varargin)
% Serialize a char array as UBJSON string(s); a multi-row char matrix
% becomes an array with one string per row.
txt='';
if(~ischar(item))
    error('input is not a string');
end
% guarantee at least a 1x0 shape so size(item,1) is well defined
item=reshape(item, max(size(item),[1 0]));
len=size(item,1);
if(~isempty(name))
    if(len>1) txt=[S_(checkname(name,varargin{:})) '[']; end
else
    if(len>1) txt='['; end
end
isoct=jsonopt('IsOctave',0,varargin{:});
for e=1:len
    val=item(e,:);
    if(len==1)
        % NOTE: the interleaved '' literals are empty-string no-ops kept
        % from the text-JSON version of this code; only S_ output remains
        obj=['' S_(checkname(name,varargin{:})) '' '',S_(val),''];
        if(isempty(name)) obj=['',S_(val),'']; end
        txt=[txt,'',obj];
    else
        txt=[txt,'',['',S_(val),'']];
    end
end
if(len>1) txt=[txt ']']; end
%%-------------------------------------------------------------------------
function txt=mat2ubjson(name,item,level,varargin)
% Serialize a numeric/logical array. Plain real 1D/2D arrays go through
% matdata2ubjson; sparse, complex, empty, >2D, or ArrayToStruct-forced
% arrays are emitted as an annotated object with _ArrayType_/_ArraySize_/
% _ArrayData_ (plus _ArrayIsSparse_/_ArrayIsComplex_ flags).
if(~isnumeric(item) && ~islogical(item))
    error('input is not an array');
end
if(length(size(item))>2 || issparse(item) || ~isreal(item) || ...
isempty(item) || jsonopt('ArrayToStruct',0,varargin{:}))
    cid=I_(uint32(max(size(item))));
    if(isempty(name))
        txt=['{' S_('_ArrayType_'),S_(class(item)),S_('_ArraySize_'),I_a(size(item),cid(1)) ];
    else
        if(isempty(item))
            % a named empty array maps directly to UBJSON null ('Z')
            txt=[S_(checkname(name,varargin{:})),'Z'];
            return;
        else
            txt=[S_(checkname(name,varargin{:})),'{',S_('_ArrayType_'),S_(class(item)),S_('_ArraySize_'),I_a(size(item),cid(1))];
        end
    end
else
    % simple real array: emit data directly, no annotation wrapper
    if(isempty(name))
        txt=matdata2ubjson(item,level+1,varargin{:});
    else
        if(numel(item)==1 && jsonopt('NoRowBracket',1,varargin{:})==1)
            % scalar: strip the surrounding brackets when allowed
            numtxt=regexprep(regexprep(matdata2ubjson(item,level+1,varargin{:}),'^\[',''),']','');
            txt=[S_(checkname(name,varargin{:})) numtxt];
        else
            txt=[S_(checkname(name,varargin{:})),matdata2ubjson(item,level+1,varargin{:})];
        end
    end
    return;
end
if(issparse(item))
    [ix,iy]=find(item);
    data=full(item(find(item)));
    if(~isreal(item))
        data=[real(data(:)),imag(data(:))];
        if(size(item,1)==1)
            % Kludge to have data's 'transposedness' match item's.
            % (Necessary for complex row vector handling below.)
            data=data';
        end
        txt=[txt,S_('_ArrayIsComplex_'),'T'];
    end
    txt=[txt,S_('_ArrayIsSparse_'),'T'];
    if(size(item,1)==1)
        % Row vector, store only column indices.
        txt=[txt,S_('_ArrayData_'),...
        matdata2ubjson([iy(:),data'],level+2,varargin{:})];
    elseif(size(item,2)==1)
        % Column vector, store only row indices.
        txt=[txt,S_('_ArrayData_'),...
        matdata2ubjson([ix,data],level+2,varargin{:})];
    else
        % General case, store row and column indices.
        txt=[txt,S_('_ArrayData_'),...
        matdata2ubjson([ix,iy,data],level+2,varargin{:})];
    end
else
    if(isreal(item))
        txt=[txt,S_('_ArrayData_'),...
        matdata2ubjson(item(:)',level+2,varargin{:})];
    else
        % complex dense array: real and imaginary parts as two columns
        txt=[txt,S_('_ArrayIsComplex_'),'T'];
        txt=[txt,S_('_ArrayData_'),...
        matdata2ubjson([real(item(:)) imag(item(:))],level+2,varargin{:})];
    end
end
txt=[txt,'}'];
%%-------------------------------------------------------------------------
function txt=matdata2ubjson(mat,level,varargin)
% Encode a numeric/logical array body in UBJSON, choosing the smallest
% integer marker that represents the data, or float for non-integers.
% Empty input maps to the UBJSON null marker 'Z'.
if(isempty(mat))
    txt='Z';
    return;
end
if(size(mat,1)==1)
    level=level-1;
end
type='';
hasnegtive=(mat<0);
if(isa(mat,'integer') || isinteger(mat) || (isfloat(mat) && all(mod(mat(:),1) == 0)))
    % bug fix: the original tested isempty(hasnegtive), which is only true
    % for empty input (handled above), so the unsigned 'U' branch was
    % unreachable; test for the absence of negative values instead. Also
    % use a strict < 2^8 bound, since 256 does not fit in a uint8.
    if(~any(hasnegtive(:)))
        if(max(mat(:))<2^8)
            type='U';
        end
    end
    if(isempty(type))
        % todo - need to consider negative ones separately
        id= histc(abs(max(mat(:))),[0 2^7 2^15 2^31 2^63]);
        if(isempty(find(id)))
            error('high-precision data is not yet supported');
        end
        key='iIlL';
        type=key(find(id));
    end
    txt=[I_a(mat(:),type,size(mat))];
elseif(islogical(mat))
    logicalval='FT';
    if(numel(mat)==1)
        % scalar logical: bare 'T'/'F' marker
        txt=logicalval(mat+1);
    else
        % logical array: optimized uint8 container
        txt=['[$U#' I_a(size(mat),'l') typecast(swapbytes(uint8(mat(:)')),'uint8')];
    end
else
    if(numel(mat)==1)
        txt=['[' D_(mat) ']'];
    else
        txt=D_a(mat(:),'D',size(mat));
    end
end
% map +/-Inf and NaN through the user-configurable substitution patterns
if(any(isinf(mat(:))))
    txt=regexprep(txt,'([-+]*)Inf',jsonopt('Inf','"$1_Inf_"',varargin{:}));
end
if(any(isnan(mat(:))))
    txt=regexprep(txt,'NaN',jsonopt('NaN','"_NaN_"',varargin{:}));
end
%%-------------------------------------------------------------------------
function newname=checkname(name,varargin)
% Reverse the "x0x[HEX]_" / "_0x[HEX]_" escaping applied by valid_field,
% restoring the original key text when opt.UnpackHex is enabled.
isunpack=jsonopt('UnpackHex',1,varargin{:});
newname=name;
% fast path: nothing to unpack
if(isempty(regexp(name,'0x([0-9a-fA-F]+)_','once')))
    return
end
if(isunpack)
    isoct=jsonopt('IsOctave',0,varargin{:});
    if(~isoct)
        newname=regexprep(name,'(^x|_){1}0x([0-9a-fA-F]+)_','${native2unicode(hex2dec($2))}');
    else
        % Octave lacks dynamic-expression regexprep: rebuild manually from
        % the match start/end positions
        pos=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','start');
        pend=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','end');
        if(isempty(pos)) return; end
        str0=name;
        pos0=[0 pend(:)' length(name)];
        newname='';
        for i=1:length(pos)
            newname=[newname str0(pos0(i)+1:pos(i)-1) char(hex2dec(str0(pos(i)+3:pend(i)-1)))];
        end
        if(pos(end)~=length(name))
            newname=[newname str0(pos0(end-1)+1:pos0(end))];
        end
    end
end
%%-------------------------------------------------------------------------
function val=S_(str)
% Serialize a char vector as a UBJSON string: a single character uses the
% compact 'C' marker; anything longer uses 'S' + encoded length + payload.
n=numel(str);
if(n==1)
    val=['C' str];
    return;
end
val=['S' I_(int32(n)) str];
%%-------------------------------------------------------------------------
function val=I_(num)
% Encode a scalar integer with the smallest suitable UBJSON marker:
% 'U' uint8, then 'i' int8, 'I' int16, 'l' int32, 'L' int64, with the
% payload stored big-endian (swapbytes) per the UBJSON spec.
if(~isinteger(num))
    error('input is not an integer');
end
% bug fix: 255 fits in a uint8; the original used num<255, wastefully
% promoting 255 to an int16
if(num>=0 && num<256)
    val=['U' data2byte(swapbytes(cast(num,'uint8')),'uint8')];
    return;
end
key='iIlL';
cid={'int8','int16','int32','int64'};
for i=1:4
    if((num>0 && num<2^(i*8-1)) || (num<0 && num>=-2^(i*8-1)))
        val=[key(i) data2byte(swapbytes(cast(num,cid{i})),'uint8')];
        return;
    end
end
error('unsupported integer');
%%-------------------------------------------------------------------------
function val=D_(num)
% Encode a scalar float: 'd' marker for single precision, 'D' for double,
% followed by the raw bytes of the value.
if(~isfloat(num))
    error('input is not a float');
end
if(isa(num,'single'))
    val=['d' data2byte(num,'uint8')];
else
    val=['D' data2byte(num,'uint8')];
end
%%-------------------------------------------------------------------------
function data=I_a(num,type,dim,format)
% Encode an integer array. In 'opt' form, emit a UBJSON optimized
% container header '[$<type>#<count/dims>' followed by the packed bytes
% (no closing ']', per the optimized-container spec); otherwise emit a
% plain array with a type marker before each element.
id=find(ismember('iUIlL',type));
if(id==0)
    error('unsupported integer array');
end
% based on UBJSON specs, all integer types are stored in big endian format
if(id==1)
    data=data2byte(swapbytes(int8(num)),'uint8');
    blen=1;
elseif(id==2)
    data=data2byte(swapbytes(uint8(num)),'uint8');
    blen=1;
elseif(id==3)
    data=data2byte(swapbytes(int16(num)),'uint8');
    blen=2;
elseif(id==4)
    data=data2byte(swapbytes(int32(num)),'uint8');
    blen=4;
elseif(id==5)
    data=data2byte(swapbytes(int64(num)),'uint8');
    blen=8;
end
% non-row-vector shapes always use the optimized form with a dims header
if(nargin>=3 && length(dim)>=2 && prod(dim)~=dim(2))
    format='opt';
end
if((nargin<4 || strcmp(format,'opt')) && numel(num)>1)
    if(nargin>=3 && (length(dim)==1 || (length(dim)>=2 && prod(dim)~=dim(2))))
        cid=I_(uint32(max(dim)));
        data=['$' type '#' I_a(dim,cid(1)) data(:)'];
    else
        data=['$' type '#' I_(int32(numel(data)/blen)) data(:)'];
    end
    data=['[' data(:)'];
else
    % plain form: interleave the type marker before each element's bytes
    data=reshape(data,blen,numel(data)/blen);
    data(2:blen+1,:)=data;
    data(1,:)=type;
    data=data(:)';
    data=['[' data(:)' ']'];
end
%%-------------------------------------------------------------------------
function data=D_a(num,type,dim,format)
% Encode a float array ('d' single / 'D' double), mirroring I_a: an
% optimized container '[$<type>#<count/dims>' + packed bytes, or a plain
% array with a marker before each element.
id=find(ismember('dD',type));
if(id==0)
    error('unsupported float array');
end
if(id==1)
    data=data2byte(single(num),'uint8');
elseif(id==2)
    data=data2byte(double(num),'uint8');
end
% non-row-vector shapes always use the optimized form with a dims header
if(nargin>=3 && length(dim)>=2 && prod(dim)~=dim(2))
    format='opt';
end
if((nargin<4 || strcmp(format,'opt')) && numel(num)>1)
    if(nargin>=3 && (length(dim)==1 || (length(dim)>=2 && prod(dim)~=dim(2))))
        cid=I_(uint32(max(dim)));
        data=['$' type '#' I_a(dim,cid(1)) data(:)'];
    else
        data=['$' type '#' I_(int32(numel(data)/(id*4))) data(:)'];
    end
    data=['[' data];
else
    % plain form: element width is id*4 bytes (4 for single, 8 for double)
    data=reshape(data,(id*4),length(data)/(id*4));
    data(2:(id*4+1),:)=data;
    data(1,:)=type;
    data=data(:)';
    data=['[' data(:)' ']'];
end
%%-------------------------------------------------------------------------
function bytes=data2byte(varargin)
% Reinterpret the input value's raw storage as the requested type
% (forwarded to typecast) and return it as a row vector.
bytes=reshape(typecast(varargin{:}),1,[]);
|
github
|
zzlyw/machine-learning-exercises-master
|
submit.m
|
.m
|
machine-learning-exercises-master/machine-learning-ex1/ex1/submit.m
| 1,876 |
utf_8
|
8d1c467b830a89c187c05b121cb8fbfd
|
function submit()
% Entry point for the Coursera grader: declares the assignment metadata
% and per-part source files, then hands off to submitWithConfiguration.
addpath('./lib');
conf.assignmentSlug = 'linear-regression';
conf.itemName = 'Linear Regression with Multiple Variables';
% each entry: { partId, { source files }, display name }
conf.partArrays = { ...
{ ...
'1', ...
{ 'warmUpExercise.m' }, ...
'Warm-up Exercise', ...
}, ...
{ ...
'2', ...
{ 'computeCost.m' }, ...
'Computing Cost (for One Variable)', ...
}, ...
{ ...
'3', ...
{ 'gradientDescent.m' }, ...
'Gradient Descent (for One Variable)', ...
}, ...
{ ...
'4', ...
{ 'featureNormalize.m' }, ...
'Feature Normalization', ...
}, ...
{ ...
'5', ...
{ 'computeCostMulti.m' }, ...
'Computing Cost (for Multiple Variables)', ...
}, ...
{ ...
'6', ...
{ 'gradientDescentMulti.m' }, ...
'Gradient Descent (for Multiple Variables)', ...
}, ...
{ ...
'7', ...
{ 'normalEqn.m' }, ...
'Normal Equations', ...
}, ...
};
% output callback evaluates a part's solution on the random test cases
conf.output = @output;
submitWithConfiguration(conf);
end
function out = output(partId)
% Evaluate the student's solution for one assignment part on fixed
% pseudo-random test cases and format the result as a space-separated
% string of %0.5f numbers for the grader.
% Random Test Cases
X1 = [ones(20,1) (exp(1) + exp(2) * (0.1:0.1:2))'];
Y1 = X1(:,2) + sin(X1(:,1)) + cos(X1(:,2));
X2 = [X1 X1(:,2).^0.5 X1(:,2).^0.25];
Y2 = Y1.^0.5 + Y1;
% NOTE(review): '==' on char arrays only works here because every partId
% is a single character ('1'..'7'); strcmp would be safer for longer ids
if partId == '1'
    out = sprintf('%0.5f ', warmUpExercise());
elseif partId == '2'
    out = sprintf('%0.5f ', computeCost(X1, Y1, [0.5 -0.5]'));
elseif partId == '3'
    out = sprintf('%0.5f ', gradientDescent(X1, Y1, [0.5 -0.5]', 0.01, 10));
elseif partId == '4'
    out = sprintf('%0.5f ', featureNormalize(X2(:,2:4)));
elseif partId == '5'
    out = sprintf('%0.5f ', computeCostMulti(X2, Y2, [0.1 0.2 0.3 0.4]'));
elseif partId == '6'
    out = sprintf('%0.5f ', gradientDescentMulti(X2, Y2, [-0.1 -0.2 -0.3 -0.4]', 0.01, 10));
elseif partId == '7'
    out = sprintf('%0.5f ', normalEqn(X2, Y2));
end
end
|
github
|
zzlyw/machine-learning-exercises-master
|
submitWithConfiguration.m
|
.m
|
machine-learning-exercises-master/machine-learning-ex1/ex1/lib/submitWithConfiguration.m
| 5,562 |
utf_8
|
4ac719ea6570ac228ea6c7a9c919e3f5
|
function submitWithConfiguration(conf)
% Drive a grader submission: prompt for credentials (cached in token.mat),
% post all parts to the server, and print per-part feedback or errors.
addpath('./lib/jsonlab');
parts = parts(conf);
fprintf('== Submitting solutions | %s...\n', conf.itemName);
tokenFile = 'token.mat';
% reuse cached email/token when available, otherwise prompt fresh
if exist(tokenFile, 'file')
    load(tokenFile);
    [email token] = promptToken(email, token, tokenFile);
else
    [email token] = promptToken('', '', tokenFile);
end
if isempty(token)
    fprintf('!! Submission Cancelled\n');
    return
end
try
    response = submitParts(conf, email, token, parts);
catch
    % report where the failure happened, then let the user retry
    e = lasterror();
    fprintf('\n!! Submission failed: %s\n', e.message);
    fprintf('\n\nFunction: %s\nFileName: %s\nLineNumber: %d\n', ...
    e.stack(1,1).name, e.stack(1,1).file, e.stack(1,1).line);
    fprintf('\nPlease correct your code and resubmit.\n');
    return
end
if isfield(response, 'errorMessage')
    fprintf('!! Submission failed: %s\n', response.errorMessage);
elseif isfield(response, 'errorCode')
    fprintf('!! Submission failed: %s\n', response.message);
else
    % success: show scores and cache the working credentials
    showFeedback(parts, response);
    save(tokenFile, 'email', 'token');
end
end
function [email token] = promptToken(email, existingToken, tokenFile)
% Offer to reuse the cached email/token pair; on refusal, delete the cache
% file and prompt the user for fresh credentials.
if (~isempty(email) && ~isempty(existingToken))
    prompt = sprintf( ...
    'Use token from last successful submission (%s)? (Y/n): ', ...
    email);
    reenter = input(prompt, 's');
    % empty answer (plain Enter) counts as yes
    if (isempty(reenter) || reenter(1) == 'Y' || reenter(1) == 'y')
        token = existingToken;
        return;
    else
        delete(tokenFile);
    end
end
email = input('Login (email address): ', 's');
token = input('Token: ', 's');
end
function isValid = isValidPartOptionIndex(partOptions, i)
% True when i is a non-empty, in-range 1-based index into partOptions.
isValid = ~isempty(i) && i >= 1 && i <= numel(partOptions);
end
function response = submitParts(conf, email, token, parts)
% Build the JSON request body, POST it to the grader, and return the
% decoded JSON response as a struct.
body = makePostBody(conf, email, token, parts);
submissionUrl = submissionUrl();
responseBody = getResponse(submissionUrl, body);
jsonResponse = validateResponse(responseBody);
response = loadjson(jsonResponse);
end
function body = makePostBody(conf, email, token, parts)
% Assemble the submission payload and serialize it as compact JSON.
bodyStruct.assignmentSlug = conf.assignmentSlug;
bodyStruct.submitterEmail = email;
bodyStruct.secret = token;
bodyStruct.parts = makePartsStruct(conf, parts);
opt.Compact = 1;
body = savejson('', bodyStruct, opt);
end
function partsStruct = makePartsStruct(conf, parts)
% Evaluate each part via conf.output and collect the results in a struct
% keyed by (sanitized) part id.
for part = parts
    partId = part{:}.id;
    fieldName = makeValidFieldName(partId);
    outputStruct.output = conf.output(partId);
    partsStruct.(fieldName) = outputStruct;
end
end
function [parts] = parts(conf)
% Flatten conf.partArrays ({id, files, name} triples) into a cell array
% of structs with fields id, sourceFiles, and name.
parts = {};
for partArray = conf.partArrays
    part.id = partArray{:}{1};
    part.sourceFiles = partArray{:}{2};
    part.name = partArray{:}{3};
    parts{end + 1} = part;
end
end
function showFeedback(parts, response)
% Print a per-part score/feedback table followed by the total score,
% using fields of the decoded grader response.
fprintf('== \n');
fprintf('== %43s | %9s | %-s\n', 'Part Name', 'Score', 'Feedback');
fprintf('== %43s | %9s | %-s\n', '---------', '-----', '--------');
for part = parts
    score = '';
    partFeedback = '';
    partFeedback = response.partFeedbacks.(makeValidFieldName(part{:}.id));
    partEvaluation = response.partEvaluations.(makeValidFieldName(part{:}.id));
    score = sprintf('%d / %3d', partEvaluation.score, partEvaluation.maxScore);
    fprintf('== %43s | %9s | %-s\n', part{:}.name, score, partFeedback);
end
evaluation = response.evaluation;
totalScore = sprintf('%d / %d', evaluation.score, evaluation.maxScore);
fprintf('== --------------------------------\n');
fprintf('== %43s | %9s | %-s\n', '', totalScore, '');
fprintf('== \n');
end
% use urlread or curl to send submit results to the grader and get a response
% use urlread or curl to send submit results to the grader and get a response
function response = getResponse(url, body)
% POST the JSON body to the grader URL, first via urlread and, when that
% fails, via an external curl call (with certificate checking disabled).
% try using urlread() and a secure connection
params = {'jsonBody', body};
[response, success] = urlread(url, 'post', params);
if (success == 0)
    % urlread didn't work, try curl & the peer certificate patch
    if ispc
        % testing note: use 'jsonBody =' for a test case
        json_command = sprintf('echo jsonBody=%s | curl -k -X POST -d @- %s', body, url);
    else
        % it's linux/OS X, so use the other form
        json_command = sprintf('echo ''jsonBody=%s'' | curl -k -X POST -d @- %s', body, url);
    end
    % get the response body for the peer certificate patch method
    [code, response] = system(json_command);
    % test the success code
    if (code ~= 0)
        fprintf('[error] submission with curl() was not successful\n');
    end
end
end
% validate the grader's response
function response = validateResponse(resp)
% test if the response is json or an HTML page
isJson = length(resp) > 0 && resp(1) == '{';
isHtml = findstr(lower(resp), '<html');
if (isJson)
response = resp;
elseif (isHtml)
% the response is html, so it's probably an error message
printHTMLContents(resp);
error('Grader response is an HTML message');
else
error('Grader sent no response');
end
end
% parse a HTML response and print its contents
function printHTMLContents(response)
% Strip tags and collapse runs of spaces/tabs so an HTML error page is
% readable on the console.
strippedResponse = regexprep(response, '<[^>]+>', ' ');
strippedResponse = regexprep(strippedResponse, '[\t ]+', ' ');
% bug fix: pass the text as data, not as a format string -- otherwise any
% '%' or '\' in the page would be misinterpreted by fprintf
fprintf('%s', strippedResponse);
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Service configuration
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function submissionUrl = submissionUrl()
% Grader endpoint for on-demand programming submissions.
submissionUrl = 'https://www-origin.coursera.org/api/onDemandProgrammingImmediateFormSubmissions.v1';
end
|
github
|
zzlyw/machine-learning-exercises-master
|
savejson.m
|
.m
|
machine-learning-exercises-master/machine-learning-ex1/ex1/lib/jsonlab/savejson.m
| 17,462 |
utf_8
|
861b534fc35ffe982b53ca3ca83143bf
|
function json=savejson(rootname,obj,varargin)
%
% json=savejson(rootname,obj,filename)
% or
% json=savejson(rootname,obj,opt)
% json=savejson(rootname,obj,'param1',value1,'param2',value2,...)
%
% convert a MATLAB object (cell, struct or array) into a JSON (JavaScript
% Object Notation) string
%
% author: Qianqian Fang (fangq<at> nmr.mgh.harvard.edu)
% created on 2011/09/09
%
% input:
% rootname: the name of the root-object, when set to '', the root name
% is ignored, however, when opt.ForceRootName is set to 1 (see below),
% the MATLAB variable name will be used as the root name.
% obj: a MATLAB object (array, cell, cell array, struct, struct array).
% filename: a string for the file name to save the output JSON data.
% opt: a struct for additional options, ignore to use default values.
% opt can have the following fields (first in [.|.] is the default)
%
% opt.FileName [''|string]: a file name to save the output JSON data
% opt.FloatFormat ['%.10g'|string]: format to show each numeric element
% of a 1D/2D array;
% opt.ArrayIndent [1|0]: if 1, output explicit data array with
% precedent indentation; if 0, no indentation
% opt.ArrayToStruct[0|1]: when set to 0, savejson outputs 1D/2D
% array in JSON array format; if sets to 1, an
% array will be shown as a struct with fields
% "_ArrayType_", "_ArraySize_" and "_ArrayData_"; for
% sparse arrays, the non-zero elements will be
% saved to _ArrayData_ field in triplet-format i.e.
% (ix,iy,val) and "_ArrayIsSparse_" will be added
% with a value of 1; for a complex array, the
% _ArrayData_ array will include two columns
% (4 for sparse) to record the real and imaginary
% parts, and also "_ArrayIsComplex_":1 is added.
% opt.ParseLogical [0|1]: if this is set to 1, logical array elem
% will use true/false rather than 1/0.
% opt.NoRowBracket [1|0]: if this is set to 1, arrays with a single
% numerical element will be shown without a square
% bracket, unless it is the root object; if 0, square
% brackets are forced for any numerical arrays.
% opt.ForceRootName [0|1]: when set to 1 and rootname is empty, savejson
% will use the name of the passed obj variable as the
% root object name; if obj is an expression and
% does not have a name, 'root' will be used; if this
% is set to 0 and rootname is empty, the root level
% will be merged down to the lower level.
% opt.Inf ['"$1_Inf_"'|string]: a customized regular expression pattern
% to represent +/-Inf. The matched pattern is '([-+]*)Inf'
% and $1 represents the sign. For those who want to use
% 1e999 to represent Inf, they can set opt.Inf to '$11e999'
% opt.NaN ['"_NaN_"'|string]: a customized regular expression pattern
% to represent NaN
% opt.JSONP [''|string]: to generate a JSONP output (JSON with padding),
% for example, if opt.JSONP='foo', the JSON data is
% wrapped inside a function call as 'foo(...);'
% opt.UnpackHex [1|0]: conver the 0x[hex code] output by loadjson
% back to the string form
% opt.SaveBinary [0|1]: 1 - save the JSON file in binary mode; 0 - text mode.
% opt.Compact [0|1]: 1- out compact JSON format (remove all newlines and tabs)
%
% opt can be replaced by a list of ('param',value) pairs. The param
% string is equivallent to a field in opt and is case sensitive.
% output:
% json: a string in the JSON format (see http://json.org)
%
% examples:
% jsonmesh=struct('MeshNode',[0 0 0;1 0 0;0 1 0;1 1 0;0 0 1;1 0 1;0 1 1;1 1 1],...
% 'MeshTetra',[1 2 4 8;1 3 4 8;1 2 6 8;1 5 6 8;1 5 7 8;1 3 7 8],...
% 'MeshTri',[1 2 4;1 2 6;1 3 4;1 3 7;1 5 6;1 5 7;...
% 2 8 4;2 8 6;3 8 4;3 8 7;5 8 6;5 8 7],...
% 'MeshCreator','FangQ','MeshTitle','T6 Cube',...
% 'SpecialData',[nan, inf, -inf]);
% savejson('jmesh',jsonmesh)
% savejson('',jsonmesh,'ArrayIndent',0,'FloatFormat','\t%.5g')
%
% license:
% BSD, see LICENSE_BSD.txt files for details
%
% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)
%
% single-argument call: savejson(obj) -- derive the root name from the
% caller's variable name, falling back to 'root'
if(nargin==1)
    varname=inputname(1);
    obj=rootname;
    if(isempty(varname))
        varname='root';
    end
    rootname=varname;
else
    varname=inputname(2);
end
% a single string third argument is shorthand for the output file name
if(length(varargin)==1 && ischar(varargin{1}))
    opt=struct('FileName',varargin{1});
else
    opt=varargin2struct(varargin{:});
end
opt.IsOctave=exist('OCTAVE_VERSION','builtin');
rootisarray=0;
rootlevel=1;
forceroot=jsonopt('ForceRootName',0,opt);
if((isnumeric(obj) || islogical(obj) || ischar(obj) || isstruct(obj) || iscell(obj)) && isempty(rootname) && forceroot==0)
    rootisarray=1;
    rootlevel=0;
else
    if(isempty(rootname))
        rootname=varname;
    end
end
if((isstruct(obj) || iscell(obj))&& isempty(rootname) && forceroot)
    rootname='root';
end
whitespaces=struct('tab',sprintf('\t'),'newline',sprintf('\n'),'sep',sprintf(',\n'));
if(jsonopt('Compact',0,opt)==1)
    whitespaces=struct('tab','','newline','','sep',',');
end
if(~isfield(opt,'whitespaces_'))
    opt.whitespaces_=whitespaces;
end
nl=whitespaces.newline;
json=obj2json(rootname,obj,rootlevel,opt);
if(rootisarray)
    json=sprintf('%s%s',json,nl);
else
    json=sprintf('{%s%s%s}\n',nl,json,nl);
end
jsonp=jsonopt('JSONP','',opt);
if(~isempty(jsonp))
    json=sprintf('%s(%s);%s',jsonp,json,nl);
end
% save to a file if FileName is set, suggested by Patrick Rapin
if(~isempty(jsonopt('FileName','',opt)))
    if(jsonopt('SaveBinary',0,opt)==1)
        fid = fopen(opt.FileName, 'wb');
        % robustness fix: fopen returns -1 on failure; report it instead
        % of letting fwrite fail with a cryptic error
        if(fid==-1)
            error('savejson: could not open file %s for writing',opt.FileName);
        end
        fwrite(fid,json);
    else
        fid = fopen(opt.FileName, 'wt');
        if(fid==-1)
            error('savejson: could not open file %s for writing',opt.FileName);
        end
        fwrite(fid,json,'char');
    end
    fclose(fid);
end
%%-------------------------------------------------------------------------
function txt=obj2json(name,item,level,varargin)
% Dispatch a MATLAB value to the type-specific JSON serializer
% (cell, struct, char, or numeric/logical array).
if(iscell(item))
    txt=cell2json(name,item,level,varargin{:});
elseif(isstruct(item))
    txt=struct2json(name,item,level,varargin{:});
elseif(ischar(item))
    txt=str2json(name,item,level,varargin{:});
else
    txt=mat2json(name,item,level,varargin{:});
end
%%-------------------------------------------------------------------------
function txt=cell2json(name,item,level,varargin)
% Serialize a (possibly named) cell array as a JSON array, recursing into
% each element via obj2json; indentation/newlines come from opt.whitespaces_.
txt='';
if(~iscell(item))
    error('input is not a cell');
end
dim=size(item);
if(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now
    item=reshape(item,dim(1),numel(item)/dim(1));
    dim=size(item);
end
len=numel(item);
ws=jsonopt('whitespaces_',struct('tab',sprintf('\t'),'newline',sprintf('\n'),'sep',sprintf(',\n')),varargin{:});
padding0=repmat(ws.tab,1,level);
padding2=repmat(ws.tab,1,level+1);
nl=ws.newline;
if(len>1)
    if(~isempty(name))
        txt=sprintf('%s"%s": [%s',padding0, checkname(name,varargin{:}),nl); name='';
    else
        txt=sprintf('%s[%s',padding0,nl);
    end
elseif(len==0)
    % an empty cell maps to an empty JSON array
    if(~isempty(name))
        txt=sprintf('%s"%s": []',padding0, checkname(name,varargin{:})); name='';
    else
        txt=sprintf('%s[]',padding0);
    end
end
for j=1:dim(2)
    if(dim(1)>1) txt=sprintf('%s%s[%s',txt,padding2,nl); end % nested array per column
    for i=1:dim(1)
        txt=sprintf('%s%s',txt,obj2json(name,item{i,j},level+(dim(1)>1)+1,varargin{:}));
        if(i<dim(1)) txt=sprintf('%s%s',txt,sprintf(',%s',nl)); end
    end
    if(dim(1)>1) txt=sprintf('%s%s%s]',txt,nl,padding2); end
    if(j<dim(2)) txt=sprintf('%s%s',txt,sprintf(',%s',nl)); end
    %if(j==dim(2)) txt=sprintf('%s%s',txt,sprintf(',%s',nl)); end
end
if(len>1) txt=sprintf('%s%s%s]',txt,nl,padding0); end
%%-------------------------------------------------------------------------
function txt=struct2json(name,item,level,varargin)
% Serialize a (possibly named) struct or struct array as a JSON object or
% array of objects; each field value is converted via obj2json.
txt='';
if(~isstruct(item))
    error('input is not a struct');
end
dim=size(item);
if(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now
    item=reshape(item,dim(1),numel(item)/dim(1));
    dim=size(item);
end
len=numel(item);
ws=struct('tab',sprintf('\t'),'newline',sprintf('\n'));
ws=jsonopt('whitespaces_',ws,varargin{:});
padding0=repmat(ws.tab,1,level);
padding2=repmat(ws.tab,1,level+1);
padding1=repmat(ws.tab,1,level+(dim(1)>1)+(len>1));
nl=ws.newline;
if(~isempty(name))
    if(len>1) txt=sprintf('%s"%s": [%s',padding0,checkname(name,varargin{:}),nl); end
else
    if(len>1) txt=sprintf('%s[%s',padding0,nl); end
end
for j=1:dim(2)
    if(dim(1)>1) txt=sprintf('%s%s[%s',txt,padding2,nl); end % nested array per column
    for i=1:dim(1)
        names = fieldnames(item(i,j));
        % the name prefix is emitted only for a scalar struct; arrays of
        % structs carry the name on the enclosing '[' above
        if(~isempty(name) && len==1)
            txt=sprintf('%s%s"%s": {%s',txt,padding1, checkname(name,varargin{:}),nl);
        else
            txt=sprintf('%s%s{%s',txt,padding1,nl);
        end
        if(~isempty(names))
            for e=1:length(names)
                txt=sprintf('%s%s',txt,obj2json(names{e},getfield(item(i,j),...
                names{e}),level+(dim(1)>1)+1+(len>1),varargin{:}));
                if(e<length(names)) txt=sprintf('%s%s',txt,','); end
                txt=sprintf('%s%s',txt,nl);
            end
        end
        txt=sprintf('%s%s}',txt,padding1);
        if(i<dim(1)) txt=sprintf('%s%s',txt,sprintf(',%s',nl)); end
    end
    if(dim(1)>1) txt=sprintf('%s%s%s]',txt,nl,padding2); end
    if(j<dim(2)) txt=sprintf('%s%s',txt,sprintf(',%s',nl)); end
end
if(len>1) txt=sprintf('%s%s%s]',txt,nl,padding0); end
%%-------------------------------------------------------------------------
function txt=str2json(name,item,level,varargin)
% Serialize a char array as JSON string(s); a multi-row char matrix
% becomes a JSON array with one quoted string per row. Backslashes and
% quotes are escaped before emission.
txt='';
if(~ischar(item))
    error('input is not a string');
end
% guarantee at least a 1x0 shape so size(item,1) is well defined
item=reshape(item, max(size(item),[1 0]));
len=size(item,1);
ws=struct('tab',sprintf('\t'),'newline',sprintf('\n'),'sep',sprintf(',\n'));
ws=jsonopt('whitespaces_',ws,varargin{:});
padding1=repmat(ws.tab,1,level);
padding0=repmat(ws.tab,1,level+1);
nl=ws.newline;
sep=ws.sep;
if(~isempty(name))
    if(len>1) txt=sprintf('%s"%s": [%s',padding1,checkname(name,varargin{:}),nl); end
else
    if(len>1) txt=sprintf('%s[%s',padding1,nl); end
end
isoct=jsonopt('IsOctave',0,varargin{:});
for e=1:len
    if(isoct)
        % NOTE(review): Octave regexprep interprets replacement escapes
        % differently from MATLAB, hence the shorter patterns here --
        % verify against the Octave version in use
        val=regexprep(item(e,:),'\\','\\');
        val=regexprep(val,'"','\"');
        val=regexprep(val,'^"','\"');
    else
        val=regexprep(item(e,:),'\\','\\\\');
        val=regexprep(val,'"','\\"');
        val=regexprep(val,'^"','\\"');
    end
    val=escapejsonstring(val);
    if(len==1)
        obj=['"' checkname(name,varargin{:}) '": ' '"',val,'"'];
        if(isempty(name)) obj=['"',val,'"']; end
        txt=sprintf('%s%s%s%s',txt,padding1,obj);
    else
        txt=sprintf('%s%s%s%s',txt,padding0,['"',val,'"']);
    end
    if(e==len) sep=''; end % no trailing separator after the last row
    txt=sprintf('%s%s',txt,sep);
end
if(len>1) txt=sprintf('%s%s%s%s',txt,nl,padding1,']'); end
%%-------------------------------------------------------------------------
function txt=mat2json(name,item,level,varargin)
% Serialize a numeric or logical array `item` to JSON text.
% Plain real dense 1D/2D arrays are emitted as JSON arrays; anything
% else (ND, sparse, complex, empty, or when the ArrayToStruct option
% is set) is emitted as an annotated object carrying _ArrayType_,
% _ArraySize_ and _ArrayData_ fields (plus _ArrayIsSparse_ /
% _ArrayIsComplex_ flags) so it can be reconstructed losslessly.
if(~isnumeric(item) && ~islogical(item))
        error('input is not an array');
end
ws=struct('tab',sprintf('\t'),'newline',sprintf('\n'),'sep',sprintf(',\n'));
ws=jsonopt('whitespaces_',ws,varargin{:});
padding1=repmat(ws.tab,1,level);
padding0=repmat(ws.tab,1,level+1);
nl=ws.newline;
sep=ws.sep;

if(length(size(item))>2 || issparse(item) || ~isreal(item) || ...
   isempty(item) ||jsonopt('ArrayToStruct',0,varargin{:}))
    % annotated-object form: open the object, record type and size
    if(isempty(name))
        txt=sprintf('%s{%s%s"_ArrayType_": "%s",%s%s"_ArraySize_": %s,%s',...
              padding1,nl,padding0,class(item),nl,padding0,regexprep(mat2str(size(item)),'\s+',','),nl);
    else
        txt=sprintf('%s"%s": {%s%s"_ArrayType_": "%s",%s%s"_ArraySize_": %s,%s',...
              padding1,checkname(name,varargin{:}),nl,padding0,class(item),nl,padding0,regexprep(mat2str(size(item)),'\s+',','),nl);
    end
else
    % plain JSON-array form
    if(numel(item)==1 && jsonopt('NoRowBracket',1,varargin{:})==1 && level>0)
        % scalar: strip the surrounding brackets when allowed
        numtxt=regexprep(regexprep(matdata2json(item,level+1,varargin{:}),'^\[',''),']','');
    else
        numtxt=matdata2json(item,level+1,varargin{:});
    end
    if(isempty(name))
        txt=sprintf('%s%s',padding1,numtxt);
    else
        % (the original code branched on NoRowBracket here, but both
        % branches were byte-identical; collapsed into one statement)
        txt=sprintf('%s"%s": %s',padding1,checkname(name,varargin{:}),numtxt);
    end
    return;
end
dataformat='%s%s%s%s%s';
if(issparse(item))
    [ix,iy]=find(item);
    data=full(item(find(item)));
    if(~isreal(item))
       % complex sparse: store real/imag as two extra columns
       data=[real(data(:)),imag(data(:))];
       if(size(item,1)==1)
           % Kludge to have data's 'transposedness' match item's.
           % (Necessary for complex row vector handling below.)
           data=data';
       end
       txt=sprintf(dataformat,txt,padding0,'"_ArrayIsComplex_": ','1', sep);
    end
    txt=sprintf(dataformat,txt,padding0,'"_ArrayIsSparse_": ','1', sep);
    if(size(item,1)==1)
        % Row vector, store only column indices.
        txt=sprintf(dataformat,txt,padding0,'"_ArrayData_": ',...
           matdata2json([iy(:),data'],level+2,varargin{:}), nl);
    elseif(size(item,2)==1)
        % Column vector, store only row indices.
        txt=sprintf(dataformat,txt,padding0,'"_ArrayData_": ',...
           matdata2json([ix,data],level+2,varargin{:}), nl);
    else
        % General case, store row and column indices.
        txt=sprintf(dataformat,txt,padding0,'"_ArrayData_": ',...
           matdata2json([ix,iy,data],level+2,varargin{:}), nl);
    end
else
    if(isreal(item))
        txt=sprintf(dataformat,txt,padding0,'"_ArrayData_": ',...
            matdata2json(item(:)',level+2,varargin{:}), nl);
    else
        % complex dense array: store [real imag] column pairs
        txt=sprintf(dataformat,txt,padding0,'"_ArrayIsComplex_": ','1', sep);
        txt=sprintf(dataformat,txt,padding0,'"_ArrayData_": ',...
            matdata2json([real(item(:)) imag(item(:))],level+2,varargin{:}), nl);
    end
end
txt=sprintf('%s%s%s',txt,padding1,'}');
%%-------------------------------------------------------------------------
function txt=matdata2json(mat,level,varargin)
% Format the numeric matrix `mat` as bracketed JSON rows.  Row vectors
% are emitted without the outer [...] wrapper (the caller supplies it);
% Inf/NaN values are rewritten to their configured JSON encodings.
ws=struct('tab',sprintf('\t'),'newline',sprintf('\n'),'sep',sprintf(',\n'));
ws=jsonopt('whitespaces_',ws,varargin{:});
tab=ws.tab;
nl=ws.newline;
if(size(mat,1)==1)
    % single row: no outer bracket, one level less indentation
    pre='';
    post='';
    level=level-1;
else
    pre=sprintf('[%s',nl);
    post=sprintf('%s%s]',nl,repmat(tab,1,level-1));
end
if(isempty(mat))
    txt='null';
    return;
end
floatformat=jsonopt('FloatFormat','%.10g',varargin{:});
%if(numel(mat)>1)
    formatstr=['[' repmat([floatformat ','],1,size(mat,2)-1) [floatformat sprintf('],%s',nl)]];
%else
%    formatstr=[repmat([floatformat ','],1,size(mat,2)-1) [floatformat sprintf(',\n')]];
%end
if(nargin>=2 && size(mat,1)>1 && jsonopt('ArrayIndent',1,varargin{:})==1)
    formatstr=[repmat(tab,1,level) formatstr];
end
txt=sprintf(formatstr,mat');
% formatstr leaves a trailing ',' plus newline after the last row;
% remove those length(nl)+1 characters here
txt(end-length(nl):end)=[];
if(islogical(mat) && jsonopt('ParseLogical',0,varargin{:})==1)
   txt=regexprep(txt,'1','true');
   txt=regexprep(txt,'0','false');
end
%txt=regexprep(mat2str(mat),'\s+',',');
%txt=regexprep(txt,';',sprintf('],\n['));
% if(nargin>=2 && size(mat,1)>1)
%     txt=regexprep(txt,'\[',[repmat(sprintf('\t'),1,level) '[']);
% end
txt=[pre txt post];
% encode non-finite values using the configurable placeholders
if(any(isinf(mat(:))))
    txt=regexprep(txt,'([-+]*)Inf',jsonopt('Inf','"$1_Inf_"',varargin{:}));
end
if(any(isnan(mat(:))))
    txt=regexprep(txt,'NaN',jsonopt('NaN','"_NaN_"',varargin{:}));
end
%%-------------------------------------------------------------------------
function newname=checkname(name,varargin)
% Reverse the 'x0x[HEX]_' / '_0x[HEX]_' struct-field-name encoding
% (produced by valid_field in the loader) so the original key text is
% emitted, unless the UnpackHex option is disabled.
isunpack=jsonopt('UnpackHex',1,varargin{:});
newname=name;
if(isempty(regexp(name,'0x([0-9a-fA-F]+)_','once')))
    return
end
if(isunpack)
    isoct=jsonopt('IsOctave',0,varargin{:});
    if(~isoct)
        % MATLAB allows dynamic expressions in the replacement string
        newname=regexprep(name,'(^x|_){1}0x([0-9a-fA-F]+)_','${native2unicode(hex2dec($2))}');
    else
        % Octave: rebuild the name manually from the match positions;
        % pos(i)+3 skips the 'x0x'/'_0x' prefix, pend(i)-1 drops the
        % trailing '_'
        pos=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','start');
        pend=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','end');
        if(isempty(pos)) return; end
        str0=name;
        pos0=[0 pend(:)' length(name)];
        newname='';
        for i=1:length(pos)
            newname=[newname str0(pos0(i)+1:pos(i)-1) char(hex2dec(str0(pos(i)+3:pend(i)-1)))];
        end
        if(pos(end)~=length(name))
            newname=[newname str0(pos0(end-1)+1:pos0(end))];
        end
    end
end
%%-------------------------------------------------------------------------
function newstr=escapejsonstring(str)
% Escape control characters (\a \b \f \n \r \t \v) in `str` for JSON
% output.  Old Octave (<3.8) and MATLAB/new Octave take different
% branches because their regexprep replacement-escape handling differs.
newstr=str;
isoct=exist('OCTAVE_VERSION','builtin');
if(isoct)
   vv=sscanf(OCTAVE_VERSION,'%f');
   if(vv(1)>=3.8) isoct=0; end % Octave >= 3.8 behaves like MATLAB here
end
if(isoct)
  % NOTE(review): in this branch the pattern and replacement are the
  % same two-character sequence; presumably old Octave keeps the
  % replacement literal (turning a real control char into '\'+letter),
  % otherwise this loop is a no-op - TODO confirm against Octave <3.8
  escapechars={'\a','\f','\n','\r','\t','\v'};
  for i=1:length(escapechars);
    newstr=regexprep(newstr,escapechars{i},escapechars{i});
  end
else
  % MATLAB expands escapes in the replacement, so the backslash must
  % itself be doubled to survive into the output
  escapechars={'\a','\b','\f','\n','\r','\t','\v'};
  for i=1:length(escapechars);
    newstr=regexprep(newstr,escapechars{i},regexprep(escapechars{i},'\\','\\\\'));
  end
end
|
github
|
zzlyw/machine-learning-exercises-master
|
loadjson.m
|
.m
|
machine-learning-exercises-master/machine-learning-ex1/ex1/lib/jsonlab/loadjson.m
| 18,732 |
ibm852
|
ab98cf173af2d50bbe8da4d6db252a20
|
function data = loadjson(fname,varargin)
%
% data=loadjson(fname,opt)
%    or
% data=loadjson(fname,'param1',value1,'param2',value2,...)
%
% parse a JSON (JavaScript Object Notation) file or string
%
% authors:Qianqian Fang (fangq<at> nmr.mgh.harvard.edu)
% created on 2011/09/09, including previous works from
%
%         Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713
%            created on 2009/11/02
%         François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393
%            created on  2009/03/22
%         Joel Feenstra:
%         http://www.mathworks.com/matlabcentral/fileexchange/20565
%            created on 2008/07/03
%
% $Id: loadjson.m 460 2015-01-03 00:30:45Z fangq $
%
% input:
%      fname: input file name, if fname contains "{}" or "[]", fname
%             will be interpreted as a JSON string
%      opt: a struct to store parsing options, opt can be replaced by
%           a list of ('param',value) pairs - the param string is equivallent
%           to a field in opt. opt can have the following
%           fields (first in [.|.] is the default)
%
%           opt.SimplifyCell [0|1]: if set to 1, loadjson will call cell2mat
%                         for each element of the JSON data, and group
%                         arrays based on the cell2mat rules.
%           opt.FastArrayParser [1|0 or integer]: if set to 1, use a
%                         speed-optimized array parser when loading an
%                         array object. The fast array parser may
%                         collapse block arrays into a single large
%                         array similar to rules defined in cell2mat; 0 to
%                         use a legacy parser; if set to a larger-than-1
%                         value, this option will specify the minimum
%                         dimension to enable the fast array parser. For
%                         example, if the input is a 3D array, setting
%                         FastArrayParser to 1 will return a 3D array;
%                         setting to 2 will return a cell array of 2D
%                         arrays; setting to 3 will return to a 2D cell
%                         array of 1D vectors; setting to 4 will return a
%                         3D cell array.
%           opt.ShowProgress [0|1]: if set to 1, loadjson displays a progress bar.
%
% output:
%      dat: a cell array, where {...} blocks are converted into cell arrays,
%           and [...] are converted to arrays
%
% examples:
%      dat=loadjson('{"obj":{"string":"value","array":[1,2,3]}}')
%      dat=loadjson(['examples' filesep 'example1.json'])
%      dat=loadjson(['examples' filesep 'example1.json'],'SimplifyCell',1)
%
% license:
%     BSD, see LICENSE_BSD.txt files for details
%
% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)
%
global pos inStr len esc index_esc len_esc isoct arraytoken
% an argument containing JSON brackets is treated as an inline JSON
% string; otherwise it must name an existing file to read
if(regexp(fname,'[\{\}\]\[]','once'))
   string=fname;
elseif(exist(fname,'file'))
   fid = fopen(fname,'rb');
   string = fread(fid,inf,'uint8=>char')';
   fclose(fid);
else
   error('input file does not exist');
end
% initialize the shared parser state used by all sub-functions
pos = 1; len = length(string); inStr = string;
isoct=exist('OCTAVE_VERSION','builtin');
arraytoken=find(inStr=='[' | inStr==']' | inStr=='"');
% blank out '\\' pairs first so '\"' positions can be located reliably
jstr=regexprep(inStr,'\\\\',' ');
escquote=regexp(jstr,'\\"');
arraytoken=sort([arraytoken escquote]);
% String delimiters and escape chars identified to improve speed:
esc = find(inStr=='"' | inStr=='\' ); % comparable to: regexp(inStr, '["\\]');
index_esc = 1; len_esc = length(esc);
opt=varargin2struct(varargin{:});
if(jsonopt('ShowProgress',0,opt)==1)
    opt.progressbar_=waitbar(0,'loading ...');
end
% the top level may contain a sequence of objects/arrays
jsoncount=1;
while pos <= len
    switch(next_char)
        case '{'
            data{jsoncount} = parse_object(opt);
        case '['
            data{jsoncount} = parse_array(opt);
        otherwise
            error_pos('Outer level structure must be an object or an array');
    end
    jsoncount=jsoncount+1;
end % while
jsoncount=length(data);
% unwrap the cell when there was a single top-level value
if(jsoncount==1 && iscell(data))
    data=data{1};
end
if(~isempty(data))
    if(isstruct(data)) % data can be a struct array
        data=jstruct2array(data);
    elseif(iscell(data))
        data=jcell2array(data);
    end
end
if(isfield(opt,'progressbar_'))
    close(opt.progressbar_);
end
%%
function newdata=jcell2array(data)
% Walk a parser-produced cell array and normalize its contents:
% struct elements are expanded via jstruct2array, nested cells recurse.
newdata=data;
for idx=1:length(data)
    elem=data{idx};
    if(isstruct(elem))
        newdata{idx}=jstruct2array(elem);
    elseif(iscell(elem))
        newdata{idx}=jcell2array(elem);
    end
end
%%-------------------------------------------------------------------------
function newdata=jstruct2array(data)
% Post-process a struct (array) produced by the parser: recurse into
% struct-valued fields depth-first, then, if the struct carries the
% annotated-array markers (_ArrayType_/_ArrayData_ hex-encoded as
% x0x5F_...), rebuild the original numeric/sparse/complex array.
fn=fieldnames(data);
newdata=data;
len=length(data);
for i=1:length(fn) % depth-first
    for j=1:len
        if(isstruct(getfield(data(j),fn{i})))
            newdata(j)=setfield(newdata(j),fn{i},jstruct2array(getfield(data(j),fn{i})));
        end
    end
end
if(~isempty(strmatch('x0x5F_ArrayType_',fn)) && ~isempty(strmatch('x0x5F_ArrayData_',fn)))
  newdata=cell(len,1);
  for j=1:len
    % restore the recorded numeric class first
    ndata=cast(data(j).x0x5F_ArrayData_,data(j).x0x5F_ArrayType_);
    iscpx=0;
    if(~isempty(strmatch('x0x5F_ArrayIsComplex_',fn)))
        if(data(j).x0x5F_ArrayIsComplex_)
           iscpx=1;
        end
    end
    if(~isempty(strmatch('x0x5F_ArrayIsSparse_',fn)))
        if(data(j).x0x5F_ArrayIsSparse_)
            if(~isempty(strmatch('x0x5F_ArraySize_',fn)))
                dim=data(j).x0x5F_ArraySize_;
                % for vectors one index column was dropped on save, so
                % complex data occupies the last two columns
                if(iscpx && size(ndata,2)==4-any(dim==1))
                    ndata(:,end-1)=complex(ndata(:,end-1),ndata(:,end));
                end
                if isempty(ndata)
                    % All-zeros sparse
                    ndata=sparse(dim(1),prod(dim(2:end)))
                elseif dim(1)==1
                    % Sparse row vector
                    ndata=sparse(1,ndata(:,1),ndata(:,2),dim(1),prod(dim(2:end)));
                elseif dim(2)==1
                    % Sparse column vector
                    ndata=sparse(ndata(:,1),1,ndata(:,2),dim(1),prod(dim(2:end)));
                else
                    % Generic sparse array.
                    ndata=sparse(ndata(:,1),ndata(:,2),ndata(:,3),dim(1),prod(dim(2:end)));
                end
            else
                % no size recorded: let sparse() infer the extents
                if(iscpx && size(ndata,2)==4)
                    ndata(:,3)=complex(ndata(:,3),ndata(:,4));
                end
                ndata=sparse(ndata(:,1),ndata(:,2),ndata(:,3));
            end
        end
    elseif(~isempty(strmatch('x0x5F_ArraySize_',fn)))
        % dense array: pair up real/imag columns then restore the shape
        if(iscpx && size(ndata,2)==2)
             ndata=complex(ndata(:,1),ndata(:,2));
        end
        ndata=reshape(ndata(:),data(j).x0x5F_ArraySize_);
    end
    newdata{j}=ndata;
  end
  if(len==1)
      newdata=newdata{1};
  end
end
%%-------------------------------------------------------------------------
function object = parse_object(varargin)
% Parse a JSON object "{...}" starting at the global cursor and return
% it as a MATLAB struct (or [] for an empty object).  Keys are passed
% through valid_field so they become legal struct field names.
parse_char('{');
object = [];
if next_char ~= '}'
    while 1
        str = parseStr(varargin{:});
        if isempty(str)
            error_pos('Name of value at position %d cannot be empty');
        end
        parse_char(':');
        val = parse_value(varargin{:});
        % dynamic field assignment instead of eval(): equivalent result,
        % faster, and never executes text derived from the input stream
        object.(valid_field(str)) = val;
        if next_char == '}'
            break;
        end
        parse_char(',');
    end
end
parse_char('}');
%%-------------------------------------------------------------------------
function object = parse_array(varargin) % JSON array is written in row-major order
% Parse a JSON array "[...]" at the global cursor.  A fast path hands
% purely-numeric (sub)arrays to sscanf/eval; on any failure it falls
% back to element-by-element parsing via parse_value.
global pos inStr isoct
parse_char('[');
object = cell(0, 1);
dim2=[];
arraydepth=jsonopt('JSONLAB_ArrayDepth_',1,varargin{:});
pbar=jsonopt('progressbar_',-1,varargin{:});
if next_char ~= ']'
    if(jsonopt('FastArrayParser',1,varargin{:})>=1 && arraydepth>=jsonopt('FastArrayParser',1,varargin{:}))
        % fast path: grab the whole bracketed region as text
        [endpos, e1l, e1r, maxlevel]=matching_bracket(inStr,pos);
        arraystr=['[' inStr(pos:endpos)];
        % map the special Inf/NaN placeholders back to numeric literals
        arraystr=regexprep(arraystr,'"_NaN_"','NaN');
        arraystr=regexprep(arraystr,'"([-+]*)_Inf_"','$1Inf');
        arraystr(arraystr==sprintf('\n'))=[];
        arraystr(arraystr==sprintf('\r'))=[];
        %arraystr=regexprep(arraystr,'\s*,',','); % this is slow,sometimes needed
        if(~isempty(e1l) && ~isempty(e1r)) % the array is in 2D or higher D
            % use the first inner bracket pair to measure row width
            astr=inStr((e1l+1):(e1r-1));
            astr=regexprep(astr,'"_NaN_"','NaN');
            astr=regexprep(astr,'"([-+]*)_Inf_"','$1Inf');
            astr(astr==sprintf('\n'))=[];
            astr(astr==sprintf('\r'))=[];
            astr(astr==' ')='';
            if(isempty(find(astr=='[', 1))) % array is 2D
                dim2=length(sscanf(astr,'%f,',[1 inf]));
            end
        else % array is 1D
            astr=arraystr(2:end-1);
            astr(astr==' ')='';
            [obj, count, errmsg, nextidx]=sscanf(astr,'%f,',[1,inf]);
            % accept only if sscanf consumed (nearly) the whole text
            if(nextidx>=length(astr)-1)
                object=obj;
                pos=endpos;
                parse_char(']');
                return;
            end
        end
        if(~isempty(dim2))
            % 2D numeric block: parse flat, then reshape row-major
            astr=arraystr;
            astr(astr=='[')='';
            astr(astr==']')='';
            astr(astr==' ')='';
            [obj, count, errmsg, nextidx]=sscanf(astr,'%f,',inf);
            if(nextidx>=length(astr)-1)
                object=reshape(obj,dim2,numel(obj)/dim2)';
                pos=endpos;
                parse_char(']');
                if(pbar>0)
                    waitbar(pos/length(inStr),pbar,'loading ...');
                end
                return;
            end
        end
        arraystr=regexprep(arraystr,'\]\s*,','];');
    else
        arraystr='[';
    end
    try
       if(isoct && regexp(arraystr,'"','once'))
            error('Octave eval can produce empty cells for JSON-like input');
       end
       % last fast-path attempt: evaluate the bracketed text directly
       object=eval(arraystr);
       pos=endpos;
    catch
     % slow path: parse one value at a time, tracking nesting depth
     while 1
        newopt=varargin2struct(varargin{:},'JSONLAB_ArrayDepth_',arraydepth+1);
        val = parse_value(newopt);
        object{end+1} = val;
        if next_char == ']'
            break;
        end
        parse_char(',');
     end
    end
end
if(jsonopt('SimplifyCell',0,varargin{:})==1)
  try
    % try to collapse homogeneous cells into a plain array
    oldobj=object;
    object=cell2mat(object')';
    if(iscell(oldobj) && isstruct(object) && numel(object)>1 && jsonopt('SimplifyCellArray',1,varargin{:})==0)
        object=oldobj;
    elseif(size(object,1)>1 && ndims(object)==2)
        object=object';
    end
  catch
  end
end
parse_char(']');
if(pbar>0)
    waitbar(pos/length(inStr),pbar,'loading ...');
end
%%-------------------------------------------------------------------------
function parse_char(c)
% Consume the expected character c (skipping whitespace on both sides);
% raise a position-annotated error when the input differs.
global pos inStr len
skip_whitespace;
if(pos<=len && inStr(pos)==c)
    pos = pos + 1;
    skip_whitespace;
else
    error_pos(sprintf('Expected %c at position %%d', c));
end
%%-------------------------------------------------------------------------
function c = next_char
% Peek at the next non-whitespace character without consuming it;
% returns [] when the input has been exhausted.
global pos inStr len
skip_whitespace;
c=[];
if(pos<=len)
    c=inStr(pos);
end
%%-------------------------------------------------------------------------
function skip_whitespace
% Advance the global cursor until it rests on a non-whitespace
% character (or runs past the end of the input).
global pos inStr len
while(pos<=len)
    if(~isspace(inStr(pos)))
        break;
    end
    pos = pos + 1;
end
%%-------------------------------------------------------------------------
function str = parseStr(varargin)
% Parse a double-quoted JSON string at the global cursor, expanding
% escape sequences.  The precomputed `esc` index lets plain segments
% be copied in bulk.  The placeholders _Inf_/-_Inf_/_NaN_ are mapped
% back to their numeric values.
global pos inStr len esc index_esc len_esc
% len, ns = length(inStr), keyboard
if inStr(pos) ~= '"'
    error_pos('String starting with " expected at position %d');
else
    pos = pos + 1;
end
str = '';
while pos <= len
    % advance to the first quote/backslash at or after the cursor
    while index_esc <= len_esc && esc(index_esc) < pos
        index_esc = index_esc + 1;
    end
    if index_esc > len_esc
        % no special characters left: copy the remainder in one shot
        str = [str inStr(pos:len)];
        pos = len + 1;
        break;
    else
        % copy the plain run up to the special character
        str = [str inStr(pos:esc(index_esc)-1)];
        pos = esc(index_esc);
    end
    nstr = length(str); switch inStr(pos)
        case '"'
            % closing quote: translate non-finite placeholders and return
            pos = pos + 1;
            if(~isempty(str))
                if(strcmp(str,'_Inf_'))
                    str=Inf;
                elseif(strcmp(str,'-_Inf_'))
                    str=-Inf;
                elseif(strcmp(str,'_NaN_'))
                    str=NaN;
                end
            end
            return;
        case '\'
            if pos+1 > len
                error_pos('End of file reached right after escape character');
            end
            pos = pos + 1;
            switch inStr(pos)
                case {'"' '\' '/'}
                    str(nstr+1) = inStr(pos);
                    pos = pos + 1;
                case {'b' 'f' 'n' 'r' 't'}
                    % expand \b \f \n \r \t via sprintf
                    str(nstr+1) = sprintf(['\' inStr(pos)]);
                    pos = pos + 1;
                case 'u'
                    if pos+4 > len
                        error_pos('End of file reached in escaped unicode character');
                    end
                    % \uXXXX is kept verbatim (6 chars), not decoded
                    str(nstr+(1:6)) = inStr(pos-1:pos+4);
                    pos = pos + 5;
            end
        otherwise % should never happen
            % NOTE(review): 'keyboard' here is a leftover debug hook in
            % a branch believed unreachable (esc only holds '"' and '\')
            str(nstr+1) = inStr(pos), keyboard
            pos = pos + 1;
    end
end
error_pos('End of file while expecting end of inStr');
%%-------------------------------------------------------------------------
function num = parse_number(varargin)
% Parse a numeric literal at the global cursor using sscanf.  Octave's
% sscanf does not report the consumed length the same way, so a regexp
% is used there to measure how far to advance the cursor.
global pos inStr len isoct
currstr=inStr(pos:end);
numstr=0;
if(isoct~=0)
    % Octave branch: find the extent of the number with a JSON-number
    % regexp, then convert it with sscanf
    numstr=regexp(currstr,'^\s*-?(?:0|[1-9]\d*)(?:\.\d+)?(?:[eE][+\-]?\d+)?','end');
    [num, one] = sscanf(currstr, '%f', 1);
    delta=numstr+1;
else
    % MATLAB branch: sscanf returns the next-index directly
    [num, one, err, delta] = sscanf(currstr, '%f', 1);
    if ~isempty(err)
        error_pos('Error reading number at position %d');
    end
end
pos = pos + delta-1;
%%-------------------------------------------------------------------------
function val = parse_value(varargin)
% Dispatch on the next character to parse any JSON value (string,
% array, object, number, true/false/null) at the global cursor.
global pos inStr len
% NOTE: deliberately shadows true/false with numeric 1/0 so booleans
% come back as doubles, matching the rest of the toolbox
true = 1; false = 0;
pbar=jsonopt('progressbar_',-1,varargin{:});
if(pbar>0)
    waitbar(pos/len,pbar,'loading ...');
end
switch(inStr(pos))
    case '"'
        val = parseStr(varargin{:});
        return;
    case '['
        val = parse_array(varargin{:});
        return;
    case '{'
        val = parse_object(varargin{:});
        if isstruct(val)
            % expand annotated-array structs back into arrays
            if(~isempty(strmatch('x0x5F_ArrayType_',fieldnames(val), 'exact')))
                val=jstruct2array(val);
            end
        elseif isempty(val)
            % an empty JSON object maps to an empty struct
            val = struct;
        end
        return;
    case {'-','0','1','2','3','4','5','6','7','8','9'}
        val = parse_number(varargin{:});
        return;
    case 't'
        if pos+3 <= len && strcmpi(inStr(pos:pos+3), 'true')
            val = true;
            pos = pos + 4;
            return;
        end
    case 'f'
        if pos+4 <= len && strcmpi(inStr(pos:pos+4), 'false')
            val = false;
            pos = pos + 5;
            return;
        end
    case 'n'
        if pos+3 <= len && strcmpi(inStr(pos:pos+3), 'null')
            val = [];
            pos = pos + 4;
            return;
        end
end
error_pos('Value expected at position %d');
%%-------------------------------------------------------------------------
function error_pos(msg)
% Raise a JSONparser error whose message embeds an excerpt of the
% input around the current cursor, with an <error> marker at the
% failure point.  msg may contain one %d for the cursor position.
global pos inStr len
poShow = max(min([pos-15 pos-1 pos pos+20],len),1);
if poShow(3) == poShow(2)
    poShow(3:4) = poShow(2)+[0 -1]; % nothing to display after the marker
end
before = inStr(poShow(1):poShow(2));
after  = inStr(poShow(3):poShow(4));
error( ['JSONparser:invalidFormat: ' sprintf(msg, pos) ': ' before '<error>' after] );
%%-------------------------------------------------------------------------
function str = valid_field(str)
% Convert an arbitrary JSON key into a legal MATLAB struct field name.
% From MATLAB doc: field names must begin with a letter, which may be
% followed by any combination of letters, digits, and underscores.
% Invalid characters are converted to '_0x[Hex]_' escapes, and the
% prefix 'x0x[Hex]_' is prepended when the first character is not a
% letter.  (checkname reverses this encoding on output.)
global isoct
pos=regexp(str,'^[^A-Za-z]','once');
if(~isempty(pos))
    if(~isoct)
        str=regexprep(str,'^([^A-Za-z])','x0x${sprintf(''%X'',unicode2native($1))}_','once');
    else
        str=sprintf('x0x%X_%s',char(str(1)),str(2:end));
    end
end
if(isempty(regexp(str,'[^0-9A-Za-z_]', 'once' ))) return;  end
if(~isoct)
    str=regexprep(str,'([^0-9A-Za-z_])','_0x${sprintf(''%X'',unicode2native($1))}_');
else
    pos=regexp(str,'[^0-9A-Za-z_]');
    if(isempty(pos)) return; end
    str0=str;
    pos0=[0 pos(:)' length(str0)];
    str='';
    for i=1:length(pos)
        str=[str str0(pos0(i)+1:pos(i)-1) sprintf('_0x%X_',str0(pos(i)))];
    end
    % bug fix: compare against the length of the ORIGINAL string (str0);
    % the rebuilt str is longer, so the old test (length(str)) could
    % coincidentally match and wrongly skip appending the tail
    if(pos(end)~=length(str0))
        str=[str str0(pos0(end-1)+1:pos0(end))];
    end
end
%str(~isletter(str) & ~('0' <= str & str <= '9')) = '_';
%%-------------------------------------------------------------------------
function endpos = matching_quote(str,pos)
% Return the index of the next unescaped double quote in str at or
% after pos; errors out when no closing quote exists.
len=length(str);
% bug fix: use <= so a closing quote in the final position is found
% (the original 'pos<len' never examined the last character)
while(pos<=len)
    if(str(pos)=='"')
        if(~(pos>1 && str(pos-1)=='\'))
            endpos=pos;
            return;
        end
    end
    pos=pos+1;
end
error('unmatched quotation mark');
%%-------------------------------------------------------------------------
function [endpos, e1l, e1r, maxlevel] = matching_bracket(str,pos)
% Find the ']' matching the '[' that the cursor has just consumed.
% Scans only the precomputed bracket/quote positions (arraytoken) for
% speed.  Also reports the first inner '[' (e1l), the first ']' seen
% (e1r), and the maximum nesting depth (maxlevel).
global arraytoken
level=1;
maxlevel=level;
endpos=0;
% restrict the scan to interesting token positions at/after pos
bpos=arraytoken(arraytoken>=pos);
tokens=str(bpos);
len=length(tokens);
pos=1;
e1l=[];
e1r=[];
while(pos<=len)
    c=tokens(pos);
    if(c==']')
        level=level-1;
        if(isempty(e1r)) e1r=bpos(pos); end
        if(level==0)
            endpos=bpos(pos);
            return
        end
    end
    if(c=='[')
        if(isempty(e1l)) e1l=bpos(pos); end
        level=level+1;
        maxlevel=max(maxlevel,level);
    end
    if(c=='"')
        % skip over quoted strings so brackets inside them are ignored
        pos=matching_quote(tokens,pos+1);
    end
    pos=pos+1;
end
if(endpos==0)
    error('unmatched "]"');
end
|
github
|
zzlyw/machine-learning-exercises-master
|
loadubjson.m
|
.m
|
machine-learning-exercises-master/machine-learning-ex1/ex1/lib/jsonlab/loadubjson.m
| 15,574 |
utf_8
|
5974e78e71b81b1e0f76123784b951a4
|
function data = loadubjson(fname,varargin)
%
% data=loadubjson(fname,opt)
%    or
% data=loadubjson(fname,'param1',value1,'param2',value2,...)
%
% parse a JSON (JavaScript Object Notation) file or string
%
% authors:Qianqian Fang (fangq<at> nmr.mgh.harvard.edu)
% created on 2013/08/01
%
% $Id: loadubjson.m 460 2015-01-03 00:30:45Z fangq $
%
% input:
%      fname: input file name, if fname contains "{}" or "[]", fname
%             will be interpreted as a UBJSON string
%      opt: a struct to store parsing options, opt can be replaced by
%           a list of ('param',value) pairs - the param string is equivallent
%           to a field in opt. opt can have the following
%           fields (first in [.|.] is the default)
%
%           opt.SimplifyCell [0|1]: if set to 1, loadubjson will call cell2mat
%                         for each element of the JSON data, and group
%                         arrays based on the cell2mat rules.
%           opt.IntEndian [B|L]: specify the endianness of the integer fields
%                         in the UBJSON input data. B - Big-Endian format for
%                         integers (as required in the UBJSON specification);
%                         L - input integer fields are in Little-Endian order.
%
% output:
%      dat: a cell array, where {...} blocks are converted into cell arrays,
%           and [...] are converted to arrays
%
% examples:
%      obj=struct('string','value','array',[1 2 3]);
%      ubjdata=saveubjson('obj',obj);
%      dat=loadubjson(ubjdata)
%      dat=loadubjson(['examples' filesep 'example1.ubj'])
%      dat=loadubjson(['examples' filesep 'example1.ubj'],'SimplifyCell',1)
%
% license:
%     BSD, see LICENSE_BSD.txt files for details
%
% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)
%
global pos inStr len esc index_esc len_esc isoct arraytoken fileendian systemendian
% an argument containing JSON brackets is treated as an inline UBJSON
% string; otherwise it must name an existing file to read
if(regexp(fname,'[\{\}\]\[]','once'))
   string=fname;
elseif(exist(fname,'file'))
   fid = fopen(fname,'rb');
   string = fread(fid,inf,'uint8=>char')';
   fclose(fid);
else
   error('input file does not exist');
end
% initialize the shared parser state used by all sub-functions
pos = 1; len = length(string); inStr = string;
isoct=exist('OCTAVE_VERSION','builtin');
arraytoken=find(inStr=='[' | inStr==']' | inStr=='"');
% blank out '\\' pairs first so '\"' positions can be located reliably
jstr=regexprep(inStr,'\\\\',' ');
escquote=regexp(jstr,'\\"');
arraytoken=sort([arraytoken escquote]);
% String delimiters and escape chars identified to improve speed:
esc = find(inStr=='"' | inStr=='\' ); % comparable to: regexp(inStr, '["\\]');
index_esc = 1; len_esc = length(esc);
opt=varargin2struct(varargin{:});
% record the declared byte order of the file and of this machine so
% parse_number/parse_block know whether to swap bytes
fileendian=upper(jsonopt('IntEndian','B',opt));
[os,maxelem,systemendian]=computer;
jsoncount=1;
while pos <= len
    switch(next_char)
        case '{'
            data{jsoncount} = parse_object(opt);
        case '['
            data{jsoncount} = parse_array(opt);
        otherwise
            error_pos('Outer level structure must be an object or an array');
    end
    jsoncount=jsoncount+1;
end % while
jsoncount=length(data);
% unwrap the cell when there was a single top-level value
if(jsoncount==1 && iscell(data))
    data=data{1};
end
if(~isempty(data))
    if(isstruct(data)) % data can be a struct array
        data=jstruct2array(data);
    elseif(iscell(data))
        data=jcell2array(data);
    end
end
%%
function newdata=parse_collection(id,data,obj)
% NOTE(review): this helper appears to be dead code - nothing in this
% file calls it, it references 'jsoncount' which is not in scope here
% (not declared global), and 'newdata' is left unassigned when the
% condition is false.  Kept as-is; confirm there are no external
% callers before removing.
if(jsoncount>0 && exist('data','var'))
    if(~iscell(data))
        newdata=cell(1);
        newdata{1}=data;
        data=newdata;
    end
end
%%
function newdata=jcell2array(data)
% Walk a parser-produced cell array and normalize its contents:
% struct elements are expanded via jstruct2array, nested cells recurse.
newdata=data;
for idx=1:length(data)
    elem=data{idx};
    if(isstruct(elem))
        newdata{idx}=jstruct2array(elem);
    elseif(iscell(elem))
        newdata{idx}=jcell2array(elem);
    end
end
%%-------------------------------------------------------------------------
function newdata=jstruct2array(data)
% Post-process a struct (array) produced by the parser: recurse into
% struct-valued fields depth-first, then, if the struct carries the
% annotated-array markers (_ArrayType_/_ArrayData_ hex-encoded as
% x0x5F_...), rebuild the original numeric/sparse/complex array.
fn=fieldnames(data);
newdata=data;
len=length(data);
for i=1:length(fn) % depth-first
    for j=1:len
        if(isstruct(getfield(data(j),fn{i})))
            newdata(j)=setfield(newdata(j),fn{i},jstruct2array(getfield(data(j),fn{i})));
        end
    end
end
if(~isempty(strmatch('x0x5F_ArrayType_',fn)) && ~isempty(strmatch('x0x5F_ArrayData_',fn)))
  newdata=cell(len,1);
  for j=1:len
    % restore the recorded numeric class first
    ndata=cast(data(j).x0x5F_ArrayData_,data(j).x0x5F_ArrayType_);
    iscpx=0;
    if(~isempty(strmatch('x0x5F_ArrayIsComplex_',fn)))
        if(data(j).x0x5F_ArrayIsComplex_)
           iscpx=1;
        end
    end
    if(~isempty(strmatch('x0x5F_ArrayIsSparse_',fn)))
        if(data(j).x0x5F_ArrayIsSparse_)
            if(~isempty(strmatch('x0x5F_ArraySize_',fn)))
                % sizes may arrive as integer types; sparse() needs double
                dim=double(data(j).x0x5F_ArraySize_);
                % for vectors one index column was dropped on save, so
                % complex data occupies the last two columns
                if(iscpx && size(ndata,2)==4-any(dim==1))
                    ndata(:,end-1)=complex(ndata(:,end-1),ndata(:,end));
                end
                if isempty(ndata)
                    % All-zeros sparse
                    ndata=sparse(dim(1),prod(dim(2:end)));
                elseif dim(1)==1
                    % Sparse row vector
                    ndata=sparse(1,ndata(:,1),ndata(:,2),dim(1),prod(dim(2:end)));
                elseif dim(2)==1
                    % Sparse column vector
                    ndata=sparse(ndata(:,1),1,ndata(:,2),dim(1),prod(dim(2:end)));
                else
                    % Generic sparse array.
                    ndata=sparse(ndata(:,1),ndata(:,2),ndata(:,3),dim(1),prod(dim(2:end)));
                end
            else
                % no size recorded: let sparse() infer the extents
                if(iscpx && size(ndata,2)==4)
                    ndata(:,3)=complex(ndata(:,3),ndata(:,4));
                end
                ndata=sparse(ndata(:,1),ndata(:,2),ndata(:,3));
            end
        end
    elseif(~isempty(strmatch('x0x5F_ArraySize_',fn)))
        % dense array: pair up real/imag columns then restore the shape
        if(iscpx && size(ndata,2)==2)
             ndata=complex(ndata(:,1),ndata(:,2));
        end
        ndata=reshape(ndata(:),data(j).x0x5F_ArraySize_);
    end
    newdata{j}=ndata;
  end
  if(len==1)
      newdata=newdata{1};
  end
end
%%-------------------------------------------------------------------------
function object = parse_object(varargin)
% Parse a UBJSON object at the global cursor into a MATLAB struct.
% Supports the optimized-container headers: '$' declares an element
% type (recorded but not yet exploited) and '#' declares an element
% count; when a count is present the format omits the closing '}'.
global pos inStr
% bug fix: pos/inStr were referenced below without a global declaration,
% so the '$'/'#' header paths would fail on undefined variables
parse_char('{');
object = [];
type='';
count=-1;
if(next_char == '$')
    type=inStr(pos+1); % TODO: typed objects are recorded but unused
    pos=pos+2;
end
if(next_char == '#')
    pos=pos+1;
    count=double(parse_number());
end
if next_char ~= '}'
    num=0;
    while 1
        str = parseStr(varargin{:});
        if isempty(str)
            error_pos('Name of value at position %d cannot be empty');
        end
        % UBJSON has no ':' separator between key and value
        val = parse_value(varargin{:});
        num=num+1;
        % dynamic field assignment instead of eval(): equivalent result,
        % faster, and never executes text derived from the input stream
        object.(valid_field(str)) = val;
        if next_char == '}' || (count>=0 && num>=count)
            break;
        end
    end
end
% a counted object carries no closing brace to consume
if(count==-1)
    parse_char('}');
end
%%-------------------------------------------------------------------------
function [cid,len]=elem_info(type)
% Map a UBJSON numeric type marker to its MATLAB class name and byte
% width: i->int8, U->uint8, I->int16, l->int32, L->int64, d->single,
% D->double.  Unknown markers raise a position-annotated error.
typemap={'int8','uint8','int16','int32','int64','single','double'};
widths=[1,1,2,4,8,4,8];
id=strfind('iUIlLdD',type);
if(isempty(id) || id<=0)
    error_pos('unsupported type at position %d');
end
cid=typemap{id};
len=widths(id);
%%-------------------------------------------------------------------------
function [data adv]=parse_block(type,count,varargin)
% Decode a packed block of `count` fixed-width elements of UBJSON type
% marker `type` starting at the global cursor.  Returns the decoded
% values and `adv`, the number of input bytes consumed (the caller
% advances pos by adv).
global pos inStr isoct fileendian systemendian
[cid,len]=elem_info(type);
datastr=inStr(pos:pos+len*count-1);
% reinterpret the raw characters as bytes (Octave needs int8 here)
if(isoct)
    newdata=int8(datastr);
else
    newdata=uint8(datastr);
end
id=strfind('iUIlLdD',type);
% integer types (ids 1..5) may need a byte swap when the file's
% declared endianness differs from this machine's
if(id<=5 && fileendian~=systemendian)
    newdata=swapbytes(typecast(newdata,cid));
end
data=typecast(newdata,cid);
adv=double(len*count);
%%-------------------------------------------------------------------------
function object = parse_array(varargin) % JSON array is written in row-major order
% Parse a UBJSON array at the global cursor.  Optimized containers
% ('$' type + '#' count headers) are decoded in bulk via parse_block;
% otherwise elements are parsed one by one with parse_value.
global pos inStr isoct
parse_char('[');
object = cell(0, 1);
dim=[];
type='';
count=-1;
if(next_char == '$')
    % strongly-typed container: one type marker for all elements
    type=inStr(pos+1);
    pos=pos+2;
end
if(next_char == '#')
    pos=pos+1;
    if(next_char=='[')
        % ND extension: the count is itself an array of dimensions
        dim=parse_array(varargin{:});
        count=prod(double(dim));
    else
        count=double(parse_number());
    end
end
if(~isempty(type))
    if(count>=0)
        % typed + counted: decode the whole block in one shot
        [object adv]=parse_block(type,count,varargin{:});
        if(~isempty(dim))
            object=reshape(object,dim);
        end
        pos=pos+adv;
        return;
    else
        % typed but uncounted: infer the count from the closing bracket
        endpos=matching_bracket(inStr,pos);
        [cid,len]=elem_info(type);
        count=(endpos-pos)/len;
        [object adv]=parse_block(type,count,varargin{:});
        pos=pos+adv;
        parse_char(']');
        return;
    end
end
if next_char ~= ']'
    while 1
        val = parse_value(varargin{:});
        object{end+1} = val;
        if next_char == ']'
            break;
        end
        %parse_char(',');
    end
end
if(jsonopt('SimplifyCell',0,varargin{:})==1)
  try
    % try to collapse homogeneous cells into a plain array
    oldobj=object;
    object=cell2mat(object')';
    if(iscell(oldobj) && isstruct(object) && numel(object)>1 && jsonopt('SimplifyCellArray',1,varargin{:})==0)
        object=oldobj;
    elseif(size(object,1)>1 && ndims(object)==2)
        object=object';
    end
  catch
  end
end
% a counted array carries no closing bracket to consume
if(count==-1)
    parse_char(']');
end
%%-------------------------------------------------------------------------
function parse_char(c)
% Consume the expected character c (skipping whitespace on both sides);
% raise a position-annotated error when the input differs.
global pos inStr len
skip_whitespace;
if(pos<=len && inStr(pos)==c)
    pos = pos + 1;
    skip_whitespace;
else
    error_pos(sprintf('Expected %c at position %%d', c));
end
%%-------------------------------------------------------------------------
function c = next_char
% Peek at the next non-whitespace character without consuming it;
% returns [] when the input has been exhausted.
global pos inStr len
skip_whitespace;
c=[];
if(pos<=len)
    c=inStr(pos);
end
%%-------------------------------------------------------------------------
function skip_whitespace
% Advance the global cursor until it rests on a non-whitespace
% character (or runs past the end of the input).
global pos inStr len
while(pos<=len)
    if(~isspace(inStr(pos)))
        break;
    end
    pos = pos + 1;
end
%%-------------------------------------------------------------------------
function str = parseStr(varargin)
% Parse a UBJSON string at the global cursor.  'S' and 'H' markers are
% followed by a length-prefixed byte run ('H' huge numbers are kept as
% text); 'C' is a single character.
global pos inStr esc index_esc len_esc
% len, ns = length(inStr), keyboard
type=inStr(pos);
if type ~= 'S' && type ~= 'C' && type ~= 'H'
    error_pos('String starting with S expected at position %d');
else
    pos = pos + 1;
end
if(type == 'C')
    % single-character string: no length prefix
    str=inStr(pos);
    pos=pos+1;
    return;
end
% length-prefixed payload: the byte count is a UBJSON number
bytelen=double(parse_number());
if(length(inStr)>=pos+bytelen-1)
    str=inStr(pos:pos+bytelen-1);
    pos=pos+bytelen;
else
    error_pos('End of file while expecting end of inStr');
end
%%-------------------------------------------------------------------------
function num = parse_number(varargin)
% Decode one fixed-width UBJSON number at the global cursor: the type
% marker selects the class and byte width, and integer fields are
% byte-swapped when the file's declared endianness differs from this
% machine's.
global pos inStr len isoct fileendian systemendian
id=strfind('iUIlLdD',inStr(pos));
if(isempty(id))
    error_pos('expecting a number at position %d');
end
type={'int8','uint8','int16','int32','int64','single','double'};
bytelen=[1,1,2,4,8,4,8];
datastr=inStr(pos+1:pos+bytelen(id));
% reinterpret the raw characters as bytes (Octave needs int8 here)
if(isoct)
    newdata=int8(datastr);
else
    newdata=uint8(datastr);
end
% only integer types (ids 1..5) are subject to byte order
if(id<=5 && fileendian~=systemendian)
    newdata=swapbytes(typecast(newdata,type{id}));
end
num=typecast(newdata,type{id});
% advance past the marker byte plus the payload
pos = pos + bytelen(id)+1;
%%-------------------------------------------------------------------------
function val = parse_value(varargin)
% Dispatch on the UBJSON type marker at the global cursor to parse any
% value: strings (S/C/H), arrays, objects, numbers, T/F booleans, and
% Z/N null markers.
global pos inStr len
% NOTE: deliberately shadows true/false with numeric 1/0 so booleans
% come back as doubles, matching the rest of the toolbox
true = 1; false = 0;
switch(inStr(pos))
    case {'S','C','H'}
        val = parseStr(varargin{:});
        return;
    case '['
        val = parse_array(varargin{:});
        return;
    case '{'
        val = parse_object(varargin{:});
        if isstruct(val)
            % expand annotated-array structs back into arrays
            if(~isempty(strmatch('x0x5F_ArrayType_',fieldnames(val), 'exact')))
                val=jstruct2array(val);
            end
        elseif isempty(val)
            % an empty UBJSON object maps to an empty struct
            val = struct;
        end
        return;
    case {'i','U','I','l','L','d','D'}
        val = parse_number(varargin{:});
        return;
    case 'T'
        val = true;
        pos = pos + 1;
        return;
    case 'F'
        val = false;
        pos = pos + 1;
        return;
    case {'Z','N'}
        % null / no-op markers both map to empty
        val = [];
        pos = pos + 1;
        return;
end
error_pos('Value expected at position %d');
%%-------------------------------------------------------------------------
function error_pos(msg)
% Raise a JSONparser error whose message embeds an excerpt of the
% input around the current cursor, with an <error> marker at the
% failure point.  msg may contain one %d for the cursor position.
global pos inStr len
poShow = max(min([pos-15 pos-1 pos pos+20],len),1);
if poShow(3) == poShow(2)
    poShow(3:4) = poShow(2)+[0 -1]; % nothing to display after the marker
end
before = inStr(poShow(1):poShow(2));
after  = inStr(poShow(3):poShow(4));
error( ['JSONparser:invalidFormat: ' sprintf(msg, pos) ': ' before '<error>' after] );
%%-------------------------------------------------------------------------
function str = valid_field(str)
% Convert an arbitrary JSON key into a legal MATLAB struct field name.
% From MATLAB doc: field names must begin with a letter, which may be
% followed by any combination of letters, digits, and underscores.
% Invalid characters are converted to '_0x[Hex]_' escapes, and the
% prefix 'x0x[Hex]_' is prepended when the first character is not a
% letter.  (checkname reverses this encoding on output.)
global isoct
pos=regexp(str,'^[^A-Za-z]','once');
if(~isempty(pos))
    if(~isoct)
        str=regexprep(str,'^([^A-Za-z])','x0x${sprintf(''%X'',unicode2native($1))}_','once');
    else
        str=sprintf('x0x%X_%s',char(str(1)),str(2:end));
    end
end
if(isempty(regexp(str,'[^0-9A-Za-z_]', 'once' ))) return;  end
if(~isoct)
    str=regexprep(str,'([^0-9A-Za-z_])','_0x${sprintf(''%X'',unicode2native($1))}_');
else
    pos=regexp(str,'[^0-9A-Za-z_]');
    if(isempty(pos)) return; end
    str0=str;
    pos0=[0 pos(:)' length(str0)];
    str='';
    for i=1:length(pos)
        str=[str str0(pos0(i)+1:pos(i)-1) sprintf('_0x%X_',str0(pos(i)))];
    end
    % bug fix: compare against the length of the ORIGINAL string (str0);
    % the rebuilt str is longer, so the old test (length(str)) could
    % coincidentally match and wrongly skip appending the tail
    if(pos(end)~=length(str0))
        str=[str str0(pos0(end-1)+1:pos0(end))];
    end
end
%str(~isletter(str) & ~('0' <= str & str <= '9')) = '_';
%%-------------------------------------------------------------------------
function endpos = matching_quote(str,pos)
% Scan forward from POS for the next double quote in STR that is not
% preceded by a backslash; error out if none is found.
n=length(str);
while(pos<n)
    isquote = (str(pos)=='"');
    isescaped = (pos>1 && str(pos-1)=='\');
    if(isquote && ~isescaped)
        endpos=pos;
        return;
    end
    pos=pos+1;
end
error('unmatched quotation mark');
%%-------------------------------------------------------------------------
function [endpos e1l e1r maxlevel] = matching_bracket(str,pos)
% Locate the ']' that closes the '[' at/after POS in STR, walking only
% the pre-computed token positions stored in the global ARRAYTOKEN index.
% Returns:
%   endpos   - position of the matching ']' in STR
%   e1l/e1r  - position of the first nested '[' / first ']' encountered
%   maxlevel - maximum bracket nesting depth seen during the scan
global arraytoken
level=1;
maxlevel=level;
endpos=0;
% candidate bracket/quote positions at or beyond POS
bpos=arraytoken(arraytoken>=pos);
tokens=str(bpos);
len=length(tokens);
pos=1;
e1l=[];
e1r=[];
while(pos<=len)
    c=tokens(pos);
    if(c==']')
        level=level-1;
        if(isempty(e1r)) e1r=bpos(pos); end
        if(level==0)
            endpos=bpos(pos);
            return
        end
    end
    if(c=='[')
        if(isempty(e1l)) e1l=bpos(pos); end
        level=level+1;
        maxlevel=max(maxlevel,level);
    end
    if(c=='"')
        % skip over quoted strings so brackets inside them do not count
        pos=matching_quote(tokens,pos+1);
    end
    pos=pos+1;
end
if(endpos==0)
    error('unmatched "]"');
end
|
github
|
zzlyw/machine-learning-exercises-master
|
saveubjson.m
|
.m
|
machine-learning-exercises-master/machine-learning-ex1/ex1/lib/jsonlab/saveubjson.m
| 16,123 |
utf_8
|
61d4f51010aedbf97753396f5d2d9ec0
|
function json=saveubjson(rootname,obj,varargin)
%
% json=saveubjson(rootname,obj,filename)
% or
% json=saveubjson(rootname,obj,opt)
% json=saveubjson(rootname,obj,'param1',value1,'param2',value2,...)
%
% convert a MATLAB object (cell, struct or array) into a Universal
% Binary JSON (UBJSON) binary string
%
% author: Qianqian Fang (fangq<at> nmr.mgh.harvard.edu)
% created on 2013/08/17
%
% $Id: saveubjson.m 460 2015-01-03 00:30:45Z fangq $
%
% input:
%      rootname: the name of the root-object, when set to '', the root name
%        is ignored, however, when opt.ForceRootName is set to 1 (see below),
%        the MATLAB variable name will be used as the root name.
%      obj: a MATLAB object (array, cell, cell array, struct, struct array)
%      filename: a string for the file name to save the output UBJSON data
%      opt: a struct for additional options, ignore to use default values.
%        opt can have the following fields (first in [.|.] is the default)
%
%        opt.FileName [''|string]: a file name to save the output JSON data
%        opt.ArrayToStruct[0|1]: when set to 0, saveubjson outputs 1D/2D
%                         array in JSON array format; if sets to 1, an
%                         array will be shown as a struct with fields
%                         "_ArrayType_", "_ArraySize_" and "_ArrayData_"; for
%                         sparse arrays, the non-zero elements will be
%                         saved to _ArrayData_ field in triplet-format i.e.
%                         (ix,iy,val) and "_ArrayIsSparse_" will be added
%                         with a value of 1; for a complex array, the
%                         _ArrayData_ array will include two columns
%                         (4 for sparse) to record the real and imaginary
%                         parts, and also "_ArrayIsComplex_":1 is added.
%        opt.ParseLogical [1|0]: if this is set to 1, logical array elem
%                         will use true/false rather than 1/0.
%        opt.NoRowBracket [1|0]: if this is set to 1, arrays with a single
%                         numerical element will be shown without a square
%                         bracket, unless it is the root object; if 0, square
%                         brackets are forced for any numerical arrays.
%        opt.ForceRootName [0|1]: when set to 1 and rootname is empty, saveubjson
%                         will use the name of the passed obj variable as the
%                         root object name; if obj is an expression and
%                         does not have a name, 'root' will be used; if this
%                         is set to 0 and rootname is empty, the root level
%                         will be merged down to the lower level.
%        opt.JSONP [''|string]: to generate a JSONP output (JSON with padding),
%                         for example, if opt.JSON='foo', the JSON data is
%                         wrapped inside a function call as 'foo(...);'
%        opt.UnpackHex [1|0]: conver the 0x[hex code] output by loadjson
%                         back to the string form
%
%        opt can be replaced by a list of ('param',value) pairs. The param
%        string is equivallent to a field in opt and is case sensitive.
% output:
%      json: a binary string in the UBJSON format (see http://ubjson.org)
%
% examples:
%      jsonmesh=struct('MeshNode',[0 0 0;1 0 0;0 1 0;1 1 0;0 0 1;1 0 1;0 1 1;1 1 1],...
%               'MeshTetra',[1 2 4 8;1 3 4 8;1 2 6 8;1 5 6 8;1 5 7 8;1 3 7 8],...
%               'MeshTri',[1 2 4;1 2 6;1 3 4;1 3 7;1 5 6;1 5 7;...
%                          2 8 4;2 8 6;3 8 4;3 8 7;5 8 6;5 8 7],...
%               'MeshCreator','FangQ','MeshTitle','T6 Cube',...
%               'SpecialData',[nan, inf, -inf]);
%      saveubjson('jsonmesh',jsonmesh)
%      saveubjson('jsonmesh',jsonmesh,'meshdata.ubj')
%
% license:
%     BSD, see LICENSE_BSD.txt files for details
%
% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)
%

% resolve the root object name: single-argument call treats the first
% input as OBJ and derives the root name from the caller's variable name
if(nargin==1)
   varname=inputname(1);
   obj=rootname;
   if(isempty(varname))
      varname='root';
   end
   rootname=varname;
else
   varname=inputname(2);
end
% a lone string third argument is shorthand for opt.FileName
if(length(varargin)==1 && ischar(varargin{1}))
   opt=struct('FileName',varargin{1});
else
   opt=varargin2struct(varargin{:});
end
opt.IsOctave=exist('OCTAVE_VERSION','builtin');
rootisarray=0;
rootlevel=1;
forceroot=jsonopt('ForceRootName',0,opt);
if((isnumeric(obj) || islogical(obj) || ischar(obj) || isstruct(obj) || iscell(obj)) && isempty(rootname) && forceroot==0)
    rootisarray=1;
    rootlevel=0;
else
    if(isempty(rootname))
        rootname=varname;
    end
end
if((isstruct(obj) || iscell(obj))&& isempty(rootname) && forceroot)
    rootname='root';
end
json=obj2ubjson(rootname,obj,rootlevel,opt);
if(~rootisarray)
    json=['{' json '}'];
end

jsonp=jsonopt('JSONP','',opt);
if(~isempty(jsonp))
    json=[jsonp '(' json ')'];
end

% save to a file if FileName is set, suggested by Patrick Rapin
if(~isempty(jsonopt('FileName','',opt)))
    fid = fopen(opt.FileName, 'wb');
    % fail fast with a clear message instead of letting fwrite(-1,...)
    % produce a cryptic error when the file cannot be created
    if(fid<0)
        error('saveubjson:fileopen','could not open file %s for writing',opt.FileName);
    end
    fwrite(fid,json);
    fclose(fid);
end
%%-------------------------------------------------------------------------
function txt=obj2ubjson(name,item,level,varargin)
% Dispatch ITEM to the serializer matching its MATLAB class and return
% the resulting UBJSON fragment.
if(iscell(item))
    handler=@cell2ubjson;
elseif(isstruct(item))
    handler=@struct2ubjson;
elseif(ischar(item))
    handler=@str2ubjson;
else
    handler=@mat2ubjson;
end
txt=handler(name,item,level,varargin{:});
%%-------------------------------------------------------------------------
function txt=cell2ubjson(name,item,level,varargin)
% Serialize cell array ITEM (field name NAME, '' when anonymous) to a
% UBJSON fragment: multi-element cells become arrays ('['...']'), an
% empty cell becomes the null marker 'Z'.
txt='';
if(~iscell(item))
    error('input is not a cell');
end

dim=size(item);
if(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now
    item=reshape(item,dim(1),numel(item)/dim(1));
    dim=size(item);
end
len=numel(item); % let's handle 1D cell first
if(len>1)
    % more than one element: open an array and emit NAME only once
    if(~isempty(name))
        txt=[S_(checkname(name,varargin{:})) '[']; name='';
    else
        txt='[';
    end
elseif(len==0)
    % empty cell maps to UBJSON null
    if(~isempty(name))
        txt=[S_(checkname(name,varargin{:})) 'Z']; name='';
    else
        txt='Z';
    end
end
% column-major walk; each column of a multi-row cell becomes a nested array
for j=1:dim(2)
    if(dim(1)>1) txt=[txt '[']; end
    for i=1:dim(1)
       txt=[txt obj2ubjson(name,item{i,j},level+(len>1),varargin{:})];
    end
    if(dim(1)>1) txt=[txt ']']; end
end
if(len>1) txt=[txt ']']; end
%%-------------------------------------------------------------------------
function txt=struct2ubjson(name,item,level,varargin)
% Serialize struct (or struct array) ITEM to a UBJSON fragment.
% A scalar struct becomes an object ('{'...'}'); a struct array is
% wrapped in one or two levels of UBJSON arrays mirroring its 2D shape.
txt='';
if(~isstruct(item))
    error('input is not a struct');
end
dim=size(item);
if(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now
    item=reshape(item,dim(1),numel(item)/dim(1));
    dim=size(item);
end
len=numel(item);

if(~isempty(name))
    if(len>1) txt=[S_(checkname(name,varargin{:})) '[']; end
else
    if(len>1) txt='['; end
end
for j=1:dim(2)
    if(dim(1)>1) txt=[txt '[']; end
    for i=1:dim(1)
        names = fieldnames(item(i,j));
        if(~isempty(name) && len==1)
            txt=[txt S_(checkname(name,varargin{:})) '{'];
        else
            txt=[txt '{'];
        end
        if(~isempty(names))
            for e=1:length(names)
                % dynamic field access replaces the deprecated getfield(),
                % matching struct2json in savejson.m
                txt=[txt obj2ubjson(names{e},item(i,j).(names{e}),...
                     level+(dim(1)>1)+1+(len>1),varargin{:})];
            end
        end
        txt=[txt '}'];
    end
    if(dim(1)>1) txt=[txt ']']; end
end
if(len>1) txt=[txt ']']; end
%%-------------------------------------------------------------------------
function txt=str2ubjson(name,item,level,varargin)
% Serialize a char array ITEM to UBJSON strings; each row of a multi-row
% char matrix becomes one string element inside an array.
txt='';
if(~ischar(item))
    error('input is not a string');
end
item=reshape(item, max(size(item),[1 0]));
len=size(item,1);
% open an array when there is more than one row
if(len>1)
    if(~isempty(name))
        txt=[S_(checkname(name,varargin{:})) '['];
    else
        txt='[';
    end
end
for e=1:len
    row=item(e,:);
    if(len==1 && ~isempty(name))
        % single row with a field name: emit "name": "value"
        txt=[txt S_(checkname(name,varargin{:})) S_(row)];
    else
        txt=[txt S_(row)];
    end
end
if(len>1) txt=[txt ']']; end
%%-------------------------------------------------------------------------
function txt=mat2ubjson(name,item,level,varargin)
% Serialize numeric/logical array ITEM to UBJSON.
% Plain 1D/2D real dense arrays are emitted directly via matdata2ubjson;
% ND, sparse, complex or empty arrays (or when ArrayToStruct is set) use
% the annotated object form with _ArrayType_/_ArraySize_/_ArrayData_.
if(~isnumeric(item) && ~islogical(item))
        error('input is not an array');
end

if(length(size(item))>2 || issparse(item) || ~isreal(item) || ...
   isempty(item) || jsonopt('ArrayToStruct',0,varargin{:}))
      % annotated form: open an object and record type + size up front
      cid=I_(uint32(max(size(item))));
      if(isempty(name))
        txt=['{' S_('_ArrayType_'),S_(class(item)),S_('_ArraySize_'),I_a(size(item),cid(1)) ];
      else
          if(isempty(item))
              txt=[S_(checkname(name,varargin{:})),'Z'];
              return;
          else
              txt=[S_(checkname(name,varargin{:})),'{',S_('_ArrayType_'),S_(class(item)),S_('_ArraySize_'),I_a(size(item),cid(1))];
          end
      end
else
    % plain dense real array: emit the raw data form and return early
    if(isempty(name))
        txt=matdata2ubjson(item,level+1,varargin{:});
    else
        if(numel(item)==1 && jsonopt('NoRowBracket',1,varargin{:})==1)
            % scalar without brackets when NoRowBracket is enabled
            numtxt=regexprep(regexprep(matdata2ubjson(item,level+1,varargin{:}),'^\[',''),']','');
           	txt=[S_(checkname(name,varargin{:})) numtxt];
        else
    	    txt=[S_(checkname(name,varargin{:})),matdata2ubjson(item,level+1,varargin{:})];
        end
    end
    return;
end
if(issparse(item))
    [ix,iy]=find(item);
    data=full(item(find(item)));
    if(~isreal(item))
       data=[real(data(:)),imag(data(:))];
       if(size(item,1)==1)
           % Kludge to have data's 'transposedness' match item's.
           % (Necessary for complex row vector handling below.)
           data=data';
       end
       txt=[txt,S_('_ArrayIsComplex_'),'T'];
    end
    txt=[txt,S_('_ArrayIsSparse_'),'T'];
    if(size(item,1)==1)
        % Row vector, store only column indices.
        txt=[txt,S_('_ArrayData_'),...
           matdata2ubjson([iy(:),data'],level+2,varargin{:})];
    elseif(size(item,2)==1)
        % Column vector, store only row indices.
        txt=[txt,S_('_ArrayData_'),...
           matdata2ubjson([ix,data],level+2,varargin{:})];
    else
        % General case, store row and column indices.
        txt=[txt,S_('_ArrayData_'),...
           matdata2ubjson([ix,iy,data],level+2,varargin{:})];
    end
else
    if(isreal(item))
        txt=[txt,S_('_ArrayData_'),...
            matdata2ubjson(item(:)',level+2,varargin{:})];
    else
        % complex dense array: real parts in column 1, imaginary in column 2
        txt=[txt,S_('_ArrayIsComplex_'),'T'];
        txt=[txt,S_('_ArrayData_'),...
            matdata2ubjson([real(item(:)) imag(item(:))],level+2,varargin{:})];
    end
end
txt=[txt,'}'];
%%-------------------------------------------------------------------------
function txt=matdata2ubjson(mat,level,varargin)
% Encode the raw data of numeric/logical matrix MAT into UBJSON binary
% form: integer-valued data are packed with the smallest sufficient
% integer marker, floats use 'd'/'D', logicals 'T'/'F' or a packed array.
if(isempty(mat))
    txt='Z';
    return;
end
if(size(mat,1)==1)
    level=level-1;
end
type='';
hasnegtive=(mat<0);
if(isa(mat,'integer') || isinteger(mat) || (isfloat(mat) && all(mod(mat(:),1) == 0)))
    % all-non-negative values that fit in one byte can use the 'U'
    % (uint8) type. The original guard isempty(hasnegtive) tested a
    % logical array and was never true for non-empty input, making this
    % branch unreachable; also the original bound (<=2^8) would have
    % admitted 256, which does not fit in a uint8.
    if(~any(hasnegtive(:)))
        if(max(mat(:))<2^8)
            type='U';
        end
    end
    if(isempty(type))
        % todo - need to consider negative ones separately
        id= histc(abs(max(mat(:))),[0 2^7 2^15 2^31 2^63]);
        if(isempty(find(id)))
            error('high-precision data is not yet supported');
        end
        key='iIlL';
	    type=key(find(id));
    end
    txt=[I_a(mat(:),type,size(mat))];
elseif(islogical(mat))
    logicalval='FT';
    if(numel(mat)==1)
        txt=logicalval(mat+1);
    else
        % packed logical array: optimized container of uint8 0/1 bytes
        txt=['[$U#' I_a(size(mat),'l') typecast(swapbytes(uint8(mat(:)')),'uint8')];
    end
else
    if(numel(mat)==1)
        txt=['[' D_(mat) ']'];
    else
        txt=D_a(mat(:),'D',size(mat));
    end
end
% replace non-finite values with customizable string tokens
if(any(isinf(mat(:))))
    txt=regexprep(txt,'([-+]*)Inf',jsonopt('Inf','"$1_Inf_"',varargin{:}));
end
if(any(isnan(mat(:))))
    txt=regexprep(txt,'NaN',jsonopt('NaN','"_NaN_"',varargin{:}));
end
%%-------------------------------------------------------------------------
function newname=checkname(name,varargin)
% Reverse the hex escaping that loadjson applies to invalid field names:
% when UnpackHex is enabled (default), 'x0x..._' / '_0x..._' tokens are
% converted back to the characters they encode.
isunpack=jsonopt('UnpackHex',1,varargin{:});
newname=name;
if(isempty(regexp(name,'0x([0-9a-fA-F]+)_','once')))
    return
end
if(isunpack)
    isoct=jsonopt('IsOctave',0,varargin{:});
    if(~isoct)
        % MATLAB supports dynamic expressions inside the replacement string
        newname=regexprep(name,'(^x|_){1}0x([0-9a-fA-F]+)_','${native2unicode(hex2dec($2))}');
    else
        % Octave lacks that feature: rebuild the name segment by segment
        pos=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','start');
        pend=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','end');
        if(isempty(pos)) return; end
        str0=name;
        pos0=[0 pend(:)' length(name)];
        newname='';
        for i=1:length(pos)
            newname=[newname str0(pos0(i)+1:pos(i)-1) char(hex2dec(str0(pos(i)+3:pend(i)-1)))];
        end
        if(pos(end)~=length(name))
            newname=[newname str0(pos0(end-1)+1:pos0(end))];
        end
    end
end
%%-------------------------------------------------------------------------
function val=S_(str)
% Encode a char array as a UBJSON string: a single character uses the
% 'C' marker, anything longer uses 'S' + encoded length + payload.
if(length(str)==1)
    val=['C' str];
    return;
end
val=['S' I_(int32(length(str))) str];
%%-------------------------------------------------------------------------
function val=I_(num)
% Encode a scalar integer with the smallest suitable UBJSON marker:
% 'U' uint8, 'i' int8, 'I' int16, 'l' int32, 'L' int64; payloads are
% big-endian per the UBJSON spec.
if(~isinteger(num))
    error('input is not an integer');
end
% uint8 covers 0..255 inclusive; the original bound (num<255) mis-filed
% the value 255 into the larger int16 encoding
if(num>=0 && num<=255)
   val=['U' data2byte(swapbytes(cast(num,'uint8')),'uint8')];
   return;
end
key='iIlL';
cid={'int8','int16','int32','int64'};
for i=1:4
  if((num>0 && num<2^(i*8-1)) || (num<0 && num>=-2^(i*8-1)))
    val=[key(i) data2byte(swapbytes(cast(num,cid{i})),'uint8')];
    return;
  end
end
error('unsupported integer');
%%-------------------------------------------------------------------------
function val=D_(num)
% Encode a scalar float: 'd' (float32) for single precision input,
% 'D' (float64) otherwise, followed by the raw bytes.
if(~isfloat(num))
    error('input is not a float');
end
if(isa(num,'single'))
    marker='d';
else
    marker='D';
end
val=[marker data2byte(num,'uint8')];
%%-------------------------------------------------------------------------
function data=I_a(num,type,dim,format)
% Encode integer array NUM using element marker TYPE ('i','U','I','l','L').
% When DIM describes a non-row shape, the optimized strongly-typed
% container header ('[$' type '#' size) is emitted; otherwise each
% element carries its own type marker.
id=find(ismember('iUIlL',type));
% find() returns [] (not 0) for an unknown TYPE, so the original id==0
% test could never fire and this error was unreachable
if(isempty(id))
    error('unsupported integer array');
end

% based on UBJSON specs, all integer types are stored in big endian format
if(id==1)
  data=data2byte(swapbytes(int8(num)),'uint8');
  blen=1;
elseif(id==2)
  data=data2byte(swapbytes(uint8(num)),'uint8');
  blen=1;
elseif(id==3)
  data=data2byte(swapbytes(int16(num)),'uint8');
  blen=2;
elseif(id==4)
  data=data2byte(swapbytes(int32(num)),'uint8');
  blen=4;
elseif(id==5)
  data=data2byte(swapbytes(int64(num)),'uint8');
  blen=8;
end

if(nargin>=3 && length(dim)>=2 && prod(dim)~=dim(2))
  format='opt';
end
if((nargin<4 || strcmp(format,'opt')) && numel(num)>1)
  % optimized container: $type #count payload
  if(nargin>=3 && (length(dim)==1 || (length(dim)>=2 && prod(dim)~=dim(2))))
      cid=I_(uint32(max(dim)));
      data=['$' type '#' I_a(dim,cid(1)) data(:)'];
  else
      data=['$' type '#' I_(int32(numel(data)/blen)) data(:)'];
  end
  data=['[' data(:)'];
else
  % plain container: interleave the type marker before every element
  data=reshape(data,blen,numel(data)/blen);
  data(2:blen+1,:)=data;
  data(1,:)=type;
  data=data(:)';
  data=['[' data(:)' ']'];
end
%%-------------------------------------------------------------------------
function data=D_a(num,type,dim,format)
% Encode float array NUM using element marker TYPE ('d' single,'D' double),
% emitting the optimized strongly-typed container when DIM is non-row.
id=find(ismember('dD',type));
% find() returns [] (not 0) for an unknown TYPE, so the original id==0
% test could never fire and this error was unreachable
if(isempty(id))
    error('unsupported float array');
end

if(id==1)
  data=data2byte(single(num),'uint8');
elseif(id==2)
  data=data2byte(double(num),'uint8');
end

if(nargin>=3 && length(dim)>=2 && prod(dim)~=dim(2))
  format='opt';
end
if((nargin<4 || strcmp(format,'opt')) && numel(num)>1)
  % optimized container: $type #count payload
  if(nargin>=3 && (length(dim)==1 || (length(dim)>=2 && prod(dim)~=dim(2))))
      cid=I_(uint32(max(dim)));
      data=['$' type '#' I_a(dim,cid(1)) data(:)'];
  else
      data=['$' type '#' I_(int32(numel(data)/(id*4))) data(:)'];
  end
  data=['[' data];
else
  % plain container: interleave the type marker before every element
  data=reshape(data,(id*4),length(data)/(id*4));
  data(2:(id*4+1),:)=data;
  data(1,:)=type;
  data=data(:)';
  data=['[' data(:)' ']'];
end
%%-------------------------------------------------------------------------
function bytes=data2byte(varargin)
% Reinterpret the input value as a row vector of the target type
% (arguments are forwarded verbatim to typecast).
bytes=reshape(typecast(varargin{:}),1,[]);
|
github
|
mcubelab/push-est-public-master
|
savejson.m
|
.m
|
push-est-public-master/catkin_ws/src/pnpush_planning/src/analyze/matlab/Json/fsroot/jsonlab/savejson.m
| 17,893 |
utf_8
|
e6ce3747006d07076995e00a8b14623a
|
function json=savejson(rootname,obj,varargin)
%
% json=savejson(rootname,obj,filename)
% or
% json=savejson(rootname,obj,opt)
% json=savejson(rootname,obj,'param1',value1,'param2',value2,...)
%
% convert a MATLAB object (cell, struct or array) into a JSON (JavaScript
% Object Notation) string
%
% author: Qianqian Fang (fangq<at> nmr.mgh.harvard.edu)
% created on 2011/09/09
%
% $Id: savejson.m 492 2015-06-05 20:52:02Z fangq $
%
% input:
%      rootname: the name of the root-object, when set to '', the root name
%        is ignored, however, when opt.ForceRootName is set to 1 (see below),
%        the MATLAB variable name will be used as the root name.
%      obj: a MATLAB object (array, cell, cell array, struct, struct array).
%      filename: a string for the file name to save the output JSON data.
%      opt: a struct for additional options, ignore to use default values.
%        opt can have the following fields (first in [.|.] is the default)
%
%        opt.FileName [''|string]: a file name to save the output JSON data
%        opt.FloatFormat ['%.10g'|string]: format to show each numeric element
%                         of a 1D/2D array;
%        opt.ArrayIndent [1|0]: if 1, output explicit data array with
%                         precedent indentation; if 0, no indentation
%        opt.ArrayToStruct[0|1]: when set to 0, savejson outputs 1D/2D
%                         array in JSON array format; if sets to 1, an
%                         array will be shown as a struct with fields
%                         "_ArrayType_", "_ArraySize_" and "_ArrayData_"; for
%                         sparse arrays, the non-zero elements will be
%                         saved to _ArrayData_ field in triplet-format i.e.
%                         (ix,iy,val) and "_ArrayIsSparse_" will be added
%                         with a value of 1; for a complex array, the
%                         _ArrayData_ array will include two columns
%                         (4 for sparse) to record the real and imaginary
%                         parts, and also "_ArrayIsComplex_":1 is added.
%        opt.ParseLogical [0|1]: if this is set to 1, logical array elem
%                         will use true/false rather than 1/0.
%        opt.NoRowBracket [1|0]: if this is set to 1, arrays with a single
%                         numerical element will be shown without a square
%                         bracket, unless it is the root object; if 0, square
%                         brackets are forced for any numerical arrays.
%        opt.ForceRootName [0|1]: when set to 1 and rootname is empty, savejson
%                         will use the name of the passed obj variable as the
%                         root object name; if obj is an expression and
%                         does not have a name, 'root' will be used; if this
%                         is set to 0 and rootname is empty, the root level
%                         will be merged down to the lower level.
%        opt.Inf ['"$1_Inf_"'|string]: a customized regular expression pattern
%                         to represent +/-Inf. The matched pattern is '([-+]*)Inf'
%                         and $1 represents the sign. For those who want to use
%                         1e999 to represent Inf, they can set opt.Inf to '$11e999'
%        opt.NaN ['"_NaN_"'|string]: a customized regular expression pattern
%                         to represent NaN
%        opt.JSONP [''|string]: to generate a JSONP output (JSON with padding),
%                         for example, if opt.JSONP='foo', the JSON data is
%                         wrapped inside a function call as 'foo(...);'
%        opt.UnpackHex [1|0]: conver the 0x[hex code] output by loadjson
%                         back to the string form
%        opt.SaveBinary [0|1]: 1 - save the JSON file in binary mode; 0 - text mode.
%        opt.Compact [0|1]: 1- out compact JSON format (remove all newlines and tabs)
%
%        opt can be replaced by a list of ('param',value) pairs. The param
%        string is equivallent to a field in opt and is case sensitive.
% output:
%      json: a string in the JSON format (see http://json.org)
%
% examples:
%      jsonmesh=struct('MeshNode',[0 0 0;1 0 0;0 1 0;1 1 0;0 0 1;1 0 1;0 1 1;1 1 1],...
%               'MeshTetra',[1 2 4 8;1 3 4 8;1 2 6 8;1 5 6 8;1 5 7 8;1 3 7 8],...
%               'MeshTri',[1 2 4;1 2 6;1 3 4;1 3 7;1 5 6;1 5 7;...
%                          2 8 4;2 8 6;3 8 4;3 8 7;5 8 6;5 8 7],...
%               'MeshCreator','FangQ','MeshTitle','T6 Cube',...
%               'SpecialData',[nan, inf, -inf]);
%      savejson('jmesh',jsonmesh)
%      savejson('',jsonmesh,'ArrayIndent',0,'FloatFormat','\t%.5g')
%
% license:
%     BSD License, see LICENSE_BSD.txt files for details
%
% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)
%

% resolve the root object name: single-argument call treats the first
% input as OBJ and derives the root name from the caller's variable name
if(nargin==1)
   varname=inputname(1);
   obj=rootname;
   if(isempty(varname))
      varname='root';
   end
   rootname=varname;
else
   varname=inputname(2);
end
% a lone string third argument is shorthand for opt.FileName
if(length(varargin)==1 && ischar(varargin{1}))
   opt=struct('FileName',varargin{1});
else
   opt=varargin2struct(varargin{:});
end
opt.IsOctave=exist('OCTAVE_VERSION','builtin');
rootisarray=0;
rootlevel=1;
forceroot=jsonopt('ForceRootName',0,opt);
if((isnumeric(obj) || islogical(obj) || ischar(obj) || isstruct(obj) || iscell(obj)) && isempty(rootname) && forceroot==0)
    rootisarray=1;
    rootlevel=0;
else
    if(isempty(rootname))
        rootname=varname;
    end
end
if((isstruct(obj) || iscell(obj))&& isempty(rootname) && forceroot)
    rootname='root';
end

whitespaces=struct('tab',sprintf('\t'),'newline',sprintf('\n'),'sep',sprintf(',\n'));
if(jsonopt('Compact',0,opt)==1)
    whitespaces=struct('tab','','newline','','sep',',');
end
if(~isfield(opt,'whitespaces_'))
    opt.whitespaces_=whitespaces;
end

nl=whitespaces.newline;

json=obj2json(rootname,obj,rootlevel,opt);
if(rootisarray)
    json=sprintf('%s%s',json,nl);
else
    json=sprintf('{%s%s%s}\n',nl,json,nl);
end

jsonp=jsonopt('JSONP','',opt);
if(~isempty(jsonp))
    json=sprintf('%s(%s);%s',jsonp,json,nl);
end

% save to a file if FileName is set, suggested by Patrick Rapin
if(~isempty(jsonopt('FileName','',opt)))
    if(jsonopt('SaveBinary',0,opt)==1)
        fid = fopen(opt.FileName, 'wb');
        % fail fast with a clear message instead of letting fwrite(-1,...)
        % produce a cryptic error when the file cannot be created
        if(fid<0)
            error('savejson:fileopen','could not open file %s for writing',opt.FileName);
        end
        fwrite(fid,json);
    else
        fid = fopen(opt.FileName, 'wt');
        if(fid<0)
            error('savejson:fileopen','could not open file %s for writing',opt.FileName);
        end
        fwrite(fid,json,'char');
    end
    fclose(fid);
end
%%-------------------------------------------------------------------------
function txt=obj2json(name,item,level,varargin)
% Route ITEM to the serializer matching its MATLAB class and return the
% resulting JSON fragment.
if(iscell(item))
    handler=@cell2json;
elseif(isstruct(item))
    handler=@struct2json;
elseif(ischar(item))
    handler=@str2json;
else
    handler=@mat2json;
end
txt=handler(name,item,level,varargin{:});
%%-------------------------------------------------------------------------
function txt=cell2json(name,item,level,varargin)
% Serialize cell array ITEM (field name NAME, '' when anonymous) to JSON:
% multi-element cells become JSON arrays, an empty cell becomes [].
% LEVEL controls the indentation depth.
txt='';
if(~iscell(item))
    error('input is not a cell');
end

dim=size(item);
if(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now
    item=reshape(item,dim(1),numel(item)/dim(1));
    dim=size(item);
end
len=numel(item);
% whitespace/indentation settings shared across the serializers
ws=jsonopt('whitespaces_',struct('tab',sprintf('\t'),'newline',sprintf('\n'),'sep',sprintf(',\n')),varargin{:});
padding0=repmat(ws.tab,1,level);
padding2=repmat(ws.tab,1,level+1);
nl=ws.newline;
if(len>1)
    % more than one element: open an array, emit NAME only once
    if(~isempty(name))
        txt=sprintf('%s"%s": [%s',padding0, checkname(name,varargin{:}),nl); name='';
    else
        txt=sprintf('%s[%s',padding0,nl);
    end
elseif(len==0)
    % empty cell maps to []
    if(~isempty(name))
        txt=sprintf('%s"%s": []',padding0, checkname(name,varargin{:})); name='';
    else
        txt=sprintf('%s[]',padding0);
    end
end
% column-major walk; each column of a multi-row cell becomes a nested array
for j=1:dim(2)
    if(dim(1)>1)
        txt=sprintf('%s%s[%s',txt,padding2,nl);
    end
    for i=1:dim(1)
       txt=sprintf('%s%s',txt,obj2json(name,item{i,j},level+(dim(1)>1)+(len>1),varargin{:}));
       if(i<dim(1))
           txt=sprintf('%s%s',txt,sprintf(',%s',nl));
       end
    end
    if(dim(1)>1)
        txt=sprintf('%s%s%s]',txt,nl,padding2);
    end
    if(j<dim(2))
        txt=sprintf('%s%s',txt,sprintf(',%s',nl));
    end
    %if(j==dim(2)) txt=sprintf('%s%s',txt,sprintf(',%s',nl)); end
end
if(len>1)
    txt=sprintf('%s%s%s]',txt,nl,padding0);
end
%%-------------------------------------------------------------------------
function txt=struct2json(name,item,level,varargin)
% Serialize struct (or struct array) ITEM to JSON. A scalar struct
% becomes an object; a struct array is wrapped in one or two levels of
% JSON arrays mirroring its 2D shape. LEVEL controls indentation depth.
txt='';
if(~isstruct(item))
	error('input is not a struct');
end
dim=size(item);
if(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now
    item=reshape(item,dim(1),numel(item)/dim(1));
    dim=size(item);
end
len=numel(item);
% wrap in an array when there are multiple elements, or when brackets
% are forced for non-root objects via NoRowBracket=0
forcearray= (len>1 || (jsonopt('NoRowBracket',1,varargin{:})==0 && level>0));
ws=struct('tab',sprintf('\t'),'newline',sprintf('\n'));
ws=jsonopt('whitespaces_',ws,varargin{:});
padding0=repmat(ws.tab,1,level);
padding2=repmat(ws.tab,1,level+1);
padding1=repmat(ws.tab,1,level+(dim(1)>1)+forcearray);
nl=ws.newline;

if(isempty(item))
    if(~isempty(name))
        txt=sprintf('%s"%s": []',padding0,checkname(name,varargin{:}));
    else
        txt=sprintf('%s[]',padding0);
    end
    return;
end
if(~isempty(name))
    if(forcearray)
        txt=sprintf('%s"%s": [%s',padding0,checkname(name,varargin{:}),nl);
    end
else
    if(forcearray)
        txt=sprintf('%s[%s',padding0,nl);
    end
end
for j=1:dim(2)
    if(dim(1)>1)
        txt=sprintf('%s%s[%s',txt,padding2,nl);
    end
    for i=1:dim(1)
        names = fieldnames(item(i,j));
        if(~isempty(name) && len==1 && ~forcearray)
            txt=sprintf('%s%s"%s": {%s',txt,padding1, checkname(name,varargin{:}),nl);
        else
            txt=sprintf('%s%s{%s',txt,padding1,nl);
        end
        if(~isempty(names))
            % serialize each field via dynamic field access
            for e=1:length(names)
                txt=sprintf('%s%s',txt,obj2json(names{e},item(i,j).(names{e}),...
                    level+(dim(1)>1)+1+forcearray,varargin{:}));
                if(e<length(names))
                    txt=sprintf('%s%s',txt,',');
                end
                txt=sprintf('%s%s',txt,nl);
            end
        end
        txt=sprintf('%s%s}',txt,padding1);
        if(i<dim(1))
            txt=sprintf('%s%s',txt,sprintf(',%s',nl));
        end
    end
    if(dim(1)>1)
        txt=sprintf('%s%s%s]',txt,nl,padding2);
    end
    if(j<dim(2))
        txt=sprintf('%s%s',txt,sprintf(',%s',nl));
    end
end
if(forcearray)
    txt=sprintf('%s%s%s]',txt,nl,padding0);
end
%%-------------------------------------------------------------------------
function txt=str2json(name,item,level,varargin)
% Serialize a char array ITEM to JSON strings; each row of a multi-row
% char matrix becomes one string element inside a JSON array.
txt='';
if(~ischar(item))
        error('input is not a string');
end
item=reshape(item, max(size(item),[1 0]));
len=size(item,1);
ws=struct('tab',sprintf('\t'),'newline',sprintf('\n'),'sep',sprintf(',\n'));
ws=jsonopt('whitespaces_',ws,varargin{:});
padding1=repmat(ws.tab,1,level);
padding0=repmat(ws.tab,1,level+1);
nl=ws.newline;
sep=ws.sep;

% open an array when there is more than one row
if(~isempty(name))
    if(len>1)
        txt=sprintf('%s"%s": [%s',padding1,checkname(name,varargin{:}),nl);
    end
else
    if(len>1)
        txt=sprintf('%s[%s',padding1,nl);
    end
end
for e=1:len
    % escape JSON-special characters before quoting
    val=escapejsonstring(item(e,:));
    if(len==1)
        obj=['"' checkname(name,varargin{:}) '": ' '"',val,'"'];
        if(isempty(name))
            obj=['"',val,'"'];
        end
        txt=sprintf('%s%s%s%s',txt,padding1,obj);
    else
        txt=sprintf('%s%s%s%s',txt,padding0,['"',val,'"']);
    end
    if(e==len)
        sep='';
    end
    txt=sprintf('%s%s',txt,sep);
end
if(len>1)
    txt=sprintf('%s%s%s%s',txt,nl,padding1,']');
end
%%-------------------------------------------------------------------------
function txt=mat2json(name,item,level,varargin)
% Serialize numeric/logical array ITEM to JSON.
% Plain 1D/2D real dense arrays become JSON arrays via matdata2json;
% ND, sparse, complex or shaped-empty arrays (or when ArrayToStruct is
% set) use the annotated object form with _ArrayType_/_ArraySize_/
% _ArrayData_ fields.
if(~isnumeric(item) && ~islogical(item))
        error('input is not an array');
end
ws=struct('tab',sprintf('\t'),'newline',sprintf('\n'),'sep',sprintf(',\n'));
ws=jsonopt('whitespaces_',ws,varargin{:});
padding1=repmat(ws.tab,1,level);
padding0=repmat(ws.tab,1,level+1);
nl=ws.newline;
sep=ws.sep;

if(length(size(item))>2 || issparse(item) || ~isreal(item) || ...
   (isempty(item) && any(size(item))) ||jsonopt('ArrayToStruct',0,varargin{:}))
    % annotated form: open an object and record type + size up front
    if(isempty(name))
    	txt=sprintf('%s{%s%s"_ArrayType_": "%s",%s%s"_ArraySize_": %s,%s',...
              padding1,nl,padding0,class(item),nl,padding0,regexprep(mat2str(size(item)),'\s+',','),nl);
    else
    	txt=sprintf('%s"%s": {%s%s"_ArrayType_": "%s",%s%s"_ArraySize_": %s,%s',...
              padding1,checkname(name,varargin{:}),nl,padding0,class(item),nl,padding0,regexprep(mat2str(size(item)),'\s+',','),nl);
    end
else
    if(numel(item)==1 && jsonopt('NoRowBracket',1,varargin{:})==1 && level>0)
        % scalar without brackets when NoRowBracket is enabled
        numtxt=regexprep(regexprep(matdata2json(item,level+1,varargin{:}),'^\[',''),']','');
    else
        numtxt=matdata2json(item,level+1,varargin{:});
    end
    if(isempty(name))
        txt=sprintf('%s%s',padding1,numtxt);
    else
        % the original tested NoRowBracket here to choose between two
        % byte-for-byte identical sprintf calls; collapsed into one
        txt=sprintf('%s"%s": %s',padding1,checkname(name,varargin{:}),numtxt);
    end
    return;
end
dataformat='%s%s%s%s%s';

if(issparse(item))
    [ix,iy]=find(item);
    data=full(item(find(item)));
    if(~isreal(item))
       data=[real(data(:)),imag(data(:))];
       if(size(item,1)==1)
           % Kludge to have data's 'transposedness' match item's.
           % (Necessary for complex row vector handling below.)
           data=data';
       end
       txt=sprintf(dataformat,txt,padding0,'"_ArrayIsComplex_": ','1', sep);
    end
    txt=sprintf(dataformat,txt,padding0,'"_ArrayIsSparse_": ','1', sep);
    if(size(item,1)==1)
        % Row vector, store only column indices.
        txt=sprintf(dataformat,txt,padding0,'"_ArrayData_": ',...
           matdata2json([iy(:),data'],level+2,varargin{:}), nl);
    elseif(size(item,2)==1)
        % Column vector, store only row indices.
        txt=sprintf(dataformat,txt,padding0,'"_ArrayData_": ',...
           matdata2json([ix,data],level+2,varargin{:}), nl);
    else
        % General case, store row and column indices.
        txt=sprintf(dataformat,txt,padding0,'"_ArrayData_": ',...
           matdata2json([ix,iy,data],level+2,varargin{:}), nl);
    end
else
    if(isreal(item))
        txt=sprintf(dataformat,txt,padding0,'"_ArrayData_": ',...
            matdata2json(item(:)',level+2,varargin{:}), nl);
    else
        % complex dense array: real parts in column 1, imaginary in column 2
        txt=sprintf(dataformat,txt,padding0,'"_ArrayIsComplex_": ','1', sep);
        txt=sprintf(dataformat,txt,padding0,'"_ArrayData_": ',...
            matdata2json([real(item(:)) imag(item(:))],level+2,varargin{:}), nl);
    end
end
txt=sprintf('%s%s%s',txt,padding1,'}');
%%-------------------------------------------------------------------------
function txt=matdata2json(mat,level,varargin)
% Format the raw data of 1D/2D matrix MAT as JSON array text, one inner
% bracketed row per matrix row, using the FloatFormat option for numbers.
ws=struct('tab',sprintf('\t'),'newline',sprintf('\n'),'sep',sprintf(',\n'));
ws=jsonopt('whitespaces_',ws,varargin{:});
tab=ws.tab;
nl=ws.newline;

if(size(mat,1)==1)
    pre='';
    post='';
    level=level-1;
else
    % multi-row matrices get an outer bracket pair around the rows
    pre=sprintf('[%s',nl);
    post=sprintf('%s%s]',nl,repmat(tab,1,level-1));
end

if(isempty(mat))
    txt='null';
    return;
end
floatformat=jsonopt('FloatFormat','%.10g',varargin{:});
%if(numel(mat)>1)
    formatstr=['[' repmat([floatformat ','],1,size(mat,2)-1) [floatformat sprintf('],%s',nl)]];
%else
%    formatstr=[repmat([floatformat ','],1,size(mat,2)-1) [floatformat sprintf(',\n')]];
%end

if(nargin>=2 && size(mat,1)>1 && jsonopt('ArrayIndent',1,varargin{:})==1)
    formatstr=[repmat(tab,1,level) formatstr];
end

txt=sprintf(formatstr,mat');
% strip the trailing ',' + newline emitted after the last row;
% in Compact mode nl is '' and exactly the ',' is removed
txt(end-length(nl):end)=[];
if(islogical(mat) && jsonopt('ParseLogical',0,varargin{:})==1)
   txt=regexprep(txt,'1','true');
   txt=regexprep(txt,'0','false');
end
%txt=regexprep(mat2str(mat),'\s+',',');
%txt=regexprep(txt,';',sprintf('],\n['));
% if(nargin>=2 && size(mat,1)>1)
%     txt=regexprep(txt,'\[',[repmat(sprintf('\t'),1,level) '[']);
% end
txt=[pre txt post];

% replace non-finite values with customizable string tokens
if(any(isinf(mat(:))))
    txt=regexprep(txt,'([-+]*)Inf',jsonopt('Inf','"$1_Inf_"',varargin{:}));
end
if(any(isnan(mat(:))))
    txt=regexprep(txt,'NaN',jsonopt('NaN','"_NaN_"',varargin{:}));
end
%%-------------------------------------------------------------------------
function newname=checkname(name,varargin)
% Reverse the hex escaping that loadjson applies to invalid field names:
% when UnpackHex is enabled (default), 'x0x..._' / '_0x..._' tokens are
% converted back to the characters they encode.
isunpack=jsonopt('UnpackHex',1,varargin{:});
newname=name;
if(isempty(regexp(name,'0x([0-9a-fA-F]+)_','once')))
    return
end
if(isunpack)
    isoct=jsonopt('IsOctave',0,varargin{:});
    if(~isoct)
        % MATLAB supports dynamic expressions inside the replacement string
        newname=regexprep(name,'(^x|_){1}0x([0-9a-fA-F]+)_','${native2unicode(hex2dec($2))}');
    else
        % Octave lacks that feature: rebuild the name segment by segment
        pos=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','start');
        pend=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','end');
        if(isempty(pos))
            return;
        end
        str0=name;
        pos0=[0 pend(:)' length(name)];
        newname='';
        for i=1:length(pos)
            newname=[newname str0(pos0(i)+1:pos(i)-1) char(hex2dec(str0(pos(i)+3:pend(i)-1)))];
        end
        if(pos(end)~=length(name))
            newname=[newname str0(pos0(end-1)+1:pos0(end))];
        end
    end
end
%%-------------------------------------------------------------------------
function newstr=escapejsonstring(str)
% Escape characters in STR that are special inside a JSON string
% (backslash, quote, control characters) before it is quoted.
% NOTE(review): in the old-Octave branch each pattern is replaced by
% itself, which looks like a no-op apart from the final \u fixup --
% kept as-is; confirm against upstream JSONLab before changing.
newstr=str;
isoct=exist('OCTAVE_VERSION','builtin');
if(isoct)
   vv=sscanf(OCTAVE_VERSION,'%f');
   if(vv(1)>=3.8)
       % Octave >= 3.8 handles the MATLAB-style escaping below
       isoct=0;
   end
end
if(isoct)
  escapechars={'\\','\"','\/','\a','\f','\n','\r','\t','\v'};
  for i=1:length(escapechars);
    newstr=regexprep(newstr,escapechars{i},escapechars{i});
  end
  newstr=regexprep(newstr,'\\\\(u[0-9a-fA-F]{4}[^0-9a-fA-F]*)','\$1');
else
  escapechars={'\\','\"','\/','\a','\b','\f','\n','\r','\t','\v'};
  for i=1:length(escapechars);
    % double the backslash in the replacement so the escape survives
    newstr=regexprep(newstr,escapechars{i},regexprep(escapechars{i},'\\','\\\\'));
  end
  % undo double-escaping of pre-existing \uXXXX unicode escapes
  newstr=regexprep(newstr,'\\\\(u[0-9a-fA-F]{4}[^0-9a-fA-F]*)','\\$1');
end
|
github
|
mcubelab/push-est-public-master
|
loadjson.m
|
.m
|
push-est-public-master/catkin_ws/src/pnpush_planning/src/analyze/matlab/Json/fsroot/jsonlab/loadjson.m
| 16,170 |
ibm852
|
2fc3bbe9aed7b4b05de8b391f0f744b3
|
function data = loadjson(fname,varargin)
%
% data=loadjson(fname,opt)
% or
% data=loadjson(fname,'param1',value1,'param2',value2,...)
%
% parse a JSON (JavaScript Object Notation) file or string
%
% authors:Qianqian Fang (fangq<at> nmr.mgh.harvard.edu)
% created on 2011/09/09, including previous works from
%
% Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713
% created on 2009/11/02
% François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393
% created on 2009/03/22
% Joel Feenstra:
% http://www.mathworks.com/matlabcentral/fileexchange/20565
% created on 2008/07/03
%
% $Id: loadjson.m 492 2015-06-05 20:52:02Z fangq $
%
% input:
% fname: input file name, if fname contains "{}" or "[]", fname
% will be interpreted as a JSON string
% opt: a struct to store parsing options, opt can be replaced by
% a list of ('param',value) pairs - the param string is equivallent
% to a field in opt. opt can have the following
% fields (first in [.|.] is the default)
%
% opt.SimplifyCell [0|1]: if set to 1, loadjson will call cell2mat
% for each element of the JSON data, and group
% arrays based on the cell2mat rules.
% opt.FastArrayParser [1|0 or integer]: if set to 1, use a
% speed-optimized array parser when loading an
% array object. The fast array parser may
% collapse block arrays into a single large
% array similar to rules defined in cell2mat; 0 to
% use a legacy parser; if set to a larger-than-1
% value, this option will specify the minimum
% dimension to enable the fast array parser. For
% example, if the input is a 3D array, setting
% FastArrayParser to 1 will return a 3D array;
% setting to 2 will return a cell array of 2D
% arrays; setting to 3 will return to a 2D cell
% array of 1D vectors; setting to 4 will return a
% 3D cell array.
% opt.ShowProgress [0|1]: if set to 1, loadjson displays a progress bar.
%
% output:
% dat: a cell array, where {...} blocks are converted into cell arrays,
% and [...] are converted to arrays
%
% examples:
% dat=loadjson('{"obj":{"string":"value","array":[1,2,3]}}')
% dat=loadjson(['examples' filesep 'example1.json'])
% dat=loadjson(['examples' filesep 'example1.json'],'SimplifyCell',1)
%
% license:
% BSD License, see LICENSE_BSD.txt files for details
%
% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)
%
% Parser state is shared with the helper sub-functions through globals:
% pos/inStr/len hold the cursor and input; esc/index_esc/len_esc index the
% quote/backslash positions; arraytoken caches bracket/quote positions.
global pos inStr len esc index_esc len_esc isoct arraytoken
% A brace/bracket anywhere in fname means it is JSON text, not a file name.
if(regexp(fname,'[\{\}\]\[]','once'))
    string=fname;
elseif(exist(fname,'file'))
    try
        string = fileread(fname);
    catch
        % fileread can fail on some platforms/paths; fall back to url access
        try
            string = urlread(['file://',fname]);
        catch
            string = urlread(['file://',fullfile(pwd,fname)]);
        end
    end
else
    error('input file does not exist');
end
pos = 1; len = length(string); inStr = string;
isoct=exist('OCTAVE_VERSION','builtin');
% Pre-compute positions of array/string delimiters; escaped quotes (\") are
% located on a copy where '\\' is blanked so they are not miscounted.
arraytoken=find(inStr=='[' | inStr==']' | inStr=='"');
jstr=regexprep(inStr,'\\\\',' ');
escquote=regexp(jstr,'\\"');
arraytoken=sort([arraytoken escquote]);
% String delimiters and escape chars identified to improve speed:
esc = find(inStr=='"' | inStr=='\' ); % comparable to: regexp(inStr, '["\\]');
index_esc = 1; len_esc = length(esc);
opt=varargin2struct(varargin{:});
if(jsonopt('ShowProgress',0,opt)==1)
    opt.progressbar_=waitbar(0,'loading ...');
end
% Top level may contain several concatenated objects/arrays; collect each.
jsoncount=1;
while pos <= len
    switch(next_char)
        case '{'
            data{jsoncount} = parse_object(opt);
        case '['
            data{jsoncount} = parse_array(opt);
        otherwise
            error_pos('Outer level structure must be an object or an array');
    end
    jsoncount=jsoncount+1;
end % while
jsoncount=length(data);
% Unwrap the cell when the input held exactly one top-level value.
if(jsoncount==1 && iscell(data))
    data=data{1};
end
if(isfield(opt,'progressbar_'))
    close(opt.progressbar_);
end
%%-------------------------------------------------------------------------
function object = parse_object(varargin)
% Parse one JSON object ('{...}') at the global cursor and return it as a
% struct; an empty object yields []. Field names are sanitized through
% valid_field, and the result is post-processed by struct2jdata.
parse_char('{');
object = [];
if next_char ~= '}'
    while 1
        str = parseStr(varargin{:});
        if isempty(str)
            error_pos('Name of value at position %d cannot be empty');
        end
        parse_char(':');
        val = parse_value(varargin{:});
        % valid_field converts illegal characters so str is a legal field name
        object.(valid_field(str))=val;
        if next_char == '}'
            break;
        end
        parse_char(',');
    end
end
parse_char('}');
% struct2jdata decodes jsonlab's special _Array*_ annotation structs
if(isstruct(object))
    object=struct2jdata(object);
end
%%-------------------------------------------------------------------------
function object = parse_array(varargin) % JSON array is written in row-major order
% Parse one JSON array ('[...]') at the global cursor. When FastArrayParser
% is enabled and the nesting depth allows it, purely numerical arrays are
% converted with sscanf/eval in bulk; otherwise each element is parsed
% recursively into a cell array.
global pos inStr isoct
parse_char('[');
object = cell(0, 1);
dim2=[];
arraydepth=jsonopt('JSONLAB_ArrayDepth_',1,varargin{:});
pbar=-1;
if(isfield(varargin{1},'progressbar_'))
    pbar=varargin{1}.progressbar_;
end
if next_char ~= ']'
    if(jsonopt('FastArrayParser',1,varargin{:})>=1 && arraydepth>=jsonopt('FastArrayParser',1,varargin{:}))
        % Fast path: grab the raw text of the whole array and try to
        % convert it numerically without recursing.
        [endpos, e1l, e1r]=matching_bracket(inStr,pos);
        arraystr=['[' inStr(pos:endpos)];
        % map jsonlab's quoted NaN/Inf markers back to MATLAB literals
        arraystr=regexprep(arraystr,'"_NaN_"','NaN');
        arraystr=regexprep(arraystr,'"([-+]*)_Inf_"','$1Inf');
        arraystr(arraystr==sprintf('\n'))=[];
        arraystr(arraystr==sprintf('\r'))=[];
        %arraystr=regexprep(arraystr,'\s*,',','); % this is slow,sometimes needed
        if(~isempty(e1l) && ~isempty(e1r)) % the array is in 2D or higher D
            % inspect the first inner row to learn the column count
            astr=inStr((e1l+1):(e1r-1));
            astr=regexprep(astr,'"_NaN_"','NaN');
            astr=regexprep(astr,'"([-+]*)_Inf_"','$1Inf');
            astr(astr==sprintf('\n'))=[];
            astr(astr==sprintf('\r'))=[];
            astr(astr==' ')='';
            if(isempty(find(astr=='[', 1))) % array is 2D
                dim2=length(sscanf(astr,'%f,',[1 inf]));
            end
        else % array is 1D
            astr=arraystr(2:end-1);
            astr(astr==' ')='';
            [obj, count, errmsg, nextidx]=sscanf(astr,'%f,',[1,inf]);
            % sscanf consumed (nearly) everything => purely numeric 1D array
            if(nextidx>=length(astr)-1)
                object=obj;
                pos=endpos;
                parse_char(']');
                return;
            end
        end
        if(~isempty(dim2))
            % strip brackets and scan all numbers, then reshape to 2D;
            % transpose because JSON rows are stored contiguously (row-major)
            astr=arraystr;
            astr(astr=='[')='';
            astr(astr==']')='';
            astr(astr==' ')='';
            [obj, count, errmsg, nextidx]=sscanf(astr,'%f,',inf);
            if(nextidx>=length(astr)-1)
                object=reshape(obj,dim2,numel(obj)/dim2)';
                pos=endpos;
                parse_char(']');
                if(pbar>0)
                    waitbar(pos/length(inStr),pbar,'loading ...');
                end
                return;
            end
        end
        arraystr=regexprep(arraystr,'\]\s*,','];');
    else
        arraystr='[';
    end
    try
        if(isoct && regexp(arraystr,'"','once'))
            error('Octave eval can produce empty cells for JSON-like input');
        end
        % last fast-path attempt: let MATLAB evaluate the bracket expression
        object=eval(arraystr);
        pos=endpos;
    catch
        % Slow path: parse element by element, increasing the depth marker
        % so nested arrays can decide whether to use the fast parser.
        while 1
            newopt=varargin2struct(varargin{:},'JSONLAB_ArrayDepth_',arraydepth+1);
            val = parse_value(newopt);
            object{end+1} = val;
            if next_char == ']'
                break;
            end
            parse_char(',');
        end
    end
end
if(jsonopt('SimplifyCell',0,varargin{:})==1)
    try
        % best-effort collapse of the cell array per cell2mat rules;
        % failures silently keep the cell form
        oldobj=object;
        object=cell2mat(object')';
        if(iscell(oldobj) && isstruct(object) && numel(object)>1 && jsonopt('SimplifyCellArray',1,varargin{:})==0)
            object=oldobj;
        elseif(size(object,1)>1 && ismatrix(object))
            object=object';
        end
    catch
    end
end
parse_char(']');
if(pbar>0)
    waitbar(pos/length(inStr),pbar,'loading ...');
end
%%-------------------------------------------------------------------------
function parse_char(c)
% Consume the expected literal character c at the global cursor, skipping
% surrounding whitespace; raises a position-annotated error on mismatch.
global pos inStr len
pos = skip_whitespace(pos, inStr, len);
if pos <= len && inStr(pos) == c
    % advance past the matched character, then past any trailing whitespace
    pos = skip_whitespace(pos + 1, inStr, len);
else
    error_pos(sprintf('Expected %c at position %%d', c));
end
%%-------------------------------------------------------------------------
function c = next_char
% Peek at the next non-whitespace character without consuming it; returns
% [] at end of input. Advances the shared cursor past leading whitespace.
global pos inStr len
pos=skip_whitespace(pos,inStr,len);
if pos > len
    c = [];
else
    c = inStr(pos);
end
%%-------------------------------------------------------------------------
function newpos=skip_whitespace(pos,inStr,len)
% Return the index of the first non-whitespace character of inStr at or
% after pos; returns len+1 (or pos, if already past len) when only
% whitespace remains. Pure function: no globals are touched.
newpos = pos;
while newpos <= len
    if ~isspace(inStr(newpos))
        break;
    end
    newpos = newpos + 1;
end
%%-------------------------------------------------------------------------
function str = parseStr(varargin)
% Parse one JSON string at the global cursor (which must sit on the opening
% quote). Returns the decoded char array, except that the jsonlab markers
% '_Inf_', '-_Inf_' and '_NaN_' are converted to the numeric Inf/-Inf/NaN.
% Uses the precomputed esc/index_esc arrays to copy plain spans in bulk.
global pos inStr len esc index_esc len_esc
% len, ns = length(inStr), keyboard
if inStr(pos) ~= '"'
    error_pos('String starting with " expected at position %d');
else
    pos = pos + 1;
end
str = '';
while pos <= len
    % advance the escape index to the first quote/backslash at/after pos
    while index_esc <= len_esc && esc(index_esc) < pos
        index_esc = index_esc + 1;
    end
    if index_esc > len_esc
        % no more special characters: take the rest of the input
        str = [str inStr(pos:len)];
        pos = len + 1;
        break;
    else
        % copy the plain span up to the next quote/backslash in one slice
        str = [str inStr(pos:esc(index_esc)-1)];
        pos = esc(index_esc);
    end
    nstr = length(str);
    switch inStr(pos)
        case '"'
            % closing quote: translate special Inf/NaN marker strings
            pos = pos + 1;
            if(~isempty(str))
                if(strcmp(str,'_Inf_'))
                    str=Inf;
                elseif(strcmp(str,'-_Inf_'))
                    str=-Inf;
                elseif(strcmp(str,'_NaN_'))
                    str=NaN;
                end
            end
            return;
        case '\'
            if pos+1 > len
                error_pos('End of file reached right after escape character');
            end
            pos = pos + 1;
            switch inStr(pos)
                case {'"' '\' '/'}
                    str(nstr+1) = inStr(pos);
                    pos = pos + 1;
                case {'b' 'f' 'n' 'r' 't'}
                    % sprintf turns '\n' etc. into the control character
                    str(nstr+1) = sprintf(['\' inStr(pos)]);
                    pos = pos + 1;
                case 'u'
                    if pos+4 > len
                        error_pos('End of file reached in escaped unicode character');
                    end
                    % \uXXXX sequences are kept verbatim (6 chars), not decoded
                    str(nstr+(1:6)) = inStr(pos-1:pos+4);
                    pos = pos + 5;
            end
        otherwise % should never happen
            str(nstr+1) = inStr(pos);
            keyboard;
            pos = pos + 1;
    end
end
error_pos('End of file while expecting end of inStr');
%%-------------------------------------------------------------------------
function num = parse_number(varargin)
% Parse one JSON number at the global cursor and advance past it. Only the
% next 31 characters are examined, which bounds the longest accepted literal.
global pos inStr isoct
currstr=inStr(pos:min(pos+30,end));
if(isoct~=0)
    % Octave's sscanf does not report how many characters it consumed, so
    % measure the numeric token with a JSON-number regexp instead.
    numstr=regexp(currstr,'^\s*-?(?:0|[1-9]\d*)(?:\.\d+)?(?:[eE][+\-]?\d+)?','end');
    [num] = sscanf(currstr, '%f', 1);
    delta=numstr+1;
else
    % MATLAB sscanf returns the next-index directly in delta
    [num, one, err, delta] = sscanf(currstr, '%f', 1);
    if ~isempty(err)
        error_pos('Error reading number at position %d');
    end
end
pos = pos + delta-1;
%%-------------------------------------------------------------------------
function val = parse_value(varargin)
% Dispatch on the character at the global cursor to parse one JSON value:
% string, array, object, number, true, false or null (-> []).
global pos inStr len
if(isfield(varargin{1},'progressbar_'))
    waitbar(pos/len,varargin{1}.progressbar_,'loading ...');
end
switch(inStr(pos))
    case '"'
        val = parseStr(varargin{:});
        return;
    case '['
        val = parse_array(varargin{:});
        return;
    case '{'
        val = parse_object(varargin{:});
        return;
    case {'-','0','1','2','3','4','5','6','7','8','9'}
        val = parse_number(varargin{:});
        return;
    case 't'
        % case-insensitive keyword match, per the original parser's leniency
        if pos+3 <= len && strcmpi(inStr(pos:pos+3), 'true')
            val = true;
            pos = pos + 4;
            return;
        end
    case 'f'
        if pos+4 <= len && strcmpi(inStr(pos:pos+4), 'false')
            val = false;
            pos = pos + 5;
            return;
        end
    case 'n'
        % JSON null maps to the empty array
        if pos+3 <= len && strcmpi(inStr(pos:pos+3), 'null')
            val = [];
            pos = pos + 4;
            return;
        end
end
error_pos('Value expected at position %d');
%%-------------------------------------------------------------------------
function error_pos(msg)
% Raise a parse error. msg may contain one %d, filled with the current
% cursor position; a short excerpt of the input around the cursor is
% appended with an '<error>' marker at the failure point.
global pos inStr len
% window: up to 15 chars before and 20 after the cursor, clamped to [1,len]
poShow = max(min([pos-15 pos-1 pos pos+20],len),1);
if poShow(3) == poShow(2)
    poShow(3:4) = poShow(2)+[0 -1]; % display nothing after
end
msg = [sprintf(msg, pos) ': ' ...
inStr(poShow(1):poShow(2)) '<error>' inStr(poShow(3):poShow(4)) ];
error( ['JSONparser:invalidFormat: ' msg] );
%%-------------------------------------------------------------------------
function str = valid_field(str)
% Convert an arbitrary JSON member name into a legal MATLAB struct field
% name. A non-letter first character is prefixed as 'x0x[hex]_' and every
% other illegal character becomes '_0x[hex]_' (reversible via checkname).
global isoct
% From MATLAB doc: field names must begin with a letter, which may be
% followed by any combination of letters, digits, and underscores.
% Invalid characters will be converted to underscores, and the prefix
% "x0x[Hex code]_" will be added if the first character is not a letter.
pos=regexp(str,'^[^A-Za-z]','once');
if(~isempty(pos))
    if(~isoct)
        str=regexprep(str,'^([^A-Za-z])','x0x${sprintf(''%X'',unicode2native($1))}_','once');
    else
        str=sprintf('x0x%X_%s',char(str(1)),str(2:end));
    end
end
% fast exit when the remainder is already a legal field name
if(isempty(regexp(str,'[^0-9A-Za-z_]', 'once' )))
    return;
end
if(~isoct)
    str=regexprep(str,'([^0-9A-Za-z_])','_0x${sprintf(''%X'',unicode2native($1))}_');
else
    % Octave lacks dynamic replacement expressions: rebuild the name
    % piecewise around each illegal character.
    pos=regexp(str,'[^0-9A-Za-z_]');
    if(isempty(pos))
        return;
    end
    str0=str;
    pos0=[0 pos(:)' length(str0)];
    str='';
    for i=1:length(pos)
        str=[str str0(pos0(i)+1:pos(i)-1) sprintf('_0x%X_',str0(pos(i)))];
    end
    % FIX: compare against the ORIGINAL length (str0), not the rebuilt
    % string's length — the old test 'pos(end)~=length(str)' happened to be
    % always true only because the rebuilt name is longer.
    if(pos(end)~=length(str0))
        % append the literal tail after the last illegal character
        str=[str str0(pos0(end-1)+1:pos0(end))];
    end
end
%str(~isletter(str) & ~('0' <= str & str <= '9')) = '_';
%%-------------------------------------------------------------------------
function endpos = matching_quote(str,pos)
% Scan forward from pos for the next double quote that is not preceded by a
% backslash and return its index. NOTE(review): the loop condition is
% pos<len, so a closing quote in the very last character is treated as
% unmatched — preserved from the original implementation.
total = length(str);
while(pos < total)
    isquote = (str(pos) == '"');
    isescaped = (pos > 1 && str(pos-1) == '\');
    if(isquote && ~isescaped)
        endpos = pos;
        return;
    end
    pos = pos + 1;
end
error('unmatched quotation mark');
%%-------------------------------------------------------------------------
function [endpos, e1l, e1r, maxlevel] = matching_bracket(str,pos)
% Find the ']' that closes the array opened just before pos, scanning only
% the precomputed bracket/quote positions in the global arraytoken (much
% faster than a character-by-character walk). Returns:
%   endpos   - index of the matching ']' in str
%   e1l/e1r  - indices of the first nested '[' and first ']' seen, used by
%              the fast array parser to detect 2D layouts
%   maxlevel - deepest bracket nesting encountered
global arraytoken
level=1;
maxlevel=level;
endpos=0;
% restrict the token list to positions at/after the cursor
bpos=arraytoken(arraytoken>=pos);
tokens=str(bpos);
len=length(tokens);
pos=1;
e1l=[];
e1r=[];
while(pos<=len)
    c=tokens(pos);
    if(c==']')
        level=level-1;
        if(isempty(e1r))
            e1r=bpos(pos);
        end
        if(level==0)
            endpos=bpos(pos);
            return
        end
    end
    if(c=='[')
        if(isempty(e1l))
            e1l=bpos(pos);
        end
        level=level+1;
        maxlevel=max(maxlevel,level);
    end
    if(c=='"')
        % skip over quoted strings so brackets inside them are ignored
        pos=matching_quote(tokens,pos+1);
    end
    pos=pos+1;
end
if(endpos==0)
    error('unmatched "]"');
end
|
github
|
mcubelab/push-est-public-master
|
loadubjson.m
|
.m
|
push-est-public-master/catkin_ws/src/pnpush_planning/src/analyze/matlab/Json/fsroot/jsonlab/loadubjson.m
| 13,346 |
utf_8
|
4f30b406868398bdc5d594a6ae042e6b
|
function data = loadubjson(fname,varargin)
%
% data=loadubjson(fname,opt)
% or
% data=loadubjson(fname,'param1',value1,'param2',value2,...)
%
% parse a JSON (JavaScript Object Notation) file or string
%
% authors:Qianqian Fang (fangq<at> nmr.mgh.harvard.edu)
% created on 2013/08/01
%
% $Id: loadubjson.m 492 2015-06-05 20:52:02Z fangq $
%
% input:
% fname: input file name, if fname contains "{}" or "[]", fname
% will be interpreted as a UBJSON string
% opt: a struct to store parsing options, opt can be replaced by
% a list of ('param',value) pairs - the param string is equivallent
% to a field in opt. opt can have the following
% fields (first in [.|.] is the default)
%
% opt.SimplifyCell [0|1]: if set to 1, loadubjson will call cell2mat
% for each element of the JSON data, and group
% arrays based on the cell2mat rules.
% opt.IntEndian [B|L]: specify the endianness of the integer fields
% in the UBJSON input data. B - Big-Endian format for
% integers (as required in the UBJSON specification);
% L - input integer fields are in Little-Endian order.
% opt.NameIsString [0|1]: for UBJSON Specification Draft 8 or
% earlier versions (JSONLab 1.0 final or earlier),
% the "name" tag is treated as a string. To load
% these UBJSON data, you need to manually set this
% flag to 1.
%
% output:
% dat: a cell array, where {...} blocks are converted into cell arrays,
% and [...] are converted to arrays
%
% examples:
% obj=struct('string','value','array',[1 2 3]);
% ubjdata=saveubjson('obj',obj);
% dat=loadubjson(ubjdata)
% dat=loadubjson(['examples' filesep 'example1.ubj'])
% dat=loadubjson(['examples' filesep 'example1.ubj'],'SimplifyCell',1)
%
% license:
% BSD License, see LICENSE_BSD.txt files for details
%
% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)
%
% Parser state is shared with sub-functions via globals; fileendian and
% systemendian drive byte swapping of binary integer/float fields.
global pos inStr len esc index_esc len_esc isoct arraytoken fileendian systemendian
% A brace/bracket in fname means it is UBJSON data, not a file name.
if(regexp(fname,'[\{\}\]\[]','once'))
    string=fname;
elseif(exist(fname,'file'))
    % read as raw bytes: UBJSON is a binary format
    fid = fopen(fname,'rb');
    string = fread(fid,inf,'uint8=>char')';
    fclose(fid);
else
    error('input file does not exist');
end
pos = 1; len = length(string); inStr = string;
isoct=exist('OCTAVE_VERSION','builtin');
arraytoken=find(inStr=='[' | inStr==']' | inStr=='"');
jstr=regexprep(inStr,'\\\\',' ');
escquote=regexp(jstr,'\\"');
arraytoken=sort([arraytoken escquote]);
% String delimiters and escape chars identified to improve speed:
esc = find(inStr=='"' | inStr=='\' ); % comparable to: regexp(inStr, '["\\]');
index_esc = 1; len_esc = length(esc);
opt=varargin2struct(varargin{:});
fileendian=upper(jsonopt('IntEndian','B',opt));
[os,maxelem,systemendian]=computer;
% Top level may contain several concatenated objects/arrays; collect each.
jsoncount=1;
while pos <= len
    switch(next_char)
        case '{'
            data{jsoncount} = parse_object(opt);
        case '['
            data{jsoncount} = parse_array(opt);
        otherwise
            error_pos('Outer level structure must be an object or an array');
    end
    jsoncount=jsoncount+1;
end % while
jsoncount=length(data);
% unwrap the cell when the input held exactly one top-level value
if(jsoncount==1 && iscell(data))
    data=data{1};
end
%%-------------------------------------------------------------------------
function object = parse_object(varargin)
% Parse one UBJSON object ('{...}') at the global cursor into a struct.
% Supports the optimized-container headers: '$' declares a single value
% type for all members and '#' declares an explicit member count (in which
% case no closing '}' is present in the stream).
% FIX: the globals pos/inStr were read here without a 'global' declaration
% (unlike every sibling parser), so the '$'/'#' branches referenced
% undefined locals; declare them as in parse_array/parseStr.
global pos inStr
parse_char('{');
object = [];
type='';
count=-1;
if(next_char == '$')
    type=inStr(pos+1); % TODO
    pos=pos+2;
end
if(next_char == '#')
    pos=pos+1;
    count=double(parse_number());
end
if next_char ~= '}'
    num=0;
    while 1
        % Draft-8 streams store names as full string values ('S' marker);
        % newer streams store them as bare length-prefixed names.
        if(jsonopt('NameIsString',0,varargin{:}))
            str = parseStr(varargin{:});
        else
            str = parse_name(varargin{:});
        end
        if isempty(str)
            error_pos('Name of value at position %d cannot be empty');
        end
        %parse_char(':');
        val = parse_value(varargin{:});
        num=num+1;
        object.(valid_field(str))=val;
        % stop at the closing brace, or when the declared count is reached
        if next_char == '}' || (count>=0 && num>=count)
            break;
        end
        %parse_char(',');
    end
end
% counted containers carry no closing '}' in the stream
if(count==-1)
    parse_char('}');
end
% struct2jdata decodes jsonlab's special _Array*_ annotation structs
if(isstruct(object))
    object=struct2jdata(object);
end
%%-------------------------------------------------------------------------
function [cid,len]=elem_info(type)
% Map a UBJSON numeric type marker (one of 'iUIlLdD') to its MATLAB class
% name (cid) and its width in bytes (len); any other marker is an error.
typekeys = 'iUIlLdD';
classnames = {'int8','uint8','int16','int32','int64','single','double'};
widths = [1,1,2,4,8,4,8];
idx = strfind(typekeys, type);
if(isempty(idx))
    error_pos('unsupported type at position %d');
end
cid = classnames{idx};
len = widths(idx);
%%-------------------------------------------------------------------------
function [data, adv]=parse_block(type,count,varargin)
% Decode 'count' consecutive binary values of UBJSON type marker 'type'
% starting at the global cursor. Returns the typed array and the number of
% bytes consumed (adv); the caller is responsible for advancing pos.
global pos inStr isoct fileendian systemendian
[cid,len]=elem_info(type);
datastr=inStr(pos:pos+len*count-1);
% Octave stores chars signed; MATLAB chars convert cleanly via uint8
if(isoct)
    newdata=int8(datastr);
else
    newdata=uint8(datastr);
end
id=strfind('iUIlLdD',type);
% integer types (id<=5) need a byte swap when file and host endianness differ
if(id<=5 && fileendian~=systemendian)
    newdata=swapbytes(typecast(newdata,cid));
end
data=typecast(newdata,cid);
adv=double(len*count);
%%-------------------------------------------------------------------------
function object = parse_array(varargin) % JSON array is written in row-major order
% Parse one UBJSON array at the global cursor. Strongly-typed arrays
% ('$' type header, optional '#' count or dimension vector) are decoded in
% one parse_block call; generic arrays are parsed element by element into a
% cell array, optionally collapsed via SimplifyCell.
global pos inStr
parse_char('[');
object = cell(0, 1);
dim=[];
type='';
count=-1;
if(next_char == '$')
    % single value type for every element
    type=inStr(pos+1);
    pos=pos+2;
end
if(next_char == '#')
    pos=pos+1;
    if(next_char=='[')
        % jsonlab extension: a nested array here carries the ND dimensions
        dim=parse_array(varargin{:});
        count=prod(double(dim));
    else
        count=double(parse_number());
    end
end
if(~isempty(type))
    if(count>=0)
        % typed + counted: raw block, no closing ']' in the stream
        [object, adv]=parse_block(type,count,varargin{:});
        if(~isempty(dim))
            object=reshape(object,dim);
        end
        pos=pos+adv;
        return;
    else
        % typed but uncounted: infer the element count from the span up to
        % the matching ']'
        endpos=matching_bracket(inStr,pos);
        [cid,len]=elem_info(type);
        count=(endpos-pos)/len;
        [object, adv]=parse_block(type,count,varargin{:});
        pos=pos+adv;
        parse_char(']');
        return;
    end
end
if next_char ~= ']'
    while 1
        val = parse_value(varargin{:});
        object{end+1} = val;
        if next_char == ']'
            break;
        end
        %parse_char(',');
    end
end
if(jsonopt('SimplifyCell',0,varargin{:})==1)
    try
        % best-effort collapse per cell2mat rules; failures keep the cell
        oldobj=object;
        object=cell2mat(object')';
        if(iscell(oldobj) && isstruct(object) && numel(object)>1 && jsonopt('SimplifyCellArray',1,varargin{:})==0)
            object=oldobj;
        elseif(size(object,1)>1 && ismatrix(object))
            object=object';
        end
    catch
    end
end
% counted containers carry no closing ']' in the stream
if(count==-1)
    parse_char(']');
end
%%-------------------------------------------------------------------------
function parse_char(c)
% Consume the expected literal character c at the global cursor, skipping
% surrounding whitespace; raises a position-annotated error on mismatch.
global pos inStr len
skip_whitespace;
if pos > len || inStr(pos) ~= c
    error_pos(sprintf('Expected %c at position %%d', c));
else
    pos = pos + 1;
    skip_whitespace;
end
%%-------------------------------------------------------------------------
function c = next_char
% Peek at the next non-whitespace character without consuming it; returns
% [] at end of input. Advances the shared cursor past leading whitespace.
global pos inStr len
skip_whitespace;
if pos > len
    c = [];
else
    c = inStr(pos);
end
%%-------------------------------------------------------------------------
function skip_whitespace
% Advance the shared cursor (global pos) past any whitespace characters.
% Unlike the loadjson variant, this one operates directly on the globals.
global pos inStr len
while pos <= len && isspace(inStr(pos))
    pos = pos + 1;
end
%%-------------------------------------------------------------------------
function str = parse_name(varargin)
% Parse an object member name: a length prefix (UBJSON integer) followed by
% that many raw bytes. Names, unlike values, carry no 'S' type marker.
global pos inStr
bytelen=double(parse_number());
if(length(inStr)>=pos+bytelen-1)
    str=inStr(pos:pos+bytelen-1);
    pos=pos+bytelen;
else
    error_pos('End of file while expecting end of name');
end
%%-------------------------------------------------------------------------
function str = parseStr(varargin)
% Parse a UBJSON string value at the global cursor. 'S' and 'H' markers are
% followed by a length prefix and raw bytes; 'C' is a single character with
% no length prefix.
global pos inStr
% len, ns = length(inStr), keyboard
type=inStr(pos);
if type ~= 'S' && type ~= 'C' && type ~= 'H'
    error_pos('String starting with S expected at position %d');
else
    pos = pos + 1;
end
if(type == 'C')
    % single-character string: one raw byte, no length prefix
    str=inStr(pos);
    pos=pos+1;
    return;
end
bytelen=double(parse_number());
if(length(inStr)>=pos+bytelen-1)
    str=inStr(pos:pos+bytelen-1);
    pos=pos+bytelen;
else
    error_pos('End of file while expecting end of inStr');
end
%%-------------------------------------------------------------------------
function num = parse_number(varargin)
% Parse one binary UBJSON number at the global cursor: a one-byte type
% marker from 'iUIlLdD' followed by the value bytes. The result keeps the
% native class (int8/uint8/.../double) given by the marker.
global pos inStr isoct fileendian systemendian
id=strfind('iUIlLdD',inStr(pos));
if(isempty(id))
    error_pos('expecting a number at position %d');
end
type={'int8','uint8','int16','int32','int64','single','double'};
bytelen=[1,1,2,4,8,4,8];
datastr=inStr(pos+1:pos+bytelen(id));
% Octave stores chars signed; MATLAB chars convert cleanly via uint8
if(isoct)
    newdata=int8(datastr);
else
    newdata=uint8(datastr);
end
% integer types (id<=5) need a byte swap when file and host endianness differ
if(id<=5 && fileendian~=systemendian)
    newdata=swapbytes(typecast(newdata,type{id}));
end
num=typecast(newdata,type{id});
% advance past the marker byte plus the value bytes
pos = pos + bytelen(id)+1;
%%-------------------------------------------------------------------------
function val = parse_value(varargin)
% Dispatch on the UBJSON type marker at the global cursor to parse one
% value: string (S/C/H), array, object, number (iUIlLdD), true (T),
% false (F), or null/no-op (Z/N -> []).
global pos inStr
switch(inStr(pos))
    case {'S','C','H'}
        val = parseStr(varargin{:});
        return;
    case '['
        val = parse_array(varargin{:});
        return;
    case '{'
        val = parse_object(varargin{:});
        return;
    case {'i','U','I','l','L','d','D'}
        val = parse_number(varargin{:});
        return;
    case 'T'
        val = true;
        pos = pos + 1;
        return;
    case 'F'
        val = false;
        pos = pos + 1;
        return;
    case {'Z','N'}
        % null and no-op markers both map to the empty array
        val = [];
        pos = pos + 1;
        return;
end
error_pos('Value expected at position %d');
%%-------------------------------------------------------------------------
function error_pos(msg)
% Raise a parse error. msg may contain one %d, filled with the current
% cursor position; a short excerpt of the input around the cursor is
% appended with an '<error>' marker at the failure point.
global pos inStr len
% window: up to 15 chars before and 20 after the cursor, clamped to [1,len]
poShow = max(min([pos-15 pos-1 pos pos+20],len),1);
if poShow(3) == poShow(2)
    poShow(3:4) = poShow(2)+[0 -1]; % display nothing after
end
msg = [sprintf(msg, pos) ': ' ...
inStr(poShow(1):poShow(2)) '<error>' inStr(poShow(3):poShow(4)) ];
error( ['JSONparser:invalidFormat: ' msg] );
%%-------------------------------------------------------------------------
function str = valid_field(str)
% Convert an arbitrary UBJSON member name into a legal MATLAB struct field
% name. A non-letter first character is prefixed as 'x0x[hex]_' and every
% other illegal character becomes '_0x[hex]_' (reversible via checkname).
global isoct
% From MATLAB doc: field names must begin with a letter, which may be
% followed by any combination of letters, digits, and underscores.
% Invalid characters will be converted to underscores, and the prefix
% "x0x[Hex code]_" will be added if the first character is not a letter.
pos=regexp(str,'^[^A-Za-z]','once');
if(~isempty(pos))
    if(~isoct)
        str=regexprep(str,'^([^A-Za-z])','x0x${sprintf(''%X'',unicode2native($1))}_','once');
    else
        str=sprintf('x0x%X_%s',char(str(1)),str(2:end));
    end
end
% fast exit when the remainder is already a legal field name
if(isempty(regexp(str,'[^0-9A-Za-z_]', 'once' )))
    return;
end
if(~isoct)
    str=regexprep(str,'([^0-9A-Za-z_])','_0x${sprintf(''%X'',unicode2native($1))}_');
else
    % Octave lacks dynamic replacement expressions: rebuild the name
    % piecewise around each illegal character.
    pos=regexp(str,'[^0-9A-Za-z_]');
    if(isempty(pos))
        return;
    end
    str0=str;
    pos0=[0 pos(:)' length(str0)];
    str='';
    for i=1:length(pos)
        str=[str str0(pos0(i)+1:pos(i)-1) sprintf('_0x%X_',str0(pos(i)))];
    end
    % FIX: compare against the ORIGINAL length (str0), not the rebuilt
    % string's length — the old test 'pos(end)~=length(str)' happened to be
    % always true only because the rebuilt name is longer.
    if(pos(end)~=length(str0))
        % append the literal tail after the last illegal character
        str=[str str0(pos0(end-1)+1:pos0(end))];
    end
end
%str(~isletter(str) & ~('0' <= str & str <= '9')) = '_';
%%-------------------------------------------------------------------------
function endpos = matching_quote(str,pos)
% Scan forward from pos for the next double quote that is not preceded by a
% backslash and return its index. NOTE(review): the loop condition is
% pos<len, so a closing quote in the very last character is treated as
% unmatched — preserved from the original implementation.
total = length(str);
while(pos < total)
    isquote = (str(pos) == '"');
    isescaped = (pos > 1 && str(pos-1) == '\');
    if(isquote && ~isescaped)
        endpos = pos;
        return;
    end
    pos = pos + 1;
end
error('unmatched quotation mark');
%%-------------------------------------------------------------------------
function [endpos, e1l, e1r, maxlevel] = matching_bracket(str,pos)
% Find the ']' that closes the array opened just before pos, scanning only
% the precomputed bracket/quote positions in the global arraytoken. Returns:
%   endpos   - index of the matching ']' in str
%   e1l/e1r  - indices of the first nested '[' and first ']' seen
%   maxlevel - deepest bracket nesting encountered
global arraytoken
level=1;
maxlevel=level;
endpos=0;
% restrict the token list to positions at/after the cursor
bpos=arraytoken(arraytoken>=pos);
tokens=str(bpos);
len=length(tokens);
pos=1;
e1l=[];
e1r=[];
while(pos<=len)
    c=tokens(pos);
    if(c==']')
        level=level-1;
        if(isempty(e1r))
            e1r=bpos(pos);
        end
        if(level==0)
            endpos=bpos(pos);
            return
        end
    end
    if(c=='[')
        if(isempty(e1l))
            e1l=bpos(pos);
        end
        level=level+1;
        maxlevel=max(maxlevel,level);
    end
    if(c=='"')
        % skip over quoted strings so brackets inside them are ignored
        pos=matching_quote(tokens,pos+1);
    end
    pos=pos+1;
end
if(endpos==0)
    error('unmatched "]"');
end
|
github
|
mcubelab/push-est-public-master
|
saveubjson.m
|
.m
|
push-est-public-master/catkin_ws/src/pnpush_planning/src/analyze/matlab/Json/fsroot/jsonlab/saveubjson.m
| 16,440 |
utf_8
|
4bf8d44968ce0b316dbc21afe2d446f9
|
function json=saveubjson(rootname,obj,varargin)
%
% json=saveubjson(rootname,obj,filename)
% or
% json=saveubjson(rootname,obj,opt)
% json=saveubjson(rootname,obj,'param1',value1,'param2',value2,...)
%
% convert a MATLAB object (cell, struct or array) into a Universal
% Binary JSON (UBJSON) binary string
%
% author: Qianqian Fang (fangq<at> nmr.mgh.harvard.edu)
% created on 2013/08/17
%
% $Id: saveubjson.m 492 2015-06-05 20:52:02Z fangq $
%
% input:
% rootname: the name of the root-object, when set to '', the root name
% is ignored, however, when opt.ForceRootName is set to 1 (see below),
% the MATLAB variable name will be used as the root name.
% obj: a MATLAB object (array, cell, cell array, struct, struct array)
% filename: a string for the file name to save the output UBJSON data
% opt: a struct for additional options, ignore to use default values.
% opt can have the following fields (first in [.|.] is the default)
%
% opt.FileName [''|string]: a file name to save the output JSON data
% opt.ArrayToStruct[0|1]: when set to 0, saveubjson outputs 1D/2D
% array in JSON array format; if sets to 1, an
% array will be shown as a struct with fields
% "_ArrayType_", "_ArraySize_" and "_ArrayData_"; for
% sparse arrays, the non-zero elements will be
% saved to _ArrayData_ field in triplet-format i.e.
% (ix,iy,val) and "_ArrayIsSparse_" will be added
% with a value of 1; for a complex array, the
% _ArrayData_ array will include two columns
% (4 for sparse) to record the real and imaginary
% parts, and also "_ArrayIsComplex_":1 is added.
% opt.ParseLogical [1|0]: if this is set to 1, logical array elem
% will use true/false rather than 1/0.
% opt.NoRowBracket [1|0]: if this is set to 1, arrays with a single
% numerical element will be shown without a square
% bracket, unless it is the root object; if 0, square
% brackets are forced for any numerical arrays.
% opt.ForceRootName [0|1]: when set to 1 and rootname is empty, saveubjson
% will use the name of the passed obj variable as the
% root object name; if obj is an expression and
% does not have a name, 'root' will be used; if this
% is set to 0 and rootname is empty, the root level
% will be merged down to the lower level.
% opt.JSONP [''|string]: to generate a JSONP output (JSON with padding),
% for example, if opt.JSON='foo', the JSON data is
% wrapped inside a function call as 'foo(...);'
% opt.UnpackHex [1|0]: conver the 0x[hex code] output by loadjson
% back to the string form
%
% opt can be replaced by a list of ('param',value) pairs. The param
% string is equivallent to a field in opt and is case sensitive.
% output:
% json: a binary string in the UBJSON format (see http://ubjson.org)
%
% examples:
% jsonmesh=struct('MeshNode',[0 0 0;1 0 0;0 1 0;1 1 0;0 0 1;1 0 1;0 1 1;1 1 1],...
% 'MeshTetra',[1 2 4 8;1 3 4 8;1 2 6 8;1 5 6 8;1 5 7 8;1 3 7 8],...
% 'MeshTri',[1 2 4;1 2 6;1 3 4;1 3 7;1 5 6;1 5 7;...
% 2 8 4;2 8 6;3 8 4;3 8 7;5 8 6;5 8 7],...
% 'MeshCreator','FangQ','MeshTitle','T6 Cube',...
% 'SpecialData',[nan, inf, -inf]);
% saveubjson('jsonmesh',jsonmesh)
% saveubjson('jsonmesh',jsonmesh,'meshdata.ubj')
%
% license:
% BSD License, see LICENSE_BSD.txt files for details
%
% -- this function is part of JSONLab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)
%
% Single-argument call: saveubjson(obj) — the variable name (or 'root')
% becomes the root name.
if(nargin==1)
    varname=inputname(1);
    obj=rootname;
    if(isempty(varname))
        varname='root';
    end
    rootname=varname;
else
    varname=inputname(2);
end
% A lone string third argument is shorthand for the output file name.
if(length(varargin)==1 && ischar(varargin{1}))
    opt=struct('FileName',varargin{1});
else
    opt=varargin2struct(varargin{:});
end
opt.IsOctave=exist('OCTAVE_VERSION','builtin');
rootisarray=0;
rootlevel=1;
forceroot=jsonopt('ForceRootName',0,opt);
% With no root name and no forced root, emit the value bare (level 0).
if((isnumeric(obj) || islogical(obj) || ischar(obj) || isstruct(obj) || iscell(obj)) && isempty(rootname) && forceroot==0)
    rootisarray=1;
    rootlevel=0;
else
    if(isempty(rootname))
        rootname=varname;
    end
end
if((isstruct(obj) || iscell(obj))&& isempty(rootname) && forceroot)
    rootname='root';
end
json=obj2ubjson(rootname,obj,rootlevel,opt);
% wrap named output in a top-level object
if(~rootisarray)
    json=['{' json '}'];
end
jsonp=jsonopt('JSONP','',opt);
if(~isempty(jsonp))
    json=[jsonp '(' json ')'];
end
% save to a file if FileName is set, suggested by Patrick Rapin
if(~isempty(jsonopt('FileName','',opt)))
    fid = fopen(opt.FileName, 'wb');
    fwrite(fid,json);
    fclose(fid);
end
%%-------------------------------------------------------------------------
function txt=obj2ubjson(name,item,level,varargin)
% Serialize one MATLAB value by dispatching on its class: cell, struct and
% char have dedicated encoders; everything else is treated as a matrix.
if(iscell(item))
    txt=cell2ubjson(name,item,level,varargin{:});
    return;
end
if(isstruct(item))
    txt=struct2ubjson(name,item,level,varargin{:});
    return;
end
if(ischar(item))
    txt=str2ubjson(name,item,level,varargin{:});
    return;
end
txt=mat2ubjson(name,item,level,varargin{:});
%%-------------------------------------------------------------------------
function txt=cell2ubjson(name,item,level,varargin)
% Serialize a cell array. Multi-element cells become UBJSON arrays (with an
% inner '[' per column when the cell is 2D); an empty cell becomes the null
% marker 'Z'. Cells of 3D or higher are flattened to 2D first.
txt='';
if(~iscell(item))
    error('input is not a cell');
end
dim=size(item);
if(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now
    item=reshape(item,dim(1),numel(item)/dim(1));
    dim=size(item);
end
len=numel(item); % let's handle 1D cell first
if(len>1)
    % multi-element: open a UBJSON array, optionally tagged with the name
    if(~isempty(name))
        txt=[N_(checkname(name,varargin{:})) '[']; name='';
    else
        txt='[';
    end
elseif(len==0)
    % empty cell: UBJSON null marker
    if(~isempty(name))
        txt=[N_(checkname(name,varargin{:})) 'Z']; name='';
    else
        txt='Z';
    end
end
% column-by-column; each column of a 2D cell becomes a nested array
for j=1:dim(2)
    if(dim(1)>1)
        txt=[txt '['];
    end
    for i=1:dim(1)
        txt=[txt obj2ubjson(name,item{i,j},level+(len>1),varargin{:})];
    end
    if(dim(1)>1)
        txt=[txt ']'];
    end
end
if(len>1)
    txt=[txt ']'];
end
%%-------------------------------------------------------------------------
function txt=struct2ubjson(name,item,level,varargin)
% Serialize a struct or struct array. A scalar struct becomes one UBJSON
% object; struct arrays are wrapped in a UBJSON array (with an inner array
% per column for 2D shapes). 3D+ struct arrays are flattened to 2D first.
txt='';
if(~isstruct(item))
    error('input is not a struct');
end
dim=size(item);
if(ndims(squeeze(item))>2) % for 3D or higher dimensions, flatten to 2D for now
    item=reshape(item,dim(1),numel(item)/dim(1));
    dim=size(item);
end
len=numel(item);
% wrap in an array when multi-element, or when single elements must keep
% brackets (NoRowBracket==0) below the root level
forcearray= (len>1 || (jsonopt('NoRowBracket',1,varargin{:})==0 && level>0));
if(~isempty(name))
    if(forcearray)
        txt=[N_(checkname(name,varargin{:})) '['];
    end
else
    if(forcearray)
        txt='[';
    end
end
for j=1:dim(2)
    if(dim(1)>1)
        txt=[txt '['];
    end
    for i=1:dim(1)
        names = fieldnames(item(i,j));
        % a scalar named struct carries its name on the object itself
        if(~isempty(name) && len==1 && ~forcearray)
            txt=[txt N_(checkname(name,varargin{:})) '{'];
        else
            txt=[txt '{'];
        end
        if(~isempty(names))
            for e=1:length(names)
                txt=[txt obj2ubjson(names{e},item(i,j).(names{e}),...
                level+(dim(1)>1)+1+forcearray,varargin{:})];
            end
        end
        txt=[txt '}'];
    end
    if(dim(1)>1)
        txt=[txt ']'];
    end
end
if(forcearray)
    txt=[txt ']'];
end
%%-------------------------------------------------------------------------
function txt=str2ubjson(name,item,level,varargin)
% Serialize a char array. Each row becomes one UBJSON string (via S_);
% multi-row char matrices are wrapped in a UBJSON array.
txt='';
if(~ischar(item))
    error('input is not a string');
end
% guarantee at least a 1x0 shape so size(item,1) is well defined
item=reshape(item, max(size(item),[1 0]));
len=size(item,1);
if(~isempty(name))
    if(len>1)
        txt=[N_(checkname(name,varargin{:})) '['];
    end
else
    if(len>1)
        txt='[';
    end
end
for e=1:len
    val=item(e,:);
    if(len==1)
        % single row: emit name tag (if any) followed by the string value;
        % the interleaved '' literals are no-op concatenations
        obj=[N_(checkname(name,varargin{:})) '' '',S_(val),''];
        if(isempty(name))
            obj=['',S_(val),''];
        end
        txt=[txt,'',obj];
    else
        txt=[txt,'',['',S_(val),'']];
    end
end
if(len>1)
    txt=[txt ']'];
end
%%-------------------------------------------------------------------------
function txt=mat2ubjson(name,item,level,varargin)
% Serialize a numeric or logical array. Plain real 1D/2D arrays go through
% matdata2ubjson directly; sparse, complex, 3D+ or ArrayToStruct-forced
% arrays are emitted as an annotation object with _ArrayType_/_ArraySize_/
% _ArrayData_ fields (plus _ArrayIsSparse_/_ArrayIsComplex_ flags).
if(~isnumeric(item) && ~islogical(item))
    error('input is not an array');
end
if(length(size(item))>2 || issparse(item) || ~isreal(item) || ...
(isempty(item) && any(size(item))) ||jsonopt('ArrayToStruct',0,varargin{:}))
    % pick an integer width wide enough to encode the dimension sizes
    cid=I_(uint32(max(size(item))));
    if(isempty(name))
        txt=['{' N_('_ArrayType_'),S_(class(item)),N_('_ArraySize_'),I_a(size(item),cid(1)) ];
    else
        if(isempty(item))
            % empty named array: just the null marker
            txt=[N_(checkname(name,varargin{:})),'Z'];
            return;
        else
            txt=[N_(checkname(name,varargin{:})),'{',N_('_ArrayType_'),S_(class(item)),N_('_ArraySize_'),I_a(size(item),cid(1))];
        end
    end
else
    % simple case: direct numeric encoding, no annotation object
    if(isempty(name))
        txt=matdata2ubjson(item,level+1,varargin{:});
    else
        if(numel(item)==1 && jsonopt('NoRowBracket',1,varargin{:})==1)
            % scalar: strip the enclosing array brackets
            numtxt=regexprep(regexprep(matdata2ubjson(item,level+1,varargin{:}),'^\[',''),']','');
            txt=[N_(checkname(name,varargin{:})) numtxt];
        else
            txt=[N_(checkname(name,varargin{:})),matdata2ubjson(item,level+1,varargin{:})];
        end
    end
    return;
end
if(issparse(item))
    [ix,iy]=find(item);
    data=full(item(find(item)));
    if(~isreal(item))
        data=[real(data(:)),imag(data(:))];
        if(size(item,1)==1)
            % Kludge to have data's 'transposedness' match item's.
            % (Necessary for complex row vector handling below.)
            data=data';
        end
        txt=[txt,N_('_ArrayIsComplex_'),'T'];
    end
    txt=[txt,N_('_ArrayIsSparse_'),'T'];
    if(size(item,1)==1)
        % Row vector, store only column indices.
        txt=[txt,N_('_ArrayData_'),...
        matdata2ubjson([iy(:),data'],level+2,varargin{:})];
    elseif(size(item,2)==1)
        % Column vector, store only row indices.
        txt=[txt,N_('_ArrayData_'),...
        matdata2ubjson([ix,data],level+2,varargin{:})];
    else
        % General case, store row and column indices.
        txt=[txt,N_('_ArrayData_'),...
        matdata2ubjson([ix,iy,data],level+2,varargin{:})];
    end
else
    if(isreal(item))
        txt=[txt,N_('_ArrayData_'),...
        matdata2ubjson(item(:)',level+2,varargin{:})];
    else
        % complex dense array: real and imaginary parts as two columns
        txt=[txt,N_('_ArrayIsComplex_'),'T'];
        txt=[txt,N_('_ArrayData_'),...
        matdata2ubjson([real(item(:)) imag(item(:))],level+2,varargin{:})];
    end
end
txt=[txt,'}'];
%%-------------------------------------------------------------------------
function txt=matdata2ubjson(mat,level,varargin)
%MATDATA2UBJSON Convert a numeric/logical MATLAB array to its UBJSON form.
%  mat   : array to serialize (empty, integer-valued, logical, or float)
%  level : current nesting depth (kept for interface compatibility; only
%          referenced by the commented-out pretty-printing code below)
if(isempty(mat))
   txt='Z';
   return;
end
type='';
hasnegtive=(mat<0);
if(isa(mat,'integer') || isinteger(mat) || (isfloat(mat) && all(mod(mat(:),1) == 0)))
    % All values are integers: pick the smallest UBJSON integer type.
    if(~any(hasnegtive(:)))
        % Bug fix: the original tested isempty(hasnegtive), which is false
        % for any non-empty input, so the unsigned 'U' branch was dead code.
        % Also use a strict bound: uint8 can only hold 0..255 (< 2^8).
        if(max(mat(:))<2^8)
            type='U';
        end
    end
    if(isempty(type))
        % todo - need to consider negative ones separately
        id= histc(abs(max(mat(:))),[0 2^7 2^15 2^31 2^63]);
        if(~any(id))
            % Bug fix: isempty(id~=0) was always false (histc returns a
            % fixed-length vector), so this guard could never fire.
            error('high-precision data is not yet supported');
        end
        key='iIlL';
        type=key(id~=0);
    end
    txt=I_a(mat(:),type,size(mat));
elseif(islogical(mat))
    logicalval='FT';
    if(numel(mat)==1)
        txt=logicalval(mat+1);
    else
        % Logical arrays are stored as a strongly-typed uint8 array
        % (swapbytes is a no-op on single-byte data, kept for symmetry).
        txt=['[$U#' I_a(size(mat),'l') typecast(swapbytes(uint8(mat(:)')),'uint8')];
    end
else
    if(numel(mat)==1)
        txt=['[' D_(mat) ']'];
    else
        txt=D_a(mat(:),'D',size(mat));
    end
end
%txt=regexprep(mat2str(mat),'\s+',',');
%txt=regexprep(txt,';',sprintf('],['));
% if(nargin>=2 && size(mat,1)>1)
%     txt=regexprep(txt,'\[',[repmat(sprintf('\t'),1,level) '[']);
% end
% Substitute Inf/NaN with their JSON-compatible placeholder strings.
if(any(isinf(mat(:))))
    txt=regexprep(txt,'([-+]*)Inf',jsonopt('Inf','"$1_Inf_"',varargin{:}));
end
if(any(isnan(mat(:))))
    txt=regexprep(txt,'NaN',jsonopt('NaN','"_NaN_"',varargin{:}));
end
%%-------------------------------------------------------------------------
function newname=checkname(name,varargin)
%CHECKNAME Restore unicode characters in a field name that were hex-encoded.
% Field names with characters illegal in MATLAB identifiers are stored as
% "0x<hex>_" escape sequences; when the UnpackHex option is on (default 1),
% this decodes them back to the original characters.
isunpack=jsonopt('UnpackHex',1,varargin{:});
newname=name;
% Fast path: nothing to decode if the name contains no 0x<hex>_ token.
if(isempty(regexp(name,'0x([0-9a-fA-F]+)_','once')))
    return
end
if(isunpack)
    isoct=jsonopt('IsOctave',0,varargin{:});
    if(~isoct)
        % MATLAB supports dynamic expressions in regexprep replacement
        % strings, so each hex token decodes in a single call.
        newname=regexprep(name,'(^x|_){1}0x([0-9a-fA-F]+)_','${native2unicode(hex2dec($2))}');
    else
        % Octave lacks dynamic replacement expressions; locate every hex
        % token and rebuild the name piece by piece instead.
        pos=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','start');
        pend=regexp(name,'(^x|_){1}0x([0-9a-fA-F]+)_','end');
        if(isempty(pos))
            return;
        end
        str0=name;
        % Segment boundaries: start-of-string, each token's end, end-of-string.
        pos0=[0 pend(:)' length(name)];
        newname='';
        for i=1:length(pos)
            % Append the literal text preceding the token, then the decoded
            % character (skip the leading "_0x"/"x0x" and the trailing "_").
            newname=[newname str0(pos0(i)+1:pos(i)-1) char(hex2dec(str0(pos(i)+3:pend(i)-1)))];
        end
        % Append any literal text remaining after the last token.
        if(pos(end)~=length(name))
            newname=[newname str0(pos0(end-1)+1:pos0(end))];
        end
    end
end
%%-------------------------------------------------------------------------
function val=N_(str)
% Serialize an object member name: a UBJSON length prefix followed by the
% raw name bytes (names carry no 'S' marker, unlike string values).
namelen=I_(int32(length(str)));
val=[namelen str];
%%-------------------------------------------------------------------------
function val=S_(str)
% Serialize a string value: single characters use the compact 'C' marker,
% longer strings use 'S' followed by a length prefix and the raw bytes.
if(length(str)~=1)
    val=['S' I_(int32(length(str))) str];
else
    val=['C' str];
end
%%-------------------------------------------------------------------------
function val=I_(num)
%I_ Serialize a scalar integer using the smallest suitable UBJSON type.
%  The marker is one of U/i/I/l/L; the payload is stored big-endian
%  (hence swapbytes), as required by the UBJSON specification.
if(~isinteger(num))
    error('input is not an integer');
end
if(num>=0 && num<256)
    % Bug fix: the original used num<255, which pushed the value 255 into
    % a 2-byte int16 ('I') even though it fits in a single uint8 ('U').
    val=['U' data2byte(swapbytes(cast(num,'uint8')),'uint8')];
    return;
end
% Signed types, narrowest first: int8, int16, int32, int64.
key='iIlL';
cid={'int8','int16','int32','int64'};
for i=1:4
    if((num>0 && num<2^(i*8-1)) || (num<0 && num>=-2^(i*8-1)))
        val=[key(i) data2byte(swapbytes(cast(num,cid{i})),'uint8')];
        return;
    end
end
error('unsupported integer');
%%-------------------------------------------------------------------------
function val=D_(num)
% Serialize a floating-point scalar: marker 'd' for single precision,
% 'D' for double, followed by the value's raw IEEE-754 bytes.
if(~isfloat(num))
    error('input is not a float');
end
if(isa(num,'single'))
    marker='d';
else
    marker='D';
end
val=[marker data2byte(num,'uint8')];
%%-------------------------------------------------------------------------
function data=I_a(num,type,dim,format)
%I_A Serialize an integer array with the given UBJSON type marker.
%  num    : integer array to serialize
%  type   : one of 'i','U','I','l','L' (int8/uint8/int16/int32/int64)
%  dim    : (optional) original array dimensions, stored in the header
%  format : (optional) 'opt' forces the optimized strongly-typed container
id=find(ismember('iUIlL',type));
if(isempty(id))
    % Bug fix: the original compared id==0, but find() returns [] for an
    % unknown type marker, so this error was never raised.
    error('unsupported integer array');
end
% based on UBJSON specs, all integer types are stored in big endian format
if(id==1)
    data=data2byte(swapbytes(int8(num)),'uint8');
    blen=1;
elseif(id==2)
    data=data2byte(swapbytes(uint8(num)),'uint8');
    blen=1;
elseif(id==3)
    data=data2byte(swapbytes(int16(num)),'uint8');
    blen=2;
elseif(id==4)
    data=data2byte(swapbytes(int32(num)),'uint8');
    blen=4;
elseif(id==5)
    data=data2byte(swapbytes(int64(num)),'uint8');
    blen=8;
end
% Non row-vector data always uses the optimized container format.
if(nargin>=3 && length(dim)>=2 && prod(dim)~=dim(2))
    format='opt';
end
if((nargin<4 || strcmp(format,'opt')) && numel(num)>1)
    % Optimized container: [$<type>#<count><payload> - no closing bracket,
    % per the UBJSON spec, since the element count is explicit.
    if(nargin>=3 && (length(dim)==1 || (length(dim)>=2 && prod(dim)~=dim(2))))
        cid=I_(uint32(max(dim)));
        data=['$' type '#' I_a(dim,cid(1)) data(:)'];
    else
        data=['$' type '#' I_(int32(numel(data)/blen)) data(:)'];
    end
    data=['[' data(:)'];
else
    % Plain container: interleave the type marker before each element.
    data=reshape(data,blen,numel(data)/blen);
    data(2:blen+1,:)=data;
    data(1,:)=type;
    data=data(:)';
    data=['[' data(:)' ']'];
end
%%-------------------------------------------------------------------------
function data=D_a(num,type,dim,format)
%D_A Serialize a floating-point array with the given UBJSON type marker.
%  num    : float array to serialize
%  type   : 'd' (single, 4 bytes/element) or 'D' (double, 8 bytes/element)
%  dim    : (optional) original array dimensions, stored in the header
%  format : (optional) 'opt' forces the optimized strongly-typed container
id=find(ismember('dD',type));
if(isempty(id))
    % Bug fix: the original compared id==0, but find() returns [] for an
    % unknown type marker, so this error was never raised.
    error('unsupported float array');
end
if(id==1)
    data=data2byte(single(num),'uint8');
elseif(id==2)
    data=data2byte(double(num),'uint8');
end
% Non row-vector data always uses the optimized container format.
if(nargin>=3 && length(dim)>=2 && prod(dim)~=dim(2))
    format='opt';
end
if((nargin<4 || strcmp(format,'opt')) && numel(num)>1)
    % Optimized container: [$<type>#<count><payload> - no closing bracket,
    % per the UBJSON spec, since the element count is explicit.
    if(nargin>=3 && (length(dim)==1 || (length(dim)>=2 && prod(dim)~=dim(2))))
        cid=I_(uint32(max(dim)));
        data=['$' type '#' I_a(dim,cid(1)) data(:)'];
    else
        % element byte width is id*4 (4 for single, 8 for double)
        data=['$' type '#' I_(int32(numel(data)/(id*4))) data(:)'];
    end
    data=['[' data];
else
    % Plain container: interleave the type marker before each element.
    data=reshape(data,(id*4),length(data)/(id*4));
    data(2:(id*4+1),:)=data;
    data(1,:)=type;
    data=data(:)';
    data=['[' data(:)' ']'];
end
%%-------------------------------------------------------------------------
function bytes=data2byte(varargin)
% Reinterpret the input value's raw bytes as the requested type and return
% them as a single row vector (thin wrapper around typecast).
raw=typecast(varargin{:});
bytes=reshape(raw,1,numel(raw));
|
github
|
mdreisbach/Predictive-Maintenance-System-master
|
trainClassifier.m
|
.m
|
Predictive-Maintenance-System-master/MATLAB Prototype Source Code/trainClassifier.m
| 6,766 |
utf_8
|
514538a24da1b418974f7075d48e5310
|
% Dynamically locate the project root so this script works from any
% sub-directory of Predictive_Maintenance_System.
% Fix: build paths with fullfile instead of hard-coded Windows '\'
% separators (which break on macOS/Linux), and index the cell with {}.
currentDir = pwd;
splitDir = strsplit(currentDir, 'Predictive_Maintenance_System');
rootDir = splitDir{1};

% --- Training data -------------------------------------------------------
pathToLoad = fullfile(rootDir, 'Predictive_Maintenance_System', 'Files', ...
    'Training_Data', 'dataMeasurementFull.csv');
trainingDataTable = readtable(pathToLoad, 'ReadVariableNames', false);
trainDataAsArray = table2array(trainingDataTable);

% Train the model on the (possibly new) training data.
[trainedModel, validationAccuracy] = trainClassifierFunc(trainDataAsArray);

% --- Testing data --------------------------------------------------------
% Must be a three column CSV with no headers:
% Principal Component 1, Principal Component 2, Shipspeed
pathToLoad2 = fullfile(rootDir, 'Predictive_Maintenance_System', 'Files', ...
    'Classification_Tables', 'Full_Table', 'dataPcaFull.csv');
testingData = readtable(pathToLoad2, 'ReadVariableNames', false);
testDataAsArray = table2array(testingData);

% Predict classes for the test data.
yfit = trainedModel.predictFcn(testDataAsArray);

% Write the predictions to a one-column CSV.
pathToLoad3 = fullfile(rootDir, 'Predictive_Maintenance_System', 'Files', ...
    'Predicted_Data', 'predictedClasses.csv');
csvwrite(pathToLoad3, yfit)
function [trainedClassifier, validationAccuracy] = trainClassifierFunc(trainingData)
% [trainedClassifier, validationAccuracy] = trainClassifier(trainingData)
% returns a trained classifier and its accuracy. This code recreates the
% classification model trained in Classification Learner app. Use the
% generated code to automate training the same model with new data, or to
% learn how to programmatically train models.
%
% Input:
% trainingData: a matrix with the same number of columns and data type
% as imported into the app.
%
% Output:
% trainedClassifier: a struct containing the trained classifier. The
% struct contains various fields with information about the trained
% classifier.
%
% trainedClassifier.predictFcn: a function to make predictions on new
% data.
%
% validationAccuracy: a double containing the accuracy in percent. In
% the app, the History list displays this overall accuracy score for
% each model.
%
% Use the code to train the model with new data. To retrain your
% classifier, call the function from the command line with your original
% data or new data as the input argument trainingData.
%
% For example, to retrain a classifier trained with the original data set
% T, enter:
% [trainedClassifier, validationAccuracy] = trainClassifier(T)
%
% To make predictions with the returned 'trainedClassifier' on new data T2,
% use
% yfit = trainedClassifier.predictFcn(T2)
%
% T2 must be a matrix containing only the predictor columns used for
% training. For details, enter:
% trainedClassifier.HowToPredict
% Auto-generated by MATLAB on 10-Dec-2017 10:54:39
%
% NOTE: this generated code trains the ensemble twice by design - first on
% the full data set (the model that is returned), then a second time on an
% 80% holdout split purely to compute validationAccuracy.
% Extract predictors and response
% This code processes the data into the right shape for training the
% model.
% Convert input to table
inputTable = array2table(trainingData, 'VariableNames', {'column_1', 'column_2', 'column_3', 'column_4'});
% column_3 is the response (class label); the other three are predictors.
predictorNames = {'column_1', 'column_2', 'column_4'};
predictors = inputTable(:, predictorNames);
response = inputTable.column_3;
% Placeholder kept by the app's code generator; not used below.
isCategoricalPredictor = [false, false, false];
% Train a classifier
% This code specifies all the classifier options and trains the classifier.
template = templateTree(...
    'MaxNumSplits', 20);
classificationEnsemble = fitcensemble(...
    predictors, ...
    response, ...
    'Method', 'AdaBoostM2', ...
    'NumLearningCycles', 30, ...
    'Learners', template, ...
    'LearnRate', 0.1, ...
    'ClassNames', [0; 1; 2]);
% Create the result struct with predict function
% predictFcn accepts a raw 3-column matrix, wraps it in a table with the
% predictor names, then forwards it to the ensemble's predict method.
predictorExtractionFcn = @(x) array2table(x, 'VariableNames', predictorNames);
ensemblePredictFcn = @(x) predict(classificationEnsemble, x);
trainedClassifier.predictFcn = @(x) ensemblePredictFcn(predictorExtractionFcn(x));
% Add additional fields to the result struct
trainedClassifier.ClassificationEnsemble = classificationEnsemble;
trainedClassifier.About = 'This struct is a trained model exported from Classification Learner R2017a.';
trainedClassifier.HowToPredict = sprintf('To make predictions on a new predictor column matrix, X, use: \n yfit = c.predictFcn(X) \nreplacing ''c'' with the name of the variable that is this struct, e.g. ''trainedModel''. \n \nX must contain exactly 3 columns because this model was trained using 3 predictors. \nX must contain only predictor columns in exactly the same order and format as your training \ndata. Do not include the response column or any columns you did not import into the app. \n \nFor more information, see <a href="matlab:helpview(fullfile(docroot, ''stats'', ''stats.map''), ''appclassification_exportmodeltoworkspace'')">How to predict using an exported model</a>.');
% Extract predictors and response
% This code processes the data into the right shape for training the
% model.
% Convert input to table
inputTable = array2table(trainingData, 'VariableNames', {'column_1', 'column_2', 'column_3', 'column_4'});
predictorNames = {'column_1', 'column_2', 'column_4'};
predictors = inputTable(:, predictorNames);
response = inputTable.column_3;
isCategoricalPredictor = [false, false, false];
% Set up holdout validation
% 20% of rows are held out; the split is stratified on the response.
cvp = cvpartition(response, 'Holdout', 0.2);
trainingPredictors = predictors(cvp.training, :);
trainingResponse = response(cvp.training, :);
trainingIsCategoricalPredictor = isCategoricalPredictor;
% Train a classifier
% This code specifies all the classifier options and trains the classifier.
% (Same hyperparameters as above, but fit only on the 80% training split.)
template = templateTree(...
    'MaxNumSplits', 20);
classificationEnsemble = fitcensemble(...
    trainingPredictors, ...
    trainingResponse, ...
    'Method', 'AdaBoostM2', ...
    'NumLearningCycles', 30, ...
    'Learners', template, ...
    'LearnRate', 0.1, ...
    'ClassNames', [0; 1; 2]);
% Create the result struct with predict function
ensemblePredictFcn = @(x) predict(classificationEnsemble, x);
validationPredictFcn = @(x) ensemblePredictFcn(x);
% Add additional fields to the result struct
% Compute validation predictions
validationPredictors = predictors(cvp.test, :);
validationResponse = response(cvp.test, :);
[validationPredictions, validationScores] = validationPredictFcn(validationPredictors);
% Compute validation accuracy
% Fraction of non-missing holdout rows predicted correctly.
correctPredictions = (validationPredictions == validationResponse);
isMissing = isnan(validationResponse);
correctPredictions = correctPredictions(~isMissing);
validationAccuracy = sum(correctPredictions)/length(correctPredictions);
end
|
github
|
mdreisbach/Predictive-Maintenance-System-master
|
PCA_Function_Testing.m
|
.m
|
Predictive-Maintenance-System-master/MATLAB Prototype Source Code/PCA_Function_Testing.m
| 2,163 |
utf_8
|
03277efd5cc31d7f4076d1605041f1ae
|
% Dynamically locate the project root so this script works from any
% sub-directory of Predictive_Maintenance_System.
% Fix: build paths with fullfile instead of hard-coded Windows '\'
% separators (which break on macOS/Linux), and index the cell with {}.
currentDir = pwd;
splitDir = strsplit(currentDir, 'Predictive_Maintenance_System');
rootDir = splitDir{1};
pathToLoad = fullfile(rootDir, 'Predictive_Maintenance_System', 'Files', ...
    'Current_File', 'data.csv');
% Read the current measurement CSV (no header row) into a table
currentDataTable = readtable(pathToLoad, 'ReadVariableNames', false);
% Call function to perform PCA and classify data
pcaClassifier(currentDataTable)
%Function to calculate the pca for the called dataSet, as well as graph the
%pca in a cumulative/indiviudal comparison plot for the # of principal
%components, and the percent of the data explained by that number
%of principal components.
function z = pcaClassifier(data)
%PCACLASSIFIER Normalize DATA, run PCA, and save the first two principal
%component scores to Files/Current_File/dataPCA.csv.
%
% Input:
%   data - table of raw measurements (one column per sensor channel)
% Output:
%   z    - the matrix written to disk: [PC1 score, PC2 score] per row
%          (fix: the original declared z but never assigned it, so any
%          caller requesting an output would error)

%Parse table to array
dataAsArray = table2array(data);
%Normalize (z-score) each column of the matrix
mn = mean(dataAsArray);
sd = std(dataAsArray);
% Guard against constant columns: a zero std would produce NaNs.
sd(sd==0) = 1;
dataNorm = bsxfun(@minus,dataAsArray,mn);
dataNorm = bsxfun(@rdivide,dataNorm,sd);
%Perform Principal Component Analysis on normalized data
%Allows variance of data to be seen in only a few dimensions
% (fix: the original called pca twice, discarding the first result)
[coeff,score,latent] = pca(dataNorm); %#ok<ASGLU>
%Create a matrix of the first two PCA scores for each row
classificationMatrix = cat(2, score(:,1), score(:,2));
%Set the output csv path dynamically (fullfile keeps it cross-platform)
currentDir = pwd;
splitDir = strsplit(currentDir, 'Predictive_Maintenance_System');
rootDir = splitDir{1};
pathToLoad = fullfile(rootDir, 'Predictive_Maintenance_System', 'Files', ...
    'Current_File', 'dataPCA.csv');
%Save the classification matrix as a CSV file
csvwrite(pathToLoad, classificationMatrix)
% Assign the declared output so callers requesting it do not error.
z = classificationMatrix;
end
|
github
|
mdreisbach/Predictive-Maintenance-System-master
|
PCA_Classification_Function.m
|
.m
|
Predictive-Maintenance-System-master/MATLAB Prototype Source Code/PCA_Classification_Function.m
| 4,337 |
utf_8
|
fc5e33674a807c11d64d149b7dd9c960
|
% Dynamically locate the project root so this script works from any
% sub-directory of Predictive_Maintenance_System.
% Fix: build paths with fullfile instead of hard-coded Windows '\'
% separators (which break on macOS/Linux), and index the cell with {}.
currentDir = pwd;
splitDir = strsplit(currentDir, 'Predictive_Maintenance_System');
rootDir = splitDir{1};
pathToLoad = fullfile(rootDir, 'Predictive_Maintenance_System', 'Files', ...
    'Current_File', 'data.csv');
% Read the current measurement CSV (no header row) into a table
currentDataTable = readtable(pathToLoad, 'ReadVariableNames', false);
% Call function to perform PCA and classify data
pcaClassifier(currentDataTable)
%Function to calculate the pca for the called dataSet, as well as graph the
%pca in a cumulative/indiviudal comparison plot for the # of principal
%components, and the percent of the data explained by that number
%of principal components.
function z = pcaClassifier(data)
%PCACLASSIFIER Normalize DATA, run PCA, classify each row by PC outliers,
%and save [PC1, PC2, class] to Files/dataTraining.csv.
%
% Input:
%   data - table of raw measurements (one column per sensor channel)
% Output:
%   z    - the matrix written to disk: [PC1 score, PC2 score, class],
%          where class is 0 = good, 1 = warning, 2 = alarm
%          (fix: the original declared z but never assigned it)

%Parse table to array
dataAsArray = table2array(data);
%Normalize (z-score) each column of the matrix
mn = mean(dataAsArray);
sd = std(dataAsArray);
% Guard against constant columns: a zero std would produce NaNs.
sd(sd==0) = 1;
dataNorm = bsxfun(@minus,dataAsArray,mn);
dataNorm = bsxfun(@rdivide,dataNorm,sd);
%Perform Principal Component Analysis on normalized data
%Allows variance of data to be seen in only a few dimensions
% (fix: the original called pca twice, discarding the first result)
[coeff,score,latent] = pca(dataNorm); %#ok<ASGLU>
%Calculate the IQR for the first two Principal Component Scores
%This is used to determine outliers and create groups of data
column1IQR = iqr(score(:,1));
column2IQR = iqr(score(:,2));
%Possible IQR multipliers for determining groups of data
%Currently using weak to achieve results for each group
%Plan to allow the user to choose values based on settings file
%Normal IQR in Stats: 1.5/3.0
%STRICT?(Default?): 1.5/2.0
%MEDIUM?: 1.25/1.75
%WEAK?: 1.0/1.5
%TODO read value from settings file to set this dynamically
warningIqrMultiplier = 1.0;
alarmIqrMultiplier = 1.5;
%Determine the outlier thresholds for the first principal comp
warningOutlierColumn1 = warningIqrMultiplier * column1IQR;
alarmOutlierColumn1 = alarmIqrMultiplier * column1IQR;
%Determine the outlier thresholds for the second principal comp
warningOutlierColumn2 = warningIqrMultiplier * column2IQR;
alarmOutlierColumn2 = alarmIqrMultiplier * column2IQR;
%Group data points into Alarm and Warning groups (boolean per row).
%A point is an alarm if either PC score falls outside the alarm band.
idxAlarm = abs(score(:,1)) > alarmOutlierColumn1 | abs(score(:,2)) > alarmOutlierColumn2;
%A point is a warning if either PC score falls outside the warning band
%and it is not already an alarm.
%Bug fix: the original expression had an operator-precedence error (& binds
%tighter than |, so "& ~idxAlarm" applied only to the last term) and it
%omitted the negative PC1 bound that the alarm test checks.
idxWarn = (abs(score(:,1)) > warningOutlierColumn1 | abs(score(:,2)) > warningOutlierColumn2) & ~idxAlarm;
%Class per row: 0 = good range, 1 = warning range, 2 = alarm range
%(vectorized assignment replaces the original element-by-element loop)
classOfData = zeros(length(idxAlarm),1);
classOfData(idxWarn) = 1;
classOfData(idxAlarm) = 2;
%Create a matrix of the PCA scores and class of data for each row
classificationMatrix = cat(2, score(:,1), score(:,2), classOfData);
%Set the output csv path dynamically (fullfile keeps it cross-platform)
currentDir = pwd;
splitDir = strsplit(currentDir, 'Predictive_Maintenance_System');
rootDir = splitDir{1};
pathToLoad = fullfile(rootDir, 'Predictive_Maintenance_System', 'Files', ...
    'dataTraining.csv');
%Save the classification matrix as a CSV file
csvwrite(pathToLoad, classificationMatrix)
% Assign the declared output so callers requesting it do not error.
z = classificationMatrix;
end
|
github
|
CohenBerkeleyLab/BEHR-core-master
|
coart_sea_reflectance.m
|
.m
|
BEHR-core-master/Albedo/coart_sea_reflectance.m
| 4,146 |
utf_8
|
48a0832c253ee8306bd4cd45a194c92c
|
function [ refl, refl_struct ] = coart_sea_reflectance( sza, refl_struct )
%COART_SEA_REFLECTANCE Interpolate sea surface reflectance from the COART LUT.
%   The COART (Coupled Atmosphere-Ocean Radiative Transfer) model, hosted at
%   https://cloudsgate2.larc.nasa.gov/jin/coart.html, was used to build a
%   look-up table of surface reflectance parameterized by solar zenith
%   angle, stored as an HTML file alongside this function.
%
%   [REFL, REFL_STRUCT] = COART_SEA_REFLECTANCE( SZA ) reads 'coart.htm'
%   from this function's directory and linearly interpolates the surface
%   reflectance at each input SZA (scalar or array; REFL matches its
%   size). REFL_STRUCT is the parsed LUT, a struct with fields "sza" and
%   "alb".
%
%   [ ___ ] = COART_SEA_REFLECTANCE( SZA, REFL_STRUCT ) reuses a
%   previously parsed LUT instead of re-reading the HTML file, which
%   speeds things up considerably - especially in parallel runs, where
%   simultaneous reads can contend for the file.

% Only parse the HTML table when the caller did not supply one.
if nargin < 2
    refl_struct = read_coart_html();
end

refl = interp1(refl_struct.sza, refl_struct.alb, sza);

end
function refl = read_coart_html()
%READ_COART_HTML Parse the COART model HTML output into a reflectance LUT.
% Scans 'coart.htm' (located next to this function) for data rows at 0 km
% measurement altitude and returns a struct with fields:
%   sza - solar zenith angles (degrees)
%   alb - corresponding surface reflectance (upwelling/downwelling
%         irradiance ratio)
mydir = fileparts(mfilename('fullpath'));
coart_file = fullfile(mydir, 'coart.htm');
% Preallocate the SZA and ALB vectors - will clean up unused elements at
% the end
refl.sza = nan(1,20);
refl.alb = nan(1,20);
i_alb = 1;
% For more helpful error messages
% NOTE(review): line_num is tracked but not currently used in any message.
line_num = 1;
fid = fopen(coart_file, 'r');
try
    tline = fgetl(fid);
    while ischar(tline)
        % The COART HTML output prints the albedo information in groups of
        % four lines, where each line gives the albedo information for a
        % different measurement altitude and each block is for a different
        % SZA. We will search for lines that define the reflectance at 0 km
        % altitude (so right at the surface).
        vals = strsplit(tline);
        % We need to remove any values that aren't just a number, the
        % numbers could be defined a 1.23E+2 or 4.5e-3, so we look for any
        % characters that aren't a number, a decimal, E, +, -, or NaN. This
        % will usually be extra HTML tags.
        % (iscellcontents is a project utility; presumably it applies the
        % named check to every cell - TODO confirm against its definition.)
        xx = iscellcontents(regexp(vals, '[^\d\.eE\+\-(NaN)]'),'isempty');
        % We also remove any cells that only contain an empty string
        xx = xx & ~iscellcontents(vals, 'isempty');
        vals = vals(xx);
        % A valid data row has exactly 8 numeric columns.
        if numel(vals) == 8
            % The measurement altitude is the third column, the SZA the second.
            % The albedo is the last column, assuming that we take the advice
            % of http://www.oceanopticsbook.info/view/radiative_transfer_theory/level_2/measures_of_reflectance
            % more specifically, the "Light and Water" book they reference in
            % that the "oceanographer's albedo" is the ratio of upwelling to
            % downwelling irradiance.
            %
            % Some other sources (of varying rigor) on the relationship between
            % radiance, irradiance, and albedo/reflectance:
            % http://www.cesbio.ups-tlse.fr/multitemp/?p=9148
            % http://ceeserver.cee.cornell.edu/wdp2/cee6100/6100_Labs/Lab03_Fa14_Radiance%20&%20Reflectance.pdf
            meas_alt = str2double(vals{3});
            if meas_alt == 0
                alb = str2double(vals{end});
                % Only record rows whose albedo parses as a real number.
                if ~isnan(alb)
                    refl.sza(i_alb) = str2double(vals{2});
                    refl.alb(i_alb) = alb;
                    i_alb = i_alb + 1;
                end
            end
        end
        line_num = line_num + 1;
        tline = fgetl(fid);
    end
catch err
    % Release the file handle before propagating the error.
    fclose(fid);
    rethrow(err);
end
fclose(fid);
% Trim the unused preallocated tail.
refl.sza(i_alb:end) = [];
refl.alb(i_alb:end) = [];
end
|
github
|
CohenBerkeleyLab/BEHR-core-master
|
mobley_sea_refl.m
|
.m
|
BEHR-core-master/Albedo/mobley_sea_refl.m
| 6,327 |
utf_8
|
aac340549615ee5b0f3dda6fc5e834ac
|
function [ refl, refl_struct ] = mobley_sea_refl( sza, vza, raa, refl_struct )
%MOBLEY_SEA_REFL Look up sea surface reflectivity from Mobley 2015.
%   REFL = MOBLEY_SEA_REFL( SZA, VZA, RAA ) returns the 550 nm sea surface
%   reflectance for the given solar zenith, viewing zenith, and relative
%   azimuth angles. All three inputs are in degrees and must be numeric
%   arrays of the same size; REFL matches that size. RAA follows the
%   Mobley 2015 convention (p. 4844): 0 deg is looking toward the Sun,
%   90 deg at right angles to the Sun's incident rays, and 180 deg is
%   looking away from the Sun.
%
%   [REFL, REFL_STRUCT] = MOBLEY_SEA_REFL( ___ ) also returns the
%   structure created by reading the Mobley table so it can be reused.
%
%   REFL = MOBLEY_SEA_REFL( ___, REFL_STRUCT ) passes that structure back
%   in so the text file is not re-read, which is the bulk of the
%   execution time.
%
%   The reflectances are Monte Carlo results at 550 nm; per the figure at
%   http://www.oceanopticsbook.info/view/remote_sensing/level_3/surface_reflectance_factors
%   this implies at least ~20% error at other wavelengths.
%
%   Sources:
%
%     Curtis D. Mobley, "Polarized reflectance and transmittance
%     properties of windblown sea surfaces," Appl. Opt. 54, 4828-4849
%     (2015), doi: 10.1364/AO.54.004828
%
%     Vasilkov et al., "Accounting for the effects of surface BRDF on
%     satellite cloud and trace gas retrievals: a new approach based on
%     geometry-dependent Lambertian equivalent reflectivity applied to
%     OMI algorithms," Atmos. Meas. Tech., 10, 333-349 (2017), doi:
%     10.5194/amt-10-333-2017

% Input validation. Bug fix: the original called E.badinput() without ever
% constructing E (JLLErrors), so invalid input raised "Undefined function
% or variable 'E'" instead of a useful message.
if ~isequal(size(sza), size(vza)) || ~isequal(size(sza), size(raa)) || ...
        ~isnumeric(sza) || ~isnumeric(vza) || ~isnumeric(raa)
    error('mobley_sea_refl:badinput', 'SZA, VZA, and RAA must all be numeric arrays of the same size');
end

if ~exist('refl_struct', 'var')
    refl_struct = read_in_table();
end

% No wind speed data is available here, so assume a constant 5 m/s for
% every pixel (within the 0-15 m/s range tabulated by Mobley).
assumed_wind = repmat(5, size(sza));

refl = interpn(refl_struct.wind_speed, refl_struct.sza, refl_struct.vza, refl_struct.raa,...
    refl_struct.refl, assumed_wind, sza, vza, raa);
end
function refl = read_in_table()
%READ_IN_TABLE Parse the Mobley 2015 sea surface reflectance text table.
% Returns a struct with the 4-D reflectance LUT ('refl') and its dimension
% vectors: wind_speed (m/s), sza, vza, and raa (degrees).
E = JLLErrors;
E.addCustomError('table_read', 'Problem reading Mobley 2015 table: %s');
table_file = fullfile(behr_repo_dir, 'Albedo', 'Mobley2015_SeaSurfRefl.txt');
fid = fopen(table_file, 'r');
table_started = false;
tline = fgetl(fid);
% Dimension vectors matching the grid used in the Mobley 2015 table.
wind_dim = [0, 2, 4, 5, 6, 8, 10, 12, 14, 15];
sza_dim = [0:10:80, 87.5];
vza_dim = [0:10:80, 87.5];
raa_dim = 0:15:180;
curr_wind = nan;
curr_sza = nan;
refl_table = nan(length(wind_dim), length(sza_dim), length(vza_dim), length(raa_dim));
while ischar(tline)
    % Skip the file header; the table proper starts at the first line
    % containing "WIND SPEED".
    if ~table_started && ~isempty(strfind(tline, 'WIND SPEED'))
        table_started = true;
    end
    if table_started
        if ~isempty(strfind(tline, 'WIND SPEED'))
            % Block header of the form "WIND SPEED = x; ... = y": take the
            % wind speed and SZA that apply to the following data lines.
            tmpline = strsplit(tline, ';');
            tmpline{1} = strsplit(tmpline{1}, '=');
            tmpline{2} = strsplit(tmpline{2}, '=');
            curr_wind = str2double(tmpline{1}{2});
            curr_sza = str2double(tmpline{2}{2});
        else
            if isnan(curr_wind) || isnan(curr_sza)
                E.callCustomError('table_read', 'Tried to read reflectance value before wind speed and SZA read');
            end
            % Data line: VZA, RAA, reflectance.
            tmpline = strsplit(strtrim(tline));
            curr_vza = str2double(tmpline{1});
            curr_raa = str2double(tmpline{2});
            curr_refl = str2double(tmpline{3});
            inds = find_table_inds(curr_wind, curr_sza, curr_vza, curr_raa);
            refl_table(inds(1), inds(2), inds(3), inds(4)) = curr_refl;
        end
    end
    tline = fgetl(fid);
end
fclose(fid);
% There will be some NaNs remaining in the table because the Mobley file
% does not include output for multiple RAAs when VZA = 0 (since RAA does
% not actually mean anything in that case). We want to replicate the RAA =
% 0 values to fill those nans
% Verified that this copies the values for RAA = 0 VZA = 0 to all values of
% RAA for VZA = 0 properly with (let refl_table_old be refl_table before
% the subsitution)
%   notnans = ~isnan(refl_table_old);
%   isequal(refl_table(notnans), refl_table_old(notnans)) % true
%   sum(diff(refl_table(:,:,1,:),[],4),4) % all values are 0
vza_eq_0 = refl_table(:,:,1,1);
refl_table(:,:,1,2:end) = repmat(vza_eq_0, 1, 1, length(raa_dim)-1);
refl = struct('refl', refl_table, 'wind_speed', wind_dim, 'sza', sza_dim, 'vza', vza_dim, 'raa', raa_dim,...
    'refl_dimensions', {{'wind_speed', 'sza', 'vza', 'raa'}});
    function found_inds = find_table_inds(wind, sza, vza, raa)
        % Nested helper: map (wind, sza, vza, raa) values to their indices
        % in the dimension vectors; errors if a value is not on the grid.
        found_inds = nan(1,4);
        vals = [wind, sza, vza, raa];
        dims = {wind_dim, sza_dim, vza_dim, raa_dim};
        for a=1:numel(vals)
            xx = vals(a) == dims{a};
            if sum(xx) ~= 1
                dim_names = {'wind', 'SZA', 'VZA', 'RAA'};
                E.callCustomError('table_read', sprintf('Could not identify indices for %s = %.1f', dim_names{a}, vals(a)));
            else
                found_inds(a) = find(xx);
            end
        end
    end
end
|
github
|
CohenBerkeleyLab/BEHR-core-master
|
behr_uncertainty_estimation.m
|
.m
|
BEHR-core-master/Utils/behr_uncertainty_estimation.m
| 5,935 |
utf_8
|
5f40f34fe8818e277997bac9ac1e5dc3
|
function [ Delta, DeltaGrid ] = behr_uncertainty_estimation( Data, OMI, parameter, percent_change, varargin )
%BEHR_UNCERTAINTY_ESTIMATION Estimate the uncertainty in BEHR NO2
%   [ DELTA, DELTAGRID ] = BEHR_UNCERTAINTY_ESTIMATION( DATA, OMI, PARAMETER, PERCENT_CHANGE )
%   This function will run the BEHR retrieval for the structure DATA but
%   with the field PARAMETER changed by PERCENT_CHANGE percent. PARAMETER
%   must match a field in DATA, or be one of the special strings
%   "profileloc"/"profiletime" (which randomize the profile location or
%   time lookup instead of scaling a field). OMI is the corresponding
%   gridded structure; it supplies the baseline values for the percent
%   differences stored in DELTAGRID. This will return structures DELTA and
%   DELTAGRID which are the equivalent of Data and OMI except they will
%   have the modified parameter values and the resultant different VCDs and
%   AMFs.
%
%   PERCENT_CHANGE may be either a number or a function handle. If a
%   number, then the field given by PARAMETER is changed to:
%
%       value * (100 + percent_change)/100
%
%   before running the BEHR algorithm. If PERCENT_CHANGE is a function
%   handle instead, then it must take a scalar structure as its sole
%   input and return the value that the field PARAMETER should take on.
%   For example, to use an absolute difference in MODISAlbedo rather
%   than a percent difference, use:
%
%       PERCENT_DIFFERENCE = @(Data) Data.MODISAlbedo + 0.05;
%
%   There are two parameters:
%
%       'remove_unchanged_fields' - is a scalar logical; if true, then all
%       numeric fields except those modified or added by this function are
%       removed from Delta and DeltaGrid before returning.
%
%       'DEBUG_LEVEL' - a scalar number indicating verbosity. Default is 2; 0
%       means no output.
E = JLLErrors;
% This function requires that rProfile_WRF.m in the BEHR-core-utils repo
% has the commit that allows it to keep the full extrapolated NO2 and
% temperature profiles instead of clipping them just below and above the
% surface pressure and tropopause pressure, respectively.
G = GitChecker;
G.addReqCommits(behr_paths.behr_utils, 'ca2faf3');
p = inputParser;
p.KeepUnmatched = true; % this should avoid errors with extra parameters to be passed through to BEHR_main_one_day
p.addParameter('remove_unchanged_fields', false);
p.addParameter('DEBUG_LEVEL', 2);
p.parse(varargin{:});
pout = p.Results;
remove_unchanged_fields = pout.remove_unchanged_fields;
DEBUG_LEVEL = pout.DEBUG_LEVEL;
% Validate the required inputs before doing any expensive work.
if ~isstruct(Data)
    E.badinput('DATA must be a structure')
end
if ~ischar(parameter) || (~isfield(Data, parameter) && ~any(strcmpi(parameter, {'profileloc', 'profiletime'})))
    E.badinput('PARAMETER must be a field name in DATA or the special strings "profileloc" or "profiletime"')
end
if isnumeric(percent_change)
    if ~isscalar(percent_change)
        E.badinput('If given as a number, PERCENT_CHANGE must be a scalar');
    else
        % Normalize the numeric form into the function-handle form so the
        % loop below only has to deal with one case.
        percent_change = @(Data) Data.(parameter) * (100 + percent_change)/100;
    end
elseif ~isa(percent_change, 'function_handle')
    E.badinput('PERCENT_CHANGE must be a scalar number or a function handle');
end
Delta = Data;
for a=1:numel(Delta)
    % Vary the specified parameter. Later we'll add an ability to vary the
    % NO2 profiles in a realistic way, but for now we'll stick to just
    % varying 2D parameters
    if ~any(strcmpi(parameter, {'profileloc','profiletime'}))
        Delta(a).(parameter) = percent_change(Delta(a));
    end
end
% Now run BEHR but for the modified parameters. The two special parameter
% strings randomize the profile lookup rather than perturbing a field.
if strcmpi(parameter, 'profileloc')
    [Delta, DeltaGrid] = BEHR_main_one_day(Delta, 'profile_mode', Delta(1).BEHRProfileMode, 'lookup_profile', true, 'lookup_sweights', false,...
        'randomize_profile_loc', true, varargin{:});
elseif strcmpi(parameter, 'profiletime')
    [Delta, DeltaGrid] = BEHR_main_one_day(Delta, 'profile_mode', Delta(1).BEHRProfileMode, 'lookup_profile', true, 'lookup_sweights', false,...
        'randomize_profile_time', true, varargin{:});
else
    [Delta, DeltaGrid] = BEHR_main_one_day(Delta, 'profile_mode', Delta(1).BEHRProfileMode, 'lookup_profile', false, 'lookup_sweights', true, 'extra_gridding_fields', {parameter}, varargin{:});
end
% Calculate the percent differences in the NO2 columns and AMFs relative
% to the unperturbed inputs (Data for native pixels, OMI for the grid).
for a=1:numel(Delta)
    Delta(a).PercentChangeNO2 = reldiff(Delta(a).BEHRColumnAmountNO2Trop, Data(a).BEHRColumnAmountNO2Trop)*100;
    Delta(a).PercentChangeNO2Vis = reldiff(Delta(a).BEHRColumnAmountNO2TropVisOnly, Data(a).BEHRColumnAmountNO2TropVisOnly)*100;
    Delta(a).PercentChangeAMF = reldiff(Delta(a).BEHRAMFTrop, Data(a).BEHRAMFTrop)*100;
    Delta(a).PercentChangeAMFVis = reldiff(Delta(a).BEHRAMFTropVisOnly, Data(a).BEHRAMFTropVisOnly)*100;
    DeltaGrid(a).PercentChangeNO2 = reldiff(DeltaGrid(a).BEHRColumnAmountNO2Trop, OMI(a).BEHRColumnAmountNO2Trop)*100;
    DeltaGrid(a).PercentChangeNO2Vis = reldiff(DeltaGrid(a).BEHRColumnAmountNO2TropVisOnly, OMI(a).BEHRColumnAmountNO2TropVisOnly)*100;
    DeltaGrid(a).PercentChangeAMF = reldiff(DeltaGrid(a).BEHRAMFTrop, OMI(a).BEHRAMFTrop)*100;
    DeltaGrid(a).PercentChangeAMFVis = reldiff(DeltaGrid(a).BEHRAMFTropVisOnly, OMI(a).BEHRAMFTropVisOnly)*100;
end
if remove_unchanged_fields
    % Keep the percent change fields, the NO2 and AMF fields themselves,
    % the quality flags (so we can ID good pixels), and the changed
    % parameter, but remove all other data fields (attribute fields will be
    % kept, i.e. any non-numeric field)
    fields_to_keep = {parameter, 'BEHRColumnAmountNO2Trop', 'BEHRAMFTrop', 'BEHRColumnAmountNO2TropVisOnly', 'BEHRAMFTropVisOnly',...
        'PercentChangeNO2', 'PercentChangeAMF', 'PercentChangeNO2Vis', 'PercentChangeAMFVis', 'BEHRQualityFlags'};
    Delta = cut_down_fields(Delta, fields_to_keep);
    DeltaGrid = cut_down_fields(DeltaGrid, [fields_to_keep, {'Areaweight'}]);
end
end
function Data = cut_down_fields(Data, fields_to_keep)
% Strip numeric data fields from Data that are not listed in the cell
% array FIELDS_TO_KEEP. Non-numeric (attribute) fields are always kept,
% since they carry metadata rather than bulky per-pixel data.
all_fields = fieldnames(Data);
to_remove = {};
for i_fld = 1:numel(all_fields)
    this_field = all_fields{i_fld};
    % Only the first element of the structure array is inspected; all
    % elements are assumed to share field types.
    is_numeric = isnumeric(Data(1).(this_field));
    is_listed = any(strcmp(this_field, fields_to_keep));
    if is_numeric && ~is_listed
        to_remove{end+1} = this_field; %#ok<AGROW>
    end
end
Data = rmfield(Data, to_remove);
end
|
github
|
CohenBerkeleyLab/BEHR-core-master
|
behr_generate_uncertainty_files.m
|
.m
|
BEHR-core-master/Utils/behr_generate_uncertainty_files.m
| 9,882 |
utf_8
|
7bb92109dbade18e03ba7e931c08b75c
|
function behr_generate_uncertainty_files(varargin)
%BEHR_GENERATE_UNCERTAINTY_FILES Generate the intermediate files for BEHR uncertainty analysis
%   BEHR_GENERATE_UNCERTAINTY_FILES( ) Generates files containing Delta
%   and DeltaGrid structures, which are generated by
%   BEHR_UNCERTAINTY_ESTIMATION() and contain the percent change in NO2
%   VCDs and AMFs for a given variation of input parameters. This function
%   will iterate over MODISAlbedo, GLOBETerrainHeight, the tropopause
%   pressure, CloudPressure, CloudRadianceFraction, and the NO2 profiles
%   and generate uncertainty estimation files for each. By default it
%   generates them for every month of 2012 and stores the files under
%   "us-uncertainty" in the behr_paths.behr_mat_dir folder. The following
%   parameters allow you to override those options:
%
%       'test_year' - default is 2012, can be set to any year with BEHR
%       data, though daily profiles are necessary for one of the tests.
%       Give the year as a number.
%
%       'test_months' - a vector of months, given as numbers 1-12,
%       specifying which months of the year to test. Default is 1:12.
%
%       'output_root' - the root directory where each input parameter
%       tested will have its subdirectory placed. Default is
%       fullfile(behr_paths.behr_mat_dir, 'us-uncertainty').
%
%       'region' - which region to test. Default is 'us'.
%
%       'prof_mode' - which BEHR subproduct to use. Default is 'daily'.
%       Currently, 'monthly' will fail because the profile time uncertainty
%       test relies on daily profiles.
%
%       'overwrite' - whether output files should be overwritten, or a day
%       skipped if it already exists. Default is false, i.e. do not
%       overwrite.
p = inputParser;
p.addParameter('test_year', 2012);
p.addParameter('test_months', 1:12);
p.addParameter('output_root', '');
p.addParameter('region', 'us');
p.addParameter('prof_mode', 'daily');
p.addParameter('overwrite', false);
p.parse(varargin{:});
pout = p.Results;
test_year = pout.test_year;
test_months = pout.test_months;
prof_mode = pout.prof_mode;
region = pout.region;
output_root = set_output_root(pout.output_root, region);
overwrite = pout.overwrite;
E = JLLErrors;
if ~isnumeric(test_year) || ~isscalar(test_year)
    E.badinput('"test_year" must be a scalar')
end
if ~isnumeric(test_months) || any(test_months(:) < 1 | test_months(:) > 12)
    E.badinput('"test_months" must be an array of numbers between 1 and 12')
end
if ~ischar(prof_mode) || ~strcmpi(prof_mode, 'daily')
    E.badinput('"prof_mode" must be the char ''daily''. Currently, ''monthly'' is not supported')
end
if ~ischar(region)
    % Assume that load_behr_file() will error if an invalid region is given
    E.badinput('"region" must be a char array');
end
if ~islogical(overwrite) || ~isscalar(overwrite)
    E.badinput('"overwrite" must be a scalar logical');
end
% Define the standard parameters to vary here. Give the percent change for
% each month; this allows for the fact that e.g. albedo can be more
% uncertain in winter when there is snow on the ground. "ProfileLoc" and
% "ProfileTime" are special parameter names that tell
% behr_uncertainty_estimation() to have BEHR_main_one_day randomize the
% profile location and day, respectively. The percent difference does not
% matter there because it uses the intrinsic variability of the profiles to
% calculate the uncertainty.
% MODIS v5 uncertainty given in
% https://link.springer.com/chapter/10.1007%2F978-1-4419-6749-7_24 as
% within 5% with good quality and within 10% even with low-quality data
% (search for "accuracy"). This link
% (https://landval.gsfc.nasa.gov/ProductStatus.php?ProductID=MOD43)
% reiterates the 5%/10% uncertainty and provides a list of references.
% Since we use the BRDF directly, rather than including an RTM, we should
% also include uncertainty from that. Using
% misc_alb_plots.misc_alb_plots.ler_vs_simple_brdf, the 75th percentile
% difference is ~14%, and the slope of LER vs. BRDF doesn't vary much by
% month, surprisingly, so we'll combine these in quadrature to get ( 10^2 +
% 14^2 )^0.5 = 17% uncertainty.
% For terrain pressure, I doubt that there's much uncertainty in GLOBE
% data, and since it is so much smaller than the OMI pixel, I find it
% unlikely that representativeness is an issue. Rather it seems more likely
% to me that whatever error there is in it comes from the fact we assume a
% fixed scale height in our calculation. The best way I've come up with to
% assess that is to compare the average WRF surface pressures to the GLOBE
% surface pressures. Using
% misc_behr_v3_validation.plot_behr_wrf_surfpres_diffs, I get at most a
% -1.5% bias (Jan, Sept, Nov 2012). I will carry that over into the
% new terrain height.
% For cloud pressure and cloud fraction, Acarreta et al.
% (doi:10.1029/2003JD003915) describes the error in sect. 6.1. They point
% out that it varies with a lot of parameters, and unfortunately the error
% they show in Fig. 3 is not a constant percentage. So we will extract the
% data from Fig. 3 and use it to calculate the error directly given the
% cloud fraction and pressure.
O2 = O2O2CloudUncert();
param_percent_changes = struct('MODISAlbedo', {{17, -17}},...
    'GLOBETerrainHeight', {{@(Data) percent_change_in_range(Data, 'GLOBETerrainHeight', 1.5, [0 Inf]), @(Data) percent_change_in_range(Data, 'GLOBETerrainHeight', -1.5, [0 Inf])}},...
    'BEHRTropopausePressure', {{@(Data) Data.TropopausePressure}},...
    'CloudPressure', {{@(Data) Data.CloudPressure + O2.interpolant(Data.CloudFraction, Data.CloudPressure), @(Data) Data.CloudPressure - O2.interpolant(Data.CloudFraction, Data.CloudPressure)}},...
    'CloudRadianceFraction', {{@(Data) differential_crf(Data, 0.05), @(Data) differential_crf(Data, -0.05)}},...
    'ProfileLoc', 0,...
    'ProfileTime', 0);
% Vary each parameter by the given amount, as well as randomizing the
% profile times and locations. Save the resulting files in subdirectories
% under the output root directory titled by what was varied.
params = fieldnames(param_percent_changes);
for i_param = 1:numel(params)
    this_param = params{i_param};
    output_dir = fullfile(output_root, this_param);
    if ~exist(output_dir, 'dir')
        mkdir(output_dir);
    end
    for i_month = 1:numel(test_months)
        fprintf('Perturbing %s for month %d\n', this_param, test_months(i_month));
        start_date = datenum(test_year, test_months(i_month), 1);
        % Use eomday() to find the true last day of the month.
        % datenum(y, m, 31) silently rolls over into the next month for
        % months with fewer than 31 days (e.g. datenum(2012,2,31) is
        % 2 Mar 2012), which would process extra, mislabeled days.
        end_date = datenum(test_year, test_months(i_month), eomday(test_year, test_months(i_month)));
        parfor this_date = start_date:end_date
            savename = behr_filename(this_date, prof_mode, region);
            savename = strrep(savename, 'BEHR', sprintf('BEHR-%s-UNCERTAINTY', this_param));
            full_savename = fullfile(output_dir, savename);
            if ~overwrite && exist(full_savename, 'file')
                fprintf('%s exists; skipping\n', full_savename);
                continue
            end
            this_percent_change = param_percent_changes.(this_param);
            % One ErrorData element per perturbation (e.g. +17% and -17%);
            % each stores the perturbation operator and its Delta results.
            ErrorData = struct('parameter', this_param, 'percent_change_op', this_percent_change, 'Delta', cell(size(this_percent_change)), 'DeltaGrid', cell(size(this_percent_change)));
            file_name = behr_filename(this_date, prof_mode, region);
            F = load(fullfile(behr_paths.BEHRMatSubdir(region, prof_mode), file_name));
            Data = F.Data;
            OMI = F.OMI;
            for i_change = 1:numel(this_percent_change)
                [ErrorData(i_change).Delta, ErrorData(i_change).DeltaGrid] = behr_uncertainty_estimation(Data, OMI, params{i_param}, ErrorData(i_change).percent_change_op, 'remove_unchanged_fields', true);
            end
            saveoutput(full_savename, ErrorData)
        end
    end
end
end
function saveoutput(savename, ErrorData) %#ok<INUSD>
% Save the ErrorData structure to the .mat file SAVENAME. Wrapped in a
% helper function so that save() can be used from inside the parfor loop
% (parfor bodies must be "transparent", which bare save() calls violate).
save(savename, 'ErrorData');
end
function output_root = set_output_root(given_dir, region)
% Resolve the root output directory for the uncertainty files.
% An explicitly given directory must already exist; when GIVEN_DIR is
% empty, default to "<region>-uncertainty" under behr_paths.behr_mat_dir,
% creating that subdirectory if necessary.
E = JLLErrors;
if ~isempty(given_dir)
    % User-specified directory: require that it exists, then use it as-is.
    if ~exist(given_dir, 'dir')
        E.badinput('Given output root directory (%s) does not exist', given_dir);
    end
    output_root = given_dir;
    return
end
% No directory given: place the output under the standard BEHR .mat
% directory, which itself must exist.
if ~exist(behr_paths.behr_mat_dir, 'dir')
    E.dir_dne('behr_paths.behr_mat_dir')
end
output_root = fullfile(behr_paths.behr_mat_dir, sprintf('%s-uncertainty', lower(region)));
if ~exist(output_root, 'dir')
    mkdir(output_root)
end
end
function cldradfrac = differential_crf(Data, cloudfrac_error)
% Perturb the cloud radiance fraction (CRF) consistently with a
% perturbation CLOUDFRAC_ERROR of the geometric cloud fraction, using the
% empirical CF -> CRF relationship binned from this day's own data.
% Bin both quantities by cloud fraction in 0.05-wide bins, with the bin
% centers placed so that they fall on 0 and 1. Bins with no data produce
% NaN means and are dropped before interpolating.
edges = -0.025:0.05:1.025;
binned_cf = bin_data(Data.CloudFraction, Data.CloudFraction, edges);
binned_crf = bin_data(Data.CloudFraction, Data.CloudRadianceFraction, edges);
mean_cf = cellfun(@nanmean, binned_cf);
mean_crf = cellfun(@nanmean, binned_crf);
keep = ~isnan(mean_cf) & ~isnan(mean_crf);
mean_cf = mean_cf(keep);
mean_crf = mean_crf(keep);
% Evaluate the binned CF -> CRF curve at the perturbed and original cloud
% fractions; their difference is the CRF change implied by the CF error.
% The perturbed CF is clamped to [0, 1] before the lookup.
perturbed_cf = clipmat(Data.CloudFraction + cloudfrac_error, 0, 1);
crf_at_perturbed = interp1(mean_cf, mean_crf, perturbed_cf, 'linear', 'extrap');
crf_at_original = interp1(mean_cf, mean_crf, Data.CloudFraction, 'linear', 'extrap');
% Apply the CRF change and clamp the result back into [0, 1].
cldradfrac = clipmat(Data.CloudRadianceFraction + (crf_at_perturbed - crf_at_original), 0, 1);
end
function val = percent_change_in_range(Data, field, percent_change, range)
% Apply a relative perturbation to Data.(field) and clamp the result.
%   field          - name of the field in Data to perturb
%   percent_change - signed percent change, e.g. 1.5 means +1.5%
%   range          - two-element [min, max] vector giving the clamp bounds
% Returns the perturbed, clamped field values.
val = Data.(field);
val = val + val * percent_change / 100;
% clipmat is called elsewhere in this file as clipmat(x, min, max) (see
% differential_crf); passing the two-element "range" vector as a single
% argument did not match that signature, so unpack the bounds explicitly.
val = clipmat(val, range(1), range(2));
end
|
github
|
CohenBerkeleyLab/BEHR-core-master
|
globe_fix_16aug2016.m
|
.m
|
BEHR-core-master/Utils/Fixes/globe_fix_16aug2016.m
| 5,187 |
utf_8
|
98e0a4c51be32f132cd763d4f9204d0a
|
function [ ] = globe_fix_16aug2016( start_date, end_date )
%GLOBE_FIX_16AUG2016 Fixes an issue with sea-level GLOBE terrain pressure
%   Pixels over ocean in BEHR have a GLOBETerpres value of ~1080 hPa, which
%   is wrong. This happens because a fill value of -500 snuck into the
%   terrain altitude at the beginning wherever GLOBE is a NaN. This code
%   will be just redoing the GLOBE calculation.
%
%   START_DATE and END_DATE bound (inclusively) the days to reprocess and
%   may be date strings or date numbers accepted by datenum().
DEBUG_LEVEL = 1;
globe_dir = '/global/scratch/laughner/SAT/BEHR/GLOBE_Database/';
load_dir = '/global/scratch/laughner/SAT/BEHR/SP_Files_preGlobeFix';
save_dir = '/global/scratch/laughner/SAT/BEHR/SP_Files';
% These will be set from the data structure
lonlim = [-135 -55];
latlim = [15 60];
% Load the terrain elevation with the new boundaries. NOTE: despite the
% name, "terpres" here holds GLOBE terrain elevations (m ASL), which are
% converted to pressures pixel-by-pixel below.
[terpres, refvec] = globedem(globe_dir,1,latlim,lonlim);
%refvec will contain (1) number of cells per degree, (2)
%northwest corner latitude, (3) NW corner longitude.
%(2) & (3) might differ from the input latmin & lonmin
%because of where the globe cell edges fall
if DEBUG_LEVEL > 0; fprintf('\n Creating lon/lat matrices for GLOBE data \n'); end
cell_count = refvec(1);
globe_latmax = refvec(2); globe_latmin = globe_latmax - size(terpres,1)*(1/cell_count);
globe_lat_matrix = (globe_latmin + 1/(2*cell_count)):(1/cell_count):globe_latmax;
globe_lat_matrix = globe_lat_matrix';
globe_lat_matrix = repmat(globe_lat_matrix,1,size(terpres,2));
globe_lonmin = refvec(3); globe_lonmax = globe_lonmin + size(terpres,2)*(1/cell_count);
globe_lon_matrix = globe_lonmin + 1/(2*cell_count):(1/cell_count):globe_lonmax;
globe_lon_matrix = repmat(globe_lon_matrix,size(terpres,1),1);
terpres(isnan(terpres)) = 0; % CHANGED: NaNs occur over ocean so should be altitude ASL of 0
dnums = datenum(start_date):datenum(end_date);
parfor d=1:numel(dnums)
    % getCurrentTask returns [] when the parfor loop runs serially (no
    % parallel pool open); fall back to worker ID 0 in that case so the
    % progress messages do not error.
    tID = getCurrentTask;
    if isempty(tID)
        worker_id = 0;
    else
        worker_id = tID.ID;
    end
    load_name = sprintf('OMI_SP_v2-1Arev1_%04d%02d%02d.mat', year(dnums(d)), month(dnums(d)), day(dnums(d)));
    file_name = fullfile(load_dir, load_name);
    if ~exist(file_name, 'file')
        fprintf('%s does not exist, skipping\n', load_name)
    else
        D = load(file_name);
        if DEBUG_LEVEL > 0; fprintf('w%d: Loaded %s\n', worker_id, load_name); end
        Data = D.Data;
        for E=1:numel(Data)
            if numel(Data(E).Loncorn) == 1
                fprintf(' w%d: Swath %d has no data, saving as is\n', worker_id, E)
            else
                if DEBUG_LEVEL > 0; fprintf(' w%d: Swath %d of %d\n', worker_id, E, numel(Data)); end
                GLOBETerpres = zeros(size(Data(E).Latitude));
                % Total pixel count for this swath; used in the progress
                % messages below (previously "c" was never defined, which
                % errored whenever DEBUG_LEVEL > 1).
                c = numel(Data(E).Longitude);
                %GLOBE matrices are arrange s.t. terpres(1,1) is in the SW
                %corner and terpres(end, end) is in the NE corner.
                for k=1:numel(Data(E).Longitude)
                    if DEBUG_LEVEL > 1; fprintf('Averaging GLOBE data to pixel %u of %u \n',k,c); end
                    if DEBUG_LEVEL > 2; tic; end
                    x1 = Data(E).Loncorn(1,k); y1 = Data(E).Latcorn(1,k);
                    x2 = Data(E).Loncorn(2,k); y2 = Data(E).Latcorn(2,k);
                    x3 = Data(E).Loncorn(3,k); y3 = Data(E).Latcorn(3,k);
                    x4 = Data(E).Loncorn(4,k); y4 = Data(E).Latcorn(4,k);
                    xall=[x1;x2;x3;x4;x1];
                    yall=[y1;y2;y3;y4;y1];
                    %%%%SPEED IT UP%%%%
                    % Since GLOBE data is on a grid where a row of
                    % latitudinal points all have the same longitude and
                    % vice versa, we can quickly reduce the number of
                    % points by comparing just one lat and lon vector to
                    % the extent of the pixel.
                    ai=find(globe_lat_matrix(:,1)>=min(yall) & globe_lat_matrix(:,1)<=max(yall));
                    bi=find(globe_lon_matrix(1,:)>=min(xall) & globe_lon_matrix(1,:)<=max(xall));
                    pressurex=terpres(ai,bi);
                    pressure_latx=globe_lat_matrix(ai,bi);
                    pressure_lonx=globe_lon_matrix(ai,bi);
                    %%%%%%%%%%%%%%%%%%%
                    % inpolygon is slow compared to a simple logical test,
                    % so we only apply it to the subset of GLOBE heights
                    % immediately around our pixel. (Lat is passed as the
                    % "x" coordinate and lon as "y" for both the query
                    % points and the polygon, so the test is consistent.)
                    xx_globe = inpolygon(pressure_latx,pressure_lonx,yall,xall);
                    pres_vals=pressurex(xx_globe);
                    % Convert mean elevation (m) to pressure (hPa) with a
                    % fixed 7400 m scale height.
                    GLOBETerpres(k)=1013.25 .* exp(-mean(pres_vals) / 7400 ); %Originally divided by 7640 m
                    if DEBUG_LEVEL > 2; telap = toc; fprintf('Time for GLOBE --> pixel %u/%u = %g sec \n',k,c,telap); end
                end
                Data(E).GLOBETerpres = GLOBETerpres;
            end
        end
        save_name = sprintf('OMI_SP_v2-1B_%04d%02d%02d.mat',year(dnums(d)), month(dnums(d)), day(dnums(d)));
        saveData(fullfile(save_dir, save_name),Data);
    end
end
end
function saveData(save_path, Data) %#ok<INUSD>
% Save the corrected Data structure to SAVE_PATH. Wrapped in a helper
% function so that save() can be used from inside the parfor loop above
% (parfor bodies must be "transparent", which bare save() calls violate).
save(save_path,'Data');
end
|
github
|
CohenBerkeleyLab/BEHR-core-master
|
BEHR_main.m
|
.m
|
BEHR-core-master/BEHR_Main/BEHR_main.m
| 10,293 |
utf_8
|
663701aa5d6628aa878f67282cd23047
|
function BEHR_main(varargin)
% BEHR_MAIN: primary BEHR algorithm
%
% This function is the primary BEHR algorithm, it takes the OMI, MODIS,
% and GLOBE data read in by read_main.m and uses it to
% recalculated the BEHR AMFs and VCDs. There are a number of input
% parameters that control it's operation; the defaults are set such that
% it should run if you simply execute this script, but in most cases you
% will want to change at least the start and end dates.
%
% Parameters:
% 'start' - the first date to process as a date number or date string
% that Matlab recognized implicitly. If not given, defaults to
% 2005-01-01.
%
% 'end' - the last date to process, same format requirements as
% starting date. If not given, defaults to today.
%
% 'behr_mat_dir' - the directory that the final .mat file should be
% saved in. If not given, it defaults to
% fullfile(behr_paths.behr_mat_dir, lower(region), lower(prof_mode)).
%
% 'sp_mat_dir' - the directory that the .mat files resulting from
% read_main.m are stored in. If not given, it defaults to
% fullfile(behr_paths.sp_mat_dir, lower(region))
%
% 'amf_tools_path' - the directory that contains the files
% nmcTmpYr.txt and damf.txt. If not given, defaults to the path
% stored in behr_paths.m
%
% 'no2_profile_path' - the directory to look for WRF output files in.
% If not given, or given as an empty string, this is determined
% automatically.
%
% 'overwrite' - a boolean that controls whether existing files in the
% behr_mat_dir should be overwritten or not. Defaults to false (if a
% file exists for a given day, that day will not be reprocessed).
%
% 'profile_mode' - must be the string 'daily' or 'monthly' (defaults
% to 'monthly'). Controls whether daily or monthly profiles will be
% used, which also controls whether rProfile_WRF.m looks for files
% named 'WRF_BEHR_monthly_yyyy-mm.nc' (monthly) or
% 'wrfout_*_yyyy-mm-dd_hh-00-00' (daily).
%
% 'use_psm_gridding' - if false (default), uses CVM gridding for all
% fields. If true, then NO2 fields will be gridded using the PSM
% method (specifically, fields specified as psm_gridded_vars in
% BEHR_publishing_gridded_fields will be gridded by PSM).
%
% 'err_wrf_missing_attr' - if true (default), then if WRF files are
% missing attributes that are read in (usually units), an error is
% thrown. However, if false, then default units are assumed. Use
% "false" with caution, as if the necessary variables are given in
% the wrong units, there will be no way to catch that if "false" is
% given for this parameter.
%
% 'DEBUG_LEVEL' - level of progress messaged printed to the console.
% 0 = none, 1 = minimal, 2 = all, 3 = processing times are added.
% Default is 2.
%
%Josh Laughner <[email protected]>
%Based on BEHR_nwus by Ashley Russell (02/09/2012)
%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%% INITIALIZATION %%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%
E = JLLErrors;
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%% INITIALIZATION AND INPUT VALIDATION %%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% You may change the default values here if you just want to be able to
% click 'Run', but it's usually better to pass these as parameters.
p = inputParser;
p.addParameter('start', '2005-01-01');
p.addParameter('end', today);
p.addParameter('behr_mat_dir', '');
p.addParameter('sp_mat_dir', '');
p.addParameter('amf_tools_path', behr_paths.amf_tools_dir);
p.addParameter('no2_profile_path', '');
p.addParameter('region', 'us');
p.addParameter('overwrite', false);
p.addParameter('profile_mode', 'monthly');
p.addParameter('use_psm_gridding', false);
p.addParameter('err_wrf_missing_attr', true);
p.addParameter('DEBUG_LEVEL', 2);
p.parse(varargin{:});
pout = p.Results;
date_start = pout.start;
date_end = pout.end;
behr_mat_dir = pout.behr_mat_dir;
sp_mat_dir = pout.sp_mat_dir;
no2_profile_path = pout.no2_profile_path;
region = pout.region;
overwrite = pout.overwrite;
prof_mode = pout.profile_mode;
use_psm = pout.use_psm_gridding;
err_wrf_missing_attr = pout.err_wrf_missing_attr;
DEBUG_LEVEL = pout.DEBUG_LEVEL;
%%% Validation %%%
allowed_prof_modes = {'daily','monthly'};
date_start = validate_date(date_start);
date_end = validate_date(date_end);
if ~ischar(behr_mat_dir)
    E.badinput('Parameter "behr_mat_dir" must be a string');
elseif ~ischar(sp_mat_dir)
    E.badinput('Parameter "sp_mat_dir" must be a string');
elseif ~ischar(no2_profile_path)
    E.badinput('Parameter "no2_profile_path" must be a string');
elseif (~islogical(overwrite) && ~isnumeric(overwrite)) || ~isscalar(overwrite)
    E.badinput('Parameter "overwrite" must be a scalar logical or number')
elseif ~ismember(prof_mode,allowed_prof_modes)
    E.badinput('prof_mode (if given) must be one of %s', strjoin(allowed_prof_modes,', '));
elseif ~isscalar(use_psm) || (~islogical(use_psm) && ~isnumeric(use_psm))
    E.badinput('use_psm_gridding must be a scalar logical or number')
end
% If using the default SP file directory, look in the right region
% subfolder.
if isempty(sp_mat_dir)
    sp_mat_dir = behr_paths.SPMatSubdir(region);
end
% Set behr_mat_dir to the daily or monthly directory, with region
% subdirectory, if using the default path
if isempty(behr_mat_dir)
    behr_mat_dir = behr_paths.BEHRMatSubdir(region, prof_mode);
end
% Bring the AMF tools path (where damf.txt is) into this workspace
amf_tools_path = behr_paths.amf_tools_dir;
% Verify the paths integrity.
nonexistant = {};
if ~exist(behr_mat_dir,'dir')
    nonexistant{end+1} = 'behr_mat_dir';
end
if ~exist(sp_mat_dir,'dir')
    nonexistant{end+1} = 'sp_mat_dir';
end
if ~exist(amf_tools_path,'dir')
    nonexistant{end+1} = 'amf_tools_path';
end
if ~isempty(no2_profile_path) && ~exist(no2_profile_path,'dir')
    nonexistant{end+1} = 'no2_profile_path';
end
if numel(nonexistant)>0
    string_spec = [repmat('\n\t%s',1,numel(nonexistant)),'\n\n'];
    msg = sprintf('The following paths are not valid: %s Please double check them in the run file',string_spec);
    error(E.callError('bad_cluster_path',sprintf(msg,nonexistant{:})));
end
%Store paths to relevant files
% NOTE(review): fileDamf is computed but not referenced in this function's
% visible body; kept in case downstream code relies on it — confirm before
% removing.
fileDamf = fullfile(amf_tools_path,'damf.txt');
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%% PARALLELIZATION OPTIONS %%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Specifies whether the script is executing on a cluster; this must be set
% (globally) in the calling script. This allows for the execution of code
% needed on the cluster (i.e. adding necessary folders to the Matlab path,
% opening a parallel pool) without running them on the local machine. If
% onCluster hasn't been defined yet, set it to false.
global onCluster;
if isempty(onCluster)
    fprintf('Assuming onCluster is false\n');
    onCluster = false;
end
% Defined the number of threads to run, this will be used to open a
% parallel pool. numThreads should be set in the calling run script,
% otherwise it will default to 1.
global numThreads;
if isempty(numThreads)
    numThreads = 1;
end
% Cleanup object will safely exit if there's a problem
if onCluster
    cleanupobj = onCleanup(@() mycleanup());
end
%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%% MAIN FUNCTION %%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%
% Set the number of parfor workers. On a cluster, open a parallel pool
% with numThreads workers if one is not already running and use that many
% workers. Locally, use 0 workers: running a parfor loop with 0 workers
% makes it run in serial mode, which means it doesn't waste time sending
% data to and from the workers. (Previously n_workers was assigned twice
% in a row; the first assignment was dead code and has been removed.)
if onCluster
    if isempty(gcp('nocreate'))
        parpool(numThreads);
    end
    n_workers = numThreads;
else
    n_workers = 0;
end
% Record the current commit of each relevant repository for provenance.
core_githead = git_head_hash(behr_paths.behr_core);
behrutils_githead = git_head_hash(behr_paths.behr_utils);
genutils_githead = git_head_hash(behr_paths.utils);
psm_githead = git_head_hash(behr_paths.psm_dir);
imatpy_githead = git_head_hash(behr_paths.python_interface);
wrfutils_githead = git_head_hash(behr_paths.wrf_utils);
datenums = datenum(date_start):datenum(date_end);
parfor(j=1:length(datenums), n_workers)
%for j=1:length(datenums)
    savename = behr_filename(datenums(j), prof_mode, region);
    if exist(fullfile(behr_mat_dir, savename),'file') && ~overwrite
        fprintf('%s already exists, skipping\n', savename);
        continue
    end
    if DEBUG_LEVEL > 0
        fprintf('Processing data for %s\n', datestr(datenums(j)));
    end
    sp_mat_name = sp_savename(datenums(j), region);
    if DEBUG_LEVEL > 1
        fprintf('Looking for SP file %s ...', fullfile(sp_mat_dir,sp_mat_name));
    end
    if ~exist(fullfile(sp_mat_dir,sp_mat_name),'file')
        if DEBUG_LEVEL > 0; disp('No SP file exists for given day'); end
        continue
    end
    if DEBUG_LEVEL > 1; fprintf('\t ...Found.\n'); end
    S=load(fullfile(sp_mat_dir,sp_mat_name));
    Data=S.Data;
    if isempty(Data)
        % If there is no data read in from read_main.m, then there's
        % nothing to do for this day.
        continue
    end
    % Double check that the loaded SP file is for the same region as we're
    % trying to process
    if ~strcmpi(Data(1).BEHRRegion, region)
        E.callError('behr_region', 'Somehow I loaded a file with a different region specified in Data.BEHRRegion (%s) than I am trying to process (%s)', Data(1).BEHRRegion, region)
    end
    %%%%%%%%%%%%%%%%%%%%%%
    % CALCULATE OUR AMFS %
    %%%%%%%%%%%%%%%%%%%%%%
    [Data, OMI] = BEHR_main_one_day(Data, 'no2_profile_path', no2_profile_path, 'profile_mode', prof_mode, 'use_psm_gridding', use_psm, 'err_wrf_missing_attr', err_wrf_missing_attr);
    %%%%%%%%%%%%%
    % SAVE FILE %
    %%%%%%%%%%%%%
    if DEBUG_LEVEL > 0; disp([' Saving data as ',fullfile(behr_mat_dir,savename)]); end
    saveData(fullfile(behr_mat_dir,savename),Data,OMI)
end
end
function saveData(filename,Data,OMI)
% Save the native-pixel (Data) and gridded (OMI) structures to FILENAME.
% Wrapped in a helper function so that save() can be used from inside the
% parfor loop (parfor bodies must be "transparent", which bare save()
% calls violate).
save(filename,'OMI','Data')
end
function mycleanup()
% onCleanup callback used for cluster runs: if MATLAB is exiting because
% an error occurred, print the message, shut down any open parallel pool,
% and exit with a nonzero status so the job scheduler registers a failure.
% NOTE(review): lasterror is deprecated in modern MATLAB; MException.last
% is the suggested replacement, but its behavior differs — confirm before
% changing.
err=lasterror;
if ~isempty(err.message)
    fprintf('MATLAB exiting due to problem: %s\n', err.message);
    if ~isempty(gcp('nocreate'))
        delete(gcp)
    end
    exit(1)
end
end
|
github
|
CohenBerkeleyLab/BEHR-core-master
|
BEHR_InSitu_Reprocessing.m
|
.m
|
BEHR-core-master/BEHR_Main/BEHR_InSitu_Reprocessing.m
| 19,539 |
utf_8
|
33e57f621d7db8046bd19a76ce7e0c85
|
function BEHR_InSitu_Reprocessing
%BEHR_InSitu_Reprocessing
%
% This script will take aircraft data and use it to recalculate AMFs and
% produce a new satellite column using that AMF.
%
% Returns a quality flag with each bit representing a specific warning
% about the data. These mimic the flags in the spiral verification code.
% 1st: Summary, set to 1 if any flags are set.
% 2nd: Reserved as a second summary bit against future need.
% 3rd: Unused
% 4th: Indicates that < 10% of the data points in the profile had NO2
% data
% 5-15: Unused
% 16th: Set if the column was skipped due to < 1% valid
% NO2, pressure, or temperature data
%
% Josh Laughner <[email protected]> 18 Aug 2013
E = JLLErrors;
campaign_name = 'discover-co';
[Names, merge_dates, merge_dir, range_file] = merge_field_names(campaign_name);
if ~isempty(range_file)
load(range_file); % Adds the variable "Ranges" into the workspace
range_avail_dates = {Ranges(:).Date};
profnum_bool = false;
else
profnum_bool = true;
end
start_date = merge_dates{1};
end_date = merge_dates{2};
starttime = '12:00';
endtime = '15:00';
%Which clouds to use for the AMF calculation; 'CloudFraction' for OMI and
%'MODISCloud' for MODIS
cld_field = 'CloudFraction';
%Which NO2 field from the aircraft file to use; for MD options are
%'NO2_NCAR' and 'NO2_LIF'; for CA and TX, 'NO2_MixingRatio' or
%'NO2_MixingRatio_LIF'
no2field = Names.no2_lif;
%The directory where the original BEHR files are located
behr_prefix = 'OMI_BEHR_*';
behr_dir = '/Volumes/share-sat/SAT/BEHR/BEHR_Files_2014/';
%The file prefix and directory to save the resulting files under
save_prefix = 'OMI_BEHR_InSitu_';
save_dir = '/Volumes/share-sat/SAT/BEHR/DISCOVER_BEHR_REPROCESSED/';
amf_tools_path = '/Users/Josh/Documents/MATLAB/BEHR/AMF_tools';
fileTmp = fullfile(amf_tools_path,'nmcTmpYr.txt');
fileDamf = fullfile(amf_tools_path,'damf.txt');
DEBUG_LEVEL = 2;
%%%%% END USER INPUT %%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%% BEGIN FUNCTION %%%%%
fill_val = -9e9; % defined fill value
% Make sure the save prefix ends in an underscore
if ~strcmp(save_prefix(end),'_'); save_prefix(end+1) = '_'; end
dates = datenum(start_date):datenum(end_date);
for d=1:numel(dates)
curr_date = datestr(dates(d),29);
year = curr_date(1:4);
month = curr_date(6:7);
day = curr_date(9:10);
if DEBUG_LEVEL > 0; fprintf('Now on %s\n',curr_date); end
if DEBUG_LEVEL > 1; fprintf(' Loading data...\n'); end
% Load the merge and BEHR file
merge_filename = sprintf('*%s_%s_%s.mat',year,month,day);
behr_filename = sprintf('%s%s%s%s.mat',behr_prefix,year,month,day);
merge_files = dir(fullfile(merge_dir,merge_filename));
if numel(merge_files)==1
load(fullfile(merge_dir, merge_files(1).name),'Merge')
elseif isempty(merge_files)
if DEBUG_LEVEL > 1; fprintf('No Merge file for %s\n',datestr(dates(d))); end
continue
else
error('run_spiral:tmm','Number of merge files for %s is not 1 or 0',datestr(dates(d)));
end
behr_files = dir(fullfile(behr_dir,behr_filename));
if numel(behr_files)==1
load(fullfile(behr_dir,behr_files(1).name),'Data')
elseif isempty(behr_files)
if DEBUG_LEVEL > 1; fprintf('No BEHR file for %s\n',datestr(dates(d))); end
continue
else
error('run_spiral:tmm','Number of BEHR files for %s is not 1 or 0',datestr(dates(d)));
end
flag_base = uint16(0);
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%% AIRCRAFT DATA PREPROCESSING %%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%Load the aircraft data and check if greater than 99% of it is
%nans; if so, fill the new BEHR fields with NaNs and skip this
%loop. If >90% of the NO2 data are nans, then set a warning flag
%but continue processing
[no2, utc, pres, lon, lat] = remove_merge_fills(Merge,no2field,'alt','PRESSURE');
no2(no2<0) = NaN; % Because of the exponential shape of the NO2 profile, negative values lead to imaginary values for the AMFs
percent_no2_nans = sum(isnan(no2))/numel(no2); percent_pres_nans = sum(isnan(pres))/numel(pres);
if percent_no2_nans > 0.99 || percent_pres_nans > 0.99;
flag_base = bitset(flag_base,16,1);
if DEBUG_LEVEL > 1; fprintf('NO2 %.2f NaNs, PRESSURE %.2f NaNs, skipping\n', percent_no2_nans, percent_pres_nans); end
Data = return_null(Data,flag_base);
saveData(Data,curr_date,save_dir,save_prefix);
continue
elseif percent_no2_nans > 0.9 || percent_pres_nans > 0.9
flag_base = bitset(flag_base,4,1);
if DEBUG_LEVEL > 1; fprintf('NO2 %.2f NaNs, PRESSURE %.2f NaNs, setting warning flag\n', percent_no2_nans, percent_pres_nans); end
end
% Handle both identifying profiles by number and by UTC range
if profnum_bool
% Load the profile numbers
profnum = remove_merge_fills(Merge, Names.profile_numbers);
% Calculate the UTC offset to match flight times to satellite overpass
tz = round(nanmean(lon)/15);
% Get all unique profile numbers and their start times
unique_profnums = unique(profnum(profnum~=0));
start_times = zeros(numel(unique_profnums),1);
for a=1:numel(unique_profnums)
xx = profnum == unique_profnums(a);
start_times(a) = min(utc(xx));
end
% Remove from consideration any profiles with a start time before 10:45
% am or after 4:45 pm local standard time
yy = start_times >= local2utc(starttime,tz) & start_times <= local2utc(endtime,tz);
unique_profnums = unique_profnums(yy); start_times = start_times(yy);
% Save each profile's NO2, altitude, radar altitude, latitude, and
% longitude as an entry in a cell array
s = size(unique_profnums);
no2_array = cell(s); utc_array = cell(s);
lat_array = cell(s); lon_array = cell(s);
pres_array = cell(s); profnum_array = cell(s);
for a=1:numel(unique_profnums)
xx = profnum == unique_profnums(a);
no2_array{a} = no2(xx);
lat_array{a} = lat(xx);
lon_array{a} = lon(xx);
pres_array{a} = pres(xx);
utc_array{a} = utc(xx);
profnum_array{a} = unique_profnums(a);
end
else
% Case where a Ranges structure is available. Figure out if any
% ranges are defined for this day. If not it'll have to be skipped
range_date = datestr(dates(d),'mm/dd/yyyy');
rr = ~iscellcontents(regexp(range_avail_dates, range_date),'isempty');
if sum(rr) > 1
E.callError('non_unique_range','More than one range with the date %s was found',range_date);
elseif sum(rr) < 1 || isempty(Ranges(rr).Ranges)
if DEBUG_LEVEL > 0; fprintf('No ranges found for %s, skipping\n',range_date); end
continue
end
% Calculate the UTC offset to match flight times to satellite overpass
tz = round(lon/15);
% Find all the utc start times that are between within the
% specified range of local times. Go through each range, find the
% data points that correspond to it, get the most common timezone,
% use that to set whether to include that range or not. Also, check
% the "user_profnums" variable which will have specific UTC ranges
% to allow
yy = false(size(Ranges(rr).Ranges,1),1);
for a=1:size(Ranges(rr).Ranges,1)
tz_ind = utc >= Ranges(rr).Ranges(a,1) & utc <= Ranges(rr).Ranges(a,2);
mct = mode(tz(tz_ind));
range_start_local = utc2local_sec(Ranges(rr).Ranges(a,1),mct);
yy(a) = range_start_local >= local2utc(starttime,0) && range_start_local <= local2utc(endtime,0);
end
if sum(yy) < 1
if DEBUG_LEVEL > 0; fprintf('No ranges fall within the specified window around OMI overpass, skipping\n'); end
continue
end
ranges_in_time = Ranges(rr).Ranges(yy,:);
s = [1,sum(yy)];
no2_array = cell(s);
lat_array = cell(s);
lon_array = cell(s);
pres_array = cell(s);
utc_array = cell(s);
profnum_array = cell(s);
for a=1:s(2)
xx = utc >= ranges_in_time(a,1) & utc <= ranges_in_time(a,2);
no2_array{a} = no2(xx);
lat_array{a} = lat(xx);
lon_array{a} = lon(xx);
pres_array{a} = pres(xx);
utc_array{a} = utc(xx);
profnum_array{a} = ranges_in_time(a,:);
end
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%% RECALCULATE VCDs %%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%
for s=1:numel(Data)
if DEBUG_LEVEL > 1; fprintf('\tNow on swath %d of %d\n',s,numel(Data)); end
%Initialize the new fields in the Data structure to hold the AMF
%calculated with the in-situ column and the reprocesses satellite
%column itself.
Data(s).InSituAMF = fill_val * ones(size(Data(s).Latitude));
Data(s).BEHR_R_ColumnAmountNO2Trop = fill_val * ones(size(Data(s).Latitude));
Data(s).ProfileCount = fill_val * ones(size(Data(s).Latitude));
Data(s).InSituFlags = fill_val * ones(size(Data(s).Latitude));
flags = uint16(double(flag_base)*ones(size(Data(s).Latitude)));
%Now iterate through each profile. Find the pixels it intersects
%and use the profile to calculate a new AMF for those pixels
omi_lat = Data(s).Latitude; omi_lon = Data(s).Longitude;
corner_lat = Data(s).Latcorn; corner_lon = Data(s).Loncorn;
omi_no2 = Data(s).ColumnAmountNO2Trop;
sza = Data(s).SolarZenithAngle; vza = Data(s).ViewingZenithAngle;
phi = Data(s).RelativeAzimuthAngle; albedo = Data(s).MODISAlbedo;
surfPres = Data(s).GLOBETerpres; cloudPres = Data(s).CloudPressure;
cldFrac = Data(s).(cld_field); cldRadFrac = Data(s).CloudRadianceFraction;
behr_no2 = Data(s).BEHRColumnAmountNO2Trop;
% This will be where the amfs due to each profile go to be averaged
% together to find the average amf for the pixel
tmp_amfs = nan(numel(behr_no2),numel(no2_array));
tmp_pres = nan(30,numel(behr_no2),numel(no2_array));
tmp_prof = nan(30,numel(behr_no2),numel(no2_array));
tmp_ghost = nan(numel(behr_no2),numel(no2_array));
tmp_count = zeros(size(behr_no2));
for p=1:numel(no2_array)
% Find all the pixels that impinge on this profile
lontest = lon_array{p}; lontest(isnan(lontest)) = [];
lattest = lat_array{p}; lattest(isnan(lattest)) = [];
lat_logical = max(lattest) > min(corner_lat,[],1) & min(lattest) < max(corner_lat,[],1);
lon_logical = max(lontest) > min(corner_lon,[],1) & min(lontest) < max(corner_lon,[],1);
latlon_logical = lat_logical & lon_logical;
pix_indices = find(latlon_logical);
% Skip this profile if no pixels intersect it
if sum(latlon_logical) == 0;
if DEBUG_LEVEL > 1; fprintf('\t\tNo pixels fall within initial lat/lon boundaries\n'); end
continue;
end
loncorn_p = corner_lon(:,latlon_logical); latcorn_p = corner_lat(:, latlon_logical);
% Finally actually check if the profile falls in the pixel
% using inpolygon(). Recall that we require there to be 20
% valid measurements in the lowest 3 km (~ 675 hPa).
no2_3km = no2_array{p}(pres_array{p}>675);
lon_3km = lon_array{p}(pres_array{p}>675);
lat_3km = lat_array{p}(pres_array{p}>675);
pix_coverage = zeros(size(loncorn_p,2),1);
pix_xx = true(size(loncorn_p,2),1);
for pix=1:size(loncorn_p,2)
IN_3km = inpolygon(lon_3km, lat_3km, loncorn_p(:,pix), latcorn_p(:,pix));
if sum(~isnan(no2_3km(IN_3km)))<20
pix_xx(pix) = false;
continue % Pixel must have 20 valid measurments between 0-3 km altitude (good sampling of boundary layer)
end
% Calculate what percentage of the profile actually falls in
% this pixel; append to all values for this pixel
IN_all = inpolygon(lon_array{p}, lat_array{p}, loncorn_p(:,pix), latcorn_p(:,pix));
pix_coverage(pix) = sum(IN_all)/numel(no2_array{p});
end
%If there are no pixels that have part of the profile below 3
%km within them, skip this profile
if sum(pix_indices)==0;
if DEBUG_LEVEL > 1; fprintf('\t\tNo pixels fall within strict lat/lon boundaries\n'); end
continue;
end
pix_indices = pix_indices(pix_xx);
omi_lon_p = omi_lon(pix_indices); omi_lat_p = omi_lat(pix_indices);
loncorn_p = corner_lon(:,pix_indices); latcorn_p = corner_lat(:, pix_indices);
omi_no2_p = omi_no2(pix_indices); behr_no2_p = behr_no2(pix_indices);
sza_p = sza(pix_indices); vza_p = vza(pix_indices);
phi_p = phi(pix_indices); albedo_p = albedo(pix_indices);
surfPres_p = surfPres(pix_indices); cloudPres_p = cloudPres(pix_indices);
cldFrac_p = cldFrac(pix_indices); cldRadFrac_p = cldRadFrac(pix_indices);
amfs_p = nan(size(omi_lon_p));
new_prof_p = nan(30, size(omi_lon_p,1), size(omi_lon_p,2));
new_pres_p = nan(30, size(omi_lon_p,1), size(omi_lon_p,2));
new_ghost_p = nan(size(omi_lon_p));
if DEBUG_LEVEL > 1; fprintf('\t\tRecalculating AMFs\n'); end
for pix=1:numel(omi_lon_p)
% Extrapolate and bin the profile using median extrapolation on
% the bottom and by inserting a scaled WRF profile on the top,
% if needed. Only the surface pressure changes for each
% pixel.
[insitu_profile, insitu_pressures] = extrapolate_profile(no2_array{p}, pres_array{p}, 'surfacePressure', surfPres_p(pix),'top', 'wrf', 'bottom', 'median',...
'utc',nanmean(utc_array{p}), 'month',month,'lat',lat_array{p},'lon',lon_array{p},'date',curr_date,'shape','exp');
% The profiles and pressure must be columns for the AMF calculation to
% function correctly
if ~iscolumn(insitu_pressures); insitu_pressures = insitu_pressures'; end
if ~iscolumn(insitu_profile); insitu_profile = insitu_profile'; end
% A surface pressure greater than 1013 will cause the dAmf
% interpolation to fail.
this_surfPres = min([surfPres_p(pix),1013]);
% Calculate the new AMF
mean_lon = nanmean(lon_array{p});
mean_lat = nanmean(lat_array{p});
if isnan(mean_lon) || isnan(mean_lat)
E.badvar('mean_lon/mean_lat','the mean lon/lat is a NaN');
end
[temperature, ~] = rNmcTmp2(fileTmp, insitu_pressures, nanmean(lon_array{p}), nanmean(lat_array{p}), str2double(month));
dAmfClr2 = rDamf2(fileDamf, insitu_pressures, sza_p(pix), vza_p(pix), phi_p(pix), albedo_p(pix), this_surfPres);
cloudalbedo=0.8;
dAmfCld2 = rDamf2(fileDamf, insitu_pressures, sza_p(pix), vza_p(pix), phi_p(pix), cloudalbedo, cloudPres_p(pix));
noGhost = 0; ak = 1;
[amfs_p(pix), ~, ~, ~, ~, new_prof_pix, new_pres_pix, new_ghost_pix] = omiAmfAK2(this_surfPres, cloudPres_p(pix), cldFrac_p(pix), cldRadFrac_p(pix), insitu_pressures, dAmfClr2, dAmfCld2, temperature, insitu_profile, insitu_profile, noGhost, ak);
% Since each in situ profile will be a different length,
% force them all to be 30 elements long.
if length(new_prof_pix) > 30
E.unknownError('Somehow one of the profile vectors has > 30 elements');
end
profile_length = 30;
nan_pad = nan(profile_length-length(new_prof_pix),1);
new_pres_pix = cat(1, new_pres_pix, nan_pad);
new_prof_pix = cat(1, new_prof_pix, nan_pad);
new_prof_p(:,pix) = new_prof_pix;
new_pres_p(:,pix) = new_pres_pix;
new_ghost_p(pix) = new_ghost_pix;
end
tmp_amfs(pix_indices,p) = amfs_p;
tmp_prof(:,pix_indices,p) = new_prof_p;
tmp_pres(:,pix_indices,p) = new_pres_p;
tmp_ghost(pix_indices,p) = new_ghost_p;
tmp_count(pix_indices) = tmp_count(pix_indices) + 1;
end
new_amfs = nanmean(tmp_amfs,2);
new_amfs = reshape(new_amfs,size(omi_lat));
new_columns = Data(s).BEHRColumnAmountNO2Trop .* Data(s).BEHRAMFTrop ./ new_amfs;
new_profs = nanmean(tmp_prof,3);
new_profs = reshape(new_profs, 30, size(omi_lat,1), size(omi_lat,2));
new_pres = nanmean(tmp_pres,3);
new_pres = reshape(new_pres, 30, size(omi_lat,1), size(omi_lat,2));
new_ghost = nanmean(tmp_ghost,2);
new_ghost = reshape(new_ghost, size(omi_lat));
Data(s).InSituAMF = new_amfs;
Data(s).BEHR_R_ColumnAmountNO2Trop = new_columns;
Data(s).InSituProfile = new_profs;
Data(s).InSituPressureLevels = new_pres;
Data(s).InSituGhostFraction = new_ghost;
Data(s).ProfileCount = tmp_count;
Data(s).InSituFlags = flags;
end
%Run these checks in a separate for loop so that any continue
%statements in the first don't cause them to be skipped.
for s=1:numel(Data)
% Check if either of the new fields is still a fill value; if so
% throw a warning to let the user know that for some reason the
% values were not set at some point in the loop
if any(Data(s).InSituAMF == fill_val);
warning('In-situ AMF not set for swath %d on %s',s,curr_date);
end
if any(Data(s).BEHR_R_ColumnAmountNO2Trop == fill_val);
warning('Reprocessed column not set for swath %d on %s',s,curr_date);
end
if any(Data(s).InSituFlags == fill_val);
warning('Flags not set for swath %d on %s',s,curr_date);
end
end
saveData(Data,curr_date,save_dir,save_prefix);
clear Data
end
function data = return_null(data,flag)
% Fill the in-situ reprocessing fields of every swath with null values:
% NaNs for the in-situ AMF and reprocessed column, zeros for the profile
% count, and the given base flag value for the quality flags. Used when a
% day has no usable aircraft profiles so the output fields still exist.
for s=1:numel(data)
    null_mat = nan(size(data(s).Longitude));
    data(s).InSituAMF = null_mat;
    data(s).BEHR_R_ColumnAmountNO2Trop = null_mat;
    data(s).ProfileCount = zeros(size(data(s).Longitude));
    data(s).InSituFlags = uint16(double(flag) * ones(size(data(s).Longitude)));
end
function saveData(Data,curr_date,save_dir,prefix)
% Save the reprocessed Data structure to <save_dir>/<prefix>yyyymmdd.mat.
% CURR_DATE may be any date string or date number understood by DATENUM;
% previously only the exact fixed-width format 'yyyy-mm-dd' worked because
% the year/month/day were sliced out by character position.
datestamp = datestr(datenum(curr_date), 'yyyymmdd');
savename = sprintf('%s%s.mat', prefix, datestamp);
save(fullfile(save_dir, savename), 'Data');
|
github
|
CohenBerkeleyLab/BEHR-core-master
|
BEHR_main_one_day.m
|
.m
|
BEHR-core-master/BEHR_Main/BEHR_main_one_day.m
| 20,226 |
utf_8
|
1dc50e019931e401cee177df318402ba
|
function [ Data, OMI ] = BEHR_main_one_day( Data, varargin )
%BEHR_MAIN_ONE_DAY The BEHR algorithm for a single day's data.
%   [ DATA, OMI ] = BEHR_main_one_day( DATA ) Takes as input a DATA
%   structure created by READ_MAIN() and calculates BEHR AMFs for it as
%   well as grids the data using the BEHR-PSM repository. Returns the
%   structure DATA with the native pixels including BEHR NO2 VCDs and the
%   OMI structure with the gridded quantities.
%
%   Additional parameters:
%
%       'no2_profile_path' - the top directory to look for WRF output files
%       in. If not given, or given as an empty string, this is determined
%       automatically based on "profile_mode" and the region specified in
%       Data as the field "BEHRRegion".
%
%       'profile_mode' - must be the string 'daily' or 'monthly' (defaults
%       to 'monthly'). Controls whether daily or monthly profiles will be
%       used, which also controls whether rProfile_WRF.m looks for files
%       named 'WRF_BEHR_monthly_yyyy-mm.nc' (monthly) or
%       'wrfout_*_yyyy-mm-dd_hh-00-00' (daily).
%
%       'use_psm_gridding' - if false (default), uses CVM gridding for all
%       fields. If true, then NO2 fields will be gridded using the PSM
%       method (specifically, fields specified as psm_gridded_vars in
%       BEHR_publishing_gridded_fields will be gridded by PSM).
%
%       'err_wrf_missing_attr' - if true (default), then if WRF files are
%       missing attributes that are read in (usually units), an error is
%       thrown. However, if false, then default units are assumed. Use
%       "false" with caution, as if the necessary variables are given in
%       the wrong units, there will be no way to catch that if "false" is
%       given for this parameter.
%
%       'extra_gridding_fields' - a cell array of strings that lists extra
%       fields that you wish to have gridded, beyond the standard fields
%       listed in BEHR_publishing_gridded_fields.
%
%       'DEBUG_LEVEL' - level of progress messaged printed to the console.
%       0 = none, 1 = minimal, 2 = all, 3 = processing times are added.
%       Default is 2.
%
%   Parameters specific to error analysis:
%
%       'lookup_sweights' - scalar logical, determines whether the
%       algorithm should look up scattering weights based on SZA, VZA, etc.
%       or use the scattering weights already stored in Data. This is
%       intended to allow for uncertainty testing by ensuring the same
%       scattering weights are used while the NO2 profile is varied.
%       Default is true, i.e. the scattering weights are looked up from the
%       TOMRAD table and not read from Data.
%
%       'lookup_profile' - scalar logical, determined whether the WRF NO2
%       profile should be read from WRF netCDF files (true, default) or
%       read in from Data. Similar to "lookup_sweights", intended for
%       uncertainty analysis. If false, Data must be the result of
%       BEHR_main to have the profiles stored in it.
%
%       'randomize_profile_time' - scalar logical (default false), if true,
%       the NO2 profile will be chosen from a different day in the same
%       month. The hour of day is still chosen properly, so the dominant
%       error that this is testing is if the wind direction is very wrong.
%
%       'randomize_profile_loc' - scalar logical (default false), if true,
%       then the pixel corners will be "jittered" by some amount (currently
%       0.2 degrees) so that a WRF profile from a nearby, but different,
%       location is used for that pixel. All corners of one pixel are
%       jittered in the same direction. This simulates if the emissions,
%       transport speed, or chemistry are in the incorrect place in WRF.
%       Currently, the randomization allows the pixels to not move, in
%       order to include the possibility that the
%       emissions/transport/chemistry are correct in the WRF simulation.

% Error handler object. BUG FIX: this was previously never initialized,
% so the E.badinput/E.callError calls below raised "Undefined variable E"
% instead of the intended, informative errors.
E = JLLErrors;

p = inputParser;
% Parameters relevant to the normal retrieval
p.addParameter('no2_profile_path', '');
p.addParameter('profile_mode', 'monthly');
p.addParameter('use_psm_gridding', false);
p.addParameter('err_wrf_missing_attr', true);
p.addParameter('extra_gridding_fields', {});
% Parameters relevant to error analysis
p.addParameter('lookup_sweights', true);
p.addParameter('lookup_profile', true);
p.addParameter('randomize_profile_time', false);
p.addParameter('randomize_profile_loc', false);
% Other parameters
p.addParameter('DEBUG_LEVEL', 2);

p.KeepUnmatched = true;
p.parse(varargin{:});
pout = p.Results;

no2_profile_path = pout.no2_profile_path;
prof_mode = pout.profile_mode;
use_psm = pout.use_psm_gridding;
err_wrf_missing_attr = pout.err_wrf_missing_attr;
extra_gridding_fields = pout.extra_gridding_fields;
lookup_sweights = pout.lookup_sweights;
lookup_profile = pout.lookup_profile;
randomize_profile_time = pout.randomize_profile_time;
randomize_profile_loc = pout.randomize_profile_loc;
DEBUG_LEVEL = pout.DEBUG_LEVEL;

allowed_prof_modes = {'daily','monthly'};

if ~ischar(no2_profile_path)
    E.badinput('Parameter "no2_profile_path" must be a string');
elseif ~ismember(prof_mode,allowed_prof_modes)
    E.badinput('prof_mode (if given) must be one of %s', strjoin(allowed_prof_modes,', '));
elseif ~isscalar(use_psm) || (~islogical(use_psm) && ~isnumeric(use_psm))
    E.badinput('use_psm_gridding must be a scalar logical or number')
end

if ~iscellstr(extra_gridding_fields)
    E.badinput('extra_gridding_fields must be a cell array of char arrays')
end

%Store paths to relevant files
fileDamf = fullfile(behr_paths.amf_tools_dir,'damf.txt');

% Get the Git head hashes
core_githead = git_head_hash(behr_paths.behr_core);
behrutils_githead = git_head_hash(behr_paths.behr_utils);
genutils_githead = git_head_hash(behr_paths.utils);
psm_githead = git_head_hash(behr_paths.psm_dir);
imatpy_githead = git_head_hash(behr_paths.python_interface);
wrfutils_githead = git_head_hash(behr_paths.wrf_utils);

this_date = Data(1).Date;
region = Data(1).BEHRRegion;

for d=1:length(Data)
    % Data is initialized in read_main with a single 0 in the Longitude
    % field. Since points outside the lat/lons of interest are removed
    % completely, we should also check if all points are gone.
    if numel(Data(d).Longitude)==1 || isempty(Data(d).Longitude)
        if DEBUG_LEVEL > 1; fprintf('  Note: Data(%u) is empty\n',d); end
        continue %JLL 17 Mar 2014: Skip doing anything if there's really no information in this data
    end
    if DEBUG_LEVEL>0; fprintf('  Swath %u of %s \n',d,datestr(this_date)); end

    %JLL 17 Mar 2014: Load some of the variables from 'Data' to
    %make referencing them less cumbersome. Also convert some
    %to column vectors to work with rNmcTmp2 and rDamf2
    loncorns = Data(d).FoV75CornerLongitude;
    latcorns = Data(d).FoV75CornerLatitude;
    time = Data(d).Time;
    sza = Data(d).SolarZenithAngle;
    vza = Data(d).ViewingZenithAngle;
    phi = Data(d).RelativeAzimuthAngle;
    globe_terheight = Data(d).GLOBETerrainHeight;
    albedo = Data(d).MODISAlbedo;
    cldFrac = Data(d).CloudFraction;
    cldRadFrac = Data(d).CloudRadianceFraction;

    pressure = behr_pres_levels();

    if DEBUG_LEVEL > 1; fprintf('   Reading NO2 and temperature profiles\n'); end
    if randomize_profile_time
        % Used for uncertainty analysis to vary the day of month that the
        % profile is chosen from.
        if strcmpi(prof_mode, 'daily')
            prof_date = random_day_in_month(this_date, true);
        else
            E.callError('incompatible_prof_mode', '"randomize_profile_time" cannot be used unless "prof_mode" is "daily"');
        end
    else
        prof_date = this_date;
    end

    if randomize_profile_loc
        % Used for uncertainty analysis. Move the pixel corners by roughly
        % one OMI pixel in each dimension to "jitter" which profiles are
        % used. This should emulate if e.g. emissions are allocated in the
        % wrong place, or transport is wrong, or chemistry is wrong,
        % because by moving the pixel we can alter how long the air parcel
        % has aged chemically by moving it nearer or farther from the
        % source.
        [prof_loncorns, prof_latcorns] = jitter_corners(loncorns, latcorns, 0.2);
    else
        prof_loncorns = loncorns;
        prof_latcorns = latcorns;
    end

    % For normal runs, we want the NO2 profiles and temperature profiles
    % set to NaN outside the integration limits (actually before the bin
    % just below the surface and after the bin just above the tropopause).
    % However, when doing error analysis, we sometimes run into issues
    % where the tropopause is different whether using precalculated or
    % online computed temperature profiles. (The difference in the
    % temperature profile itself is very small, but it is occasionally
    % enough to move the lapse rate to the other side of the 2 K/km
    % threshold.) Therefore in that case we need to keep the NO2 and
    % temperature profiles over all bins.
    keep_all_bins = ~lookup_profile;
    [no2Profile, temperature, wrf_profile_file, surfPres, surfPres_WRF, tropoPres, tropopause_interp_flag, wrf_pres_mode, wrf_temp_mode] = ...
        rProfile_WRF(prof_date, prof_mode, region, prof_loncorns, prof_latcorns, time, globe_terheight, pressure, no2_profile_path,...
        'err_missing_att', err_wrf_missing_attr, 'clip_at_int_limits', ~keep_all_bins); %JLL 18 Mar 2014: Bins the NO2 profiles to the OMI pixels; the profiles are averaged over the pixel

    surfPres(surfPres > 1013) = 1013;
    cldPres = Data(d).CloudPressure;
    cldPres = min(cldPres, surfPres); % Clamp cldPres to be <= surface pressure, should not have below surface clouds.

    if ~lookup_profile
        % If we want to use the exact same NO2 profiles as in the original
        % run, we can't use the ones in the Data file directly because we
        % might need the profile extrapolated over all the standard
        % pressure levels. Since rProfile_WRF has an option to leave those
        % in, instead of trying to extrapolate the stored profiles, we just
        % check that there are no differences greater than 1 pptv. This will
        % not fail if there are NaNs in the stored profiles but not the
        % extrapolated ones because NaN > x will always be false.
        no2Profile_check = remove_nonstandard_pressures(Data(d).BEHRNO2apriori, Data(d).BEHRPressureLevels, pressure);
        if ~lookup_sweights && any(abs(no2Profile(:) - no2Profile_check(:)) > 1e-12)
            % If not using the scattering weights from the Data structure,
            % then we need to verify that we loaded the right temperature
            % profiles for the scattering weights.
            E.callError('profile_lookup', 'Looked up different temperature profiles than the NO2 profiles given in the file (difference exceeds 1 pptv)')
        end
        tropoPres = Data(d).BEHRTropopausePressure;
    end
    bad_profs = squeeze(all(isnan(no2Profile),1));

    if lookup_sweights || randomize_profile_loc || randomize_profile_time
        % If we change the profiles, we need to allow for the tropopause to
        % have changed and the effect of the different temperature profile.
        % The latter should be minimal, but I've run into cases where the
        % old temperature wasn't defined at enough pressure levels for the
        % new calculation to work (i.e. there were NaNs left during the
        % integration).
        if DEBUG_LEVEL > 1; fprintf('   Calculating clear and cloudy AMFs\n'); end
        dAmfClr = rDamf2(fileDamf, pressure, sza, vza, phi, albedo, surfPres); %JLL 18 Mar 2014: Interpolate the values in dAmf to the albedo and other conditions input
        cloudalbedo=0.8*ones(size(Data(d).CloudFraction)); %JLL 18 Mar 2014: Assume that any cloud has an albedo of 0.8
        dAmfCld = rDamf2(fileDamf, pressure, sza, vza, phi, cloudalbedo, cldPres); %JLL 18 Mar 2014: Interpolate dAmf again, this time taking the cloud top and albedo as the bottom pressure
    else
        % Really we will almost never get here; if we're running BEHR
        % normally, then we obviously need to look up the scattering
        % weights; if we're running an error analysis and perturb one of
        % the scattering weight input parameters, we need to look up the
        % scattering weights; if we're running an error analysis and
        % perturb the profiles, we need to look up the scattering weights
        % again because the tropopause may have changed. I'm leaving this
        % here in case it becomes useful in the future.
        dAmfClr = remove_nonstandard_pressures(Data(d).BEHRScatteringWeightsClear, Data(d).BEHRPressureLevels, pressure);
        dAmfCld = remove_nonstandard_pressures(Data(d).BEHRScatteringWeightsCloudy, Data(d).BEHRPressureLevels, pressure);
        % The scattering weights in the Data structures already include the
        % temperature correction, so we need to set the temperature
        % profiles to something that makes that correction 1, i.e. no
        % effect.
        temperature = 220 * ones(size(dAmfClr));
    end

    if DEBUG_LEVEL > 1; disp('   Calculating BEHR AMF'); end
    [amf, amfVis, ~, ~, scattering_weights_clear, scattering_weights_cloudy, avg_kernels, no2_prof_interp, sw_plevels] = omiAmfAK2(surfPres, tropoPres, cldPres, cldFrac, cldRadFrac, pressure, dAmfClr, dAmfCld, temperature, no2Profile); %JLl 18 Mar 2014: The meat and potatoes of BEHR, where the TOMRAD AMF is adjusted to use the GLOBE pressure and MODIS cloud fraction
    amf(bad_profs)=NaN;
    amfVis(bad_profs)=NaN;
    scattering_weights_clear(:,bad_profs)=NaN;
    scattering_weights_cloudy(:,bad_profs)=NaN;
    avg_kernels(:,bad_profs)=NaN;
    sw_plevels(:,bad_profs)=NaN;
    no2_prof_interp(:,bad_profs)=NaN;

    sz = size(Data(d).Longitude);
    len_vecs = size(scattering_weights_clear,1);  % JLL 26 May 2015 - find out how many pressure levels there are. Will often be 30, but might change.
    % Need this to properly reshape the scattering weights, AKs, pressure levels, and (soon) profiles

    Data(d).BEHRAMFTrop = reshape(amf,sz); %JLL 18 Mar 2014: Save the resulting AMF of the pixel
    Data(d).BEHRAMFTropVisOnly = reshape(amfVis,sz);
    Data(d).BEHRScatteringWeightsClear = reshape(scattering_weights_clear, [len_vecs, sz]);
    Data(d).BEHRScatteringWeightsCloudy = reshape(scattering_weights_cloudy, [len_vecs, sz]);
    Data(d).BEHRAvgKernels = reshape(avg_kernels, [len_vecs, sz]);
    Data(d).BEHRNO2apriori = reshape(no2_prof_interp, [len_vecs, sz]);
    Data(d).BEHRWRFFile = wrf_profile_file;
    Data(d).BEHRWRFPressureMode = wrf_pres_mode;
    Data(d).BEHRWRFTemperatureMode = wrf_temp_mode;
    Data(d).BEHRProfileMode = prof_mode;
    Data(d).BEHRPressureLevels = reshape(sw_plevels, [len_vecs, sz]);
    % temporary fields, will be removed after the warning flag is set
    Data(d).TropoPresVSCldPres = (tropoPres-cldPres) > 0;
    Data(d).Interp_TropopausePressure = tropopause_interp_flag;
    %
    Data(d).BEHRSurfacePressure = surfPres;
    Data(d).WRFSurfacePressure = surfPres_WRF; % mainly for testing, I'm curious how much WRF's surface pressure differs when adjusted with GLOBE
    Data(d).BEHRTropopausePressure = tropoPres;
    Data(d).BEHRQualityFlags = behr_quality_flags(Data(d));
end

% remove the field 'TropoPresVSCldPres' as it's only used in behr_quality_flags
Data = rmfield(Data,{'TropoPresVSCldPres','Interp_TropopausePressure'});

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% CALCULATE VCDS FROM NASA SCDS AND OUR AMFS %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
b=length(Data);
for z=1:b
    if ~isfield(Data,'BEHRAMFTrop') || isempty(Data(z).BEHRAMFTrop)
        continue
    end
    Data(z).BEHRColumnAmountNO2Trop=Data(z).ColumnAmountNO2Trop.*Data(z).AmfTrop./Data(z).BEHRAMFTrop;
    Data(z).BEHRColumnAmountNO2TropVisOnly=Data(z).ColumnAmountNO2Trop.*Data(z).AmfTrop./Data(z).BEHRAMFTropVisOnly;
    % make sure fill values in the original column or AMF are
    % fill values in BEHR.
    Data(z).BEHRColumnAmountNO2Trop(Data(z).ColumnAmountNO2Trop < -1e29 | Data(z).AmfTrop < -30000) = nan;
    Data(z).BEHRColumnAmountNO2TropVisOnly(Data(z).ColumnAmountNO2Trop < -1e29 | Data(z).AmfTrop < -30000) = nan;
    if DEBUG_LEVEL > 0; fprintf('   BEHR [NO2] stored for swath %u\n',z); end

    Data(z).GitHead_Core_Main = core_githead;
    Data(z).GitHead_BEHRUtils_Main = behrutils_githead;
    Data(z).GitHead_GenUtils_Main = genutils_githead;
    Data(z).GitHead_PSM_Main = psm_githead;
    Data(z).GitHead_MatPyInt_Main = imatpy_githead;
    Data(z).GitHead_WRFUtils_Main = wrfutils_githead;
end

%%%%%%%%%%%%%%%%%
% GRIDDING DATA %
%%%%%%%%%%%%%%%%%
OMI = psm_wrapper(Data, Data(1).Grid, 'only_cvm', ~use_psm, 'extra_cvm_fields', extra_gridding_fields, 'DEBUG_LEVEL', DEBUG_LEVEL);

end
function val_out = remove_nonstandard_pressures(val, pres, std_pres)
% Strip the extra (non-standard) pressure levels out of VAL so that only
% the entries at the standard pressures STD_PRES remain. VAL and PRES must
% be 3D arrays of the same size whose first dimension has length
% numel(STD_PRES)+3 (the three extras being the surface, cloud, and
% tropopause levels inserted by the AMF code).
n_std_pres = numel(std_pres);

E = JLLErrors;
dims = size(val);
if ndims(val) ~= 3 || dims(1) ~= n_std_pres + 3
    E.badinput('VAL should be 3D with the first dimension having length %d', n_std_pres + 3)
end
if ~isequal(size(pres), size(val))
    E.badinput('PRES must be the same size as VAL')
end

val_out = nan([n_std_pres, dims(2:end)]);
for i_prof = 1:prod(dims(2:end))
    prof_pres = pres(:, i_prof);
    if all(isnan(prof_pres))
        % No pressure information at all for this profile, which usually
        % means the pixel lies outside the WRF domain. That is only
        % acceptable if the values are all NaN too; in that case keep the
        % first n NaNs so the output stays the right size.
        if ~all(isnan(val(:, i_prof)))
            E.callError('find_std_pres', 'Trying to remove nonstandard pressures, but PRES(:,a) is all NaNs and VAL(:,a) is not');
        end
        keep = false(size(prof_pres));
        keep(1:n_std_pres) = true;
    else
        keep = ismember(prof_pres, std_pres);
    end
    if sum(keep) ~= n_std_pres
        E.callError('find_std_pres','Did not find the %d standard pressures', n_std_pres)
    end
    val_out(:, i_prof) = val(keep, i_prof);
end
end
function date_out = random_day_in_month(date_in, forbid_current_day)
% Return a date number in the same year and month as DATE_IN with the day
% of month chosen uniformly at random. If FORBID_CURRENT_DAY is true the
% draw is repeated until the chosen day differs from DATE_IN's day;
% otherwise (default false) the original day may be returned.
if ~exist('forbid_current_day', 'var')
    forbid_current_day = false;
end

this_year = year(date_in);
this_month = month(date_in);
orig_day = day(date_in);
n_days = eomday(this_year, this_month);

new_day = randi(n_days);
if forbid_current_day
    % Keep redrawing until we land on a different day than the input.
    while new_day == orig_day
        new_day = randi(n_days);
    end
end

date_out = datenum(this_year, this_month, new_day);
end
function [loncorn_out, latcorn_out] = jitter_corners(loncorn_in, latcorn_in, jitter_amt, force_jitter)
% Randomly shift each pixel's corners by -JITTER_AMT, 0, or +JITTER_AMT
% degrees in both longitude and latitude; all four corners of a pixel move
% together. If FORCE_JITTER is true (default false), a pixel is never left
% with a zero shift in both directions, i.e. every pixel is guaranteed to
% move.
if ~exist('force_jitter', 'var')
    force_jitter = false;
end

E = JLLErrors;
if size(loncorn_in, 1) ~= 4 || size(latcorn_in, 1) ~= 4
    E.badinput('LONCORN_IN and LATCORN_IN must have length 4 in the first dimension')
elseif ~isequal(size(loncorn_in), size(latcorn_in))
    E.badinput('LONCORN_IN and LATCORN_IN must be the same size')
end
if ~isnumeric(jitter_amt) || ~isscalar(jitter_amt) || jitter_amt <= 0
    E.badinput('JITTER_AMT must be a scalar, positive number')
end

pix_dims = size(loncorn_in);
x_sign = randi([-1 1], pix_dims(2:3));
y_sign = randi([-1 1], pix_dims(2:3));

if force_jitter
    % Re-draw any pixels whose shifts came up zero in both directions
    % until every pixel moves in at least one direction.
    stuck = x_sign(:) == 0 & y_sign(:) == 0;
    while any(stuck(:))
        x_sign(stuck) = randi([-1 1], sum(stuck), 1);
        y_sign(stuck) = randi([-1 1], sum(stuck), 1);
        stuck = x_sign(:) == 0 & y_sign(:) == 0;
    end
end

% Expand the per-pixel signs along the corner dimension so that all four
% corners of a given pixel shift by the same amount.
x_shift = repmat(permute(x_sign, [3 1 2]) * jitter_amt, 4, 1, 1);
y_shift = repmat(permute(y_sign, [3 1 2]) * jitter_amt, 4, 1, 1);

loncorn_out = loncorn_in + x_shift;
latcorn_out = latcorn_in + y_shift;
end
|
github
|
CohenBerkeleyLab/BEHR-core-master
|
BEHR_publishing_main.m
|
.m
|
BEHR-core-master/HDF tools/BEHR_publishing_main.m
| 27,750 |
utf_8
|
94c9f8d5b73d10d19844b4b0963dd564
|
function [ ] = BEHR_publishing_main(varargin)
%BEHR_publishing_v2 Create the HDF files for BEHR products
%   BEHR_Publishing_v2 can accept a number of input parameters to alter its
%   behavior. All of these have default values that are set up so that
%   calling it without parameters will lead to standard behavior. The
%   parameters are:
%
%       'start': the first date to process, as a date number or a string
%       implicitly understood by datenum(). Default is '2005-01-01'
%
%       'end': the last date to process; same format requirements as
%       'start'. Default is today.
%
%       'output_type': one of the strings 'hdf' or 'txt', determines which
%       output format will be used. 'hdf' will produce HDF version 5 files.
%       Default is 'hdf'
%
%       'pixel_type': one of the strings 'native' or 'gridded', determines
%       whether the native pixels (i.e. the 'Data' structure) or the
%       gridded pixel (i.e. the 'OMI' structure) will be saved. Default is
%       'native'.
%
%       'reprocessed': a boolean (true or false). If true, this tells the
%       publishing algorithm to include fields that used in situ
%       measurements from the DISCOVER-AQ campaign as a priori profiles.
%       That is a specialized product that hasn't been updated in years.
%       Default is false.
%
%       'region': a string indicating which region to publish, must match
%       the directory structure in behr_paths.behr_mat_dir. Only used if
%       "mat_dir" is not specified. Default is 'us'.
%
%       'profile_mode': a string which a priori profiles' retrieval to use,
%       must match the directory structure in behr_paths.behr_mat_dir
%       (within each region). Only used if "mat_dir" is not specified.
%       Default is 'monthly'.
%
%       'mat_dir': the directory from which to load the Matlab files with
%       BEHR output saved in the. If not given (or given as an empty
%       string) then behr_paths.BEHRMatSubdir(region, profile_mode) is
%       called with the values of the "region" and "profile_mode"
%       parameters to determine the directory for the given region and
%       profile mode. Otherwise, BEHR .mat files are read from the
%       directory given.
%
%       'save_dir': the directory to which to save the resulting HDF or CSV
%       files. Default is the value returned by
%       behr_paths.website_staging_dir.
%
%       'organize': a boolean that indicates whether the output should go
%       directly in the save directory (false) or in a subdirectory named
%       behr_<pixel_type>-<output_type>-<behr version>, e.g.
%       "behr_native-hdf-v2-1C". Default is true.
%
%       'overwrite': controls the behavior of this function if one of the
%       files it is trying to output already exists. This is a number, a
%       negative value will cause it to ask you on at least the first file,
%       whether it continues to ask on successive files depends on your
%       response. 0 means do not overwrite, and a positive value means
%       always overwrite. Default is -1, i.e. ask the user.
%
%       'DEBUG_LEVEL': a scalar number that controls the verbosity of this
%       function. 0 is minimum verbosity, higher numbers print more to the
%       screen.
%
%   This function can also be parallelized using the global variables
%   numThreads and onCluster.

% onCluster is a global toggle set by the calling runscript; it switches
% this function between the interactive serial loop and the parfor loop.
global onCluster
if isempty(onCluster)
    onCluster = false;
end

if onCluster
    addpath('~/MATLAB/Utils');
    addpath('~/MATLAB/Classes');
    % Cleanup object will safely exit if there's a problem
    cleanupobj = onCleanup(@() mycleanup());
end

E = JLLErrors;

% Parse the optional parameters; see the help text above for the meaning
% and default of each one.
p = inputParser;
p.addParameter('output_type', 'hdf');
p.addParameter('pixel_type', 'native');
p.addParameter('start', '2005-01-01');
p.addParameter('end', datestr(today, 'yyyy-mm-dd'));
p.addParameter('reprocessed', false);
p.addParameter('mat_dir', '');
p.addParameter('region', 'us');
p.addParameter('profile_mode', 'monthly');
p.addParameter('save_dir', behr_paths.website_staging_dir);
p.addParameter('organize', true);
p.addParameter('overwrite', -1);
p.addParameter('DEBUG_LEVEL', 1);

p.parse(varargin{:});
pout = p.Results;

%%%%%%%%%%%%%%%%%%%%%%%
%%%%% SET OPTIONS %%%%%
%%%%%%%%%%%%%%%%%%%%%%%

% Start and end date
start_date = pout.start;
end_date = pout.end;

% Output type should be 'txt' or 'hdf'. Text (csv) files are for native
% resolution only.
output_type = pout.output_type;

allowed_outtype = {'txt','hdf'};
if ~ismember(output_type,allowed_outtype)
    E.badinput('output_type must be one of %s',strjoin(allowed_outtype,', '));
end

% Set to 'native' to save the native OMI resolution pixels. Set to
% 'gridded' to save the 0.05 x 0.05 gridded data
pixel_type = pout.pixel_type;

allowed_pixtype = {'native','gridded'};
if ~ismember(pixel_type,allowed_pixtype)
    E.badinput('pixel_type must be one of %s',strjoin(allowed_pixtype,', '));
end

% Other options - reprocessed should be TRUE to include in situ fields
is_reprocessed = pout.reprocessed;
if ~isscalar(is_reprocessed) || ~islogical(is_reprocessed)
    E.badinput('REPROCESSED must be a scalar logical')
end

% Whether subdirectories should be created within the save directory,
% organized by pixel type, output type, and BEHR version.
organized_subdir = pout.organize;
if ~isscalar(organized_subdir) || ~islogical(organized_subdir)
    E.badinput('ORGANIZE must be a scalar logical')
end

% How to handle overwriting. 1 = overwrite, 0 = don't overwrite, -1 = ask.
overwrite = pout.overwrite;

DEBUG_LEVEL = pout.DEBUG_LEVEL;
if ~isscalar(DEBUG_LEVEL) || ~isnumeric(DEBUG_LEVEL)
    E.badinput('DEBUG_LEVEL must be a scalar number')
end

% File locations
mat_file_dir = pout.mat_dir;
save_dir = pout.save_dir;

% If no input directory was given, derive it from the region and profile
% mode using the standard BEHR directory layout.
if isempty(mat_file_dir)
    mat_file_dir = behr_paths.BEHRMatSubdir(pout.region, pout.profile_mode);
end

% Check that the directories exist like this so that a single error message
% describes if both directories don't exist - handy for running on the
% cluster so that you don't wait forever for the job to start, only to have
% it fail b/c you forgot to make the output directory.
dirs_dne = {};
if ~exist(mat_file_dir,'dir')
    dirs_dne{end+1} = sprintf('mat_file_dir (%s)', mat_file_dir);
end
if ~exist(save_dir,'dir')
    dirs_dne{end+1} = sprintf('save_dir (%s)', save_dir);
end
if ~isempty(dirs_dne)
    E.dir_dne(dirs_dne);
end

% Make the list of variables to put in the HDF files. Std. variables will
% be added by default; see the "set_variables" function for additional
% options. The pixel type needs to be passed so that it knows whether to
% keep the pixel specific variables or not.

% numThreads is a global set by the calling runscript; it is only required
% (and validated) when running on the cluster.
global numThreads
if onCluster
    global_unset = {};
    % Check that all global variables are set
    if isempty(numThreads)
        global_unset{end+1} = 'numThreads';
    end
    if ~isempty(global_unset)
        E.runscript_error(global_unset);
    end
    % Check that both directories exist and that numThreads is the proper
    % type
    if ~isnumeric(numThreads) || ~isscalar(numThreads)
        E.badinput('numThreads should be a scalar number; this is a global setting, check the calling runscript')
    end
end

%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%% INPUT CHECKING %%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%

if strcmpi(output_type,'txt') && strcmpi(pixel_type, 'gridded')
    E.badinput('Gridded output is only intended for HDF files')
end
if ~ismember(pixel_type,{'native','gridded'})
    E.badinput('"pixel_type" must be "native" or "gridded"');
end
% OMI launched in mid-2004; no BEHR data exists before Oct 2004.
if datenum(start_date) < datenum('2004-10-01') || datenum(end_date) < datenum('2004-10-01')
    E.badinput('start and end dates must be after Oct 1st, 2004');
elseif datenum(start_date) > datenum(end_date)
    E.badinput('Start date must be earlier than end date');
end
if ~exist(mat_file_dir,'dir')
    E.badinput('mat_file_dir must be a directory');
end
if ~exist(save_dir,'dir')
    E.badinput('save_dir must be a directory');
end

% Record the current Git commit of each BEHR repository so the output files
% carry the exact code version that published them.
git_heads.core = git_head_hash(behr_paths.behr_core);
git_heads.behr_utils = git_head_hash(behr_paths.behr_utils);
git_heads.gen_utils = git_head_hash(behr_paths.utils);

%%%%%%%%%%%%%%%%%%%%%
%%%%% MAIN LOOP %%%%%
%%%%%%%%%%%%%%%%%%%%%

% Split into two different loops: if running on a cluster, it will
% parallelize and assume that you want to overwrite any existing files. If
% running locally, it will not parallelize, and will ask for your decision
% on overwriting files.
FILES = dir(fullfile(mat_file_dir,'OMI_BEHR*.mat'));
if ~onCluster
    for a=1:numel(FILES)
        % Find the date part of the file
        d_ind = regexp(FILES(a).name,'\d\d\d\d\d\d\d\d');
        date_string = FILES(a).name(d_ind:d_ind+7);
        if datenum(date_string,'yyyymmdd') >= datenum(start_date) && datenum(date_string,'yyyymmdd') <= datenum(end_date)
            % If the file is less than a MB, it likely has no data (possibly
            % because the OMI swaths needed were not created for that day). If this
            % is true, skip this file.
            if FILES(a).bytes < 1e6
                if DEBUG_LEVEL > 0; fprintf('%s size < 1 MB, skipping due to lack of data\n',FILES(a).name); end
                continue
            end
            % Loads the variables "Data" (native pixels) and "OMI"
            % (gridded) into the workspace.
            load(fullfile(mat_file_dir,FILES(a).name));
            if strcmpi(pixel_type,'native')
                Data_to_save = Data;
            else
                Data_to_save = OMI;
            end

            if DEBUG_LEVEL > 0
                fprintf('Saving %s %s for %s\n', pixel_type, output_type, date_string);
            end

            % "overwrite" is returned so that an "overwrite all"/"overwrite
            % none" answer from the user persists to later files.
            [vars, attr, savename, full_save_dir] = set_variables(Data_to_save, pixel_type, output_type, is_reprocessed, save_dir, organized_subdir);
            if strcmpi(output_type,'hdf')
                overwrite = make_hdf_file(Data_to_save, vars, attr, full_save_dir, savename, pixel_type, overwrite, git_heads, DEBUG_LEVEL);
            elseif strcmpi(output_type,'txt')
                overwrite = make_txt_file(Data_to_save,vars,attr,date_string,full_save_dir,savename,overwrite,DEBUG_LEVEL);
            end
        end
    end
else
    if onCluster && isempty(gcp('nocreate'))
        parpool(numThreads);
    end

    % Note: inside parfor, updates to "overwrite" cannot be carried between
    % iterations, so its return value is deliberately discarded here.
    parfor a=1:numel(FILES)
        % Find the date part of the file
        d_ind = regexp(FILES(a).name,'\d\d\d\d\d\d\d\d');
        date_string = FILES(a).name(d_ind:d_ind+7);
        if datenum(date_string,'yyyymmdd') >= datenum(start_date) && datenum(date_string,'yyyymmdd') <= datenum(end_date)
            % If the file is less than a MB, it likely has no data (possibly
            % because the OMI swaths needed were not created for that day). If this
            % is true, skip this file.
            if FILES(a).bytes < 1e6
                if DEBUG_LEVEL > 0; fprintf('%s size < 1 MB, skipping due to lack of data\n',FILES(a).name); end
                continue
            end
            % Load into a struct (required inside parfor) rather than
            % directly into the workspace.
            D = load(fullfile(mat_file_dir,FILES(a).name));
            if strcmpi(pixel_type,'native')
                Data_to_save = D.Data;
            else
                Data_to_save = D.OMI;
            end

            if DEBUG_LEVEL > 0
                fprintf('Saving %s %s for %s\n', pixel_type, output_type, date_string);
            end

            [vars, attr, savename, full_save_dir] = set_variables(Data_to_save, pixel_type, output_type, is_reprocessed, save_dir, organized_subdir);
            if strcmpi(output_type,'hdf')
                make_hdf_file(Data_to_save, vars, attr, full_save_dir, savename, pixel_type, overwrite, git_heads, DEBUG_LEVEL);
            elseif strcmpi(output_type,'txt')
                make_txt_file(Data_to_save, vars, attr, date_string, full_save_dir, savename, overwrite, DEBUG_LEVEL);
            end
        end
    end
end

end
%%%%%%%%%%%%%%%%
% SUBFUNCTIONS %
%%%%%%%%%%%%%%%%
function [vars, attr, savename, save_dir] = set_variables(Data, pixel_type, output_type, reprocessed, save_dir, organized_subdir)
% SET_VARIABLES Determine the variables, attributes, and paths for output.
%   [VARS, ATTR, SAVENAME, SAVE_DIR] = SET_VARIABLES( DATA, PIXEL_TYPE,
%   OUTPUT_TYPE, REPROCESSED, SAVE_DIR, ORGANIZED_SUBDIR ) returns the cell
%   array of field names to publish (VARS), the attribute struct for those
%   fields (ATTR), the output file name (SAVENAME), and the (possibly
%   modified) output directory (SAVE_DIR).
%
% Make a list of variables that should be added to the product. All the
% standard variables will be added always. Pass any or all of the following
% strings to add certain variables
%
%   'reprocessed' - fields related to columns that are reprocessed using
%   in-situ profiles
%
% The standard variables to be included (listed in
% http://behr.cchem.berkeley.edu/TheBEHRProduct.aspx)

E = JLLErrors;

% When requested, place output in a version/mode specific subdirectory,
% creating it if needed, e.g. "behr-monthly-us_native-hdf_v3-0A".
if organized_subdir
    save_subdir = sprintf('behr-%s-%s_%s-%s_%s', Data(1).BEHRProfileMode, Data(1).BEHRRegion, pixel_type, output_type, BEHR_version);
    save_dir = fullfile(save_dir, save_subdir);
    if ~exist(save_dir, 'dir')
        mkdir(save_dir);
    end
end

% The attribute table contains the list of all variables we expect to provide.
% Choose the proper subset.
savename = behr_filename(Data(1).Date, Data(1).BEHRProfileMode, Data(1).BEHRRegion, output_type);

if reprocessed
    attr = BEHR_publishing_attribute_table('pub-insitu', 'struct');
    savename = strrep(savename, 'BEHR','BEHR-InSitu');
else
    attr = BEHR_publishing_attribute_table('pub', 'struct');
end

vars = fieldnames(attr);

% Remove variables not defined as "gridded" by BEHR_publishing_gridded_fields
if strcmpi('gridded', pixel_type)
    vars = remove_ungridded_variables(vars);
end

% Remove any fields not present in the structure. If one of the expected variables is
% not present, error (because we expect it to be there!) The exception are the PSM weight
% fields, they will not be present if we are gridding with CVM only.
% vv marks expected variables actually present in DATA; pp marks variables
% whose absence is permitted for this pixel type / gridding mode.
vv = isfield(Data, vars);
if sum(~vv) > 0
    pp = false(size(vars));
    if strcmpi('native', pixel_type)
        % If not doing the gridded data, then none of the weights fields will be present.
        pp = pp | ismember(vars, BEHR_publishing_gridded_fields.cvm_weight_vars);
    end
    if strcmpi('native', pixel_type) || Data(1).Only_CVM
        pp = pp | ismember(vars, BEHR_publishing_gridded_fields.psm_weight_vars);
    end
    missing_vars = vars(~vv & ~pp);
    if numel(missing_vars) > 0
        E.callError('missing_variable', 'The following variables are missing: %s', strjoin(missing_vars, ', '));
    end
end

vars = vars(vv);

% Remove variables that cannot be put into a CSV text file because multiple
% values are required per pixel
if strcmpi('txt', output_type)
    vars = remove_vector_variables(vars);
end

end
function vars = remove_ungridded_variables(vars)
% REMOVE_UNGRIDDED_VARIABLES Restrict VARS to variables in gridded products.
%   VARS = REMOVE_UNGRIDDED_VARIABLES( VARS ) keeps only the entries of
%   VARS (a cell array of field name strings) that are defined as gridded
%   by BEHR_publishing_gridded_fields.
E = JLLErrors;
if ~iscell(vars) || ~all(iscellcontents(vars,'ischar'))
    E.badinput('"vars" must be a cell array of variable names as strings')
end

% The authoritative list of gridded variables lives in
% BEHR_publishing_gridded_fields; update it there if new gridded variables
% are added.
is_gridded = ismember(vars, BEHR_publishing_gridded_fields.all_gridded_vars);
vars = vars(is_gridded);
end
function vars = remove_vector_variables(vars)
% REMOVE_VECTOR_VARIABLES Drop per-pixel vector fields from VARS.
%   VARS = REMOVE_VECTOR_VARIABLES( VARS ) removes the variables that store
%   multiple values per pixel (profiles, scattering weights, etc.), since
%   those cannot be written to a one-row-per-pixel CSV text file.
E=JLLErrors;
if ~iscell(vars) || ~all(iscellcontents(vars,'ischar'))
    E.badinput('"vars" must be a cell array of variable names as strings')
end

% These fields carry a vertical profile per pixel and so have no single
% value to print in a CSV column.
per_pixel_vector_fields = {'BEHRPressureLevels','BEHRScatteringWeights','BEHRAvgKernels','BEHRNO2apriori'};
vars = vars(~ismember(vars, per_pixel_vector_fields));
end
function overwrite = make_hdf_file(Data_in, vars, attr, save_dir, savename, pixel_type, overwrite, current_git_heads, DEBUG_LEVEL)
% MAKE_HDF_FILE Write one day of BEHR data to an HDF5 file.
%   OVERWRITE = MAKE_HDF_FILE( DATA_IN, VARS, ATTR, SAVE_DIR, SAVENAME,
%   PIXEL_TYPE, OVERWRITE, CURRENT_GIT_HEADS, DEBUG_LEVEL ) writes each
%   swath in DATA_IN to the group /Data/Swath##### of SAVE_DIR/SAVENAME,
%   one dataset per field in VARS, with attributes taken from ATTR.
%   OVERWRITE (<0 ask, 0 skip, >0 replace) is returned so that an
%   "overwrite all"/"overwrite none" answer persists across files.
E = JLLErrors;

[~,~,ext] = fileparts(savename);
if ~strcmp(ext,'.hdf')
    E.badinput('SAVENAME must have the extension .hdf, not %s', ext);
end
hdf_fullfilename = fullfile(save_dir, savename);

% Check if the file exists. We may be already set to automatically
% overwrite or not, otherwise we have to ask the user what to do.
if exist(hdf_fullfilename,'file')
    [overwrite, do_i_return] = do_i_overwrite(overwrite, hdf_fullfilename);
    if do_i_return
        return
    end
end

% Iterate through each swath and save it as under the group
% /Data/Swath#####.
for d=1:numel(Data_in)
    swath_id = max(Data_in(d).Swath(:));
    group_name = sprintf('/Data/Swath%d',swath_id);

    if swath_id == 0 || isnan(swath_id)
        % A swath ID of 0 or NaN means that no pixels were gridded, so skip
        % this swath since has it has no useful data.
        continue
    end

    if DEBUG_LEVEL > 1; fprintf('\t Now writing %s\n',group_name); end
    if DEBUG_LEVEL > 2; tic; end

    for v=1:numel(vars)
        var_name = sprintf('%s/%s',group_name,vars{v});
        save_data = Data_in(d).(vars{v});
        sz = size(save_data);

        % Make NaNs into fill values - apparently this is better for HDF
        % type files. Cell arrays are created for bit array flags in
        % gridded products - so we don't want to do any fill values for
        % them. Rather, since each cell contains a matrix of the bit array
        % flags, we'll do a bitwise OR operation on them so that if a flag
        % is set for any pixel used in that grid cell it carries through.
        if ~iscell(save_data)
            nans = isnan(save_data);
            save_data(nans) = attr.(vars{v}).fillvalue;
        else
            % The bitwise-OR path described above has not been written yet.
            E.notimplemented('Cell array variable');
        end

        if isa(save_data,'double')
            % Flag fields are bit arrays and must be stored as unsigned
            % integers; verify the conversion is lossless before using it.
            if ismember(vars{v}, BEHR_publishing_gridded_fields.flag_vars)
                save_data_uint = uint32(save_data);
                if ~isequal(save_data_uint, save_data)
                    E.callError('flag_conversion_error', 'After conversion to uint32, the %s field changed', vars{v});
                else
                    save_data = save_data_uint;
                end
            else
                % Convert doubles to singles to save space for the data people
                % will be downloading
                save_data = single(save_data);
            end
        end

        % Ensure that the fill value is of the same type as the data
        fill_val = cast(attr.(vars{v}).fillvalue, 'like', save_data);

        % Create the dataset, then write it and add the attributes
        h5create(hdf_fullfilename, var_name, sz, 'Datatype', class(save_data), 'FillValue', fill_val);
        h5write(hdf_fullfilename, var_name, save_data);

        atts = fieldnames(attr.(vars{v}));
        for a=1:numel(atts)
            if strcmp(atts{a},'fillvalue')
                continue % We've already handled the fill value with h5create
            end
            h5writeatt(hdf_fullfilename, var_name, atts{a}, attr.(vars{v}).(atts{a}));
        end

        % If doing gridded data, there's another attribute to set, which is
        % whether the value was gridded with PSM, CVM, neither, or is a
        % weight field.
        if strcmpi(pixel_type, 'gridded')
            grid_type_attr = 'gridding_method';
            if ~isempty(regexpi(vars{v}, 'weight'))
                grid_type = 'weight';
            elseif ismember(vars{v}, BEHR_publishing_gridded_fields.all_psm_vars) && ~Data_in(d).Only_CVM
                grid_type = 'parabolic spline method';
            elseif ismember(vars{v}, BEHR_publishing_gridded_fields.all_cvm_vars) || (ismember(vars{v}, BEHR_publishing_gridded_fields.all_psm_vars) && Data_in(d).Only_CVM)
                grid_type = 'constant value method';
            elseif ismember(vars{v}, BEHR_publishing_gridded_fields.flag_vars)
                grid_type = 'flag, bitwise OR';
            elseif ismember(vars{v}, BEHR_publishing_gridded_fields.publish_only_gridded_vars)
                grid_type = 'grid property';
            else
                grid_type = 'undefined';
            end
            h5writeatt(hdf_fullfilename, var_name, grid_type_attr, grid_type);
        end

        % If this is the BEHRQualityFlags field, add the bit meanings to
        % the attributes. We need to give behr_quality_flags input that it
        % can treat as if it were getting actual data in order to create
        % the flags definition cell array.
        if strcmpi(vars{v}, 'BEHRQualityFlags')
            [~,flags_definition] = behr_quality_flags();
            flags_definition = flags_definition(~iscellcontents(flags_definition, 'isempty'));
            h5writeatt(hdf_fullfilename, var_name, 'FlagMeanings', strjoin(flags_definition, ', '));
        end
    end

    % Write an attribute to the swath group describing if it is gridded or
    % native. Also include the version string and the Git head hashes
    switch lower(pixel_type)
        case 'native'
            swath_attr = 'OMI SP and BEHR data at native OMI resolution';
        case 'gridded'
            swath_attr = 'OMI SP and BEHR data gridded to 0.05 x 0.05 deg';
        otherwise
            E.badinput('"pixel_type" not recognized');
    end
    % General swath attributes
    h5writeatt(hdf_fullfilename, group_name, 'Description', swath_attr);
    h5writeatt(hdf_fullfilename, group_name, 'Version', BEHR_version());

    % Files read in in the process of generating the product
    swath_attr_fields = BEHR_publishing_gridded_fields.swath_attr_vars;
    for a=1:numel(swath_attr_fields)
        attr_val = Data_in(d).(swath_attr_fields{a});
        % HDF5 attributes must be strings here, so stringify cellstr and
        % numeric values; anything else is an error.
        if iscellstr(attr_val)
            attr_val = strjoin(attr_val, ', ');
        elseif isnumeric(attr_val)
            if isscalar(attr_val)
                attr_val = num2str(attr_val);
            else
                attr_val = mat2str(attr_val);
            end
        elseif ~ischar(attr_val)
            E.notimplemented('Attribute values must be a number, string, or cell array of strings');
        end
        h5writeatt(hdf_fullfilename, group_name, swath_attr_fields{a}, attr_val);
    end

    % Record the Git commits of the publishing code for provenance.
    h5writeatt(hdf_fullfilename, group_name, 'GitHead-Core_Pub', current_git_heads.core);
    h5writeatt(hdf_fullfilename, group_name, 'GitHead-BEHRUtils_Pub', current_git_heads.behr_utils);
    h5writeatt(hdf_fullfilename, group_name, 'GitHead-GenUtils_Pub', current_git_heads.gen_utils);

    if DEBUG_LEVEL > 2; toc; end
end
end
function ask_to_overwrite = make_txt_file(Data_in, vars, attr, date_string, save_dir, savename, ask_to_overwrite, DEBUG_LEVEL) %#ok<INUSD>
% MAKE_TXT_FILE Write one day of BEHR data as a comma-separated text file.
%   ASK_TO_OVERWRITE = MAKE_TXT_FILE( DATA_IN, VARS, ATTR, DATE_STRING,
%   SAVE_DIR, SAVENAME, ASK_TO_OVERWRITE, DEBUG_LEVEL ) writes every pixel
%   from all swaths in DATA_IN as one CSV row, with the fields in VARS as
%   columns (corner fields expand to four columns each). ATTR supplies the
%   fill values substituted for NaNs. ASK_TO_OVERWRITE (<0 ask, 0 skip,
%   >0 replace) is returned so an "overwrite all"/"overwrite none" answer
%   persists to later files.
warning('make_txt_file has not been validated for BEHR > v2.1C, check the resulting files carefully before use')
if ~strcmp(savename(end),'_')
    savename = strcat(savename,'_');
end
txt_filename = strcat(savename, date_string, '.txt');
txt_fullfilename = fullfile(save_dir, txt_filename);

% Check if the file exists. Give the user 3 options if it does: abort,
% overwrite, overwrite all.
if exist(txt_fullfilename,'file')
    [ask_to_overwrite, do_i_return] = do_i_overwrite(ask_to_overwrite, txt_fullfilename);
    if do_i_return
        return;
    end
end

% For text files, we will not break up by swaths, instead all pixels will
% be in one giant CSV type output.

% First we'll create the format string based on the variables requested.
% Most variables will have 6 significant digits, using %g (so exponential
% or standard form will be chosen for compactness). Some will be specified
% to be integers - either if the class of the value is an integer or it is
% a flag field. Time will be treated specially because we want very high
% precision, and the Lat/Loncorn fields will need to be expanded into four
% individual fields. Next the header - start with lon, lat, and the
% corners. The order of the rest is less important.
% The corner fields (2 of them) expand to 4 columns each, so the total
% column count is n_vars - 4 + 10 = n_vars + 6.
n_vars = numel(vars);
header_cell = cell(1,n_vars+6);
header_cell(1:10) = {'Longitude','Latitude','Loncorn1','Loncorn2','Loncorn3','Loncorn4','Latcorn1','Latcorn2','Latcorn3','Latcorn4'};
format_spec = cell(1,n_vars+6);
format_spec(1:10) = repmat({'%.4f'},1,10);
i=11;
for a=1:n_vars
    if ~ismember(vars{a}, {'Longitude','Latitude','Loncorn','Latcorn'})
        header_cell{i} = vars{a};
        if strcmpi(vars{a},'Time')
            % Time needs the full precision of %f to resolve seconds.
            format_spec{i} = '%f';
        elseif isinteger(Data_in(1).(vars{a})(1)) || ~isempty(regexpi(vars{a},'Flag')) || any(strcmpi(vars{a},{'Row','Swath'}))
            format_spec{i} = '%d';
        else
            format_spec{i} = '%.4g';
        end
        i=i+1;
    end
end
header_line = strjoin(header_cell,',');

% Open the file and loop through all the swaths and pixels and
% write the values.
fid = fopen(txt_fullfilename,'w');
fprintf(fid,'%s\n',header_line);
for s=1:numel(Data_in)
    for p=1:numel(Data_in(s).Longitude)
        for a=1:numel(header_cell)
            % Corner columns (Loncorn1..4 / Latcorn1..4) index into the
            % 4-x-npixels corner arrays; every other column reads directly
            % from the field named by the header. This single parse
            % replaces eight duplicated switch cases.
            corner_tokens = regexp(header_cell{a}, '^(Loncorn|Latcorn)([1-4])$', 'tokens', 'once');
            if ~isempty(corner_tokens)
                corner_field = corner_tokens{1};
                corner_ind = str2double(corner_tokens{2});
                v = Data_in(s).(corner_field)(corner_ind, p);
                fill_val = attr.(corner_field).fillvalue;
            else
                v = Data_in(s).(header_cell{a})(p);
                fill_val = attr.(header_cell{a}).fillvalue;
            end
            % NaNs are never written; substitute the documented fill value.
            if isnan(v)
                v = fill_val;
            end
            fprintf(fid,format_spec{a},v);

            if a<numel(header_cell)
                fprintf(fid,',');
            else
                fprintf(fid,'\n');
            end
        end
    end
end
fclose(fid);

end
function [overwrite, do_return] = do_i_overwrite(overwrite, full_file_name)
% DO_I_OVERWRITE Decide whether an existing output file should be replaced.
%   [OVERWRITE, DO_RETURN] = DO_I_OVERWRITE( OVERWRITE, FULL_FILE_NAME )
%   interprets the overwrite mode: OVERWRITE < 0 asks the user (and may
%   change OVERWRITE to 0 or 1 for "to all" answers), OVERWRITE > 0
%   silently deletes the existing file, and OVERWRITE == 0 leaves it
%   alone. DO_RETURN is true when the caller should skip writing the file.
E = JLLErrors;
do_return = false;
if overwrite > 0
    % Always overwrite: just clear the way for the new file.
    delete(full_file_name);
elseif overwrite == 0
    % Never overwrite: keep the existing file and tell the caller to skip.
    fprintf('%s exists, skipping\n', full_file_name);
    do_return = true;
else
    % Undecided: ask, allowing "to all" answers that fix the mode for the
    % remainder of the run.
    response = lower(ask_multichoice(sprintf('File %s exists.\nOverwrite, skip, abort, overwrite all, or overwrite none?', full_file_name), {'o','s','a','oa','on'}));
    switch response
        case 'o'
            delete(full_file_name);
        case 'oa'
            delete(full_file_name);
            overwrite = 1;
        case 's'
            do_return = true;
        case 'on'
            overwrite = 0;
            do_return = true;
        case 'a'
            E.userCancel;
        otherwise
            E.notimplemented('User answer %s', response);
    end
end
end
function mycleanup()
% MYCLEANUP Exit MATLAB cleanly if the cluster run died with an error.
%   Invoked via onCleanup when running on the cluster: if an error was
%   recorded, print it, shut down any parallel pool, and exit with a
%   nonzero status so the scheduler sees the failure.
err = lasterror; %#ok<LERR>
if isempty(err.message)
    % No error recorded - normal termination, nothing to clean up.
    return
end
fprintf('MATLAB exiting due to problem: %s\n', err.message);
pool = gcp('nocreate');
if ~isempty(pool)
    delete(pool)
end
exit(1)
end
|
github
|
CohenBerkeleyLab/BEHR-core-master
|
unit_test_maps.m
|
.m
|
BEHR-core-master/Production tests/unit_test_maps.m
| 10,920 |
utf_8
|
9323df8af9a3024ee2a617478e802d36
|
function unit_test_maps(ut_base_dir, ut_new_dir, fields, varargin)
%UNIT_TEST_MAPS Make maps of the differences between two unit tests
%   UNIT_TEST_MAPS() will interactively request all the necessary options
%
%   UNIT_TEST_MAPS( UT_BASE_DIR, UT_NEW_DIR, FIELDS ) will read the
%   OMI_BEHR .mat files from UT_BASE_DIR and UT_NEW_DIR and plot
%   differences (relative by default) for each field specified by FIELDS in
%   the Data structure.
%
%   Parameter arguments:
%
%       'diff_type' - must be the string 'rel' (default, relative percent
%       differences), 'abs' (absolute difference), 'nan' (plot the
%       difference in whether the value of the fields is a NaN), 'base'
%       (plot the base value, no difference), 'new' (plot the new value,
%       no difference), 'basenan', or 'newnan' (plot whether the base or
%       new value is a NaN, respectively).
%
%       This can also be given as a structure, where each
%       field name is one of the fields to be plotted and the field value
%       is one of the allowed values of diff_type. In this form, each field
%       can have it's own difference type, but the structure must include
%       every field.
%
%       'structure' - must be the strings 'Data' (default) or 'OMI',
%       controls which structure in the .mat file is plotted.
%
%       'close' - logical, default is true, which causes each day's plots
%       to close when you go onto the next day. False will keep each day's
%       plots open.
%
%       'clim' - specify a color limit to be passed to caxis() for each
%       plot. The default is set using calc_plot_limits( del, 'pow10',
%       'diff' ), where del is the vector of differences.
%
%       'mode_3d' - how to deal with 3D fields. Options are: 'avg', meaning
%       that differences are averaged along the vertical dimension before
%       plotting. Other options are 'absavg', which takes the absolute
%       value of the differences before averaging; 'sum', which adds up the
%       differences; and 'abssum', which adds up the absolute values of the
%       differences. For diff_type == 'rel' or 'abs', 'avg' is the default;
%       for diff_type == 'nan' (for any field), 'abssum' is the default.

E = JLLErrors;
p = inputParser;
p.addParameter('diff_type', 'rel');
p.addParameter('structure', 'Data');
p.addParameter('close', true);
p.addParameter('clim', []);
p.addParameter('mode_3d','');

if nargin == 0
    [ut_base_dir, ut_new_dir, fields, varargin] = get_settings_interactive();
end

p.parse(varargin{:});
pout = p.Results;

diff_type = pout.diff_type;
structure = pout.structure;
do_close = pout.close;
clim = pout.clim;
mode_3d = pout.mode_3d;

if ~exist(ut_base_dir, 'dir')
    E.badinput('UT_BASE_DIR must be a valid directory')
end
if ~exist(ut_new_dir, 'dir')
    E.badinput('UT_NEW_DIR must be a valid directory')
end

if ischar(fields)
    fields = {fields};
elseif ~iscellstr(fields)
    E.badinput('FIELDS must be a char or cell array of chars');
end

allowed_diff_types = {'rel', 'abs', 'nan', 'base', 'new', 'basenan', 'newnan'};
if ischar(diff_type)
    if ~ismember(diff_type, allowed_diff_types)
        E.badinput('If given as a char, ''diff_type'' must be one of: %s', strjoin(allowed_diff_types, ', '))
    end
    % Normalize to the struct form (one diff type per plotted field) so the
    % rest of the function only deals with a single representation.
    diff_type = make_empty_struct_from_cell(fields, diff_type);
elseif isstruct(diff_type)
    missing_fields = fields(~isfield(diff_type, fields));
    if ~isempty(missing_fields)
        E.badinput('If given as a struct, ''diff_type'' must have every value in FIELDS as a field. The following fields are missing: %s', strjoin(missing_fields, ', '));
    elseif any(~structfun(@ischar, diff_type)) || any(~structfun(@(x) ismember(x, allowed_diff_types), diff_type))
        E.badinput('One or more of the fields in ''diff_type'' is not one of the allowed values: %s', strjoin(allowed_diff_types, ', '));
    end
end

allowed_structures = {'Data', 'OMI'};
if ~ischar(structure) || ~ismember(structure, allowed_structures)
    E.badinput('''structure'' must be one of: %s', strjoin(allowed_structures, ', '));
end

if ~islogical(do_close) || ~isscalar(do_close)
    E.badinput('''close'' must be a scalar logical')
end

if ~isempty(clim) && (~isnumeric(clim) || numel(clim) ~= 2)
    E.badinput('''clim'' must be a 2-element numeric vector, if given')
end

% We have different 3D operation modes depending on what difference type
% we're taking
if isempty(mode_3d)
    % BUG FIX: by this point diff_type is always a struct (one field per
    % plotted variable), so test the individual field values; the previous
    % strcmpi(diff_type, 'nan') errored because strcmpi does not accept
    % struct input.
    if any(structfun(@(x) strcmpi(x, 'nan'), diff_type))
        mode_3d = 'abssum';
    else
        mode_3d = 'avg';
    end
end

allowed_mode_3ds = {'avg', 'absavg', 'sum', 'abssum'};
if ~ischar(mode_3d) || ~ismember(mode_3d, allowed_mode_3ds)
    E.badinput('''mode_3d'' must be one of: %s', strjoin(allowed_mode_3ds, ', '));
end

%%%%%%%%%%%%%%%%%
% MAIN FUNCTION %
%%%%%%%%%%%%%%%%%

% First get the list of files in each directory for the same date
[Fbase, Fnew] = list_common_behr_files(ut_base_dir, ut_new_dir);

% We can assume the same number of files will be selected
for i_file = 1:numel(Fbase)
    figs = gobjects(size(fields));
    for i_field = 1:numel(fields)
        this_diff_type = diff_type.(fields{i_field});
        [del, lon, lat] = diff_files(Fbase(i_file).name, Fnew(i_file).name, fields{i_field}, structure, this_diff_type, mode_3d);
        figs(i_field) = figure;
        sp_size = square_subplot_dims(numel(del), 'exact');
        for i_del = 1:numel(del)
            subplot(sp_size(1), sp_size(2), i_del);
            try
                pcolor(lon{i_del}, lat{i_del}, del{i_del});
                shading flat;
                cb=colorbar;
                cb.Label.String = cb_label(fields{i_field}, this_diff_type);
                state_outlines('k');
                if isempty(clim)
                    fig_clim = calc_plot_limits( del{i_del}, 'pow10', diff_limit_type(this_diff_type) );
                else
                    fig_clim = clim;
                end
                caxis(fig_clim);
                title(datestr(date_from_behr_filenames(Fbase(i_file).name)))
            catch err
                % Orbits that reduce to a single row/column cannot be drawn
                % with pcolor; skip them rather than aborting the whole day.
                if strcmpi(err.identifier, 'MATLAB:pcolor:NonMatrixColorInput') && isvector(del{i_del})
                    fprintf('Skipping orbit %d because pcolor can''t handle vector inputs\n', i_del);
                else
                    rethrow(err);
                end
            end
        end
        colormap(diff_colormap(this_diff_type));
    end

    if do_close
        tilefigs;
        input('Press ENTER to continue', 's');
        close(figs);
    end
end

end
function [ut_base_dir, ut_new_dir, fields, opts_cell] = get_settings_interactive()
% GET_SETTINGS_INTERACTIVE Prompt the user for all UNIT_TEST_MAPS inputs.
%   [UT_BASE_DIR, UT_NEW_DIR, FIELDS, OPTS_CELL] = GET_SETTINGS_INTERACTIVE()
%   asks for the two unit test directories, which structure and fields to
%   plot, and the remaining options; OPTS_CELL is a parameter/value cell
%   array suitable for passing back through the input parser.
ut_base_dir = ask_file('Choose the base directory with OMI_BEHR .mat files', 'dir');
ut_new_dir = ask_file('Choose the new directory with OMI_BEHR .mat files', 'dir');

opts.structure = ask_multichoice('Which structure to use?', {'Data', 'OMI'}, 'list', true, 'default', 'Data');

% Load the first base file to discover the available fields; only numeric
% fields can be mapped, so filter the rest out before asking.
base_files = dirff(fullfile(ut_base_dir, 'OMI_BEHR*.mat'));
loaded = load(base_files(1).name, opts.structure);
first_struct = loaded.(opts.structure);
all_fields = fieldnames(first_struct);
is_numeric_field = cellfun(@(f) isnumeric(first_struct(1).(f)), all_fields);

fields = ask_multiselect('Which field(s) to plot?', all_fields(is_numeric_field));

opts.diff_type = ask_multichoice('Which difference type to plot?', {'rel','abs','nan'}, 'list', true);
opts.close = ask_yn('Close each day''s figure before moving onto the next?');
if strcmpi(opts.diff_type, 'nan')
    default_3d = 'abssum';
else
    default_3d = 'avg';
end
opts.mode_3d = ask_multichoice('How to plot 3D fields?', {'avg', 'absavg', 'sum', 'abssum'}, 'list', true, 'default', default_3d);

opts_cell = struct2cell2(opts);
end
function [Fbase, Fnew] = list_common_behr_files(ut_base_dir, ut_new_dir)
% LIST_COMMON_BEHR_FILES List OMI_BEHR .mat files present in both dirs.
%   [FBASE, FNEW] = LIST_COMMON_BEHR_FILES( UT_BASE_DIR, UT_NEW_DIR )
%   returns file structs for the OMI_BEHR .mat files whose dates appear in
%   both directories, ordered so FBASE(i) and FNEW(i) are the same date.
base_files = dirff(fullfile(ut_base_dir, 'OMI_BEHR*.mat'));
new_files = dirff(fullfile(ut_new_dir, 'OMI_BEHR*.mat'));
% Match the two listings up by the date embedded in each file name so the
% returned lists line up one-to-one.
[ind_base, ind_new] = find_common_elements(date_from_behr_filenames(base_files), date_from_behr_filenames(new_files));
Fbase = base_files(ind_base);
Fnew = new_files(ind_new);
end
function [del, lon, lat] = diff_files(base_name, new_name, field, structure, diff_type, mode_3d)
% DIFF_FILES Compute per-orbit differences of FIELD between two BEHR files.
%   Loads STRUCTURE ('Data' or 'OMI') from the files at BASE_NAME and
%   NEW_NAME and returns cell arrays DEL, LON, LAT with one entry per
%   orbit. DEL holds the difference computed according to DIFF_TYPE; 3D
%   fields are collapsed along their first dimension according to MODE_3D.
%   Errors if the two files have different numbers of orbits or different
%   lat/lon coordinates.
E = JLLErrors;

% Choose how to difference the new field against the base field
switch lower(diff_type)
    case 'rel'
        diff_op = @(new, base) reldiff(new,base)*100;
    case 'abs'
        diff_op = @(new, base) new - base;
    case 'nan'
        diff_op = @(new, base) double(isnan(new)) - double(isnan(base));
    case 'base'
        diff_op = @(new, base) base;
    case 'new'
        diff_op = @(new, base) new;
    case 'basenan'
        diff_op = @(new, base) double(isnan(base));
    case 'newnan'
        diff_op = @(new, base) double(isnan(new));
    otherwise
        E.notimplemented('No difference function defined for diff_type == "%s"', diff_type);
end

% Choose how to collapse a 3D field down to 2D before plotting
switch lower(mode_3d)
    case 'avg'
        reduce_3d = @(x) squeeze(nanmean(x,1));
    case 'absavg'
        reduce_3d = @(x) squeeze(nanmean(abs(x),1));
    case 'sum'
        reduce_3d = @(x) squeeze(nansum2(x,1));
    case 'abssum'
        reduce_3d = @(x) squeeze(nansum2(abs(x),1));
    otherwise
        E.notimplemented('No method for reducing 3D arrays defined for mode_3d == "%s"', mode_3d);
end

Base = load(base_name, structure);
Base = Base.(structure);
New = load(new_name, structure);
New = New.(structure);
if ~isequal(size(New), size(Base))
    E.notimplemented('New and Base structures are different sizes');
end

del = cell(size(Base));
lon = cell(size(Base));
lat = cell(size(Base));
for i_orbit = 1:numel(Base)
    this_del = diff_op(New(i_orbit).(field), Base(i_orbit).(field));
    if ~ismatrix(this_del)
        this_del = reduce_3d(this_del);
    end
    del{i_orbit} = this_del;
    if ~isequaln(New(i_orbit).Longitude, Base(i_orbit).Longitude) || ~isequaln(New(i_orbit).Latitude, Base(i_orbit).Latitude)
        E.notimplemented('New and Base have different lat/lon coordinates')
    end
    lon{i_orbit} = New(i_orbit).Longitude;
    lat{i_orbit} = New(i_orbit).Latitude;
end
end
function label = cb_label(field, diff_type)
% CB_LABEL Build the colorbar label for a given field and difference type.
%   Looks the sprintf format up by the (lower-cased) difference type and
%   substitutes the field name in; errors for unrecognized types.
E = JLLErrors;
label_fmts = struct(...
    'rel', '%%\\Delta %s', ...
    'abs', '\\Delta %s', ...
    'nan', 'isnan(New) - isnan(Old) (%s)', ...
    'base', 'Base %s', ...
    'new', 'New %s', ...
    'basenan', 'isnan(Base) (%s)', ...
    'newnan', 'isnan(New) (%s)');
key = lower(diff_type);
if ~isfield(label_fmts, key)
    E.notimplemented('No label defined for diff_type == "%s"', diff_type);
end
label = sprintf(label_fmts.(key), field);
end
function lim_type = diff_limit_type(diff_type)
% DIFF_LIMIT_TYPE Choose the color limit style for a difference type.
%   Plots of raw values ('base'/'new' types) use 'zero'-anchored limits;
%   actual difference plots use 'diff' (symmetric) limits.
is_raw_value_plot = regcmp(diff_type, '(base|new)');
if is_raw_value_plot
    lim_type = 'zero';
else
    lim_type = 'diff';
end
end
function cmap = diff_colormap(diff_type)
% DIFF_COLORMAP Choose a colormap appropriate to the difference type.
%   Raw value plots ('base'/'new' types) get the standard parula map;
%   difference plots get a blue-red map suited to values centered on zero.
if ~regcmp(diff_type, '(base|new)')
    cmap = blue_red_cmap;
else
    cmap = parula;
end
end
|
github
|
CohenBerkeleyLab/BEHR-core-master
|
behr_prod_indiv_scatter.m
|
.m
|
BEHR-core-master/Production tests/behr_prod_indiv_scatter.m
| 4,787 |
utf_8
|
5b21fe91d4d125ee89fa2c10e9b0e151
|
function [ output_args ] = behr_prod_indiv_scatter( indiv_stats, field_to_plot, plot_mode, clim )
%BEHR_PROD_INDIV_SCATTER Plots day by day scatter plots where % diff > 0.5
%   BEHR_PROD_INDIV_SCATTER( indiv_stats, field_to_plot ) makes plots for
%   FIELD_TO_PLOT from INDIV_STATS returned by BEHR_PROD_TEST.
%
%   BEHR_PROD_INDIV_SCATTER( ___, 'perdiff', [cmin, cmax] ) allows you to
%   specify a color axis range.
%
%   BEHR_PROD_INDIV_SCATTER( ___, 'diff' ) plots absolute differences
%   again, still for pixels with a percent difference > 0.5%.
%
%   BEHR_PROD_INDIV_SCATTER( ___, 'diff', [cmin, cmax] ) allows you to
%   specify a color axis range.
%
%   BEHR_PROD_INDIV_SCATTER( ___, 'values' ) will make two plots per day,
%   with new and old values.
%
%   BEHR_PROD_INDIV_SCATTER( ___, 'values', [cmin, cmax] ) allows you to
%   specify a color axis range
%
%   Any of the plotting modes (perdiff, diff, values) can have "all" added
%   as a prefix to show all differences, not just those > 0.5%.
%
%   Note: OUTPUT_ARGS is never assigned; call this function with no outputs.
E = JLLErrors;
n = numel(indiv_stats);
if ~exist('plot_mode','var')
    plot_mode = 'perdiff';
end
if ~exist('clim', 'var')
    clim = []; % empty = leave the color axis at its automatic range
end
for a=1:n
    % Restrict to pixels whose percent difference exceeds 0.5% unless the
    % mode requests "all". (The previous test, "if ~regexp(plot_mode,'all')",
    % was a bug: regexp returns [] when there is no match, ~[] is [], and
    % "if []" is false, so the filtered branch could never run.)
    if isempty(regexp(plot_mode, 'all', 'once'))
        xx = abs(indiv_stats(a).(field_to_plot).difference_stats.percent_differences)>0.5;
    else
        xx = true(size(indiv_stats(a).(field_to_plot).difference_stats.percent_differences));
    end
    if ismember(lower(plot_mode), {'perdiff','diff','allperdiff','alldiff'})
        % One figure per day colored by the (percent) difference
        f=figure;
        if ismember(lower(plot_mode), {'perdiff','allperdiff'})
            delta = indiv_stats(a).(field_to_plot).difference_stats.percent_differences(xx);
        elseif ismember(lower(plot_mode), {'diff','alldiff'})
            delta = indiv_stats(a).(field_to_plot).difference_stats.differences(xx);
        end
        scatter(indiv_stats(a).(field_to_plot).Longitude(xx), indiv_stats(a).(field_to_plot).Latitude(xx), 8, delta)
        colorbar;
        if ~isempty(clim)
            caxis(clim)
        end
        states('k','cont');
        title(indiv_stats(a).(field_to_plot).date)
    elseif ismember(lower(plot_mode), {'values','allvalues'})
        % Two figures per day: new values (column 1) and old values (column 2)
        f(1) = figure;
        scatter(indiv_stats(a).(field_to_plot).Longitude(xx), indiv_stats(a).(field_to_plot).Latitude(xx), 8, indiv_stats(a).(field_to_plot).difference_stats.value_pairs(xx,1));
        colorbar;
        if ~isempty(clim)
            caxis(clim)
        end
        states('k','cont');
        title(sprintf('New - %s',indiv_stats(a).(field_to_plot).date));
        f(2) = figure;
        scatter(indiv_stats(a).(field_to_plot).Longitude(xx), indiv_stats(a).(field_to_plot).Latitude(xx), 8, indiv_stats(a).(field_to_plot).difference_stats.value_pairs(xx,2));
        colorbar;
        if ~isempty(clim)
            caxis(clim)
        end
        states('k','cont');
        title(sprintf('Old - %s',indiv_stats(a).(field_to_plot).date));
        space_out_figs(f);
    elseif ismember(lower(plot_mode), {'nans', 'fills'})
        % Two figures per day: values that became NaN/fill and values that
        % replaced a NaN/fill, with coordinates pulled from the
        % fill_and_nan_changes substructure by dynamic field name.
        lon_became = indiv_stats(a).(field_to_plot).fill_and_nan_changes.(sprintf('lon_for_became_%s', plot_mode));
        lat_became = indiv_stats(a).(field_to_plot).fill_and_nan_changes.(sprintf('lat_for_became_%s', plot_mode));
        lon_replaced = indiv_stats(a).(field_to_plot).fill_and_nan_changes.(sprintf('lon_for_replaced_%s', plot_mode));
        lat_replaced = indiv_stats(a).(field_to_plot).fill_and_nan_changes.(sprintf('lat_for_replaced_%s', plot_mode));
        values_became = indiv_stats(a).(field_to_plot).fill_and_nan_changes.(sprintf('values_that_became_%s', plot_mode));
        values_replaced = indiv_stats(a).(field_to_plot).fill_and_nan_changes.(sprintf('values_that_replaced_%s', plot_mode));
        f(1) = figure;
        scatter(lon_became, lat_became, 8, values_became);
        colorbar;
        if ~isempty(clim)
            caxis(clim);
        end
        state_outlines('k','not','ak','hi');
        title(sprintf('Values that became %s', plot_mode));
        f(2) = figure;
        scatter(lon_replaced, lat_replaced, 8, values_replaced);
        colorbar;
        if ~isempty(clim)
            caxis(clim);
        end
        state_outlines('k','not','ak','hi');
        title(sprintf('Values that replaced %s - %s', plot_mode, indiv_stats(a).(field_to_plot).date));
        space_out_figs(f);
    else
        E.badinput('Plotting mode ''%s'' not recognized', plot_mode);
    end
    drawnow
    fprintf('Press any key for next day\n')
    pause
    % Close this day's figure(s) before moving to the next day
    for b=1:numel(f)
        close(f(b))
    end
end
end
function space_out_figs(figs)
% SPACE_OUT_FIGS Shift a pair of figures apart so neither hides the other.
%   Moves FIGS(1) left and FIGS(2) right, each by half its own width.
half_width_left = figs(1).Position(3)/2;
half_width_right = figs(2).Position(3)/2;
figs(1).Position(1) = figs(1).Position(1) - half_width_left; % move to left
figs(2).Position(1) = figs(2).Position(1) + half_width_right; % move to right
end
|
github
|
CohenBerkeleyLab/BEHR-core-master
|
behr_unit_test.m
|
.m
|
BEHR-core-master/Production tests/behr_unit_test.m
| 7,538 |
utf_8
|
b75018aec9fbc90e90e27f4421563f03
|
function [ success ] = behr_unit_test( new, old, DEBUG_LEVEL, fid, fields_to_ignore )
%BEHR_UNIT_TEST Compare old and new BEHR data
%   SUCCESS = BEHR_UNIT_TEST( NEW, OLD )
%   Takes two Data or OMI structures (NEW and OLD) and compares the values
%   of each field in the structures. If everything matches, SUCCESS will be
%   true, if not, it'll be false.
%
%   SUCCESS = BEHR_UNIT_TEST( NEW, OLD, DEBUG_LEVEL ) Allows you to control
%   the verbosity of the function. By default it prints out each individual
%   comparison (DEBUG_LEVEL == 2). This is useful if you need the detail,
%   but prints a lot of information. Passing 1 as DEBUG_LEVEL will only
%   print failed tests, passing 0 will turn it off completely.
%
%   SUCCESS = BEHR_UNIT_TEST( NEW, OLD, DEBUG_LEVEL, FID ) Redirects the
%   output printing from the terminal to the file with file ID FID (from
%   FOPEN).
%
%   SUCCESS = BEHR_UNIT_TEST( NEW, OLD, DEBUG_LEVEL, FID, FIELDS_TO_IGNORE )
%   FIELDS_TO_IGNORE is a cell array of strings that specifies fields that
%   should not be compared, usually because you know they will fail for a
%   good reason. For instance, the GitHead fields will almost always fail
%   because the new and old data were produced with different versions of
%   the code.
E = JLLErrors;
if ~exist('DEBUG_LEVEL', 'var')
    DEBUG_LEVEL = 2;
end
if ~exist('fid', 'var')
    % An fid of 1 will make fprintf print to the command window as if no fid
    % was given
    fid = 1;
end
if ~exist('fields_to_ignore', 'var')
    fields_to_ignore = {};
elseif ~iscellstr(fields_to_ignore)
    E.badinput('FIELDS_TO_IGNORE must be a cell array of strings');
end

% Base tolerance for float comparisons; COMPARE_FIELD_VALUES passes this to
% TEST_FIELD_EQUALITY, which may scale it up with the values' magnitude.
tol = 1e-4;

% Pass only if both the field names and the field values agree.
% (Fixed misspelled local "test_field_vales" -> "test_field_values".)
[test_field_names, new_old_fields_mapping] = compare_fields_present(new, old, DEBUG_LEVEL, fid, fields_to_ignore);
test_field_values = compare_field_values(new, old, new_old_fields_mapping, tol, DEBUG_LEVEL, fid);
success = test_field_names && test_field_values;
end
function [success, field_mapping] = compare_fields_present(new, old, DEBUG_LEVEL, fid, ignore_fields)
% COMPARE_FIELDS_PRESENT Verify NEW and OLD structures have the same fields.
%   SUCCESS is true iff every non-ignored field in one structure has a
%   counterpart in the other (allowing for changed capitalization).
%   FIELD_MAPPING is an n-by-2 cell array pairing each new field name
%   (column 1) with the corresponding old field name (column 2).
%   IGNORE_FIELDS entries are matched as regular expressions (via regmember).
new_fields = fieldnames(new);
old_fields = fieldnames(old);
if ~isempty(ignore_fields) % idiomatic replacement for "length(ignore_fields) > 0"
    rr = regmember(new_fields, ignore_fields);
    new_fields(rr) = [];
    rr = regmember(old_fields, ignore_fields);
    old_fields(rr) = [];
    fprintf(fid, ' Ignoring fields:\n\t%s\n', strjoin(ignore_fields, '\n\t'));
end
in_new_only = ~ismember(new_fields, old_fields);
new_only_fields = new_fields(in_new_only);
in_old_only = ~ismember(old_fields, new_fields);
old_only_fields = old_fields(in_old_only);
% Allow for fields to have changed capitalization: a field unique to one
% structure that matches a unique field in the other case-insensitively is
% treated as present in both and added to the mapping.
field_mapping = {};
b = 1;
for a=1:numel(new_only_fields)
    xx = strcmpi(new_only_fields{a}, old_only_fields);
    if sum(xx) == 1
        field_mapping(b,:) = [new_only_fields(a), old_only_fields(xx)]; %#ok<AGROW>
        in_new_only(strcmp(new_only_fields{a}, new_fields)) = false;
        in_old_only(strcmp(old_only_fields{xx}, old_fields)) = false;
        b = b+1;
    elseif sum(xx) > 1
        % Ambiguous match: report it and leave the field flagged as unmatched
        fprintf(fid, ' The field %s in the new structure has multiple case-insensitive matches in the old file (%s)\n', new_only_fields{a}, strjoin(old_only_fields(xx), ', '));
    end
end
if DEBUG_LEVEL > 0
    if sum(in_new_only) > 0
        fprintf(fid, ' The following fields are only present in the new structure:\n');
        fprintf(fid, ' %s\n', strjoin(new_fields(in_new_only),'\n '));
    end
    if sum(in_old_only) > 0
        fprintf(fid, ' The following fields are only present in the old structure:\n');
        fprintf(fid, ' %s\n', strjoin(old_fields(in_old_only),'\n '));
    end
end
% Add the fields that exactly match to the mapping (they map to themselves)
xx = ismember(new_fields, old_fields);
xx = xx(:);
common_fields_mapping = repmat(new_fields(xx), 1, 2);
field_mapping = cat(1, field_mapping, common_fields_mapping);
success = sum(in_new_only) == 0 && sum(in_old_only) == 0;
end
function success = compare_field_values(new, old, mapping, tolerance, DEBUG_LEVEL, fid)
% COMPARE_FIELD_VALUES Compare mapped fields of NEW and OLD swath-by-swath.
% new - the new Data or OMI structure
% old - the old Data or OMI structure
% mapping - an n-by-2 cell array with the new fields to check in the first
% column and the corresponding old field in the second column.
% tolerance - the maximum absolute difference allowed between two values
% for them to be considered equal.
% DEBUG_LEVEL - how verbose to be.
% fid - file ID to print messages to (1 = command window).
% Returns SUCCESS = true only if every field of every swath passes
% TEST_FIELD_EQUALITY.
success = true;
% Structures with different numbers of swaths cannot be compared element-wise
if numel(new) ~= numel(old)
if DEBUG_LEVEL > 0
fprintf(fid, ' Number of swaths in new (%d) and old(%d) unequal', numel(new), numel(old));
end
success = false;
return
end
for a=1:numel(new)
if DEBUG_LEVEL > 0
fprintf(fid, '\n Checking swath %d\n', a);
end
for b=1:size(mapping,1)
if DEBUG_LEVEL > 1
fprintf(fid, ' Checking field %s -> %s: ', mapping{b,1}, mapping{b,2});
end
% eq may be a logical or a numeric 0/1 flag; treat any positive value as pass
[eq, reason] = test_field_equality(new(a).(mapping{b,1}), old(a).(mapping{b,2}), tolerance);
success = success && eq > 0;
if DEBUG_LEVEL == 1 && eq <= 0
% If DEBUG_LEVEL > 1, we've already printed out the field
% name. If == 1, we only print when there's a problem
fprintf(fid, ' Field %s/%s: ', mapping{b,1}, mapping{b,2});
end
if DEBUG_LEVEL > 0
if eq > 0
if DEBUG_LEVEL > 1
fprintf(fid, 'PASS\n');
end
else
fprintf(fid, 'FAILED (%s)\n', reason);
end
end
end
end
end
function [eq, reason] = test_field_equality(new_val, old_val, tolerance_scale)
% TEST_FIELD_EQUALITY Compare two field values within a scaled tolerance.
% Returns EQ (truthy if equal) and REASON, a message describing the failure
% when EQ is false/0. Integer arrays must match exactly; float arrays must
% agree within a tolerance derived from TOLERANCE_SCALE; non-numeric values
% fall through to a plain isequal comparison.
reason = 'reason unspecified';
% Scale up the tolerance based on the smallest magnitude of the values
% present, if needed. This will prevent false positives where, e.g. the
% BEHR VCDs are different because of a floating point difference in the AMF
% that gets scaled up. In the old gridding code, the flag fields are stored
% as cell arrays in the grid; this causes this calculation to error (and
% isn't important because the flags are around 0-256 in value) so skip it.
% One other field is a structure, so that needs skipped too.
if isnumeric(new_val) && isnumeric(old_val)
% Integers are compared exactly; mixing integer and float types is a failure
if isinteger(new_val) && isinteger(old_val)
eq = isequal(new_val, old_val);
if ~eq
reason = 'comparison of field values with "isequal" returned false';
end
return
elseif xor(isinteger(new_val), isinteger(old_val))
reason = 'fields are not the same type; one is an integer, one is a float';
eq = false;
return
else
% Floats: tolerance = scale * smallest |value|, rounded to 2 significant
% figures, but never below the base tolerance_scale itself
tolerance = round(tolerance_scale * min(abs([new_val(:); old_val(:)])), 2, 'significant');
tolerance = max(tolerance, tolerance_scale);
if ~isequal(size(new_val), size(old_val))
eq = 0;
reason = sprintf('size of numeric field differs - %s vs %s', mat2str(size(new_val)), mat2str(size(old_val)));
return
end
% NaNs must occur in exactly the same positions in both arrays
newnans = isnan(new_val(:));
oldnans = isnan(old_val(:));
if any(xor(newnans, oldnans))
eq = 0;
reason = 'NaNs are different';
return
end
% Compare only the non-NaN elements (the NaN masks are identical here)
del = new_val(~newnans) - old_val(~oldnans);
eq = all(abs(del) <= tolerance);
if ~eq
reason = sprintf('at least one absolute difference exceeds tolerance of %g; min/max diff = %g/%g', tolerance, min(del), max(del));
end
end
else
% Non-numeric (strings, cells, structs, ...): require exact equality
eq = isequal(new_val, old_val);
if ~eq
reason = 'comparison of field values with "isequal" returned false';
end
end
end
|
github
|
CohenBerkeleyLab/BEHR-core-master
|
match_hdf_txt.m
|
.m
|
BEHR-core-master/Production tests/match_hdf_txt.m
| 2,069 |
utf_8
|
4e36dd7ab06cf74f27b6d2519e61a780
|
function [ DataHDF, DataTXT ] = match_hdf_txt( hdffile, txtfile, fields )
%MATCH_HDF_TEXT Matches up data in a text file to the array shape in an HDF file
%   [ DATAHDF, DATATXT ] = MATCH_HDF_TEXT( HDFFILE, TXTFILE, FIELDS )
%   will read data from the files at paths HDFFILE and TXTFILE and return
%   structures DATAHDF and DATATXT with fields FIELDS, matching up the data
%   in the text file with that from the HDF file so that the arrays have
%   the same shape. Pixels are matched by lon/lat agreement within 1e-4
%   degrees; unmatched text pixels produce a warning and keep NaN values.

% Longitude and Latitude are always needed to match pixels between the files
fields = [{'Longitude', 'Latitude'}, fields(:)'];
hdf_data = prod_test_load_hdf(hdffile, fields);
hdf_data = cat_hdf(hdf_data);
DataTXT = prod_test_load_txt(txtfile, fields);
def_val = nan(size(DataTXT.Longitude));
DataHDF = make_empty_struct_from_cell(fields, def_val);
for i = 1:numel(DataTXT.Longitude)
    if mod(i,100) == 0
        fprintf('Now on %d of %d\n',i,numel(DataTXT.Longitude))
    end
    hdf_vals = match_pix_lat_lon(DataTXT.Longitude(i), DataTXT.Latitude(i));
    for j = 1:numel(fields)
        DataHDF.(fields{j})(i) = hdf_vals.(fields{j});
    end
end

    function vals = match_pix_lat_lon(lon, lat)
    % Find the first HDF pixel within 1e-4 deg of (lon, lat) and return its
    % field values. The matched pixel is deleted from HDF_DATA (shared with
    % the parent workspace, since this is a nested function) so the same
    % pixel cannot be matched twice.
    vals = make_empty_struct_from_cell(fields, nan);
    for a = 1:numel(hdf_data)
        for b = 1:numel(hdf_data(a).Longitude)
            hlon = hdf_data(a).Longitude(b);
            hlat = hdf_data(a).Latitude(b);
            if abs(hlon - lon) < 0.0001 && abs(hlat - lat) < 0.0001
                for f = 1:numel(fields)
                    vals.(fields{f}) = hdf_data(a).(fields{f})(b);
                    % Fixed: the deletion previously omitted the (a) index
                    % ("hdf_data.(fields{f})(b) = []"), which only worked
                    % because cat_hdf returns a scalar struct.
                    hdf_data(a).(fields{f})(b) = [];
                end
                return
            end
        end
    end
    % If we get here, we didn't find a pixel
    warning('Pixel at lon = %f, lat = %f; could not find matching pixel in HDF file', lon, lat);
    end
end
function hdf_data_out = cat_hdf(hdf_data)
% CAT_HDF Concatenate each field of a swath struct array into column vectors.
%   Returns a scalar structure whose fields are the values of every swath
%   in HDF_DATA concatenated (via cat_sat_data) and flattened to columns.
field_list = fieldnames(hdf_data);
hdf_data_out = make_empty_struct_from_cell(field_list);
for i_field = 1:numel(field_list)
    this_field = field_list{i_field};
    catted = cat_sat_data(hdf_data, this_field);
    hdf_data_out.(this_field) = catted(:);
end
end
|
github
|
CohenBerkeleyLab/BEHR-core-master
|
prod_test_load_txt.m
|
.m
|
BEHR-core-master/Production tests/prod_test_load_txt.m
| 3,292 |
utf_8
|
4f07eee69c6989d9c390de494a2df25f
|
function [ D_new, fillvals ] = prod_test_load_txt( newfile, fields_to_check )
%PROD_TEST_LOAD_TXT Load a BEHR .txt file into a Data-like structure.
%   [ D_NEW, FILLVALS ] = PROD_TEST_LOAD_TXT( NEWFILE, FIELDS_TO_CHECK )
%   reads the BEHR text file at path NEWFILE and returns D_NEW, a scalar
%   structure with one field per entry of the cell array FIELDS_TO_CHECK.
%   Because of how the .txt files are formatted, each field contains a
%   vector rather than a matrix (one element per pixel/row of the file).
%   FILLVALS is a column vector of fill values guessed from the data by
%   READ_FILE, returned in the same order as FIELDS_TO_CHECK.

%%%%% INPUT CHECKING %%%%%
E = JLLErrors;
if ~ischar(newfile)
    E.badinput('NEWFILE must be a path given as a string')
elseif ~exist(newfile, 'file')
    E.badinput('NEWFILE (%s) does not exist.',newfile);
end

[D_new_tmp, fillvals_new] = read_file(newfile);

% Cut the structure down to just the requested fields; this mimics the
% behavior of prod_test_load_hdf.m most closely.
fns_new = fieldnames(D_new_tmp);
yy = ~ismember(fields_to_check, fns_new);
if any(yy)
    E.callError('unknown_field','Fields %s are not present in the files.',strjoin(fields_to_check(yy), ', '));
end
% Reorder the fill values to follow FIELDS_TO_CHECK. (Previously this used
% logical indexing over the file's column order, so FILLVALS silently
% mismatched the field order whenever the requested fields were not listed
% in file order. Also removed a dead "fillvals = nan(...)" preallocation
% that was always overwritten.)
[~, loc] = ismember(fields_to_check, fns_new);
fillvals = fillvals_new(loc);
fillvals = fillvals(:);
for a=1:numel(fields_to_check)
    D_new.(fields_to_check{a}) = D_new_tmp.(fields_to_check{a});
end
end
function [Data, fillvals] = read_file(filename)
% READ_FILE Parse a BEHR CSV text file into a struct of column vectors.
%   The first line of the file gives comma-separated column names; every
%   following line is comma-separated floats. Also guesses each column's
%   fill value from a list of fills known to appear in BEHR products,
%   preferring an exact match and falling back to a +/-0.1 match.
fid = fopen(filename);
cleanobj = onCleanup(@() fclose(fid));
possible_fills = [-32.77, -32770, -9e9, -1.268e30, -3.402e38];

% Header line: the variable (column) names
header_line = fgetl(fid);
varnames = strsplit(header_line,',');
n_vars = numel(varnames);

% All remaining values are floats; fscanf picks up where fgetl left off,
% i.e. on the second line, and returns one long vector.
format_spec = [repmat('%f,',1,n_vars-1), '%f'];
raw = fscanf(fid,format_spec);
raw = reshape(raw,n_vars,[])'; % restore the file's rows-by-columns layout

Data = make_empty_struct_from_cell(varnames);
fillvals = nan(size(varnames))';
for i_var = 1:n_vars
    column = raw(:,i_var);
    Data.(varnames{i_var}) = column;
    % Guess the fill value: first pass requires exact equality
    found_exact = false;
    for i_fill = 1:numel(possible_fills)
        if any(column == possible_fills(i_fill))
            fillvals(i_var) = possible_fills(i_fill);
            found_exact = true;
            break
        end
    end
    % Second pass allows some floating point error
    if ~found_exact
        for i_fill = 1:numel(possible_fills)
            if any(abs(column - possible_fills(i_fill)) < 0.1)
                fillvals(i_var) = possible_fills(i_fill);
                break
            end
        end
    end
end
end
|
github
|
CohenBerkeleyLab/BEHR-core-master
|
unit_test_driver.m
|
.m
|
BEHR-core-master/Production tests/unit_test_driver.m
| 34,802 |
utf_8
|
3600c57f1f66581c5e3556be96eee790
|
function [ ] = unit_test_driver( self_test )
%UNIT_TEST_DRIVER Driver function for BEHR unit test
% This function, when called, asks a series of questions interactively to
% determine how the unit tests should proceed. It is capable of
% automatically generating OMI_SP and OMI_BEHR files using the current
% versions of read_main and BEHR_main, if this is requested,
% it saves the resulting files in a subdirectory of "UnitTestData" which
% will be created in the same directory as this function. The
% subdirectory will be named "ProducedYYYYMMDD". It will also contain a
% text file that describes the status of the BEHR git repository at the
% time of production, including both the commit hash and one line
% description of HEAD and the diff against HEAD.
%
% Whether you produce the data with this function or not, it will then
% called both BEHR_UNIT_TEST and (if testing read_main)
% READING_PRIORI_TESTS. BEHR_UNIT_TEST takes a pair of Data or OMI
% structures and attempts to verify that they are the same. If they are
% not the same, the unit test will fail overall for that date, but will
% also print out information about why it failed. Since changes to the
% algorithm intended to change the output will cause it to fail, it is up
% to the user to determine if the changes are the expected ones.
% READING_PRIORI_TESTS does not test the a priori profiles; rather it
% checks that certain elements of the OMI_SP files make sense a priori,
% that is, on their own without needing a previous version of the file to
% compare against.
%
% At a minimum, this should be used before producing a new version of
% BEHR to test selected dates (see below) against the existing version.
% This would be done by allowing it to generate the new data and compare
% against the files in the directories specified by BEHR_paths().
%
% This does not run the entire OMI data record. Within the code is a cell
% array of test dates which will be run. A few days are included in this
% array to test normal operation, the rest are there to test behavior
% under weird cases that have caused issues in the past. Consequently,
% you should add days to this as you find days that cause the BEHR
% algorithm to error or behave strangely, but you should not remove
% existing days.
%
% Internally, the ranges of dates that have daily profiles are specified,
% so as more daily profiles become available, one should modify this
% function to update that. It will only generate and test daily profile
% files within those specified ranges.
%
% UNIT_TEST_DRIVER( true ) runs a self test, so it only tries to do one
% day. This is useful if you've made changes to the unit test code itself
% and just want to make sure it works.
%
% Josh Laughner <[email protected]> 8 May 2017
E = JLLErrors;
DEBUG_LEVEL = 2;
if ~exist('self_test','var')
self_test = false;
end
% Test these dates. It's a good idea to check at least one regular day
% before the row anomaly started (2005-2006), after it was at its worst
% (after July 2011), plus zoom mode operation in both time periods.
% Additional days should be added that have caused or illuminated bugs
test_region = 'US';
test_dates = {'2005-06-02';... % pre-row anomaly summertime day, at least one day after zoom mode finishes
'2006-01-01';... % pre-row anomaly wintertime day, at least one day after zoom mode finishes
'2012-06-03';... % post-row anomaly summertime day, at least one day after zoom mode finishes
'2013-01-01';... % post-row anomaly wintertime day, at least one day after zoom mode finishes
'2014-07-08';... % post-row anomaly zoom mode, found by looking for days where OMPIXCORZ is produced for BEHR-relevant orbits for US region
'2006-09-20';... % pre-row anomaly zoom mode, found by looking for days where OMPIXCORZ is produced for BEHR-relevant orbits for US region
'2005-07-13';... % day mentioned in the 2.1Arev1 changelog with no NO2 data
'2010-01-29';... % day mentioned in the 2.1Arev1 changelog with no NO2 data
'2005-05-04';... % the center lon/lat for the OMPIXCOR product just different enough that cutting down by bounds results in different size arrays, so switched to find_submatrix2
'2005-05-14';... % Has both a row that is only partially fill values in lon/lat and the OMPIXCOR corners are mostly 0
'2010-05-09';... % day where the first swath is only 4 long in the along track dimension, which previously caused an error in the gridding algorithm, since it matches the length of the corner dimension
'2006-04-26';... % day where along and across track dimensions in the first orbit are both 60, which previously screwed up the dimension checking in the gridding algorithm
'2009-02-15'... % day where the along track dimension is cut all the way down to one, which exposed a bug in the matlab-python interface
};
% These are dates that the algorithm should be run for, but for which it is
% okay if no data is produced. This allows the unit tests to skip them
test_dates_no_data = {'2016-05-30';... % OMI was in safe mode; algorithm should gracefully handle the lack of data
'2007-12-19'}; % Should read in an empty structure from read_main.m, BEHR_main should just skip it and not try to produce anything
% These are the date ranges for which daily profiles are available. When
% running in daily mode, only test dates within these ranges will be
% produced. The first column represents the beginning of each range; the
% second column the end.
daily_profiles_available = {'2005-01-01', '2005-12-31';...
'2007-01-01', '2007-12-31';...
'2012-01-01', '2013-12-31'};
test_dates = unique(cat(1,test_dates,test_dates_no_data));
if self_test
test_dates = test_dates(1);
end
my_dir = fileparts(mfilename('fullpath'));
addpath(fullfile(my_dir,'SubTests'));
what_to_test = ask_multichoice('Which step should be tested?', {'all', 'reading', 'behrmain', 'publishing'});
use_behrpaths = ask_yn('Use the paths specified by BEHR_paths() for the old data?');
generate_new_data = ask_yn('Generate the new files? If not you will be asked to choose the directories to load new files from');
if generate_new_data
root_save_folder = make_data_folder();
read_save_folder = fullfile(root_save_folder, 'Reading');
main_root_save_folder = fullfile(root_save_folder, 'Main');
pub_root_save_folder = fullfile(root_save_folder, 'Publishing');
end
save_results_to_file = ask_yn('Save results to file? (If not, will be printed to screen).');
if save_results_to_file
results_file = make_results_file(what_to_test);
fid = fopen(results_file,'w');
else
% An fid of 1 will cause fprintf to print to the command window, as if
% no fid was given
fid = 1;
end
try
prompt_str = ['\nSpecify any fields to ignore in unit testing, separated by a space.\n',...
'Regular expressions can be used. By default, fields beginning with\n',...
'"GitHead" are ignored because they are expected to be different if the\n',...
'algorithm has changed. To override this, make one of the strings input\n',...
'"keepgit" (without the quotes), i.e. entering "keepgit .*File" will\n',...
'ignore any field ending in "File" but do compare the GitHead fields: '];
fields_to_ignore = input(prompt_str, 's');
fields_to_ignore = strsplit(fields_to_ignore);
xx = strcmpi('keepgit', fields_to_ignore);
if ~any(xx)
fields_to_ignore = veccat({'GitHead.*'},fields_to_ignore);
else
fields_to_ignore(xx) = [];
end
if generate_new_data
make_git_report(behr_paths.behr_core, 'GitReport-Core.txt');
make_git_report(behr_paths.behr_utils, 'GitReport-BEHRUtils.txt');
make_git_report(behr_paths.utils, 'GitReport-GenUtils.txt');
end
switch what_to_test
% Each of the testing subfunctions allows paths to be given to them
% by test_all() to minimized user interaction if all three steps are
% to be run. I've set it up so that if empty strings are passed, it
% considers those paths to not be given, but something has to be passed.
case 'reading'
success = test_reading('', '');
case 'behrmain'
success_m = test_behr_main('monthly', '', '');
success_d = test_behr_main('daily', '', '');
success = success_m & success_d;
case 'publishing'
success_m = test_publishing('monthly', '', '', '', '');
success_d = test_publishing('daily', '', '', '', '');
success = success_m * success_d;
case 'all'
success = test_all();
otherwise
E.notimplemented(what_to_test);
end
for a=1:numel(success)
fprintf(fid, '%s: %s\n', datestr(test_dates{a}), passfail(success(a)));
end
fprintf(fid, 'Overall: %s\n', passfail(all(success)));
msg = sprintf('BEHR unit test completed on %s step(s): %s', what_to_test, datestr(now));
border = repmat('*', 1, numel(msg));
fprintf(fid, '\n%s\n', border);
fprintf(fid, '%s\n', msg);
fprintf(fid, '%s\n\n', border);
catch err
if fid > 2
fclose(fid);
end
rethrow(err);
end
if fid > 2
fclose(fid);
end
if save_results_to_file
fprintf('Results saved to %s\n', results_file);
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%% NESTED FUNCTIONS %%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function dfolder = make_data_folder()
dfolder = fullfile(my_dir, 'UnitTestData', sprintf('Produced%s', datestr(today, 'yyyymmdd')));
if exist(dfolder, 'dir')
if ~ask_yn(sprintf('Directory\n %s\n exists, it will be cleared before continuing. Proceed?', dfolder))
E.userCancel()
else
% This is set up to delete all files in one of the
% "ProducedYYYYMMDD" folders b/c all files in those folders
% should be produced by the code in the state represented
% by the GitReport.txt file. If we allow ourselves to only
% remove a subset of files, that is no longer guaranteed to
% be true.
remove_contents(dfolder);
end
else
mkdir(dfolder);
end
fprintf('Unit test data will be stored in %s\n', dfolder);
end
function rfile = make_results_file(test_steps)
rfolder = fullfile(my_dir, 'UnitTestResults');
if ~exist(rfolder, 'dir')
mkdir(rfolder);
end
rfilename = sprintf('BEHR_%s_Unit_Test_Results_%s.txt', test_steps, datestr(now, 'yyyymmdd_HHMMSS'));
rfile = fullfile(rfolder, rfilename);
end
function make_git_report(repo_dir, report_name)
currdir = cd(repo_dir);
try
% Overall status (current branch, modified/added/deleted/untracked
% files). Use --porcelain to remove formatting (bold, color, etc)
% and --branch to force it to show the branch. --no-pager means it
% won't try to put the output through "less" and avoids a "terminal
% not fully functional" warning.
[~, gitstat] = system('git --no-pager status --porcelain --branch');
% Get the one line commit message for the last, decorated with any
% tags or branch heads. Remove color to avoid introducing special
% characters into the text.
[~, githead] = system('git --no-pager log -1 --pretty=oneline --decorate --no-color');
% Get the differenced since the last commit, sans color and pager
% for the same reasons as above. By specifically diffing against
% HEAD, we get staged and unstaged changes.
[~, gitdiff] = system('git --no-pager diff --no-color HEAD');
catch err
cd(currdir)
rethrow(err);
end
cd(currdir);
% Extract the branch from the status - with "--porcelain --branch"
% it is on its own line prefaced by ##
[i,j] = regexp(gitstat, '##.*?\n', 'once');
gitbranch = gitstat(i:j);
gitbranch = strrep(gitbranch, '#', '');
gitbranch = strtrim(gitbranch);
gitstat(i:j) = [];
% Also add space before each file in the diff (usually the "diff
% --git" is bolded so it's easier to see but we removed formatting)
gitdiff = strrep(gitdiff, 'diff --git', sprintf('\ndiff --git'));
gfid = fopen(fullfile(root_save_folder, report_name), 'w');
begin_msg = sprintf('Git report on %s for unit test data generated on %s', repo_dir, datestr(now));
gborder = repmat('*', size(begin_msg));
fprintf(gfid, '%s\n%s\n%s\n\n', gborder, begin_msg, gborder);
fprintf(gfid, 'Current branch: %s\n\n', gitbranch);
fprintf(gfid, 'HEAD at time of generation:\n%s\n%s\n\n', githead, gborder);
fprintf(gfid, 'Git status at time of generation\n (M = modified, A = added, D = deleted, ?? = untracked):\n\n%s\n%s\n\n', gitstat, gborder);
fprintf(gfid, 'Git diff (working dir against HEAD) at time of generation:\n\n%s', gitdiff);
fclose(gfid);
end
function successes = test_all()
% TEST_ALL Run every stage of the BEHR unit tests (reading, BEHR_main in
% monthly and daily mode, and publishing in monthly and daily mode) for
% the shared list of test dates.  Returns a logical array SUCCESSES that
% is true only where all five stages passed for that date.
%
% Nested function: reads use_behrpaths, generate_new_data, test_dates,
% read_save_folder and main_root_save_folder from the enclosing
% function's workspace, and prompts interactively for any old/new data
% directories that cannot be derived automatically.
% If doing all three tests, give the option to set up all the directories now
if ~use_behrpaths
if ask_yn('Compare against an old unit test?')
% All "old" products live under a single ProducedYYYYMMDD root with a
% fixed subdirectory layout, so one prompt locates every stage's old data.
old_root = getdir('Choose the root ProducedYYYYMMDD directory', {});
old_sp_dir = fullfile(old_root, 'Reading');
old_daily_behr_dir = fullfile(old_root, 'Main', 'daily');
old_monthly_behr_dir = fullfile(old_root, 'Main', 'monthly');
old_daily_native_dir = fullfile(old_root, 'Publishing', 'native_hdf', 'daily');
old_monthly_native_dir = fullfile(old_root, 'Publishing', 'native_hdf', 'monthly');
old_daily_gridded_dir = fullfile(old_root, 'Publishing', 'gridded_hdf', 'daily');
old_monthly_gridded_dir = fullfile(old_root, 'Publishing', 'gridded_hdf', 'monthly');
else
% Otherwise ask for each old-data directory individually.
old_sp_dir = getdir('You''ll need to choose the directory with the old OMI_SP files', test_dates);
old_monthly_behr_dir = getdir('You''ll need to choose the directory with the old monthly OMI_BEHR files', test_dates);
old_daily_behr_dir = getdir('You''ll need to choose the directory with the old daily OMI_BEHR files', test_dates);
old_daily_native_dir = getdir('You''ll need to choose the directory with the old daily native pixel HDF files', test_dates);
old_monthly_native_dir = getdir('You''ll need to choose the directory with the old monthly native pixel HDF files', test_dates);
old_daily_gridded_dir = getdir('You''ll need to choose the directory with the old daily gridded HDF files', test_dates);
old_monthly_gridded_dir = getdir('You''ll need to choose the directory with the old monthly gridded HDF files', test_dates);
end
else
% if using behr_paths, these will be set automatically
old_sp_dir = '';
old_monthly_behr_dir = '';
old_daily_behr_dir = '';
old_daily_native_dir = '';
old_monthly_native_dir = '';
old_daily_gridded_dir = '';
old_monthly_gridded_dir = '';
end
if ~generate_new_data
new_sp_dir = getdir('You''ll need to choose the directory with the new OMI_SP files', test_dates);
new_behr_dir = getdir('You''ll need to choose the directory with the new OMI_BEHR files with "daily" and "monthly" subfolders', test_dates);
new_native_dir = getdir('You''ll need to choose the directory containing the new native HDF files with "daily" and "monthly" subfolders', test_dates);
new_gridded_dir = getdir('You''ll need to choose the directory containing the new gridded HDF files with "daily" and "monthly" subfolders', test_dates);
else
% if generating new data, these are set from the save_folder automatically
new_sp_dir = '';
new_behr_dir = '';
new_native_dir = '';
new_gridded_dir = '';
end
% Run each stage in dependency order; each returns a per-date logical array.
read_success = test_reading(old_sp_dir, new_sp_dir);
behr_monthly_success = test_behr_main('monthly',old_monthly_behr_dir, new_behr_dir, read_save_folder);
behr_daily_success = test_behr_main('daily',old_daily_behr_dir, new_behr_dir, read_save_folder);
pub_monthly_success = test_publishing('monthly',old_monthly_native_dir, old_monthly_gridded_dir, new_native_dir, new_gridded_dir, fullfile(main_root_save_folder,'monthly'));
pub_daily_success = test_publishing('daily',old_daily_native_dir, old_daily_gridded_dir, new_native_dir, new_gridded_dir, fullfile(main_root_save_folder,'daily'));
% A date only counts as passing if every stage passed for it.
successes = read_success & behr_monthly_success & behr_daily_success & pub_monthly_success & pub_daily_success;
end
function successes = test_reading(old_dir, new_dir)
% TEST_READING Compare new OMI_SP (read_main) output against old output
% for each shared test date; returns a per-date logical pass/fail array.
%
% Nested function: reads generate_new_data, read_save_folder,
% test_dates, test_region, test_dates_no_data, use_behrpaths,
% DEBUG_LEVEL, fid and fields_to_ignore from the enclosing workspace.
% Either OLD_DIR/NEW_DIR argument may be empty, in which case the user
% is prompted (or behr_paths is consulted for the old directory).
if generate_new_data
% If generating new data, then our new_dir will always be the location where we generate the new data.
new_dir = read_save_folder;
mkdir(new_dir);
for i=1:numel(test_dates)
read_main('start', test_dates{i}, 'end', test_dates{i}, 'sp_mat_dir', new_dir, 'overwrite', true, 'region', test_region);
end
else
% Otherwise, a directory for new files may have already been passed (if running all tests, generally).
% if not, ask now which directory contains the new SP files.
if isempty(new_dir)
new_dir = getdir('You''ll need to choose the directory with the new OMI_SP files', test_dates);
end
% This fixed some weird bug where "read_save_folder" wasn't set because we weren't generating data, but
% it got used later. That probably shouldn't happen, so that bug should be fixed eventually and this
% removed.
% NOTE(review): this tests for a variable named "save_folder" but then
% assigns "read_save_folder"; "save_folder" never appears in this
% function, so the condition is presumably always true here.  Confirm
% whether the intent was exist('read_save_folder', 'var') before changing.
if ~exist('save_folder', 'var')
read_save_folder = new_dir;
end
end
% Since a common use case of this function is to test a new version against the prior version, we gave
% the user the option at the beginning of using the standard paths for old data. If that's not what they
% chose, or an old directory wasn't already given, we need to ask now.
if use_behrpaths
old_dir = behr_paths.SPMatSubdir(test_region);
elseif isempty(old_dir)
old_dir = getdir('You''ll need to choose the directory with the old OMI_SP files', test_dates);
end
successes = true(size(test_dates));
for i=1:numel(test_dates)
if DEBUG_LEVEL > 0
fprintf(fid, '\n');
end
% Glob pattern for this date's SP file (wildcard form allows any version).
filepat = sp_savename(test_dates{i}, test_region, '.mat', true);
try
[old_data, old_file] = load_by_glob(fullfile(old_dir, filepat));
[new_data, new_file] = load_by_glob(fullfile(new_dir, filepat));
catch err
if strcmp(err.identifier, 'load_by_glob:file_not_found')
% A missing file only counts as a failure when the date is expected
% to have data (i.e. is not listed in test_dates_no_data).
if ismember(test_dates{i}, test_dates_no_data)
if DEBUG_LEVEL > 0
fprintf(fid, 'No data for %s as expected\n', test_dates{i});
end
else
if DEBUG_LEVEL > 0
fprintf(fid, 'FAIL: No data produced for %s!!!\n', test_dates{i});
end
successes(i) = false;
end
continue
else
rethrow(err);
end
end
if DEBUG_LEVEL > 0
fprintf(fid, '\nChecking %s\n', test_dates{i});
fprintf(fid, 'Loaded old file: %s\n', old_file{1});
fprintf(fid, 'Loaded new file: %s\n', new_file{1});
end
if DEBUG_LEVEL > 0
header_msg = '***** Running priori tests on data read in ****';
header_border = repmat('*', 1, length(header_msg));
fprintf(fid, '\n%1$s\n%2$s\n%1$s\n', header_border, header_msg);
end
% Internal-consistency checks on the new data alone.
successes(i) = reading_priori_tests(new_data.Data, DEBUG_LEVEL, fid) && successes(i);
if DEBUG_LEVEL > 0
header_msg = '***** Running reading unit tests, comparing to previous data ****';
header_border = repmat('*', 1, length(header_msg));
fprintf(fid, '\n%1$s\n%2$s\n%1$s\n', header_border, header_msg);
end
% Field-by-field comparison of new vs. old data.
successes(i) = behr_unit_test(new_data.Data, old_data.Data, DEBUG_LEVEL, fid, fields_to_ignore) && successes(i);
end
end
%%%%%%%%%%%%%%%%%%%
% BEHR MAIN TESTS %
%%%%%%%%%%%%%%%%%%%
function successes = test_behr_main(prof_mode, old_dir, new_dir, sp_data_dir)
% TEST_BEHR_MAIN Compare new BEHR_main output against old output for
% PROF_MODE ('monthly' or 'daily') across the shared test dates.
% Returns a logical array that is true where both the native-pixel
% (Data) and gridded (OMI) structures pass their checks.
%
% Nested function: reads generate_new_data, main_root_save_folder,
% test_dates, test_region, test_dates_no_data, use_behrpaths,
% DEBUG_LEVEL, fid and fields_to_ignore from the enclosing workspace.
% OLD_DIR/NEW_DIR may be empty (the user is prompted); SP_DATA_DIR is
% optional and only consulted when generating new data.
if generate_new_data
new_dir = fullfile(main_root_save_folder, lower(prof_mode));
mkdir(new_dir)
% If sp_data_dir not already given, give the choice of using behr_paths.sp_mat_dir or a user-specified dir
if ~exist('sp_data_dir', 'var')
if ask_yn('Use the paths specified by behr_paths for the SP files to be read into BEHR_main?')
sp_data_dir = behr_paths.SPMatSubdir(test_region);
else
sp_data_dir = getdir('You''ll need to choose the directory with existing OMI_SP files', test_dates);
end
end
for i=1:numel(test_dates)
% Daily profiles are only available in certain date windows; skip others.
if strcmpi(prof_mode, 'daily') && ~can_do_daily(test_dates{i})
continue
end
BEHR_main('start', test_dates{i}, 'end', test_dates{i}, 'behr_mat_dir', new_dir, 'sp_mat_dir', sp_data_dir, 'profile_mode', prof_mode, 'overwrite', true);
end
else
% If we're not generating data, then check if new_dir is not empty (i.e. already given)
if isempty(new_dir)
new_dir = getdir(sprintf('You''ll need to choose the directory with the new %s OMI_BEHR files', prof_mode), test_dates);
end
end
% Since a common use case of this function is to test a new version against the prior version, we gave
% the user the option at the beginning of using the standard paths for old data. If that's not what they
% chose, or an old directory wasn't already given, we need to ask now.
if use_behrpaths
old_dir = behr_paths.BEHRMatSubdir(test_region, prof_mode);
elseif isempty(old_dir)
old_dir = getdir(sprintf('You''ll need to choose the directory with the old %s OMI_BEHR files', prof_mode), test_dates);
end
% Track the native-pixel (Data) and gridded (OMI) comparisons separately;
% both must pass for a date to count as successful.
successes_data = true(size(test_dates));
successes_grid = true(size(test_dates));
for i=1:numel(test_dates)
if strcmpi(prof_mode, 'daily') && ~can_do_daily(test_dates{i})
% Skipped daily dates are recorded as passing for both structures.
successes_data(i) = true;
successes_grid(i) = true;
continue
end
if DEBUG_LEVEL > 0
fprintf(fid, '\n');
end
filepat = behr_filename(test_dates{i}, prof_mode, test_region, '.mat', true);
try
[old_data, old_file] = load_by_glob(fullfile(old_dir, filepat));
[new_data, new_file] = load_by_glob(fullfile(new_dir, filepat));
catch err
if strcmp(err.identifier, 'load_by_glob:file_not_found')
% A missing file is only a failure if the date should have data.
if ismember(test_dates{i}, test_dates_no_data)
if DEBUG_LEVEL > 0
fprintf(fid, 'No data for %s as expected\n', test_dates{i});
end
else
if DEBUG_LEVEL > 0
fprintf(fid, 'FAIL: No data produced for %s!!!\n', test_dates{i});
end
successes_data(i) = false;
successes_grid(i) = false;
end
continue
else
rethrow(err);
end
end
if DEBUG_LEVEL > 0
fprintf(fid, '\nChecking %s\n', test_dates{i});
fprintf(fid, 'Loaded old file: %s\n', old_file{1});
fprintf(fid, 'Loaded new file: %s\n', new_file{1});
end
% Only the native pixel struct has all of the necessary fields
% to assess the accuracy of the quality flags. We must assume
% that the gridding algorithm does its job properly... at least
% until we write a unit test for that.
if DEBUG_LEVEL > 0
header_msg = '***** Running priori tests on result of main algorithm, Data struct ****';
header_border = repmat('*', 1, length(header_msg));
fprintf(fid, '\n%1$s\n%2$s\n%1$s\n', header_border, header_msg);
end
successes_data(i) = main_priori_tests(new_data.Data, DEBUG_LEVEL, fid) && successes_data(i);
if DEBUG_LEVEL > 0
header_msg = '***** Running BEHR_main unit tests on Data struct ****';
header_border = repmat('*', 1, length(header_msg));
fprintf(fid, '\n%1$s\n%2$s\n%1$s\n', header_border, header_msg);
end
successes_data(i) = behr_unit_test(new_data.Data, old_data.Data, DEBUG_LEVEL, fid, fields_to_ignore) && successes_data(i);
if DEBUG_LEVEL > 0
header_msg = '***** Running BEHR_main unit tests on OMI struct ****';
header_border = repmat('*', 1, length(header_msg));
fprintf(fid, '\n%1$s\n%2$s\n%1$s\n', header_border, header_msg);
end
successes_grid(i) = behr_unit_test(new_data.OMI, old_data.OMI, DEBUG_LEVEL, fid, fields_to_ignore) && successes_grid(i);
end
% A date passes only if both the Data and OMI structures passed.
successes = successes_data & successes_grid;
end
%%%%%%%%%%%%%%%%%%%%
% PUBLISHING TESTS %
%%%%%%%%%%%%%%%%%%%%
function successes = test_publishing(prof_mode, old_native_dir, old_gridded_dir, new_native_dir, new_gridded_dir, behr_data_dir)
% TEST_PUBLISHING Compare new vs. old published HDF output (both native
% pixel and gridded) for PROF_MODE ('monthly' or 'daily').
% Returns a logical array that is true where both the native and
% gridded comparisons pass for that date.
%
% Nested function: reads generate_new_data, pub_root_save_folder,
% test_dates, test_region and use_behrpaths from the enclosing
% workspace.  Directory arguments may be empty (the user is prompted);
% BEHR_DATA_DIR is optional and only consulted when generating new data.
if generate_new_data
% If we're generating new data, then our new file directories will always be in the save folder
% (where we generate the new data)
new_native_dir = fullfile(pub_root_save_folder, 'native_hdf', lower(prof_mode));
new_gridded_dir = fullfile(pub_root_save_folder, 'gridded_hdf', lower(prof_mode));
if ~exist('behr_data_dir', 'var')
if ask_yn('Use the paths specified by behr_paths for the BEHR files to be read into BEHR_publishing_main?')
behr_data_dir = behr_paths.BEHRMatSubdir(test_region, prof_mode);
else
behr_data_dir = getdir(sprintf('You''ll need to choose the directory with existing %s OMI_BEHR files', prof_mode), test_dates);
end
end
if ~exist(new_native_dir, 'dir')
mkdir(new_native_dir);
end
if ~exist(new_gridded_dir, 'dir')
mkdir(new_gridded_dir)
end
for i=1:numel(test_dates)
% Daily profiles are only available in certain date windows; skip others.
if strcmpi(prof_mode, 'daily') && ~can_do_daily(test_dates{i})
continue
end
BEHR_publishing_main('start', test_dates{i}, 'end', test_dates{i}, 'output_type', 'hdf', 'pixel_type', 'native', 'mat_dir', behr_data_dir, 'save_dir', new_native_dir,...
'organize', false, 'overwrite', true);
BEHR_publishing_main('start', test_dates{i}, 'end', test_dates{i}, 'output_type', 'hdf', 'pixel_type', 'gridded', 'mat_dir', behr_data_dir, 'save_dir', new_gridded_dir,...
'organize', false, 'overwrite', true);
end
else
% Otherwise, this may be already given
if isempty(new_native_dir)
new_native_dir = getdir(sprintf('You''ll need to choose the directory containing the new %s native HDF files', prof_mode), test_dates);
end
if isempty(new_gridded_dir)
new_gridded_dir = getdir(sprintf('You''ll need to choose the directory containing the new %s gridded HDF files', prof_mode), test_dates);
end
end
% Since a common use case of this function is to test a new version against the prior version, we gave
% the user the option at the beginning of using the standard paths for old data. If that's not what they
% chose, or an old directory wasn't already given, we need to ask now.
if use_behrpaths
old_root_dir = behr_paths.website_staging_dir;
% Assume that the staging directory is adjacent to the webData
% directory where the files are actually moved to show up on
% the website
old_native_dir = fullfile(old_root_dir, '..', 'webData', sprintf('behr_%s_hdf', lower(prof_mode)));
old_gridded_dir = fullfile(old_root_dir, '..', 'webData', sprintf('behr_%s_regridded_hdf', lower(prof_mode)));
else
if isempty(old_native_dir)
old_native_dir = getdir(sprintf('You''ll need to choose the directory with the old %s native pixel HDF files', prof_mode), test_dates);
end
if isempty(old_gridded_dir)
old_gridded_dir = getdir(sprintf('You''ll need to choose the directory with the old %s gridded HDF files', prof_mode), test_dates);
end
end
% Run the native and gridded comparisons separately, then require both.
successes_native = test_publishing_subfunc(prof_mode, old_native_dir, new_native_dir);
successes_grid = test_publishing_subfunc(prof_mode, old_gridded_dir, new_gridded_dir);
successes = successes_native & successes_grid;
end
function successes = test_publishing_subfunc(prof_mode, old_dir, new_dir)
% TEST_PUBLISHING_SUBFUNC Compare new vs. old published HDF files in one
% directory pair (native or gridded, inferred from NEW_DIR's path) for
% each test date.  Returns a logical array of per-date pass/fail results.
%
% Nested function: reads test_dates, test_dates_no_data, test_region,
% DEBUG_LEVEL, fid and fields_to_ignore from the enclosing workspace.
native_or_gridded = regexp(new_dir, '(native)|(gridded)', 'match', 'once');
successes = false(size(test_dates));
for i=1:numel(test_dates)
if strcmpi(prof_mode, 'daily') && ~can_do_daily(test_dates{i})
% Dates outside the daily-profile availability windows are skipped,
% not failed.  Previously this left successes(i) at its false
% initialization, so skipped daily dates were miscounted as failures;
% mark them true here, mirroring the handling in test_behr_main.
successes(i) = true;
continue
end
if DEBUG_LEVEL > 0
fprintf(fid, '\n');
end
% Glob pattern for this date's published HDF file.
filepat = behr_filename(test_dates{i}, prof_mode, test_region, '.hdf', true);
try
[old_data, old_file] = load_hdf_by_glob(fullfile(old_dir, filepat));
[new_data, new_file] = load_hdf_by_glob(fullfile(new_dir, filepat));
catch err
if strcmp(err.identifier, 'load_hdf_by_glob:file_not_found')
% A missing file is only a failure if the date should have data.
if ismember(test_dates{i}, test_dates_no_data)
if DEBUG_LEVEL > 0
fprintf(fid, 'No data for %s as expected\n', test_dates{i});
end
successes(i) = true;
else
if DEBUG_LEVEL > 0
fprintf(fid, 'FAIL: No data produced for %s!!!\n', test_dates{i});
end
end
continue
else
rethrow(err);
end
end
if DEBUG_LEVEL > 0
fprintf(fid, '\nChecking %s\n', test_dates{i});
fprintf(fid, 'Loaded old file: %s\n', old_file);
fprintf(fid, 'Loaded new file: %s\n', new_file);
end
if DEBUG_LEVEL > 0
header_msg = sprintf('***** Running BEHR_publishing unit tests on %s HDFs ****', native_or_gridded);
header_border = repmat('*', 1, length(header_msg));
fprintf(fid, '\n%1$s\n%2$s\n%1$s\n', header_border, header_msg);
end
% Field-by-field comparison of the new HDF contents against the old.
successes(i) = behr_unit_test(new_data, old_data, DEBUG_LEVEL, fid, fields_to_ignore);
end
end
% NESTED UTILITY FUNCTIONS %
function b = can_do_daily(date_in)
% CAN_DO_DAILY True if DATE_IN falls inside any of the date windows in
% the shared cell array daily_profiles_available (one row per window:
% column 1 = window start, column 2 = window end, as date strings).
target = datenum(date_in);
window_nums = cellfun(@datenum, daily_profiles_available);
b = any(target >= window_nums(:,1) & target <= window_nums(:,2));
end
end
%%%%%%%%%%%%%%%%
% SUBFUNCTIONS %
%%%%%%%%%%%%%%%%
function s = passfail(b)
% PASSFAIL Render a test result as the string 'PASS' or 'FAIL'.
% B is treated like a MATLAB "if" condition: it maps to 'PASS' only when
% it is nonempty and every element is true; anything else yields 'FAIL'.
s = 'FAIL';
if b
s = 'PASS';
end
end
function d = getdir(prompt, test_dates)
% GETDIR Interactively ask the user for a directory path.
% PROMPT is shown to the user; TEST_DATES, when nonempty, is a cell
% array of date strings appended to the prompt for context.  In a
% graphical session the system folder picker is used; otherwise the
% user types a path, which must exist ('q' cancels via JLLErrors).
if isempty(test_dates)
fprintf('%s\n (press ENTER)\n', prompt);
else
fprintf('%s for the following dates:\n %s\n (press ENTER)\n', prompt, strjoin(test_dates, ', '));
end
input('','s'); % block until the user acknowledges the prompt
E=JLLErrors;
if isDisplay
% Graphical session: let the user browse for the directory.
d = uigetdir;
return
end
% Headless session: keep asking until an existing directory is entered
% (or the user cancels with 'q').
while true
d = input('Enter the directory: ', 's');
if strcmpi(d,'q')
E.userCancel;
end
if exist(d, 'dir')
return
end
fprintf('That directory does not exist.\n')
end
end
function [Data, file_name] = load_hdf_by_glob(file_pattern)
% LOAD_HDF_BY_GLOB Load exactly one HDF5 file matching FILE_PATTERN.
% Errors with identifier 'load_hdf_by_glob:file_not_found' when no file
% matches and 'load_hdf_by_glob:too_many_files' when more than one does.
% Returns the file contents as a struct (via behrhdf2struct) and the
% full path of the file that was loaded.
matches = dir(file_pattern);
n_matches = numel(matches);
if n_matches < 1
error('load_hdf_by_glob:file_not_found', 'No file matching "%s" found', file_pattern);
end
if n_matches > 1
error('load_hdf_by_glob:too_many_files', 'Multiple files matching "%s" found', file_pattern);
end
% dir() strips the path, so rebuild the full name from the pattern's dir.
file_name = fullfile(fileparts(file_pattern), matches(1).name);
Data = behrhdf2struct(h5info(file_name));
end
function remove_contents(directory)
% REMOVE_CONTENTS Recursively delete every file and subdirectory inside
% DIRECTORY, leaving DIRECTORY itself in place.
listing = dir(fullfile(directory, '*'));
% Drop the '.' and '..' pseudo-entries that dir() always returns.
listing = listing(~ismember({listing.name}, {'.', '..'}));
for k = 1:numel(listing)
entry_path = fullfile(directory, listing(k).name);
if listing(k).isdir
% Empty the subdirectory first, then remove it.
remove_contents(entry_path);
rmdir(entry_path);
else
delete(entry_path);
end
end
end
|
github
|
CohenBerkeleyLab/BEHR-core-master
|
prod_test_load_hdf.m
|
.m
|
BEHR-core-master/Production tests/prod_test_load_hdf.m
| 2,939 |
utf_8
|
389542dd60ee66619d6623b673b18072
|
function [ D_new, fill_vals ] = prod_test_load_hdf( newfile, fields_to_check )
%[ D_NEW, FILL_VALS ] = PROD_TEST_LOAD_HDF( NEWFILE, FIELDS_TO_CHECK )
% This function will load a BEHR HDF file and return the data in a
% structure like those used in the .mat files. NEWFILE must be a
% string pointing to the HDF file to load. FIELDS_TO_CHECK must be a
% cell array of fields in the HDF file to load. This returns a
% structure D_NEW which will have each swath as a top-level index and
% each requested field in that swath. FILL_VALS is a vector of fill
% values read from the HDF attributes, in the same order as
% FIELDS_TO_CHECK.
%
% (An earlier revision of this help text described a two-file
% comparison with OLDFILE/D_OLD; this function loads a single file.)
%%%%% INPUT CHECKING %%%%%
E = JLLErrors;
if ~ischar(newfile)
E.badinput('NEWFILE must be a path given as a string')
elseif ~exist(newfile, 'file')
E.badinput('NEWFILE (%s) does not exist.',newfile);
end
if ~iscellstr(fields_to_check)
E.badinput('FIELDS_TO_CHECK must be a cell array of strings')
end
%%%%% MAIN FUNCTION %%%%%
% Read the file's metadata once; read_hdf does the actual data extraction.
hi_new = h5info(newfile);
[D_new, fill_vals] = read_hdf(hi_new, fields_to_check);
end
function [D, fills] = read_hdf(hi, fields_to_check)
% Utility function that actually does the reading: pulls every field in
% FIELDS_TO_CHECK out of every swath group of the h5info structure HI,
% and collects each field's fill value from the first swath's datasets
% (the fill value is assumed identical across swaths).
E=JLLErrors;
n_fields = numel(fields_to_check);
n_swaths = numel(hi.Groups(1).Groups);
D = repmat(make_empty_struct_from_cell(fields_to_check), n_swaths, 1);
for i_swath = 1:n_swaths
for i_field = 1:n_fields
this_field = fields_to_check{i_field};
% Read this field from this swath. If the read throws a library
% error, check whether the dataset simply doesn't exist so we can
% rephrase that case more succinctly; otherwise rethrow unchanged.
try
D(i_swath).(this_field) = h5read(hi.Filename, h5dsetname(hi,1,i_swath,this_field));
catch err
if ~strcmp(err.identifier,'MATLAB:imagesci:h5read:libraryError')
rethrow(err)
end
swath_dsets = {hi.Groups(1).Groups(i_swath).Datasets.Name};
if ismember(this_field, swath_dsets)
rethrow(err);
end
E.badinput('The field %s is not present in the file %s',this_field,hi.Filename);
end
end
end
% Assume that the fill value is the same in every swath
fills = nan(n_fields,1);
first_swath_dsets = {hi.Groups(1).Groups(1).Datasets.Name};
for i_field = 1:n_fields
xx = strcmp(first_swath_dsets, fields_to_check{i_field});
if sum(xx) < 1
E.callError('find_fill','Could not find the field %s in %s',fields_to_check{i_field},hi.Filename);
end
fills(i_field) = hi.Groups(1).Groups(1).Datasets(xx).FillValue;
end
end
|
github
|
CohenBerkeleyLab/BEHR-core-master
|
behr_prod_test.m
|
.m
|
BEHR-core-master/Production tests/behr_prod_test.m
| 21,793 |
utf_8
|
26e1d6d8f368c0fde51a20e3fbbd8098
|
function [ indiv_stats, overall_stats ] = behr_prod_test( varargin )
%[ INDIV_STATS, OVERALL_STATS] = BEHR_PROD_TEST()
% Tests a sample of OMI_BEHR files for differences. Whenever making a new
% version of BEHR, it's good to do some basic checking to make sure that
% the differences are what you expect. This function will choose a random
% selection of BEHR files to compare and report on individual, day-by-day
% stats and the overall differences. This version uses the hard coded
% directories and file patterns.
%
%[ INDIV_STATS, OVERALL_STATS] = BEHR_PROD_TEST( NEW_DIR, NEW_PATTERN, OLD_DIR, OLD_PATTERN )
% This version looks for new files matching the glob pattern NEW_PATTERN
% in NEW_DIR and old files in OLD_DIR matching OLD_PATTERN.
%
% Additional parameters:
%
% 'nfiles' - how many files to load and test. Default is 100.
%
% 'checkvar' - which variable in .mat files to test. Must be the string
% 'Data' or 'OMI'. Default is 'Data', but has no effect if not loading
% .mat files.
%
% 'fields' - a cell array of string indicating which fields in the files
% to check. Default is {'BEHRColumnAmountNO2Trop', 'BEHRAMFTrop',
% 'BEHRColumnAmountNO2TropVisOnly', 'BEHRAMFTropVisOnly'}.
%
% 'start', 'end' - start and end dates of the period to draw the data
% from. Must be either a date number or a date string that Matlab
% recognizes automatically. Default is 2005-01-01 to today.
% Enter the directory and file name pattern for the new and old versions.
% The pattern must be a string that, when used in dir(), uniquely returns
% only BEHR files of the desired version.
%%%% USER OPTIONS %%%%
p = inputParser;
p.addOptional('new_dir', '.', @ischar);
p.addOptional('new_pattern', 'OMI_BEHR*', @ischar);
p.addOptional('old_dir', behr_paths.behr_mat_dir, @ischar);
p.addOptional('old_pattern', 'OMI_BEHR*', @ischar);
p.addParameter('nfiles', 100);
p.addParameter('checkvar', 'Data');
p.addParameter('fields', {'BEHRColumnAmountNO2Trop','BEHRAMFTrop','BEHRColumnAmountNO2TropVisOnly','BEHRAMFTropVisOnly'});
p.addParameter('start', '2005-01-01');
p.addParameter('end', today);
p.parse(varargin{:});
pout = p.Results;
new_dir = pout.new_dir;
new_pattern = pout.new_pattern;
old_dir = pout.old_dir;
old_pattern = pout.old_pattern;
n_files = pout.nfiles;
checkvar = pout.checkvar;
fields_to_check = pout.fields;
start_date = pout.start;
end_date = pout.end;
% Validation
if ~exist(new_dir, 'dir')
E.badinput('new_dir "%s" does not exist', new_dir);
elseif ~exist(old_dir, 'dir')
E.badinput('old_dir "%s" does not exist', old_dir);
end
if ~ischar(new_pattern)
E.badinput('NEW_PATTERN must be a string')
elseif ~ischar(old_pattern)
E.badinput('OLD_PATTERN must be a string')
end
if ~isnumeric(n_files) || ~isscalar(n_files) || n_files < 1 || mod(n_files, 1) ~= 0
E.badinput('The value for "nfiles" must be a scalar, positive, whole number')
end
if ~ismember(checkvar, {'Data', 'OMI'})
E.badinput('The value for "checkvar" must be the string "Data" or "OMI"');
end
if ~iscellstr(fields_to_check)
E.badinput('The value for "fields" must be a cell array of strings');
end
validate_date(start_date);
validate_date(end_date);
%%%% END USER OPTIONS %%%%
F_new = dir(fullfile(new_dir,new_pattern));
F_new = cut_down_by_date(F_new, start_date, end_date);
F_old = dir(fullfile(old_dir,old_pattern));
F_old = cut_down_by_date(F_old, start_date, end_date);
n_files = min([n_files, numel(F_new), numel(F_old)]);
[~,~,fileext_new] = fileparts(F_new(1).name);
[~,~,fileext_old] = fileparts(F_old(1).name);
dnums_new = get_file_datenums(F_new);
dnums_old = get_file_datenums(F_old);
diff_struct = struct('num_dif_vals',0,'mean_difference',0,'mean_absolute_difference',0,'mean_percent_difference',0,'mean_absolute_percent_difference',0,...
'median_difference',0,'median_absolute_difference',0,'median_percent_difference',0,'median_absolute_percent_difference',0,'differences',[],...
'percent_differences',[],'value_pairs',[]);
mat_hdf_comp_bool = xor(strcmpi(fileext_new,'.mat'), strcmpi(fileext_old, '.mat'));
if mat_hdf_comp_bool;
% If comparing a .mat and a .hdf file, we will do a different
% comparison of fill values because the .hdf will have fill values
% where the .mat has NaNs in some cases
fills_struct = struct('num_new_nans_or_fills', 0, 'values_that_became_nans_or_fills', [],...
'num_old_nans_or_fills', 0, 'values_that_replaced_nans_or_fills', []);
else
fills_struct = struct('num_new_nans', 0, 'values_that_became_nans', [], 'lon_for_became_nans', [], 'lat_for_became_nans', [],...
'num_new_fills', 0, 'values_that_became_fills', [], 'lon_for_became_fills', [], 'lat_for_became_fills', [],...
'num_old_nans', 0, 'values_that_replaced_nans', [], 'lon_for_replaced_nans', [], 'lat_for_replaced_nans', [],...
'num_old_fills', [], 'values_that_replaced_fills', [], 'lon_for_replaced_fills', [], 'lat_for_replaced_fills', []);
end
%substruct = struct('date','','num_dif_vals',0,'num_new_nans',0,'values_that_became_nans',[],'num_new_fills',0,'values_that_became_fills',[],...
% 'num_old_nans',0,'values_that_replaced_nans',[],'num_old_fills',0,'values_that_replaced_fills',[],'differences',[],'percent_differences',[],...
% 'value_pairs',[],'Longitude',[],'Latitude',[]);
substruct = struct('date', '', 'Longitude', [], 'Latitude', [], 'difference_stats', diff_struct, 'fill_and_nan_changes', fills_struct);
overall_stats = make_empty_struct_from_cell(fields_to_check, rmfield(substruct,'date'));
indiv_stats = make_empty_struct_from_cell(fields_to_check, substruct);
indiv_stats = repmat(indiv_stats, n_files, 1);
if isDisplay
wb = waitbar(0, sprintf('Sampling %d files',n_files));
end
n = 0;
safety = 0;
while n < n_files
safety = safety+1;
% Choose a random new file, make sure there is a corresponding old one,
% if so, load both and check the required fields.
r = ceil(rand * numel(F_new));
rold = dnums_old == dnums_new(r);
if sum(rold) < 1
continue
end
n = n+1;
if isDisplay
waitbar(n/n_files);
end
% Determine the file type from the extension, that will determine how
% it is loaded and if fill values are checked.
fill_vals = nan(numel(fields_to_check)+2, 2);
[D_new.Data, fill_vals(:,1)] = load_data(fullfile(new_dir, F_new(r).name), [fields_to_check, {'Longitude', 'Latitude'}], checkvar);
[D_old.Data, fill_vals(:,2)] = load_data(fullfile(old_dir, F_old(rold).name), [fields_to_check, {'Longitude', 'Latitude'}], checkvar);
lon = cat_sat_data(D_new.Data,'Longitude');
lat = cat_sat_data(D_new.Data,'Latitude');
for a = 1:numel(fields_to_check)
data_new = cat_sat_data(D_new.Data,fields_to_check{a}, 'vector', true);
data_old = cat_sat_data(D_old.Data,fields_to_check{a}, 'vector', true);
num_neq = sum(data_new(:) ~= data_old(:));
if mat_hdf_comp_bool
is_fill_or_nan_new = isnan(data_new(:)) | data_new(:) == fill_vals(a,1);
is_fill_or_nan_old = isnan(data_old(:)) | data_old(:) == fill_vals(a,2);
xx_new_nans_fills = is_fill_or_nan_new & ~is_fill_or_nan_old;
num_new_nans_fills = sum(xx_new_nans_fills);
values_now_nans_fills = data_old(xx_new_nans_fills);
xx_old_nans_fills = ~is_fill_or_nan_new & is_fill_or_nan_old;
num_old_nans_fills = sum(xx_old_nans_fills);
values_replaced_nans_fills = data_new(xx_old_nans_fills);
xx_good = ~(is_fill_or_nan_new | is_fill_or_nan_old);
else
xx_newnans = isnan(data_new(:)) & ~isnan(data_old(:));
num_new_nans = sum(xx_newnans);
values_now_nans = data_old(xx_newnans);
lon_for_now_nans = lon(xx_newnans);
lat_for_now_nans = lat(xx_newnans);
if isnan(fill_vals(a,1))
% Cannot test for fill value of NaN using == b/c nan == nan
% returns false.
is_new_fill = isnan(data_new(:));
num_new_fills = num_new_nans;
values_now_fills = values_now_nans;
lon_for_now_fills = lon_for_now_nans;
lat_for_now_fills = lat_for_now_nans;
else
is_new_fill = data_new(:) == fill_vals(a,1);
xx_newfills = data_new(:) == fill_vals(a,1) & data_old(:) ~= fill_vals(a,2);
num_new_fills = sum(xx_newfills);
values_now_fills = data_old(xx_newfills);
lon_for_now_fills = lon(xx_newfills);
lat_for_now_nans = lat(xx_newfills);
end
xx_oldnans = ~isnan(data_new(:)) & isnan(data_old(:));
num_old_nans = sum(xx_oldnans);
values_replaced_nans = data_new(xx_oldnans);
lon_for_rep_nans = lon(xx_oldnans);
lat_for_rep_nans = lat(xx_oldnans);
if isnan(fill_vals(a,2))
is_old_fill = isnan(data_old(:));
num_old_fills = num_old_nans;
values_replaced_fills = values_replaced_nans;
lon_for_rep_fills = lon_for_rep_nans;
lat_for_rep_fills = lat_for_rep_nans;
else
is_old_fill = data_old(:) == fill_vals(a,2);
xx_oldfills = data_new(:) ~= fill_vals(a,1) & data_old(:) == fill_vals(a,2);
num_old_fills = sum(xx_oldfills);
values_replaced_fills = data_new(xx_oldfills);
lon_for_rep_fills = lon(xx_oldfills);
lat_for_rep_fills = lat(xx_oldfills);
end
xx_good = ~(is_new_fill | is_old_fill);
end
del = data_new(xx_good) - data_old(xx_good);
perdel = reldiff(data_new(xx_good), data_old(xx_good))*100;
mean_diff = nanmean(del);
mean_absdiff = nanmean(abs(del));
mean_perdiff = nanmean(perdel);
mean_absperdiff = nanmean(abs(perdel));
median_diff = nanmedian(del);
median_absdiff = nanmedian(abs(del));
median_perdiff = nanmedian(perdel);
median_abs_perdiff = nanmedian(abs(perdel));
indiv_stats(n).(fields_to_check{a}).date = datestr(dnums_new(r),'yyyy-mm-dd');
indiv_stats(n).(fields_to_check{a}).Longitude = lon(xx_good);
indiv_stats(n).(fields_to_check{a}).Latitude = lat(xx_good);
indiv_stats(n).(fields_to_check{a}).difference_stats.num_dif_vals = num_neq;
indiv_stats(n).(fields_to_check{a}).difference_stats.mean_difference = mean_diff;
indiv_stats(n).(fields_to_check{a}).difference_stats.mean_absolute_difference = mean_absdiff;
indiv_stats(n).(fields_to_check{a}).difference_stats.mean_percent_difference = mean_perdiff;
indiv_stats(n).(fields_to_check{a}).difference_stats.mean_absolute_percent_difference = mean_absperdiff;
indiv_stats(n).(fields_to_check{a}).difference_stats.median_difference = median_diff;
indiv_stats(n).(fields_to_check{a}).difference_stats.median_absolute_difference = median_absdiff;
indiv_stats(n).(fields_to_check{a}).difference_stats.median_percent_difference = median_perdiff;
indiv_stats(n).(fields_to_check{a}).difference_stats.median_absolute_percent_difference = median_abs_perdiff;
indiv_stats(n).(fields_to_check{a}).difference_stats.differences = del;
indiv_stats(n).(fields_to_check{a}).difference_stats.percent_differences = perdel;
indiv_stats(n).(fields_to_check{a}).difference_stats.value_pairs = [data_new(xx_good), data_old(xx_good)];
if mat_hdf_comp_bool
indiv_stats(n).(fields_to_check{a}).fill_and_nan_changes.num_new_nans_or_fills = num_new_nans_fills;
indiv_stats(n).(fields_to_check{a}).fill_and_nan_changes.values_that_became_nans_or_fills = values_now_nans_fills;
indiv_stats(n).(fields_to_check{a}).fill_and_nan_changes.num_old_nans_or_fills = num_old_nans_fills;
indiv_stats(n).(fields_to_check{a}).fill_and_nan_changes.values_that_replaced_nans_or_fills = values_replaced_nans_fills;
else
indiv_stats(n).(fields_to_check{a}).fill_and_nan_changes.num_new_nans = num_new_nans;
indiv_stats(n).(fields_to_check{a}).fill_and_nan_changes.values_that_became_nans = values_now_nans;
indiv_stats(n).(fields_to_check{a}).fill_and_nan_changes.lon_for_became_nans = lon_for_now_nans;
indiv_stats(n).(fields_to_check{a}).fill_and_nan_changes.lat_for_became_nans = lat_for_now_nans;
indiv_stats(n).(fields_to_check{a}).fill_and_nan_changes.num_new_fills = num_new_fills;
indiv_stats(n).(fields_to_check{a}).fill_and_nan_changes.values_that_became_fills = values_now_fills;
indiv_stats(n).(fields_to_check{a}).fill_and_nan_changes.lon_for_became_fills = lon_for_now_fills;
indiv_stats(n).(fields_to_check{a}).fill_and_nan_changes.lat_for_became_fills = lat_for_now_fills;
indiv_stats(n).(fields_to_check{a}).fill_and_nan_changes.num_old_nans = num_old_nans;
indiv_stats(n).(fields_to_check{a}).fill_and_nan_changes.values_that_replaced_nans = values_replaced_nans;
indiv_stats(n).(fields_to_check{a}).fill_and_nan_changes.lon_for_replaced_nans = lon_for_rep_nans;
indiv_stats(n).(fields_to_check{a}).fill_and_nan_changes.lat_for_replaced_nans = lat_for_rep_nans;
indiv_stats(n).(fields_to_check{a}).fill_and_nan_changes.num_old_fills = num_old_fills;
indiv_stats(n).(fields_to_check{a}).fill_and_nan_changes.values_that_replaced_fills = values_replaced_fills;
indiv_stats(n).(fields_to_check{a}).fill_and_nan_changes.lon_for_replaced_fills = lon_for_rep_fills;
indiv_stats(n).(fields_to_check{a}).fill_and_nan_changes.lat_for_replaced_fills = lat_for_rep_fills;
end
overall_stats.(fields_to_check{a}).Longitude = cat(1, overall_stats.(fields_to_check{a}).Longitude, lon(xx_good));
overall_stats.(fields_to_check{a}).Latitude = cat(1, overall_stats.(fields_to_check{a}).Latitude, lat(xx_good));
overall_stats.(fields_to_check{a}).difference_stats.num_dif_vals = overall_stats.(fields_to_check{a}).difference_stats.num_dif_vals + num_neq;
overall_stats.(fields_to_check{a}).difference_stats.differences = cat(1, overall_stats.(fields_to_check{a}).difference_stats.differences, del);
overall_stats.(fields_to_check{a}).difference_stats.percent_differences = cat(1, overall_stats.(fields_to_check{a}).difference_stats.percent_differences, perdel);
overall_stats.(fields_to_check{a}).difference_stats.value_pairs = cat(1, overall_stats.(fields_to_check{a}).difference_stats.value_pairs, [data_new(xx_good), data_old(xx_good)]);
if mat_hdf_comp_bool
overall_stats.(fields_to_check{a}).fill_and_nan_changes.num_new_nans_or_fills = overall_stats.(fields_to_check{a}).fill_and_nan_changes.num_new_nans_or_fills + num_new_nans_fills;
overall_stats.(fields_to_check{a}).fill_and_nan_changes.values_that_became_nans_or_fills = cat(1, overall_stats.(fields_to_check{a}).fill_and_nan_changes.values_that_became_nans_or_fills, values_now_nans_fills);
overall_stats.(fields_to_check{a}).fill_and_nan_changes.num_old_nans_or_fills = overall_stats.(fields_to_check{a}).fill_and_nan_changes.num_old_nans_or_fills + num_old_nans_fills;
overall_stats.(fields_to_check{a}).fill_and_nan_changes.values_that_replaced_nans_or_fills = cat(1, overall_stats.(fields_to_check{a}).fill_and_nan_changes.values_that_replaced_nans_or_fills, values_replaced_nans_fills);
else
overall_stats.(fields_to_check{a}).fill_and_nan_changes.num_new_nans = overall_stats.(fields_to_check{a}).fill_and_nan_changes.num_new_nans + num_new_nans;
overall_stats.(fields_to_check{a}).fill_and_nan_changes.values_that_became_nans = cat(1, overall_stats.(fields_to_check{a}).fill_and_nan_changes.values_that_became_nans, values_now_nans);
overall_stats.(fields_to_check{a}).fill_and_nan_changes.lon_for_became_nans = cat(1, overall_stats.(fields_to_check{a}).fill_and_nan_changes.lon_for_became_nans, lon_for_now_nans);
overall_stats.(fields_to_check{a}).fill_and_nan_changes.lat_for_became_nans = cat(1, overall_stats.(fields_to_check{a}).fill_and_nan_changes.lat_for_became_nans, lat_for_now_nans);
overall_stats.(fields_to_check{a}).fill_and_nan_changes.num_new_fills = overall_stats.(fields_to_check{a}).fill_and_nan_changes.num_new_fills + num_new_fills;
overall_stats.(fields_to_check{a}).fill_and_nan_changes.values_that_became_fills = cat(1, overall_stats.(fields_to_check{a}).fill_and_nan_changes.values_that_became_fills, values_now_fills);
overall_stats.(fields_to_check{a}).fill_and_nan_changes.lon_for_became_fills = cat(1, overall_stats.(fields_to_check{a}).fill_and_nan_changes.lon_for_became_fills, lon_for_now_fills);
overall_stats.(fields_to_check{a}).fill_and_nan_changes.lat_for_became_fills = cat(1, overall_stats.(fields_to_check{a}).fill_and_nan_changes.lat_for_became_fills, lat_for_now_fills);
overall_stats.(fields_to_check{a}).fill_and_nan_changes.num_old_nans = overall_stats.(fields_to_check{a}).fill_and_nan_changes.num_old_nans + num_old_nans;
overall_stats.(fields_to_check{a}).fill_and_nan_changes.values_that_replaced_nans = cat(1, overall_stats.(fields_to_check{a}).fill_and_nan_changes.values_that_replaced_nans, values_replaced_nans);
overall_stats.(fields_to_check{a}).fill_and_nan_changes.lon_for_replaced_nans = cat(1, overall_stats.(fields_to_check{a}).fill_and_nan_changes.lon_for_replaced_nans, lon_for_rep_nans);
overall_stats.(fields_to_check{a}).fill_and_nan_changes.lat_for_replaced_nans = cat(1, overall_stats.(fields_to_check{a}).fill_and_nan_changes.lat_for_replaced_nans, lat_for_rep_nans);
overall_stats.(fields_to_check{a}).fill_and_nan_changes.num_old_fills = overall_stats.(fields_to_check{a}).fill_and_nan_changes.num_old_fills + num_old_fills;
overall_stats.(fields_to_check{a}).fill_and_nan_changes.values_that_replaced_fills = cat(1, overall_stats.(fields_to_check{a}).fill_and_nan_changes.values_that_replaced_fills, values_replaced_fills);
overall_stats.(fields_to_check{a}).fill_and_nan_changes.lon_for_replaced_fills = cat(1, overall_stats.(fields_to_check{a}).fill_and_nan_changes.lon_for_replaced_fills, lon_for_rep_fills);
overall_stats.(fields_to_check{a}).fill_and_nan_changes.lat_for_replaced_fills = cat(1, overall_stats.(fields_to_check{a}).fill_and_nan_changes.lat_for_replaced_fills, lat_for_rep_fills);
end
end
if safety > 10*n_files;
warning('Loop has executed more than 10x the number of requested files, exiting via safety condition');
break
end
F_new(r) = [];
F_old(rold) = [];
end
for a=1:numel(fields_to_check)
% Overall difference states computed at the end
all_del = overall_stats.(fields_to_check{a}).difference_stats.differences;
all_perdel = overall_stats.(fields_to_check{a}).difference_stats.percent_differences;
overall_stats.(fields_to_check{a}).difference_stats.mean_difference = nanmean(all_del);
overall_stats.(fields_to_check{a}).difference_stats.mean_absolute_difference = nanmean(abs(all_del));
overall_stats.(fields_to_check{a}).difference_stats.mean_percent_difference = nanmean(all_perdel);
overall_stats.(fields_to_check{a}).difference_stats.mean_absolute_percent_difference = nanmean(abs(all_perdel));
overall_stats.(fields_to_check{a}).difference_stats.median_difference = nanmedian(all_del);
overall_stats.(fields_to_check{a}).difference_stats.median_absolute_difference = nanmedian(abs(all_del));
overall_stats.(fields_to_check{a}).difference_stats.median_percent_difference = nanmedian(all_perdel);
overall_stats.(fields_to_check{a}).difference_stats.median_absolute_percent_difference = nanmedian(abs(all_perdel));
end
if isDisplay
close(wb)
end
% Put variables in base workspace if no outputs to the function, and plot
% overall histograms
if nargout < 1
putvar(indiv_stats,overall_stats);
end
for a=1:numel(fields_to_check)
figure; hist(overall_stats.(fields_to_check{a}).difference_stats.differences, 50);
title(sprintf('Differences in %s',fields_to_check{a}));
figure; hist(overall_stats.(fields_to_check{a}).difference_stats.percent_differences, 50);
title(sprintf('Percent differences in %s',fields_to_check{a}));
end
end
function dnums = get_file_datenums(F)
% Extract the date encoded as yyyymmdd in each file name of the dir()
% structure F, returned as Matlab date numbers (same shape as F).
dnums = nan(size(F));
for k = 1:numel(F)
    date_str = regexp(F(k).name, '\d{8}', 'match', 'once');
    dnums(k) = datenum(date_str, 'yyyymmdd');
end
end
function F = cut_down_by_date(F, start_date, end_date)
% Restrict the dir() structure F to files whose embedded yyyymmdd date
% falls within [start_date, end_date], inclusive on both ends.
file_dates = get_file_datenums(F);
in_range = file_dates >= datenum(start_date) & file_dates <= datenum(end_date);
F = F(in_range);
end
function [Data, fill_vals] = load_data(filename, fields, varname)
% LOAD_DATA Load the requested FIELDS from a .mat, .hdf, or .txt file.
%   [DATA, FILL_VALS] = LOAD_DATA( FILENAME, FIELDS, VARNAME ) dispatches
%   on the file extension. For .mat files, the variable VARNAME is loaded
%   and FILL_VALS is a numel(FIELDS)-by-1 vector of NaNs (no fill values
%   apply). For .hdf and .txt files the corresponding prod_test_load_*
%   helper supplies both the data and the fill values.
E = JLLErrors; % was missing: E.notimplemented below referenced an undefined variable
[~,~,fileext] = fileparts(filename);
if strcmp(fileext,'.mat')
    D = load(filename, varname);
    Data = D.(varname);
    % Fixed: was nan(numel(fields, 1)), which passes 1 as a second
    % argument to numel instead of requesting a numel(fields)-by-1 vector.
    fill_vals = nan(numel(fields), 1);
elseif strcmp(fileext,'.hdf')
    [Data, fill_vals] = prod_test_load_hdf(filename, fields);
elseif strcmp(fileext,'.txt')
    [Data, fill_vals] = prod_test_load_txt(filename, fields);
else
    E.notimplemented('The ability to check %s files has not been implemented',fileext)
end
end
|
github
|
CohenBerkeleyLab/BEHR-core-master
|
reading_priori_tests.m
|
.m
|
BEHR-core-master/Production tests/SubTests/reading_priori_tests.m
| 7,747 |
utf_8
|
95a338f40c0e0d0b250be423875e7725
|
function [ success ] = reading_priori_tests( data, DEBUG_LEVEL, fid )
%READING_PRIORI_TESTS Sanity check of data imported from the NASA SP.
%   SUCCESS = READING_PRIORI_TESTS( DATA ) runs consistency checks on the
%   pixel-corner fields of the native-pixel structure DATA (one element
%   per swath) and returns true only if every swath passes.
%
%   SUCCESS = READING_PRIORI_TESTS( DATA, DEBUG_LEVEL, FID ) also sets the
%   verbosity (DEBUG_LEVEL, default 2; larger values print more detail)
%   and the file ID messages are printed to (FID, default 1, i.e. the
%   command window).
if ~exist('DEBUG_LEVEL', 'var')
    DEBUG_LEVEL = 2;
end
if ~exist('fid', 'var')
    % An fid of 1 will make fprintf print to the command window as if no
    % fid was given
    fid = 1;
end

success = true;
success = check_pixel_corners(data) && success;

    function passfail = check_pixel_corners(data)
        % Verify that:
        %   1) If a pixel center is not NaN, all its corners are not NaN
        %      as well
        %   2) The pixel corners are not arranged in such a way that a
        %      border crosses (i.e. are clockwise or counter-clockwise)
        %   3) A pixel center falls within the border defined by the
        %      corners
        if DEBUG_LEVEL > 0
            fprintf(fid, 'Checking validity of corner fields...\n');
        end
        passfail_swaths = true(size(data));
        corner_fields = {'FoV75CornerLongitude', 'FoV75CornerLatitude';...
            'TiledCornerLongitude', 'TiledCornerLatitude'};
        for a=1:numel(data)
            this_data = data(a);
            for b=1:size(corner_fields,1)
                lon = this_data.Longitude;
                lat = this_data.Latitude;
                loncorn = this_data.(corner_fields{b,1});
                latcorn = this_data.(corner_fields{b,2});

                failed_nans = false(size(lon));
                failed_crossed = false(size(lon));
                failed_contained = false(size(lon));
                failed_other = false(size(lon));
                zero_flag = false;

                for c=1:numel(lon)
                    x = lon(c);
                    y = lat(c);
                    xall = loncorn(:,c);
                    yall = latcorn(:,c);

                    % Check NaNs. NaNs are considered inconsistent under
                    % two cases:
                    %   (i) one but not both of the center coordinates
                    %       are NaNs;
                    %   (ii) the center coordinates are NaNs but the
                    %        corners are not. The reverse is not
                    %        considered inconsistent (as of 10 May 2017)
                    %        b/c there are days when the corner product
                    %        (for whatever reason) is missing values but
                    %        the main product is not.
                    if (any(isnan([x;y])) && ~all(isnan([x;y]))) || ...
                            (any(isnan([x;y])) && ~all(isnan([xall; yall])))
                        failed_nans(c) = true;
                        continue
                    elseif all(isnan([xall;yall]))
                        % If all the corners are NaNs, then we cannot (and
                        % do not need to) do any further tests.
                        continue
                    end

                    % Found some orbits where multiple pixel corners are 0
                    % for certain rows. For now, assume that these
                    % correspond to some spacecraft maneuver (e.g. orbits
                    % 29479-29482, http://projects.knmi.nl/omi/research/calibration/instrument_status_v3/events.html)
                    % As long as it coincides with a NaN in the total NO2
                    % column, we will not consider this a failure since NO2
                    % was not retrieved for that pixel.
                    if sum(xall == 0) > 1 && sum(yall == 0) > 1
                        zero_flag = true;
                        % Fixed: test the NO2 column for THIS pixel.
                        % Previously the whole ColumnAmountNO2 array was
                        % passed to isnan(), so the branch only flagged a
                        % pixel when no value anywhere in the swath was NaN.
                        if ~isnan(this_data.ColumnAmountNO2(c))
                            failed_other(c) = true;
                        end
                        continue
                    end

                    % Check crossed corners
                    [x_chk, y_chk] = uncross_pix_corners(xall, yall);
                    if ~isequal(xall, x_chk) || ~isequal(yall, y_chk)
                        failed_crossed(c) = true;
                        continue
                    end

                    % Check that the center is contained in the corners
                    if ~inpolygon(x,y,xall,yall)
                        failed_contained(c) = true;
                        continue % this continue not strictly necessary, but would be if expanded the tests
                    end
                end

                % Print out the results
                field_stem = regexprep(corner_fields{b,1}, 'Longitude|Latitude', '');
                this_field_passfail = ~(any(failed_nans(:)) || any(failed_crossed(:)) || any(failed_contained(:)));
                if DEBUG_LEVEL > 0
                    fprintf(fid, ' Swath %d, %s fields: %s\n', a, field_stem, passfail_str(this_field_passfail));
                    % Print special messages before the swath
                    if zero_flag
                        fprintf(fid, [' NOTE: at least one pixel in this swath had multiple corners set to 0.\n'...
                            ' I''ve only seen this happen during special maneuvers of the spacecraft.\n'...
                            ' As long as no pixels fail "for other reasons", that means that the 0 corner\n'...
                            ' pixels have fill values for total NO2 column and so can probably be ignored.\n']);
                    end
                end
                if ~this_field_passfail
                    passfail_swaths(a) = false;
                    if DEBUG_LEVEL > 1
                        fprintf(fid, ' %d pixels had inconsistent nans.\n', sum(failed_nans(:)));
                        if DEBUG_LEVEL > 2
                            fprintf(fid, ' Indicies are:\n');
                            fprintf(fid, index_str(failed_nans, ' '));
                            fprintf(fid, '\n');
                        end
                        fprintf(fid, ' %d pixels had crossed corners.\n', sum(failed_crossed(:)));
                        if DEBUG_LEVEL > 2
                            fprintf(fid, ' Indices are:\n');
                            fprintf(fid, index_str(failed_crossed, ' '));
                            fprintf(fid, '\n');
                        end
                        fprintf(fid, ' %d pixels had corners and centers misaligned.\n', sum(failed_contained(:)));
                        if DEBUG_LEVEL > 2
                            fprintf(fid, ' Indices are:\n');
                            fprintf(fid, index_str(failed_contained, ' '));
                            fprintf(fid, '\n');
                        end
                        fprintf(fid, ' %d pixels failed for other reasons.\n', sum(failed_other(:)));
                        if DEBUG_LEVEL > 2
                            fprintf(fid, ' Indices are:\n');
                            fprintf(fid, index_str(failed_other, ' '));
                            fprintf(fid, '\n');
                        end
                    end
                end
            end
        end
        passfail = all(passfail_swaths);
    end
end
function s = passfail_str(b)
% Map a logical test outcome onto its report string ('PASS' or 'FAIL').
strs = {'FAIL', 'PASS'};
s = strs{logical(b) + 1};
end
function s = index_str(failed_bool, beginning_space)
% Format the [row col] subscripts of every true element of FAILED_BOOL,
% one entry per line, each prefixed with BEGINNING_SPACE. The line breaks
% are written as literal '\n' character pairs because callers pass the
% result through fprintf as a format string.
[rows, cols] = find(failed_bool);
entries = arrayfun(@(r, c) mat2str([r, c]), rows, cols, 'UniformOutput', false);
s = [beginning_space, strjoin(entries(:)', ['\n', beginning_space]), '\n'];
end
|
github
|
CohenBerkeleyLab/BEHR-core-master
|
main_priori_tests.m
|
.m
|
BEHR-core-master/Production tests/SubTests/main_priori_tests.m
| 4,434 |
utf_8
|
2d0a3d9befade7187276f72061bcaa7a
|
function [ success ] = main_priori_tests( data, DEBUG_LEVEL, fid )
%MAIN_PRIORI_TESTS Sanity checks of the BEHR quality flags in DATA.
%   SUCCESS = MAIN_PRIORI_TESTS( DATA ) verifies, for each swath in DATA,
%   that the BEHRQualityFlags error and quality summary bits agree with
%   the fields they are derived from. Returns true only if every swath is
%   consistent.
%
%   SUCCESS = MAIN_PRIORI_TESTS( DATA, DEBUG_LEVEL, FID ) also sets the
%   verbosity (DEBUG_LEVEL, default 2; higher values print more detail)
%   and the file ID messages are printed to (FID, default 1, i.e. the
%   command window).
if ~exist('DEBUG_LEVEL', 'var')
DEBUG_LEVEL = 2;
end
if ~exist('fid', 'var')
% An fid of 1 will make fprintf print to the command window as if no fid
% was given
fid = 1;
end
% Name of this test file; included in printed messages so that a reader
% knows which test to update if behr_quality_flags' definitions change.
myname = mfilename();
success = check_quality_flags(data);
% Nested function: shares DEBUG_LEVEL, fid, and myname with the parent
% workspace.
function passfail = check_quality_flags(data)
% Verify that:
% * The error summary bit is true everywhere that the pixel
% absolutely should not be used.
% * The quality summary bit is true everywhere the error bit is
% * The quality summary bit is also true everywhere that we
% declare the to-ground column quality will be low
% See each part of the test for exactly what meets these criteria.
if DEBUG_LEVEL > 0
fprintf(fid, 'Checking validity of quality flags...\n');
end
passfail = true;
for a=1:numel(data)
% Bit 1 of BEHRQualityFlags is the quality summary, bit 2 the error
% summary.
quality_bit = bitand(data(a).BEHRQualityFlags, 1) > 0;
error_bit = bitand(data(a).BEHRQualityFlags, 2) > 0;
% First verify that the error bit is correct. As of v3.0B, this
% should be set anywhere that either of the BEHR AMFs is the
% minimum value or a NaN, anywhere that the VcdQualityFlags
% indicate and error in NASA processing, or anywhere that
% XTrackQualityFlags indicates that we're in the row anomaly. If
% this changes in the future, be sure to update this test to match
% what is expected.
% NOTE(review): behr_min_amf_val is assumed to return the minimum
% allowed BEHR AMF value - defined elsewhere in the BEHR codebase.
error_check = data(a).BEHRAMFTrop <= behr_min_amf_val | isnan(data(a).BEHRAMFTrop) | data(a).BEHRAMFTropVisOnly <= behr_min_amf_val | isnan(data(a).BEHRAMFTropVisOnly)...
| mod(data(a).VcdQualityFlags, 2) ~= 0 | data(a).XTrackQualityFlags > 0;
error_mismatch = xor(error_bit(:), error_check(:));
% Next verify that the quality bit is true everywhere the
% error bit is. This test should be the same for any future
% versions of BEHR.
qual_error_mismatch = error_bit(:) & ~quality_bit(:);
% Finally, verify that in addition to the error bit, the quality
% fit flags low quality pixels. As of v3.0B, this should include
% pixels with CloudFraction > 0.2 and low quality MODIS data
quality_check = error_bit | data(a).CloudFraction > 0.2 | data(a).MODISAlbedoQuality >= 2.5 | data(a).MODISAlbedoFillFlag;
quality_mismatch = xor(quality_bit(:), quality_check(:));
% Now summarize for this orbit and print out any necessary
% messages (true == pass)
this_passfail = ~any(error_mismatch) && ~any(qual_error_mismatch) && ~any(quality_mismatch);
if ~this_passfail
% If any orbit fails, fail the overall test, but we can't
% just do passfail = this_passfail, since then the overall
% test will just represent the last orbit.
passfail = false;
end
if DEBUG_LEVEL > 0
fprintf(fid, ' Swath %d: %s\n', a, passfail_str(this_passfail));
end
if DEBUG_LEVEL > 1
% Specific reasons
if any(error_mismatch)
fprintf(fid, ' %1$d pixels'' error bits do not agree with what we expect. If you have changed the definition of the error bit in behr_quality_flags but not %2$s, you may need to update %2$s\n', sum(error_mismatch), myname);
end
if any(qual_error_mismatch)
fprintf(fid, ' %d pixels have the error bit but not the quality bit set.\n', sum(qual_error_mismatch));
end
if any(quality_mismatch)
fprintf(fid, ' %1$d pixels'' quality bits do not agree with what we expect. If you have changed the definition of the quality bit in behr_quality_flags but not %2$s, you may need to update %2$s\n', sum(quality_mismatch), myname);
end
end
end
end
end
function s = passfail_str(b)
% Map a logical test outcome onto its report string ('PASS' or 'FAIL').
strs = {'FAIL', 'PASS'};
s = strs{logical(b) + 1};
end
|
github
|
CohenBerkeleyLab/BEHR-core-master
|
read_modis_albedo.m
|
.m
|
BEHR-core-master/Read_Data/read_modis_albedo.m
| 6,483 |
utf_8
|
4a608dc5a89ba07a857c8ae67dcb2687
|
function [ band3data ] = read_modis_albedo( modis_directory, date_in, lonlim, latlim, varargin )
%READ_MODIS_ALBEDO Reads MODIS MCD43D BRDF kernel coefficients (band 3)
%   BAND3DATA = READ_MODIS_ALBEDO( MODIS_DIRECTORY, DATE_IN, LONLIM, LATLIM )
%   reads the MCD43D07/08/09 BRDF kernel parameter files (band 3 f_iso,
%   f_vol, f_geo) and the MCD43D31 quality file for the day DATE_IN (a
%   date string automatically understood by Matlab or a date number).
%   MODIS_DIRECTORY must be the path to the root MCD43D directory,
%   containing each year in a subfolder. Only the data within the box
%   defined by LONLIM and LATLIM is read. The return BAND3DATA is a
%   structure with fields lons, lats, iso, geo, vol, quality, and files
%   (the four MCD43D files read, stored for traceability).
%
%   Parameters:
%
%       'DEBUG_LEVEL' - increase the verbosity. Default is 2, higher
%       numbers print more information.
%
%   Important references for MODIS BRDF v006 product:
%       V006 User Guide: https://www.umb.edu/spectralmass/terra_aqua_modis/v006

%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%% INPUT VALIDATION %%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%

E = JLLErrors;

p = inputParser;
p.addParameter('DEBUG_LEVEL', 2);
p.parse(varargin{:});
pout = p.Results;
DEBUG_LEVEL = pout.DEBUG_LEVEL;

if ~ischar(modis_directory)
    E.badinput('MODIS_DIRECTORY must be a string')
elseif ~exist(modis_directory, 'dir')
    E.badinput('MODIS_DIRECTORY is not a directory')
end

if isnumeric(date_in)
    if ~isscalar(date_in)
        E.badinput('If given as a number, DATE_IN must be scalar')
    end
elseif ischar(date_in)
    try
        datenum(date_in);
    catch err
        if strcmp(err.identifier, 'MATLAB:datenum:ConvertDateString')
            E.badinput('DATE_IN could not be recognized as a valid format for a date string')
        else
            rethrow(err)
        end
    end
else
    E.badinput('DATE_IN must be a string or number')
end

%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%% MAIN FUNCTION %%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%

this_year = sprintf('%04d', year(date_in));
alb_dir = fullfile(modis_directory, this_year);
julian_day = modis_date_to_day(date_in);

% To speed up processing, restrict the MODIS albedo data to only the area
% we need to worry about. This will significantly speed up the search for
% albedo values within each pixel, plus speed up loading, since fewer IO
% reads are necessary.
[band3_lons, band3_lats, in_lons, in_lats] = modis_cmg_latlon(1/120, lonlim, latlim);
% Make band3_lats a column vector since the first dim of the modis arrays
% is latitude
band3_lats = band3_lats';

% As of version 6 of MCD43, a 16-day average is produced every day, so
% unlike version 5 of MCD43 where we had to look forward and back in time
% from the current date, we should be able to just pick the file for this
% day.
if DEBUG_LEVEL > 0
    fprintf('Reading MODIS BRDF data\n');
end

% Store the four MCD43D files used for traceability
modis_files = cell(1,4);

% Fixed: these three debug messages called the nonexistent function
% "fprinf", which errored whenever DEBUG_LEVEL > 2.
if DEBUG_LEVEL > 2; fprintf(' Reading band 3 f_iso\n'); end
mcd_filename = sprintf('MCD43D07.A%04d%03d*.hdf', year(date_in), julian_day);
[band3_iso, modis_files{1}] = read_band_parameter(alb_dir, mcd_filename, {in_lats, in_lons});

if DEBUG_LEVEL > 2; fprintf(' Reading band 3 f_vol\n'); end
mcd_filename = sprintf('MCD43D08.A%04d%03d*.hdf', year(date_in), julian_day);
[band3_vol, modis_files{2}] = read_band_parameter(alb_dir, mcd_filename, {in_lats, in_lons});

if DEBUG_LEVEL > 2; fprintf(' Reading band 3 f_geo\n'); end
mcd_filename = sprintf('MCD43D09.A%04d%03d*.hdf', year(date_in), julian_day);
[band3_geo, modis_files{3}] = read_band_parameter(alb_dir, mcd_filename, {in_lats, in_lons});

% Unlike the parameter files, the quality file has all seven bands in one
% file, so we need to handle it differently
mcd_filename = sprintf('MCD43D31.A%04d%03d*.hdf', year(date_in), julian_day);
alb_filename = fullfile(alb_dir, mcd_filename);
alb_files = dir(alb_filename);
if numel(alb_files) < 1
    E.filenotfound('MODIS BRDF file matching pattern %s.', alb_filename);
elseif numel(alb_files) > 1
    E.toomanyfiles('Multiple MODIS BRDF files found matching pattern %s.', alb_filename);
end

modis_files{4} = fullfile(alb_dir, alb_files(1).name);
mcd43_info = hdfinfo(modis_files{4});
brdf_quality = hdfreadmodis(modis_files{4}, hdfdsetname(mcd43_info,4,1,'BRDF_Albedo_Band_Quality_Band3'), 'log_index', {in_lats, in_lons});

% Verify that fills are the same in the 3 parameters and the quality flags.
% This assumption is used in avg_modis_alb_to_pixels in order to remove
% fill value BRDF coefficients and flag OMI pixels where >50% of the MODIS
% data is fill values.
qual_nans = isnan(brdf_quality(:));
if any(xor(qual_nans, isnan(band3_iso(:)))) || any(xor(qual_nans, isnan(band3_geo(:)))) || any(xor(qual_nans, isnan(band3_vol(:))))
    E.callError('inconsistent_fills', 'Fill values are not the same in the quality flags and one or more of the BRDF parameters');
end

band3data.lons = band3_lons;
band3data.lats = band3_lats;
band3data.iso = band3_iso;
band3data.geo = band3_geo;
band3data.vol = band3_vol;
band3data.quality = brdf_quality;
band3data.files = modis_files;

end
function [band3_param, mcd_filename] = read_band_parameter(file_dir, file_pattern, logical_indices)
% Read the single BRDF kernel parameter stored in one MCD43D file found by
% matching FILE_PATTERN in FILE_DIR, subset with LOGICAL_INDICES (a cell
% array of {lat, lon} logical index vectors). Returns the parameter array
% and the full path of the file read.
E = JLLErrors;

full_pattern = fullfile(file_dir, file_pattern);
matched_files = dir(full_pattern);
if numel(matched_files) < 1
    E.filenotfound('MODIS BRDF file matching pattern %s.', full_pattern);
elseif numel(matched_files) > 1
    E.toomanyfiles('Multiple MODIS BRDF files found matching pattern %s.', full_pattern);
end

mcd43_info = hdfinfo(fullfile(file_dir, matched_files(1).name));
% Each MCD43D file has only a single SDS representing a single BRDF
% parameter in a single band; refuse to guess if that is not the case.
if numel(mcd43_info.Vgroup(1).Vgroup(1).SDS) ~= 1
    E.callError('mcd43d format', 'READ_BAND_PARAMETER assumes there is only a single SDS in the file; that is not true in %s', mcd43_info.Filename);
end
mcd_filename = mcd43_info.Filename;
band3_param = hdfreadmodis(mcd_filename, hdfdsetname(mcd43_info,1,1,1), 'log_index', logical_indices);
end
|
github
|
CohenBerkeleyLab/BEHR-core-master
|
read_omi_sp.m
|
.m
|
BEHR-core-master/Read_Data/read_omi_sp.m
| 12,302 |
utf_8
|
33297ac418993ee7d1f363dadc12aa3c
|
function [ data, there_are_points ] = read_omi_sp( sp_file, sp_group_path, sp_vars, data, lonlim, latlim, varargin )
%READ_OMI_SP Reads in an OMI Standard Product data file
%   DATA = READ_OMI_SP( SP_FILE, SP_GROUP_PATH, SP_VARS, DATA, LONLIM, LATLIM )
%   Reads in a NASA OMI .he5 (HDF version 5) file at the path SP_FILE,
%   looking under the HDF5 group SP_GROUP_PATH. It will read in the
%   variables specified in the cell array SP_VARS, restricted to pixels
%   inside the box defined by LONLIM and LATLIM, and store them in the
%   structure DATA. DATA must be scalar and must have each variable name
%   given in SP_VARS as a field, plus the field Row. The second output,
%   THERE_ARE_POINTS, is false if no pixels fell inside the box (in which
%   case DATA is returned unmodified).
%
%   Parameters:
%       dim_order - A string indicating how the dimensions should be
%       ordered, default is 'olx'. 'x' indicates across track, 'l' along
%       track, and 'o' other dimensions.
%
%       match_data - will match new data to the existing DATA structure
%       based on the longitude and latitude points (specifically, uses
%       find_submatrix2 to identify the subset of read in points that match
%       to within a tolerance of the existing Longitude and Latitude fields
%       in DATA

%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%% INPUT CHECKING %%%%
%%%%%%%%%%%%%%%%%%%%%%%%%

E = JLLErrors;

p = inputParser;
p.addParameter('dim_order', 'olx');
p.addParameter('match_data', false);
p.parse(varargin{:});
pout = p.Results;
dim_order = pout.dim_order;
match_data = pout.match_data;

if ~ischar(sp_file)
    E.badinput('SP_FILE must be a string')
elseif ~exist(sp_file, 'file')
    E.filenotfound('Could not find %s', sp_file)
end

if ~iscellstr(sp_vars)
    E.badinput('SP_VARS must be a cell array of strings')
end

if ~ischar(dim_order) || any(~ismember('olx',dim_order)) || length(dim_order) ~= 3
    E.badinput('The parameter DIM_ORDER must be a string consisting of the characters o, l, and x only')
end

if ~isscalar(match_data) || (~isnumeric(match_data) && ~islogical(match_data))
    E.badinput('The parameter MATCH_DATA must be a scalar boolean or number');
elseif match_data && ((isscalar(data.Longitude) && data.Longitude == 0) || (isscalar(data.Latitude) && data.Latitude ==0))
    warning('MATCH_DATA requested but latitude/longitude fields in DATA do not appear to be fully filled (are scalars and == 0)');
end

% By requiring that all fields are already present in the data structure,
% we ensure that the order will remain the same as we add data to it. If we
% added the fields as the variables came up, that might not be true.
if ~isstruct(data) || ~isscalar(data)
    E.badinput('DATA must be a scalar structure')
end

xx = ~isfield(data, sp_vars);
if any(xx)
    E.badinput('All variable names in SP_VARS must exist as fields in DATA (missing: %s)', strjoin(sp_vars(xx)', ', '));
end

%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%% MAIN FUNCTION %%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%

hgrp_info = h5info(sp_file, sp_group_path);

% Read in the full latitude data set; this will be used to determine
% which pixels to read in later.
lat = double(h5readomi(sp_file, find_dset(hgrp_info, 'Latitude')));
lat = lat';
lon = double(h5readomi(sp_file, find_dset(hgrp_info, 'Longitude')));
lon = lon';

% Restrict data to that which falls within the bounds specified by the lon
% and lat limits
if ~match_data
    xx = lon > lonlim(1) & lon < lonlim(2);
    yy = lat > latlim(1) & lat < latlim(2);
else
    % NaNs mess up find_submatrix2. So what we need to do is subset out the
    % non-NaN components and find the submatrix just in there, then
    % translate that back into the full matrix. To do this, we're going to
    % assume that if any coordinates are NaNs, that an entire across-track
    % row is NaNs, which happens when doing zoom-mode days. If this
    % assumption is violated, or if there are differing numbers of NaNs,
    % abort.
    data_nans = all(isnan(data.Longitude),1);
    if any(xor(isnan(data.Longitude(:)), isnan(data.Latitude(:))))
        E.callError('nan_mismatch', 'Using match_data = true: data.Longitude and data.Latitude have different NaNs')
    elseif ~isequal(any(isnan(data.Longitude),1), data_nans)
        E.notimplemented('Using match_data = true: data.Longitude has a row that is only partially NaNs')
    end

    new_nans = all(isnan(lon),1);
    if any(xor(isnan(lon(:)), isnan(lat(:))))
        E.callError('nan_mismatch', 'Using match_data = true: lon and lat have different NaNs')
    end
    if sum(data_nans(:)) ~= sum(new_nans(:))
        E.notimplemented('Using match_data = true: the number of NaNs in the longitude in the DATA structure and the file being read are different')
    end

    [tmp_xx, tmp_yy] = find_submatrix2(data.Longitude(:,~data_nans), data.Latitude(:,~data_nans), lon(:,~new_nans), lat(:,~new_nans));
    if isempty(tmp_xx) || isempty(tmp_yy)
        E.callError('data_match_failure', 'Failed to find the existing data.Longitude/data.Latitude in the new files'' longitude/latitude.')
    end

    xx_sub = false(size(lon(:,~new_nans)));
    yy_sub = false(size(lon(:,~new_nans)));
    xx_sub(tmp_xx,:) = true;
    yy_sub(:,tmp_yy) = true;

    xx = false(size(lon));
    yy = false(size(lon));
    xx(:,~new_nans) = xx_sub;
    yy(:,~new_nans) = yy_sub;
end

%cut_alongtrack = any(xx & yy, 2);
cut_alongtrack = any(xx,2) & any(yy,2);
% Keep all elements in the across track direction for now. Generalized
% from the hard-coded true(1,60): use the actual across-track size so that
% granules with a nonstandard number of across-track pixels do not cause
% an indexing error (60 is the standard OMI across-track dimension, so
% behavior is unchanged for normal granules).
cut_acrosstrack = true(1, size(lat,2));
lat = lat(cut_alongtrack, cut_acrosstrack);
lon = lon(cut_alongtrack, cut_acrosstrack); %#ok<NASGU> lon not used, will read in Latitude and Longitude like normal variables if requested. This line here just to be clear that lon, if used, should be cut down.

there_are_points = numel(lat) > 0;
if ~there_are_points
    return
end

% Row will keep track of the pixel's location in the across-track
% direction. These indices are 0 based by NASA convention.
if isfield(data,'Row')
    Row = find(cut_acrosstrack)-1;
    data.Row = repmat(Row, size(lat,1), 1);
end

for a=1:numel(sp_vars)
    dset_name = find_dset(hgrp_info, sp_vars{a});
    dset_vals = read_hdf5_dataset(sp_file, dset_name, cut_acrosstrack, cut_alongtrack, dim_order);
    %dset_vals = read_hdf5_simple(sp_file, dset_name, cut_acrosstrack, cut_alongtrack, dim_order);
    data.(sp_vars{a}) = dset_vals;
end

end
%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%% SUBFUNCTIONS %%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%
function dsetname = find_dset(search_group, varname)
% FIND_DSET Recursively search an h5info group structure for a dataset.
%   DSETNAME = FIND_DSET( SEARCH_GROUP, VARNAME ) searches SEARCH_GROUP
%   and all subordinate groups (depth-first) for a dataset named VARNAME
%   and returns its full path, or '' if it cannot be found anywhere.
%
%   Fixes relative to the original implementation:
%     * Subgroups are now searched even when the current group also
%       contains datasets (previously they were only visited when the
%       current group had no datasets of its own, so a dataset living in
%       a sibling subgroup could be missed).
%     * A leaf group with no datasets and no subgroups no longer tries to
%       raise an error that referenced an undefined variable ("filename");
%       callers should test for an empty return instead.
dsetname = '';
if ~isempty(search_group.Datasets)
    dsets = {search_group.Datasets.Name};
    if any(strcmp(dsets, varname))
        dsetname = strcat(search_group.Name, '/', varname);
        return
    end
end
for a=1:numel(search_group.Groups)
    dsetname = find_dset(search_group.Groups(a), varname);
    if ~isempty(dsetname)
        return
    end
end
end
function pvec = order_to_permvec(order, xtrack_ind, ltrack_ind, n_array_dims)
% Translate a dimension-order string into a permutation vector suitable
% for permute(). In ORDER, 'x' marks the across-track dimension, 'l' the
% along-track dimension, and 'o' any other dimension. XTRACK_IND and
% LTRACK_IND give the current positions of those dimensions in the array.
E = JLLErrors;

% If the array is 2D, its dimensions must be along and across track, so
% drop the 'other' placeholder from the requested order.
if n_array_dims == 2
    order = strrep(order, 'o', '');
elseif n_array_dims > 3
    E.notimplemented('n_array_dims > 3');
end

pvec = 1:n_array_dims;

% If exactly one of ltrack_ind and xtrack_ind is empty, we're probably
% dealing with a 1D dataset: assume the remaining dimension is the missing
% one. If both are empty, that's weird - give up.
if xor(isempty(ltrack_ind), isempty(xtrack_ind))
    dims = 1:n_array_dims;
    if isempty(ltrack_ind)
        ltrack_ind = dims(dims ~= xtrack_ind);
    else
        xtrack_ind = dims(dims ~= ltrack_ind);
    end
elseif isempty(ltrack_ind) && isempty(xtrack_ind)
    E.notimplemented('ltrack_ind and xtrack_ind are empty')
end

pvec(strfind(order, 'l')) = ltrack_ind;
pvec(strfind(order, 'x')) = xtrack_ind;
o_pos = strfind(order, 'o');
if ~isempty(o_pos)
    dims = 1:n_array_dims;
    pvec(o_pos) = dims(dims ~= ltrack_ind & dims ~= xtrack_ind);
end
end
% Benchmarking results for reading a single file: using the low level HDF5
% functions offers no appreciable performance increase over the high level
% functions in terms of run time. It does reduce memory usage by 6x in one
% test, but memory usage within both subfunctions was < 1 MB. Will use low
% level functions in case the memory reduction is valuable later (for
% parallelization perhaps)
function vals = read_hdf5_dataset(filename, dsetname, xtrack_cut, ltrack_cut, dim_order)
% Read one variable using the low-level HDF5 API, apply the offset, scale
% factor, and fill value, convert to double, and permute the dimensions
% into DIM_ORDER. Only the hyperslab selected by the logical vectors
% XTRACK_CUT (across track) and LTRACK_CUT (along track) is read from
% disk, which keeps memory usage low compared to reading the whole array.
E = JLLErrors;
if ~islogical(xtrack_cut) || ~isvector(xtrack_cut)
    E.badinput('XTRACK_CUT must be a logical vector')
end
if ~islogical(ltrack_cut) || ~isvector(ltrack_cut)
    E.badinput('LTRACK_CUT must be a logical vector')
end

fileID = H5F.open(filename, 'H5F_ACC_RDONLY', 'H5P_DEFAULT');
datasetID = H5D.open(fileID, dsetname);
dataspaceID = H5D.get_space(datasetID);

% Figure out how many dimensions the dataset has and use that to set up the
% offset and slab size
[~, slabsize] = H5S.get_simple_extent_dims(dataspaceID);
% Figure out which dimension is along track and which across track. Note:
% these indices represent C-style ordering, which is the reverse of how the
% matrices are ordered in Matlab
xtrack_ind_c = slabsize == length(xtrack_cut);
% Fixed: these guards previously read "sum(xtrack_ind_c > 1)", which
% compares the logical vector to 1 (always false) and so never fired. The
% closing parenthesis belongs after sum().
if sum(xtrack_ind_c) > 1
    E.callError('swath_index','Multiple dimensions had the length expected for the across track dimension for %s in %s', dsetname, filename);
end
ltrack_ind_c = slabsize == length(ltrack_cut);
if sum(ltrack_ind_c) > 1
    E.callError('swath_index','Multiple dimensions had the length expected for the along track dimension for %s in %s', dsetname, filename);
end

stride = []; % an empty array tells the H5 functions to assume 1's in all dimensions
blocksize = [];
slabsize(xtrack_ind_c) = sum(xtrack_cut);
slabsize(ltrack_ind_c) = sum(ltrack_cut);
offset = zeros(size(slabsize));
offset(xtrack_ind_c) = find(xtrack_cut,1,'first')-1; % these indices are zero based, but Matlab's are 1 based
offset(ltrack_ind_c) = find(ltrack_cut,1,'first')-1;
memspaceID = H5S.create_simple(length(slabsize), slabsize, slabsize);
H5S.select_hyperslab(dataspaceID, 'H5S_SELECT_SET', offset, stride, slabsize, blocksize);
vals = H5D.read(datasetID, 'H5ML_DEFAULT', memspaceID, dataspaceID, 'H5P_DEFAULT');

% Release all HDF5 identifiers, not just the file: the original leaked the
% memory space, dataspace, and dataset handles.
H5S.close(memspaceID);
H5S.close(dataspaceID);
H5D.close(datasetID);
H5F.close(fileID);

vals=double(vals);
fillval = double(h5readatt(filename, dsetname, '_FillValue'));
scalefac = double(h5readatt(filename, dsetname, 'ScaleFactor'));
offset = double(h5readatt(filename, dsetname, 'Offset'));
% Treat values within 0.1% of the fill value as fills to absorb any
% floating point representation error.
fills = abs((vals - fillval)/fillval) < 1e-3;
vals(fills) = nan;
vals = (vals * scalefac) + offset;

% Matlab arrays use Fortran-style ordering, so the indices are flipped
% compared to what we had before
xtrack_ind_f = length(slabsize) + 1 - find(xtrack_ind_c);
ltrack_ind_f = length(slabsize) + 1 - find(ltrack_ind_c);
permvec = order_to_permvec(dim_order, xtrack_ind_f, ltrack_ind_f, ndims(vals));
vals = permute(vals, permvec);
end
function vals = read_hdf5_simple(filename, dsetname, xtrack_cut, ltrack_cut, dim_order)
% READ_HDF5_SIMPLE Read an entire HDF5 dataset, then subset it in memory.
%   VALS = READ_HDF5_SIMPLE( FILENAME, DSETNAME, XTRACK_CUT, LTRACK_CUT,
%   DIM_ORDER ) loads the dataset DSETNAME from FILENAME with h5readomi and
%   then trims it to the across-track (XTRACK_CUT) and along-track
%   (LTRACK_CUT) logical vectors. For 2D-or-higher variables the first two
%   dimensions are assumed to be across and along track respectively;
%   DIM_ORDER is passed to ORDER_TO_PERMVEC to set the final dimension
%   permutation. 1D variables are matched to whichever cut vector has the
%   same number of elements.
E = JLLErrors;
if ~isvector(xtrack_cut) || ~islogical(xtrack_cut)
    E.badinput('XTRACK_CUT must be a logical vector')
end
if ~isvector(ltrack_cut) || ~islogical(ltrack_cut)
    E.badinput('LTRACK_CUT must be a logical vector')
end

vals = h5readomi(filename, dsetname);
full_size = size(vals);
if ~isvector(vals)
    % Trailing dimensions (if any) get collapsed by the ":" index, so
    % restore them with a reshape using the updated first two sizes.
    vals = vals(xtrack_cut, ltrack_cut, :);
    full_size(1) = sum(xtrack_cut);
    full_size(2) = sum(ltrack_cut);
    vals = reshape(vals, full_size);
    vals = permute(vals, order_to_permvec(dim_order, 1, 2, ndims(vals)));
elseif numel(vals) == numel(xtrack_cut)
    vals = vals(xtrack_cut);
elseif numel(vals) == numel(ltrack_cut)
    vals = vals(ltrack_cut);
else
    E.notimplemented('1D variable with size not equal to across track or along track dimension')
end
end
|
github
|
CohenBerkeleyLab/BEHR-core-master
|
avg_modis_alb_to_pixels.m
|
.m
|
BEHR-core-master/Read_Data/avg_modis_alb_to_pixels.m
| 8,783 |
utf_8
|
f0ca136a2118013fae266b94c49a4522
|
function [ data ] = avg_modis_alb_to_pixels( band3data, coart_lut, ocean_mask, data, varargin )
%AVG_MODIS_ALB_TO_PIXELS Calculate surface reflectivity from MODIS BRDFs
% DATA = AVG_MODIS_ALB_TO_PIXELS( BAND3DATA, COART_LUT, OCEAN_MASK, DATA
% ) Handles calculating surface reflectivity from MODIS BRDF kernels and
% averaging the result to OMI pixels. BAND3DATA must be the structure
% returned from READ_MODIS_ALBEDO for the correct day. COART_LUT is the
% look up table of ocean reflectivity returned as the second output of
% COART_SEA_REFLECTANCE. OCEAN_MASK must be a structure containing the
% fields "mask", "lon", and "lat" that define a boolean mask for ocean
% (true for ocean). DATA is the structure containing native OMI pixel
% data. This function returns that structure with fields MODISAlbedo,
% MODISAlbedoQuality, MODISAlbedoFillFlag, MODISAlbedoFile, and
% AlbedoOceanFlag added.
%
% Additional parameters:
%
% 'DEBUG_LEVEL' - scalar number that controls level of output to
% terminal. 0 (default) is none, high values give more output.
%
% 'LoncornField' - string that indicates which field of Data to use for
% longitude corners of pixels. Default is 'FoV75CornerLongitude'.
%
% 'LatcornField' - string that indicates which field of Data to use for
% latitude corners of pixels. Default is 'FoV75CornerLatitude'.
%
% 'QualityLimit' - controls which MODIS BRDF grid cells will be used by
% filtering for quality. In MCD43D31, 0 = best quality and 3 = low
% quality. Only data with quality <= QualityLimit will be used. Default
% is Inf, i.e. all data is used.
%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%% INPUT VALIDATION %%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%
E = JLLErrors;
p = inputParser;
p.addParameter('DEBUG_LEVEL', 0);
p.addParameter('LoncornField', 'FoV75CornerLongitude');
p.addParameter('LatcornField', 'FoV75CornerLatitude');
p.addParameter('QualityLimit', Inf);
p.parse(varargin{:});
pout = p.Results;
DEBUG_LEVEL = pout.DEBUG_LEVEL;
loncorn_field = pout.LoncornField;
latcorn_field = pout.LatcornField;
max_qual_flag = pout.QualityLimit;
% The output arrays match the size of the pixel arrays in DATA and are
% filled one pixel at a time below using linear indexing (k).
s=size(data.Latitude);
c=numel(data.Latitude);
MODISAlbedo = nan(s);
MODISAlbedoQuality = nan(s);
MODISAlbedoFillFlag = false(s);
ocean_flag = false(s);
%Now actually average the MODIS albedo for each OMI pixel
if DEBUG_LEVEL > 0; disp(' Averaging MODIS albedo to OMI pixels'); end
for k=1:c;
    if DEBUG_LEVEL > 3; t_total=tic; end
    % Close the pixel polygon by repeating the first corner at the end.
    xall=[data.(loncorn_field)(:,k); data.(loncorn_field)(1,k)];
    yall=[data.(latcorn_field)(:,k); data.(latcorn_field)(1,k)];
    % If there is an invalid corner coordinate, skip because we cannot
    % be sure the correct polygon will be used.
    if any(isnan(xall)) || any(isnan(yall))
        continue
    end
    % Next, check if we are over ocean using the ocean mask. If the mask
    % indicates that more than 50% of the pixel is ocean, then we will
    % insert a value from the look up table and move on.
    if DEBUG_LEVEL > 4; t_cut = tic; end
    xx_mask_lon = ocean_mask.lon >= min(xall) & ocean_mask.lon <= max(xall);
    xx_mask_lat = ocean_mask.lat >= min(yall) & ocean_mask.lat <= max(yall);
    if DEBUG_LEVEL > 4; fprintf(' Time to cut down ocean mask = %f\n', toc(t_cut)); end
    if DEBUG_LEVEL > 4; t_mask = tic; end
    [om_longrid, om_latgrid] = latlon_vec2grid(ocean_mask.lon, ocean_mask.lat, xx_mask_lon, xx_mask_lat);
    xx_ocean_mask = inpolygon(om_longrid, om_latgrid, xall, yall);
    if DEBUG_LEVEL > 4; fprintf(' Time to apply inpolygon to mask = %f\n', toc(t_mask)); end
    if DEBUG_LEVEL > 4; t_avg_mask = tic; end
    % ocean_mask.mask is indexed (lat, lon), matching the meshgrid
    % orientation of om_longrid/om_latgrid (rows = lat, cols = lon).
    sub_mask = ocean_mask.mask(xx_mask_lat, xx_mask_lon);
    avg_mask = nanmean(sub_mask(xx_ocean_mask));
    if DEBUG_LEVEL > 4; fprintf(' Time to average ocean mask = %f\n', toc(t_avg_mask)); end
    if avg_mask > 0.5
        % Mostly ocean: use the COART look-up-table reflectance at this
        % pixel's solar zenith angle instead of the MODIS BRDF.
        if DEBUG_LEVEL > 4; t_ocean = tic; end
        MODISAlbedo(k) = coart_sea_reflectance(data.SolarZenithAngle(k), coart_lut);
        ocean_flag(k) = true;
        if DEBUG_LEVEL > 4; fprintf(' Time to look up ocean reflectance = %f\n', toc(t_ocean)); end
        if DEBUG_LEVEL > 3; telap = toc(t_total); fprintf(' Time for MODIS alb --> pixel %u/%u = %g sec \n',k,c,telap); end
        continue
    end
    if DEBUG_LEVEL > 4; t_polygon = tic; end
    % If we're here, we're over a land pixel.
    % should be able to speed this up by first restricting based on a
    % single lat and lon vector
    xx = band3data.lons >= min(xall) & band3data.lons <= max(xall);
    yy = band3data.lats >= min(yall) & band3data.lats <= max(yall);
    % Checking here became necessary after we switched to using lon/lat
    % vectors instead of arrays because if only one of xx or yy is all
    % false, then brdf_quality_k will be empty but have non-zero "length"
    % in one dimension while xx_inpoly will just be a regular empty array.
    % This causes the calculation of xx_alb to fail, since xx_inpoly and
    % brdf_quality_k have different dimensions (but both are empty).
    if all(~xx) || all(~yy)
        MODISAlbedo(k) = nan;
        if DEBUG_LEVEL > 3; telap = toc(t_total); fprintf(' Time for MODIS alb --> pixel %u/%u = %g sec \n',k,c,telap); end
        continue
    end
    % Isotropic, geometric, and volumetric BRDF kernel coefficients for
    % the MODIS cells overlapping this pixel's bounding box.
    band3_iso_k = band3data.iso(yy,xx);
    band3_geo_k = band3data.geo(yy,xx);
    band3_vol_k = band3data.vol(yy,xx);
    brdf_quality_k = band3data.quality(yy,xx);
    [lon_grid, lat_grid] = latlon_vec2grid(band3data.lons, band3data.lats, xx, yy);
    xx_inpoly = inpolygon(lon_grid,lat_grid,xall,yall);
    % Also remove data that has too low a quality. The quality values are
    % described in the "Description" attribute for the "BRDF_Quality" SDS.
    % Lower values for the quality flag are better. Inequality comparisons
    % with NaNs are always false, but this explicitly rejects BRDF values
    % for which the quality value is a NaN (i.e. fill value).
    xx_alb = xx_inpoly & brdf_quality_k <= max_qual_flag & ~isnan(brdf_quality_k);
    if sum(xx_alb) == 0
        MODISAlbedo(k) = nan;
        if DEBUG_LEVEL > 3; telap = toc(t_total); fprintf(' Time for MODIS alb --> pixel %u/%u = %g sec \n',k,c,telap); end
        continue
    end
    if DEBUG_LEVEL > 4; fprintf(' Time to identify MODIS albedo in OMI pixel = %f\n', toc(t_polygon)); end
    % The 180-RAA should flip the RAA back to the standard definition (i.e.
    % the supplemental angle of what's in the data product). See the help
    % text for modis_brdf_kernels for why that matters.
    if DEBUG_LEVEL > 4; t_kernels = tic; end
    band3_vals = modis_brdf_alb(band3_iso_k(xx_alb), band3_vol_k(xx_alb), band3_geo_k(xx_alb), data.SolarZenithAngle(k), data.ViewingZenithAngle(k), 180-data.RelativeAzimuthAngle(k));
    if DEBUG_LEVEL > 4; fprintf(' Time to calculate BRDF albedo = %f\n', toc(t_kernels)); end
    % According to the MOD43 TBD
    % (https://modis.gsfc.nasa.gov/data/atbd/atbd_mod09.pdf, p. 32) the
    % Ross-Li kernel occasionally produces slightly negative albedos. In
    % practice, I have seen negative values around 1e-4 for individual
    % elements of band3_vals. Since this is apparently expected, I will
    % keep the negative values in for the average (which both avoids any
    % potential problem with biasing the albedos high and shouldn't change
    % the albedo much on average) but if the average itself is negative, we
    % reject it and insert NaN as a fill value, which should prevent
    % retrieving that pixel.
    band3_avg = nanmean(band3_vals(:));
    if band3_avg < 0
        warning('Negative average albedo detected. Setting albedo to NaN.');
        % Although we initialized these as NaNs, forcing this to NaN here
        % ensures that no changes to the initialization mess this up
        MODISAlbedo(k) = NaN;
        MODISAlbedoQuality(k) = NaN;
    else
        MODISAlbedo(k) = band3_avg;
        MODISAlbedoQuality(k) = nanmean(brdf_quality_k(xx_alb));
    end
    % If more than 50% of the quality values are fills, set the fill
    % warning flag. This will be used in behr_quality_flags to warn of low
    % quality MODIS data.
    if sum(isnan(brdf_quality_k(xx_inpoly)))/sum(xx_inpoly(:)) > 0.5
        MODISAlbedoFillFlag(k) = true;
    end
    if DEBUG_LEVEL > 3; telap = toc(t_total); fprintf(' Time for MODIS alb --> pixel %u/%u = %g sec \n',k,c,telap); end
end
% Attach the per-pixel results (and the list of source files) to Data.
data.MODISAlbedo = MODISAlbedo;
data.MODISAlbedoQuality = MODISAlbedoQuality;
data.MODISAlbedoFillFlag = MODISAlbedoFillFlag;
data.MODISAlbedoFile = band3data.files;
data.AlbedoOceanFlag = ocean_flag;
end
function [longrid, latgrid] = latlon_vec2grid(lonvec, latvec, xx_lon, xx_lat)
% LATLON_VEC2GRID Expand subset lon/lat vectors into coordinate grids.
%   Selects the elements of LONVEC and LATVEC flagged by the logical
%   indices XX_LON and XX_LAT, then mesh-grids them so that rows vary with
%   latitude and columns with longitude.
lon_sub = lonvec(xx_lon);
lat_sub = latvec(xx_lat);
[longrid, latgrid] = meshgrid(lon_sub, lat_sub);
end
|
github
|
CohenBerkeleyLab/BEHR-core-master
|
read_main.m
|
.m
|
BEHR-core-master/Read_Data/read_main.m
| 32,685 |
utf_8
|
c387bd4112bc8b4af12ce3cd31788543
|
function read_main(varargin)
% READ_MAIN Reads in OMI, MODIS, and GLOBE data to .mat files
%
% READ_MAIN is the first step in the BEHR workflow. It reads
% in the satellite data from the various sources, include OMI NO2, MODIS
% clouds, MODIS albedo, and GLOBE (a database, not a satellite) terrain
% elevation. These data are cut down to the US domain and, in the case of
% the MODIS and GLOBE data, averaged to the OMI pixels. The resulting
% Data structures are saved as an OMI_SP .mat file.
%
% This function is setup such that running it without arguments will
% produce any new OMI_SP files required. This requires that the necessary
% data be available either locally or via a mounted network drive. This
% behavior can be changed with the following parameters:
%
% 'start' - set the first day to process using either a date string
% that Matlab understands or a date number. Default is '2005-01-01'.
%
% 'end' - set the last day to process using either a date string that
% Matlab understands or a date number. Default is today.
%
% 'sp_mat_dir' - the directory that the OMI_SP .mat files will be
% saved to. Default is the path provided by the behr_paths class.
%
% 'omi_he5_dir' - the directory that contains the OMI NO2 HDF5 files,
% sorted into subdirectories by year and month (i.e. this directory
% itself should contain subdirectories 2005, 2006, etc., each of
% which has subdirectories 01, 02, 03, etc. that contain the HDF5
% files). Default is the path provided by the behr_paths class.
%
% 'modis_myd06_dir' - the directory that contains the MODIS MYD06
% cloud HDF4 files, sorted by year. Default is the path provided by
% the behr_paths class.
%
% 'modis_mcd43_dir' - the directory that contains the MODIS MCD43C1
% BRDF parameters files, sorted into subdirectories by year. Default
% is the path provided by the behr_paths class.
%
% 'globe_dir' - the directory that contains the GLOBE (Global Land
% One-km Base Elevation) terrain elevation data. This will contain
% files a10g through p10g and a10g.hdr through p10g.hdr. Default is
% the path provided by the behr_paths class.
%
% 'region' - which region BEHR is running in. This controls both the
% longitude and latitude limits and which orbits are skipped as
% "nighttime" orbits. This must be a string. Default (and only option
% at present) is 'US'.
%
% 'allow_no_myd' - boolean (default false) which allows the run to
% process days for which no MODIS cloud fraction data is available.
%
% 'overwrite' - scalar logical which controls whether existing files
% will be overwritten. If false, a day will be skipped if the
% corresponding OMI_SP .mat file exists in the directory given as
% 'omi_he5_dir'. If true, no days will be skipped and the data in
% omi_he5_dir will be overwritten.
%
% 'DEBUG_LEVEL' - verbosity. Default is 2; i.e. most progress
% message, but no timing messages will be printed. 0 = no messages;
% greater means more messages.
%****************************%
% CONSOLE OUTPUT LEVEL - 0 = none, 1 = minimal, 2 = all messages, 3 = times
%****************************%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%% INITIALIZATION & INPUT VALIDATION %%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
E = JLLErrors;
p = inputParser;
p.addParameter('start', '');
p.addParameter('end', '');
p.addParameter('sp_mat_dir', '');
p.addParameter('omi_he5_dir', '');
p.addParameter('omi_pixcor_dir', '');
p.addParameter('modis_myd06_dir', '');
p.addParameter('modis_mcd43_dir', '');
p.addParameter('globe_dir', '');
p.addParameter('region', 'US');
p.addParameter('allow_no_myd', false);
p.addParameter('overwrite', false)
p.addParameter('DEBUG_LEVEL', 2);
p.parse(varargin{:});
pout = p.Results;
date_start = pout.start;
date_end = pout.end;
sp_mat_dir = pout.sp_mat_dir;
omi_he5_dir = pout.omi_he5_dir;
omi_pixcor_dir = pout.omi_pixcor_dir;
modis_myd06_dir = pout.modis_myd06_dir;
modis_mcd43_dir = pout.modis_mcd43_dir;
globe_dir = pout.globe_dir;
allow_no_myd = pout.allow_no_myd;
region = pout.region;
overwrite = pout.overwrite;
DEBUG_LEVEL = pout.DEBUG_LEVEL;
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%% PARALLELIZATION OPTIONS %%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Specifies whether the script is executing on a cluster; this must be set
% (globally) in the calling script. This allows for the execution of code
% needed on the cluster (i.e. adding necessary folders to the Matlab path,
% opening a parallel pool) without running them on the local machine. If
% onCluster hasn't been defined yet, set it to false.
global onCluster;
if isempty(onCluster);
    if DEBUG_LEVEL > 0; fprintf('Assuming onCluster is false\n'); end
    onCluster = false;
end
% Cleanup object will safely exit if there's a problem
if onCluster
    cleanupobj = onCleanup(@() mycleanup());
end
% Defined the number of threads to run, this will be used to open a
% parallel pool. numThreads should be set in the calling run script,
% otherwise it will default to 1.
global numThreads;
if isempty(numThreads)
    numThreads = 1;
end
%%%%%%%%%%%%%%%%%%%%%%
%%%%% VALIDATION %%%%%
%%%%%%%%%%%%%%%%%%%%%%
date_start = validate_date(date_start);
date_end = validate_date(date_end);
if ~ischar(sp_mat_dir)
    E.badinput('Paramter "sp_mat_dir" must be a string')
elseif ~ischar(omi_he5_dir)
    E.badinput('Paramter "omi_he5_dir" must be a string')
elseif ~ischar(omi_pixcor_dir)
    E.badinput('Paramter "omi_pixcor_dir" must be a string')
elseif ~ischar(modis_myd06_dir)
    E.badinput('Paramter "modis_myd06_dir" must be a string')
elseif ~ischar(modis_mcd43_dir)
    E.badinput('Paramter "modis_mcd43_dir" must be a string')
elseif ~ischar(globe_dir)
    E.badinput('Paramter "globe_dir" must be a string')
elseif ~ischar(region)
    E.badinput('Paramter "region" must be a string')
elseif (~islogical(overwrite) && ~isnumeric(overwrite)) || ~isscalar(overwrite)
    E.badinput('Parameter "overwrite" must be a scalar logical or number')
end
% Specify the longitude and latitude ranges of interest for this retrieval.
% Additionally, set the earliest and latest start time (in UTC) for the
% swaths that will be allowed. This will help
% Swaths starting outside these UTC windows are treated as nighttime
% (descending node) orbits for the region and skipped in the orbit loop.
switch lower(region)
    case 'us'
        lonmin = -125;
        lonmax = -65;
        latmin = 25;
        latmax = 50;
        earliest_omi_starttime = 1500;
        latest_omi_starttime = Inf;
    case 'hk'
        lonmin = 108;
        lonmax = 118;
        latmin = 19;
        latmax = 26;
        earliest_omi_starttime = -Inf;
        latest_omi_starttime = 1300;
    otherwise
        E.badinput('Region "%s" not recognized', region)
end
if lonmin > lonmax %Just in case I enter something backwards...
    E.callError('bounds', 'Lonmin is greater than lonmax')
elseif latmin > latmax
    E.callError('bounds', 'Latmin is greater than latmax')
end
%Process all files between these dates, in yyyy/mm/dd format unless
%overriding dates are passed into the function.
%****************************%
if isempty(date_start) || isempty(date_end)
    date_start='2013/08/01';
    date_end='2013/08/06';
end
%****************************%
% This helps preallocate the Data structure, which is generally more
% efficient than expanding each time it's needed. Ideally this should be
% the maximum number of swaths expected for a given domain; extra swaths
% will be removed at the end.
estimated_num_swaths = 5;
%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%% DATA DIRECTORIES %%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% These are the directories to save or load data from. By default, they are
% taken from the behr_paths class, which can be created by running
% BEHR_initial_setup in the root folder of this repo. Alternately, paths
% can be specified as parameter inputs to this function (useful on the
% cluster where this should be called from a run script or in unit tests
% where the default save directories need to be overwritten).
%This is the directory where the final .mat file will be saved.
if isempty(sp_mat_dir)
    sp_mat_dir = behr_paths.SPMatSubdir(region);
end
%This is the directory where the OMI NASA SP (OMNO2) he5 files are
%saved. It should include subfolders organized by year, which in turn
%are organized by month.
if isempty(omi_he5_dir)
    omi_he5_dir = behr_paths.omno2_dir;
end
%This is the directory where the OMPIXCOR he5 files are saved. It
%should include subfolders organized by year, which in turn are
%organized by month.
if isempty(omi_pixcor_dir)
    omi_pixcor_dir = behr_paths.ompixcor_dir;
end
%This is the directory where the MODIS myd06_L2*.hdf files are saved.
%It should include subfolders organized by year.
if isempty(modis_myd06_dir)
    modis_myd06_dir = behr_paths.myd06_dir;
end
%This is the directory where the MODIS MCD43C3*.hdf files are saved. It
%should include subfolders organized by year.
if isempty(modis_mcd43_dir)
    modis_mcd43_dir = behr_paths.mcd43d_dir;
end
%This is the directory where the GLOBE data files and their headers
%(.hdr files) are saved.
if isempty(globe_dir)
    globe_dir = behr_paths.globe_dir;
end
% Verify the paths integrity.
nonexistant = {};
if ~exist(sp_mat_dir,'dir')
    nonexistant{end+1} = 'sp_mat_dir';
end
if ~exist(omi_he5_dir,'dir')
    nonexistant{end+1} = 'he5_dir';
end
if ~exist(omi_pixcor_dir, 'dir')
    nonexistant{end+1} = 'ompixcor_dir';
end
if ~exist(modis_myd06_dir,'dir')
    nonexistant{end+1} = 'modis_myd06_dir';
end
if ~exist(modis_mcd43_dir,'dir')
    nonexistant{end+1} = 'modis_mcd43_dir';
end
if ~exist(globe_dir,'dir')
    nonexistant{end+1} = 'globe_dir';
end
if numel(nonexistant)>0
    string_spec = [repmat('\n\t%s',1,numel(nonexistant)),'\n\n'];
    msg = sprintf('The following paths are not valid: %s Please double check them in the run file',string_spec);
    E.callError('bad_paths',sprintf(msg,nonexistant{:}));
end
%%%%%%%%%%%%%%%%%%%%%
%%%%% MAIN BODY %%%%%
%%%%%%%%%%%%%%%%%%%%%
%Add a little buffer around the edges to make sure we have ancillary data
%everywhere that we have NO2 profiles.
ancillary_lonlim = [lonmin - 10, lonmax + 10];
ancillary_latlim = [latmin - 10, latmax + 10];
%Load the land classification map. We'll use this to decide when to use the
%ocean surface reflectance parameterization.
if DEBUG_LEVEL > 1; fprintf('Loading land/ocean classification map\n'); end
if DEBUG_LEVEL > 2; t_load_land_ocean = tic; end
[ocean_mask.mask, ocean_mask.lon, ocean_mask.lat] = get_modis_ocean_mask(ancillary_lonlim, ancillary_latlim);
if DEBUG_LEVEL > 2; fprintf(' Time to load land/ocean classification map: %f\n', toc(t_load_land_ocean)); end
%Go ahead and load the terrain pressure data - only need to do this once
if DEBUG_LEVEL > 1; fprintf('Loading globe elevations\n'); end
if DEBUG_LEVEL > 2; t_load_globe = tic; end
[globe_elevations, globe_lon_matrix, globe_lat_matrix] = load_globe_alts(ancillary_lonlim, ancillary_latlim, 'vector');
% GLOBE uses NaN for ocean; treat those points as sea level (0 m).
globe_elevations(isnan(globe_elevations)) = 0;
if DEBUG_LEVEL > 2; fprintf(' Time to load GLOBE elevations: %f\n', toc(t_load_globe)); end
if DEBUG_LEVEL > 1; fprintf('Loading COART sea reflectances\n'); end
if DEBUG_LEVEL > 2; t_load_coart = tic; end
[~, coart_lut] = coart_sea_reflectance(0);
if DEBUG_LEVEL > 2; fprintf(' Time to load COART look up table: %f\n', toc(t_load_coart)); end
%For loop over all days from the starting or last finished date to the end
%date. We will give the absolute paths to files rather than changing the
%active directory, as MATLAB seems to run slightly slower if the current
%working directory is on the server.
% total_days=datenum(date_end)-datenum(last_date)+1;
% for j=1:total_days;
if onCluster
    n_workers=numThreads;
else
    % n_workers = 0 makes the parfor below execute serially.
    n_workers=0;
end
if onCluster && isempty(gcp('nocreate'))
    parpool(numThreads);
end
% Local copy of the global; presumably so the parfor body does not touch a
% global directly (parfor transparency) -- confirm, it is unused below.
onCluster_local = onCluster;
% Setup some values that either need to be computed to determine the loop
% indices or which are better calculated outside the loop.
datenums = datenum(date_start):datenum(date_end);
core_githead = git_head_hash(behr_paths.behr_core);
behrutils_githead = git_head_hash(behr_paths.behr_utils);
genutils_githead = git_head_hash(behr_paths.utils);
behr_grid = GlobeGrid(0.05, 'domain', [lonmin, lonmax, latmin, latmax]);
if DEBUG_LEVEL > 1; fprintf('Staring main loop\n'); end
% t_comm measures how long it takes workers to actually enter the parfor body.
t_comm = tic;
parfor(j=1:length(datenums), n_workers)
    %for j=1:length(datenums)
    % getCurrentTask returns empty when not running on a parallel pool;
    % substitute an ID of -1 so the debug messages still print.
    this_task = getCurrentTask();
    if isempty(this_task)
        this_task.ID = -1;
    end
    if DEBUG_LEVEL > 2; fprintf('Worker %d: Time to enter parfor loop: %f s\n', this_task.ID, toc(t_comm)); end
    if DEBUG_LEVEL > 2; t_day = tic; end
    %Read the desired year, month, and day
    this_dnum = datenums(j);
    this_year = year(this_dnum);
    this_year_str = sprintf('%04d', this_year);
    this_month=month(this_dnum);
    this_month_str = sprintf('%02d', this_month);
    this_day=day(this_dnum);
    this_day_str = sprintf('%02d', this_day);
    % Check if the file already exists. If it does, and if we're set
    % to not overwrite, we don't need to process this day.
    savename = sp_savename(this_dnum, region);
    if exist(fullfile(sp_mat_dir, savename), 'file') && ~overwrite
        if DEBUG_LEVEL > 0; fprintf('File %s exists, skipping this day\n', savename); end
        continue
    end
    % List variables that should be read directly from the OMI OMNO2 files.
    % If you want an additional variable, adding it here should be
    % sufficient to make that happen as long as it is under the
    % /HDFEOS/SWATHS/ColumnAmountNO2 group and is spelled exactly how the
    % dataset is named.
    sp_variables = {'Longitude', 'Latitude', 'SpacecraftAltitude', 'SpacecraftLatitude',...
        'SpacecraftLongitude', 'Time', 'ViewingZenithAngle',...
        'SolarZenithAngle', 'ViewingAzimuthAngle', 'SolarAzimuthAngle',...
        'AmfStrat', 'AmfTrop', 'CloudFraction', 'CloudRadianceFraction',...
        'TerrainHeight', 'TerrainPressure', 'TerrainReflectivity',...
        'CloudPressure', 'ColumnAmountNO2', 'SlantColumnAmountNO2',...
        'ColumnAmountNO2Trop', 'ColumnAmountNO2TropStd', 'ColumnAmountNO2Strat',...
        'TropopausePressure', 'VcdQualityFlags', 'XTrackQualityFlags'};
    % Variables from the OMPIXCOR files. As with the SP variables, adding a
    % variable here that is under the '/HDFEOS/SWATHS/OMI Ground Pixel
    % Corners VIS' group should be sufficient to cause it to be read in.
    pixcor_variables = {'TiledArea', 'TiledCornerLongitude', 'TiledCornerLatitude',...
        'FoV75Area', 'FoV75CornerLongitude', 'FoV75CornerLatitude'};
    % Variables that will be added by BEHR. These will need manual
    % intervention if you choose to add more variables since they're not
    % being copied directly from existing files.
    behr_variables = {'Date', 'Grid', 'LatBdy', 'LonBdy', 'Row', 'Swath', 'RelativeAzimuthAngle',...
        'MODISCloud', 'MODISAlbedo', 'MODISAlbedoQuality','MODISAlbedoFillFlag', 'GLOBETerrainHeight',...
        'IsZoomModeSwath', 'AlbedoOceanFlag','OMPIXCORFile', 'MODISCloudFiles', 'MODISAlbedoFile',...
        'GitHead_Core_Read', 'GitHead_BEHRUtils_Read', 'GitHead_GenUtils_Read', 'OMNO2File', 'BEHRRegion'};
    % Preallocate one Data element per expected swath; unused elements are
    % removed after the orbit loop.
    sub_data = make_empty_struct_from_cell([sp_variables, pixcor_variables, behr_variables],0);
    Data = repmat(make_empty_struct_from_cell([sp_variables, pixcor_variables, behr_variables],0), 1, estimated_num_swaths);
    %Set the file path and name, assuming that the file structure is
    %<he5_directory>/<year>/<month>/...files... Then figure out how many
    %files there are
    short_filename = sprintf('OMI-Aura_L2-OMNO2_%04dm%02d%02d*.he5', this_year, this_month, this_day);
    file_dir = fullfile(omi_he5_dir, this_year_str, this_month_str); %Used both here to find all he5 files and in the swath for loop to identify each file.
    file_pattern=fullfile(file_dir,short_filename);
    sp_files = dir(file_pattern);
    sp_files = remove_duplicate_orbits(sp_files);
    n = length(sp_files);
    if isempty(sp_files)
        fprintf('No data available for %s\n', datestr(this_dnum));
        continue
    end
    data_ind = 0;
    for a=1:n %For loop over all the swaths in a given day.
        if DEBUG_LEVEL > 2; t_orbit = tic; end
        if DEBUG_LEVEL > 0
            if a==1 || mod(a,10)==0; fprintf('Swath %u of %s \n', a, datestr(this_dnum)); end
        end
        %Read in each file, saving the hierarchy as 'hinfo'
        this_sp_filename = fullfile(omi_he5_dir, this_year_str, this_month_str, sp_files(a).name);
        [omi_starttime, omi_next_starttime] = get_omi_swath_times(sp_files, a);
        if omi_starttime < earliest_omi_starttime || omi_starttime > latest_omi_starttime
            %If start time is < 1500 and we want to look at the US, reject
            %the file, as it is probably descending nodes only.
            if DEBUG_LEVEL > 0; fprintf(' Swath %d: Nighttime granule skipped\n',a); end
            continue
        end
        if DEBUG_LEVEL > 2; t_sp = tic; end
        [this_data, pixels_in_domain] = read_omi_sp(this_sp_filename, '/HDFEOS/SWATHS/ColumnAmountNO2', sp_variables, sub_data, [lonmin, lonmax], [latmin, latmax]);
        if DEBUG_LEVEL > 2; fprintf(' Time to read SP data on worker %d: %f\n', this_task.ID, toc(t_sp)); end
        if ~pixels_in_domain
            if DEBUG_LEVEL > 1; disp('No points within lat/lon boundaries'); end
            continue
        end
        this_data.OMNO2File = this_sp_filename;
        % If we've gotten here, then there are pixels in the swath that lie
        % within the domain of interest. Add the OMPIXCOR data
        if DEBUG_LEVEL > 2; t_pixcor = tic; end
        pixcor_name = make_pixcorn_name_from_sp(this_sp_filename, fullfile(omi_pixcor_dir, this_year_str, this_month_str));
        this_data = read_omi_sp(pixcor_name, '/HDFEOS/SWATHS/OMI Ground Pixel Corners VIS', pixcor_variables, this_data, [lonmin, lonmax], [latmin, latmax], 'match_data', true);
        if DEBUG_LEVEL > 2; fprintf(' Time to read OMPIXCOR data on worker %d: %f\n', this_task.ID, toc(t_pixcor)); end
        this_data.OMPIXCORFile = pixcor_name;
        if DEBUG_LEVEL > 2; t_pixclean = tic; end
        this_data = handle_corner_zeros(this_data, DEBUG_LEVEL);
        % The OMNO2 and OMPIXCOR products place the zoom-mode pixels
        % differently in the swath - fix that here.
        this_data = align_zoom_mode_pixels(this_data);
        if DEBUG_LEVEL > 2; fprintf(' Time to clean up pixel corners on worker %d: %f\n', this_task.ID, toc(t_pixclean)); end
        % Add a few pieces of additional information
        % Swath is given in the file name as a five digits number following
        % the "-o" in the name (swath == orbit number)
        swath = str2double(regexp(this_sp_filename, '(?<=-o)\d\d\d\d\d', 'match', 'once'));
        this_data.Swath = swath;
        raa_tmp=abs(this_data.SolarAzimuthAngle + 180 - this_data.ViewingAzimuthAngle); % the extra factor of 180 corrects for the definition of RAA in the scattering weight lookup table
        raa_tmp(raa_tmp > 180)=360-raa_tmp(raa_tmp > 180);
        this_data.RelativeAzimuthAngle = raa_tmp;
        % Add MODIS cloud info to the files
        if DEBUG_LEVEL > 0; fprintf('\n Adding MODIS cloud data \n'); end
        if DEBUG_LEVEL > 2; t_modis_cld = tic; end
        this_data = read_modis_cloud(modis_myd06_dir, this_dnum, this_data, omi_starttime, omi_next_starttime, [lonmin, lonmax], [latmin, latmax],...
            'AllowNoFile', allow_no_myd, 'DEBUG_LEVEL', DEBUG_LEVEL);
        if DEBUG_LEVEL > 2; fprintf(' Time to average MODIS clouds on worker %d: %f\n', this_task.ID, toc(t_modis_cld)); end
        % Add MODIS albedo info to the files
        if DEBUG_LEVEL > 0; fprintf('\n Adding MODIS albedo information \n'); end
        if DEBUG_LEVEL > 2; t_modis_alb = tic; end
        [orbit_lonlim, orbit_latlim] = calc_orbit_latlon_limis(this_data.FoV75CornerLongitude, this_data.FoV75CornerLatitude, ancillary_lonlim, ancillary_latlim);
        % Previously we tried doing this outside the orbit loop, which used
        % a lot of memory but limited the number of times that we had to
        % read these files. Now, we'll try it inside the loop, but only
        % read the part relevant to each orbit.
        if DEBUG_LEVEL > 2; t_alb_read = tic; end
        modis_brdf_data = read_modis_albedo(modis_mcd43_dir, this_dnum, orbit_lonlim, orbit_latlim);
        if DEBUG_LEVEL > 2; fprintf('Worker %d: Time to read MODIS BRDF = %f\n', this_task.ID, toc(t_alb_read)); end
        this_data = avg_modis_alb_to_pixels(modis_brdf_data, coart_lut, ocean_mask, this_data, 'QualityLimit', 3, 'DEBUG_LEVEL', DEBUG_LEVEL);
        if DEBUG_LEVEL > 2; fprintf(' Time to average MODIS albedo on worker %d: %f\n', this_task.ID, toc(t_modis_alb)); end
        % Add GLOBE terrain pressure to the files
        if DEBUG_LEVEL > 0; fprintf('\n Adding GLOBE terrain data \n'); end
        if DEBUG_LEVEL > 2; t_globe = tic; end
        this_data = avg_globe_data_to_pixels(this_data, globe_elevations, globe_lon_matrix, globe_lat_matrix,...
            'DEBUG_LEVEL', DEBUG_LEVEL);
        if DEBUG_LEVEL > 2; fprintf(' Time to average GLOBE data on worker %d: %f\n', this_task.ID, toc(t_globe)); end
        % Add the few attribute-like variables
        this_data.Date = datestr(this_dnum, 'yyyy/mm/dd');
        this_data.LonBdy = [lonmin, lonmax];
        this_data.LatBdy = [latmin, latmax];
        this_data.GitHead_Core_Read = core_githead;
        this_data.GitHead_BEHRUtils_Read = behrutils_githead;
        this_data.GitHead_GenUtils_Read = genutils_githead;
        this_data.Grid = behr_grid;
        this_data.BEHRRegion = lower(region);
        data_ind = data_ind + 1;
        Data(data_ind) = this_data;
        % Clear the modis albedo structure, hopefully this will help with
        % memory usage
        modis_brdf_data = [];
        if DEBUG_LEVEL > 2; fprintf(' Time for one orbit on worker %d: %f\n', this_task.ID, toc(t_orbit)); end
    end %End the loop over all swaths in a day
    % Remove preallocated but unused swaths
    Data(data_ind+1:end) = [];
    if DEBUG_LEVEL > 2; t_save = tic; end
    saveData(fullfile(sp_mat_dir,savename), Data); % Saving must be handled as a separate function in a parfor loop because passing a variable name as a string upsets the parallelization monkey (it's not transparent).
    if DEBUG_LEVEL > 2; fprintf(' Time to save on worker %d: %f\n', this_task.ID, toc(t_save)); end
    if DEBUG_LEVEL > 2; fprintf(' Time for one day on worker %d: %f\n', this_task.ID, toc(t_day)); end
end %End the loop over all days
end
function saveData(filename,Data)
% SAVEDATA Save the structure DATA to FILENAME under the variable name "Data".
% This exists as a separate function because calling save() with a variable
% name given as a string from inside a parfor loop violates parfor's
% transparency requirement (see the call site in read_main).
save(filename,'Data')
end
function mycleanup()
% MYCLEANUP Cleanup callback registered via onCleanup when running on a cluster.
% If an error message is pending, print it, shut down any open parallel
% pool, and exit MATLAB with a nonzero status so the job scheduler can see
% that the run failed.
% NOTE(review): lasterror is deprecated in newer MATLAB releases
% (MException.last is the modern replacement) -- confirm behavior before
% upgrading, as their semantics in cleanup contexts differ.
err=lasterror;
if ~isempty(err.message)
    fprintf('MATLAB exiting due to problem: %s\n', err.message);
    if ~isempty(gcp('nocreate'))
        delete(gcp)
    end
    exit(1)
end
end
function [this_swath_time, next_swath_time] = get_omi_swath_times(sp_files, current_file_index)
% GET_OMI_SWATH_TIMES Get start times (UTC, as hhmm numbers) for the
% current OMI swath and the swath after it.
%
% Usually there is at least one swath starting after the one overflying the
% US west coast for that day, but occasionally that swath is not present in
% the OMNO2 data. In those cases, we need to calculate the time it should
% have started, knowing that the Aura orbit period is ~99 min. If for some
% reason the calculated start time should end up being in the next day,
% error out so the user is aware of that. (Previously this case passed
% through silently despite the stated intent; the check is now enforced.)
%
% Find the orbit time in the file name: look for four numbers (hhmm)
% preceeded by "t" and succeeded by "-o". The file names have the format,
% e.g. OMI-Aura_L2-OMNO2_2013m0808t1715-o48223_v003-2013m0809t125937.he5,
% where in this case 2013m0808t1715 gives the date and UTC time at the
% beginning of the swath as Aug 8th, 2013, 17:15 UTC. Checking that it's
% followed by -o is necessary to distinguish from the processing time of
% 2013m0809t125937.
time_regex = '(?<=t)\d\d\d\d(?=-o)';
this_swath_time = regexp(sp_files(current_file_index).name, time_regex, 'match', 'once');
this_swath_time = str2double(this_swath_time);
if current_file_index < numel(sp_files) % If there is at least one more swath, get its start time from the file name
    next_swath_time = regexp(sp_files(current_file_index+1).name, time_regex, 'match', 'once');
    next_swath_time = str2double(next_swath_time);
else % otherwise add 99 minutes (one Aura orbital period) to the start time for this swath
    omi_hr = floor(this_swath_time/100);
    omi_min = mod(this_swath_time, 100) + 99;
    next_swath_time = 100*(omi_hr + floor(omi_min/60)) + mod(omi_min, 60);
    if next_swath_time >= 2400
        % JLLErrors is constructed lazily so the normal path does not
        % depend on it.
        E = JLLErrors;
        E.callError('time_rollover', 'Calculated start time of the next swath (%04d) falls in the next UTC day', next_swath_time);
    end
end
end
function sp_files = remove_duplicate_orbits(sp_files)
% From Oct 23, 2013 to Nov 4, 2013, several orbits have two OMNO2 files.
% Given a dir()-style structure of OMNO2 files, detect orbits that appear
% more than once and keep only the file with the most recent processing
% date (assumed to be the best one); the rest are dropped with a warning.
orbits = zeros(size(sp_files));
for i_file = 1:numel(sp_files)
% The orbit number occupies characters 35:39 of the standard file name
% (the "-o#####" token).
orbits(i_file) = str2double(sp_files(i_file).name(35:39));
end
uorbits = unique(orbits);
if numel(uorbits) == numel(orbits)
% Every orbit appears exactly once; nothing to do.
return
end
rm_bool = false(size(sp_files)); % true marks a file slated for removal
for i_orb = 1:numel(uorbits)
dup_inds = find(orbits == uorbits(i_orb));
if numel(dup_inds) < 2
continue
end
% Parse the processing date (yyyymmdd) out of each duplicate's name and
% keep the one processed most recently (first such file if tied).
proc_datenums = zeros(size(dup_inds));
for i_dup = 1:numel(dup_inds)
this_name = sp_files(dup_inds(i_dup)).name;
proc_datenums(i_dup) = datenum(this_name([46:49,51:54]),'yyyymmdd');
end
[~, i_latest] = max(proc_datenums);
keep_mask = false(size(dup_inds));
keep_mask(i_latest) = true;
rm_bool(dup_inds(~keep_mask)) = true;
end
% Record the removals in the log via a warning before deleting them.
if any(rm_bool)
rm_files = {sp_files(rm_bool).name};
fspec = repmat('\t%s\n',1,sum(rm_bool));
wmsg = sprintf('Duplicate orbits detected, the following files will not be used:\n%s',fspec);
warning(wmsg, rm_files{:});
end
sp_files(rm_bool) = [];
end
function pixcor_name = make_pixcorn_name_from_sp(sp_name, pixcor_dir_with_year_month)
% Construct the full path to the OMPIXCOR file matching the given OMNO2
% file. OMPIXCOR names share the OMNO2 stem (date, time, orbit) but have
% "OMPIXCOR" in place of "OMNO2" and a different processing timestamp, so
% we glob on the stem and require exactly one match.
E = JLLErrors;
[~,sp_name] = fileparts(sp_name);
% Extract the date/time/orbit stem from the OMNO2 name.
stem_regex = 'OMI-Aura_L2-OMNO2_\d\d\d\dm\d\d\d\dt\d\d\d\d-o\d\d\d\d\d';
file_stem = regexp(sp_name, stem_regex, 'match', 'once');
file_stem = strrep(file_stem, 'OMNO2', 'OMPIXCOR');
pixcor_pattern = strcat(file_stem, '*.he5');
F = dir(fullfile(pixcor_dir_with_year_month, pixcor_pattern));
if numel(F) < 1
E.filenotfound('Could not find OMPIXCOR file matching %s in %s', pixcor_pattern, pixcor_dir_with_year_month);
elseif numel(F) > 1
E.toomanyfiles('Multiple files matched pattern %s in %s', pixcor_pattern, pixcor_dir_with_year_month);
end
pixcor_name = fullfile(pixcor_dir_with_year_month, F(1).name);
end
function data = align_zoom_mode_pixels(data)
% As of 21 Apr 2017, during zoom mode operation, OMNO2 places the 30
% available pixels in rows 0-29. OMPIXCOR places them in rows 15-44. Yay
% consistency. This subfunction fixes that so that the corner coordinates
% lie in rows 0-29.
%
% I will assume that any day with entire rows of NaNs in Latitude/Longitude
% is a zoom mode day.
E = JLLErrors;
% Pairs of (longitude, latitude) corner fields; each row has a matching
% per-pixel area field in the same row of area_fields.
corner_fields = {'FoV75CornerLongitude', 'FoV75CornerLatitude';...
'TiledCornerLongitude', 'TiledCornerLatitude'};
area_fields = {'FoV75Area'; 'TiledArea'};
if size(corner_fields, 2) ~= 2
E.callError('bad_field_def', 'corner_fields must be n-by-2, with the Longitude field in the field column, latitude field in the second');
elseif size(corner_fields, 1) ~= size(area_fields,1)
E.notimplemented('corner fields without corresponding area fields or vice versa')
end
data.IsZoomModeSwath = false;
% A pixel row is "missing" when every along-track element of both
% Longitude and Latitude in that row is NaN.
pix_nans = all(isnan(data.Longitude),1) & all(isnan(data.Latitude),1);
% Expected missing rows (1-based) in zoom mode: OMNO2 centers fill rows
% 1-30 so rows 31-60 are NaN; OMPIXCOR corners fill rows 16-45 so rows
% 1-15 and 46-60 are NaN.
pix_nans_for_zoom = 31:60;
corn_nans_for_zoom = [1:15, 46:60];
if ~any(pix_nans)
% No all-NaN pixel rows; nothing to realign.
return
end
for a=1:size(corner_fields,1)
loncorn = data.(corner_fields{a,1});
latcorn = data.(corner_fields{a,2});
% A corner row is missing when all 4 corners of every pixel in it are NaN.
corn_nans = squeeze(all(all(isnan(loncorn),1),2) & all(all(isnan(latcorn),1),2));
pixarea = data.(area_fields{a});
area_nans = isnan(pixarea);
% Sanity-check array shapes before any rearranging.
if ~isequal(size(loncorn), [4, size(data.Longitude)]) || ~isequal(size(latcorn), [4, size(data.Latitude)])
E.callError('zoom_corners', 'The %s and %s fields are not 4 in the first dimension, and the same size as Longitude/Latitude in the second and third', corner_fields{a,1}, corner_fields{a,2});
end
if ~isvector(pixarea)
E.callError('zoom_area', 'The %s field is expected to be a vector; it is not', area_fields{a});
elseif numel(pixarea) ~= size(data.Longitude,2)
E.callError('zoom_area', 'The %s field is not the same length as Longitude in the second dimension', area_fields{a})
end
% NOTE(review): isvecequal appears to be a project helper comparing two
% vectors elementwise (presumably orientation-insensitive) -- confirm.
if isvecequal(find(corn_nans), corn_nans_for_zoom) && isvecequal(find(area_nans), corn_nans_for_zoom) && isvecequal(find(pix_nans), pix_nans_for_zoom)
% If all the NaNs are where we expect them to be for zoom mode,
% then assum this is a zoom mode swath and we need to move the
% corner coordinates to line up with the regular pixels.
data.IsZoomModeSwath = true;
loncorn(:,:,~pix_nans) = loncorn(:,:,~corn_nans);
loncorn(:,:,pix_nans) = nan;
latcorn(:,:,~pix_nans) = latcorn(:,:,~corn_nans);
latcorn(:,:,pix_nans) = nan;
pixarea(~pix_nans) = pixarea(~area_nans);
pixarea(pix_nans) = nan;
else
% Otherwise, we may have pixels with valid center coordinates but
% not corners, or pixels with valid corners but not center
% coordinates. I decided to NaN the corners of pixels with invalid
% center coordinates but not the other way around b/c a failure of
% the corner algorithm does not mean the NO2 retrieval is bad
% (although it does preclude BEHR retrieval b/c we won't be able to
% average MODIS albedo and GLOBE terrain pressure to the pixel) but
% if the center coordinate is NaNed for some reason, I don't want
% to do anything to suggest that that pixel is good.
loncorn(:,:,pix_nans) = nan;
latcorn(:,:,pix_nans) = nan;
pixarea(pix_nans) = nan;
end
% Write the (possibly realigned) arrays back into the data structure.
data.(corner_fields{a,1}) = loncorn;
data.(corner_fields{a,2}) = latcorn;
data.(area_fields{a}) = pixarea;
end
end
function data = handle_corner_zeros(data, DEBUG_LEVEL)
% For every field of DATA whose name contains "corner" (case-insensitive),
% find pixels whose values are 0 across the entire first dimension (i.e.
% all four corner values are exactly 0) and set those pixels' corner
% values to NaN. DEBUG_LEVEL > 0 prints a message when any are found.
fns = fieldnames(data);
% NOTE(review): iscellcontents appears to be a project helper that applies
% the named test ('isempty') to each cell of the regexpi result, so ff
% selects field names where a match was found -- confirm.
ff = ~iscellcontents(regexpi(fns, 'corner', 'once'), 'isempty');
fns = fns(ff);
for a=1:numel(fns)
% True for pixels whose corner values are all exactly 0.
xx = all(data.(fns{a}) == 0, 1);
if any(xx(:)) && DEBUG_LEVEL > 0
fprintf('    Pixels with all corners == 0 found for field %s, setting corners to NaN\n', fns{a});
end
data.(fns{a})(:,xx) = nan;
end
end
function [lonlim, latlim] = calc_orbit_latlon_limis(lons, lats, anc_lonlim, anc_latlim)
% Compute the lon/lat extent of an orbit with a small buffer, clamped to
% the ancillary-data limits ANC_LONLIM/ANC_LATLIM so the result stays
% consistent with ancillary data loaded for the whole day. The buffer is
% two grid cells of 30 arc sec (1/120 degree) data, sized for the MODIS
% BRDF product (~1 km resolution).
buffer = 2/120;
raw_lon = [min(lons(:)) - buffer, max(lons(:)) + buffer];
raw_lat = [min(lats(:)) - buffer, max(lats(:)) + buffer];
% Clamp each buffered extent to the ancillary limits.
lonlim = [max(raw_lon(1), anc_lonlim(1)), min(raw_lon(2), anc_lonlim(2))];
latlim = [max(raw_lat(1), anc_latlim(1)), min(raw_lat(2), anc_latlim(2))];
end
|
github
|
CohenBerkeleyLab/BEHR-core-master
|
convert_globe_surfpres.m
|
.m
|
BEHR-core-master/One-off Scripts/convert_globe_surfpres.m
| 1,439 |
utf_8
|
f8091327fba9be7273fa0dcf6969b9cb
|
function convert_globe_surfpres(input_dir, output_dir)
%CONVERT_GLOBE_SURFPRES Change GLOBETerpres into GLOBETerrainHeight
%   For every OMI_SP .mat file in INPUT_DIR, replace the GLOBETerpres
%   field (hPa) with GLOBETerrainHeight (m, via the scale-height formula
%   z = -7400 * ln(p/1013.25)) and save the result to OUTPUT_DIR. Files
%   that already exist in OUTPUT_DIR are skipped.
F = dir(fullfile(input_dir, 'OMI_SP*.mat'));
parfor i_file = 1:numel(F)
out_file = fullfile(output_dir, F(i_file).name);
if exist(out_file, 'file')
fprintf('%s exists already\n', out_file);
continue
end
fprintf('Loading %s\n', F(i_file).name);
D = load(fullfile(input_dir, F(i_file).name));
Data = D.Data;
% Capture the current field order so GLOBETerrainHeight ends up in the
% same position that GLOBETerpres occupied.
data_fields = fieldnames(Data);
data_fields{strcmp(data_fields, 'GLOBETerpres')} = 'GLOBETerrainHeight';
if isempty(Data)
% rmfield/orderfields do not work on an empty struct array; build a
% 1x0 empty structure with the correct fields instead.
Data = make_empty_struct_from_cell(data_fields);
Data = Data(1, false);
else
for i_orbit = 1:numel(Data)
Data(i_orbit).GLOBETerrainHeight = -7400 .* log(Data(i_orbit).GLOBETerpres ./ 1013.25);
end
Data = rmfield(Data, 'GLOBETerpres');
Data = orderfields(Data, data_fields);
end
saveme(out_file, Data);
end
end
function saveme(filename, Data) %#ok<*INUSD>
% Save the structure DATA to FILENAME under the variable name 'Data'.
% Wrapped in a function because save() cannot be called directly inside a
% parfor body (transparency rules); the parameter must be named 'Data'
% since save writes the variable by its in-scope name.
save(filename, 'Data');
end
|
github
|
gmorneault/yield-curve-interpolation-master
|
nelsonpy.m
|
.m
|
yield-curve-interpolation-master/Code/curve_fitting/nelsonpy.m
| 436 |
utf_8
|
4553d08f80b7e8547c6fcce620c30942
|
function py = nelsonpy(maturities,betas,tau)
%% Nelson-Siegel par yield, CEY
% based on Eq.7, 20 and 22 from GSW
% Numerator: 2*(1 - discount factor at maturity), from the zero curve.
py = 2*(1-discount(nelsony(maturities,betas,tau),maturities));
% Denominator: annuity of semi-annual coupon dates out to each maturity.
for k = 1:length(maturities)
coupon_dates = (1:2*maturities(k))/2;
annuity = sum(discount(nelsony(coupon_dates,betas,tau),coupon_dates));
py(k) = py(k) / annuity;
end
% Express as a percentage.
py = py*100;
end
function d = discount(yields, maturities)
%% discount factor
% GSW Eq. 2: continuous compounding with yields quoted in percent.
d = exp(-(yields .* maturities) / 100);
end
|
github
|
xuhuairuogu/OptimTraj-master
|
directCollocation.m
|
.m
|
OptimTraj-master/directCollocation.m
| 17,680 |
utf_8
|
99bcafa9cb7f42b2b2d8ab91d91d7e26
|
function soln = directCollocation(problem)
% soln = directCollocation(problem)
%
% OptimTraj utility function
%
% This function is designed to be called by either "trapezoid" or
% "hermiteSimpson". It actually calls FMINCON to solve the trajectory
% optimization problem.
%
% Analytic gradients are supported.
%
% NOTES:
%
% If analytic gradients are used, then the sparsity pattern is returned
% in the struct: soln.info.sparsityPattern. View it using spy().
%
%To make code more readable
G = problem.guess;
B = problem.bounds;
F = problem.func;
Opt = problem.options;
% One quadrature weight per transcription grid point.
nGrid = length(F.weights);
% Whether analytic gradients were requested via the fmincon options.
flagGradObj = strcmp(Opt.nlpOpt.GradObj,'on');
flagGradCst = strcmp(Opt.nlpOpt.GradConstr,'on');
% Print out notice about analytic gradients
if Opt.verbose > 0
if flagGradObj
fprintf(' - using analytic gradients of objective function\n');
end
if flagGradCst
fprintf(' - using analytic gradients of constraint function\n');
end
fprintf('\n');
end
% Interpolate the guess at the grid-points for transcription:
guess.tSpan = G.time([1,end]);
guess.time = linspace(guess.tSpan(1), guess.tSpan(2), nGrid);
guess.state = interp1(G.time', G.state', guess.time')';
guess.control = interp1(G.time', G.control', guess.time')';
[zGuess, pack] = packDecVar(guess.time, guess.state, guess.control);
% Precompute index maps from user-function gradients to decision-variable
% gradients (only needed if analytic gradients are in use).
if flagGradCst || flagGradObj
gradInfo = grad_computeInfo(pack);
end
% Unpack all bounds:
% Interior time entries are placeholders; packDecVar keeps only t(1),t(end).
tLow = linspace(B.initialTime.low, B.finalTime.low, nGrid);
xLow = [B.initialState.low, B.state.low*ones(1,nGrid-2), B.finalState.low];
uLow = B.control.low*ones(1,nGrid);
zLow = packDecVar(tLow,xLow,uLow);
tUpp = linspace(B.initialTime.upp, B.finalTime.upp, nGrid);
xUpp = [B.initialState.upp, B.state.upp*ones(1,nGrid-2), B.finalState.upp];
uUpp = B.control.upp*ones(1,nGrid);
zUpp = packDecVar(tUpp,xUpp,uUpp);
%%%% Set up problem for fmincon:
if flagGradObj
P.objective = @(z)( ...
myObjGrad(z, pack, F.pathObj, F.bndObj, F.weights, gradInfo) ); %Analytic gradients
[~, objGradInit] = P.objective(zGuess);
sparsityPattern.objective = (objGradInit~=0)'; % Only used for visualization!
else
P.objective = @(z)( ...
myObjective(z, pack, F.pathObj, F.bndObj, F.weights) ); %Numerical gradients
end
if flagGradCst
P.nonlcon = @(z)( ...
myCstGrad(z, pack, F.dynamics, F.pathCst, F.bndCst, F.defectCst, gradInfo) ); %Analytic gradients
[~,~,cstIneqInit,cstEqInit] = P.nonlcon(zGuess);
sparsityPattern.equalityConstraint = (cstEqInit~=0)'; % Only used for visualization!
sparsityPattern.inequalityConstraint = (cstIneqInit~=0)'; % Only used for visualization!
else
P.nonlcon = @(z)( ...
myConstraint(z, pack, F.dynamics, F.pathCst, F.bndCst, F.defectCst) ); %Numerical gradients
end
P.x0 = zGuess;
P.lb = zLow;
P.ub = zUpp;
P.Aineq = []; P.bineq = [];
P.Aeq = []; P.beq = [];
P.options = Opt.nlpOpt;
P.solver = 'fmincon';
%%%% Call fmincon to solve the non-linear program (NLP)
tic;
[zSoln, objVal,exitFlag,output] = fmincon(P);
[tSoln,xSoln,uSoln] = unPackDecVar(zSoln,pack);
nlpTime = toc;
%%%% Store the results:
soln.grid.time = tSoln;
soln.grid.state = xSoln;
soln.grid.control = uSoln;
% Linear interpolation between grid points; NaN outside the time span.
soln.interp.state = @(t)( interp1(tSoln',xSoln',t','linear',nan)' );
soln.interp.control = @(t)( interp1(tSoln',uSoln',t','linear',nan)' );
soln.info = output;
soln.info.nlpTime = nlpTime;
soln.info.exitFlag = exitFlag;
soln.info.objVal = objVal;
% Re-evaluate the gradients at the solution point to report the sparsity
% pattern actually observed there.
if flagGradCst || flagGradObj % Then return sparsity pattern for visualization
if flagGradObj
[~, objGradInit] = P.objective(zSoln);
sparsityPattern.objective = (objGradInit~=0)';
end
if flagGradCst
[~,~,cstIneqInit,cstEqInit] = P.nonlcon(zSoln);
sparsityPattern.equalityConstraint = (cstEqInit~=0)';
sparsityPattern.inequalityConstraint = (cstIneqInit~=0)';
end
soln.info.sparsityPattern = sparsityPattern;
end
soln.problem = problem; % Return the fully detailed problem struct
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
%%%% SUB FUNCTIONS %%%%
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function [z,pack] = packDecVar(t,x,u)
%
% Collapse the time (t), state (x) and control (u) matrices into a single
% decision-variable vector for the NLP solver.
%
% INPUTS:
% t = [1, nTime] = time vector (grid points)
% x = [nState, nTime] = state vector at each grid point
% u = [nControl, nTime] = control vector at each grid point
%
% OUTPUTS:
% z = column vector of 2 + nTime*(nState+nControl) decision variables
% pack = details about how to convert z back into t,x, and u
% .nTime
% .nState
% .nControl
% .tIdx, .xIdx, .uIdx = index maps into z
%
nTime = numel(t);
nState = size(x,1);
nControl = size(u,1);
% Interleave state and control by time step (rather than stacking all
% states then all controls) so that the defect-constraint gradients form
% a banded matrix. Entries 1:2 of z hold the initial and final time.
indz = reshape(2+(1:(nState+nControl)*nTime), nState+nControl, nTime);
tIdx = 1:2;
xIdx = indz(1:nState,:);
uIdx = indz(nState+(1:nControl),:);
z = zeros(2+numel(indz),1);
z(tIdx) = [t(1); t(end)];
z(xIdx(:)) = x(:);
z(uIdx(:)) = u(:);
pack.nTime = nTime;
pack.nState = nState;
pack.nControl = nControl;
pack.tIdx = tIdx;
pack.xIdx = xIdx;
pack.uIdx = uIdx;
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function [t,x,u] = unPackDecVar(z,pack)
%
% Inverse of packDecVar: expand the decision-variable vector back into the
% time (t), state (x), and control (u) matrices.
%
% INPUTS:
% z = column vector of 2 + nTime*(nState+nControl) decision variables
% pack = details about how to convert z back into t,x, and u
% .nTime
% .nState
% .nControl
% .xIdx, .uIdx = index maps into z
%
% OUTPUTS:
% t = [1, nTime] = time vector (grid points)
% x = [nState, nTime] = state vector at each grid point
% u = [nControl, nTime] = control vector at each grid point
%
nTime = pack.nTime;
% The time grid is uniform between the two time decision variables z(1:2).
t = linspace(z(1), z(2), nTime);
% Indexing z with the stored index matrices recovers the interleaved state
% and control values; reshape guarantees [nState,nTime] / [nControl,nTime].
x = reshape(z(pack.xIdx), pack.nState, nTime);
u = reshape(z(pack.uIdx), pack.nControl, nTime);
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function cost = myObjective(z,pack,pathObj,bndObj,weights)
%
% Unpack the decision variables, evaluate the user-defined objective
% terms, and return the total scalar cost.
%
% INPUTS:
% z = column vector of decision variables
% pack = details about how to convert decision variables into t,x, and u
% pathObj = user-defined integral objective function (may be empty)
% bndObj = user-defined end-point objective function (may be empty)
% weights = quadrature weights for the integral term
%
% OUTPUTS:
% cost = scalar cost for this set of decision variables
%
[t,x,u] = unPackDecVar(z,pack);
% Running cost: weighted quadrature of the integrand over the uniform grid.
if isempty(pathObj)
integralCost = 0;
else
dt = (t(end)-t(1))/(pack.nTime-1);
integrand = pathObj(t,x,u);
integralCost = dt*integrand*weights;
end
% Boundary cost, evaluated at the trajectory end points.
if isempty(bndObj)
bndCost = 0;
else
bndCost = bndObj(t(1), x(:,1), t(end), x(:,end));
end
cost = bndCost + integralCost;
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function [c, ceq] = myConstraint(z,pack,dynFun, pathCst, bndCst, defectCst)
%
% Unpack the decision variables, compute the dynamics defects along the
% trajectory, and evaluate the user-defined constraint functions.
%
% INPUTS:
% z = column vector of decision variables
% pack = details about how to convert decision variables into t,x, and u
% dynFun = user-defined dynamics function
% pathCst = user-defined constraints along the path
% bndCst = user-defined constraints at the boundaries
% defectCst = transcription-specific defect-constraint function
%
% OUTPUTS:
% c = inequality constraints to be passed to fmincon
% ceq = equality constraints to be passed to fmincon
%
[t,x,u] = unPackDecVar(z,pack);
% Defects: mismatch between the discretized dynamics and the state grid.
dt = (t(end)-t(1))/(length(t)-1);
defects = defectCst(dt, x, dynFun(t,x,u));
% Merge defects with the user-defined path/boundary constraints.
[c, ceq] = collectConstraints(t,x,u,defects, pathCst, bndCst);
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
%%%% Additional Sub-Functions for Gradients %%%%
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
%%%% ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ %%%%
function gradInfo = grad_computeInfo(pack)
%
% This function computes the matrix dimensions and indicies that are used
% to map the gradients from the user functions to the gradients needed by
% fmincon. The key difference is that the gradients in the user functions
% are with respect to their input (t,x,u) or (t0,x0,tF,xF), while the
% gradients for fmincon are with respect to all decision variables.
%
% INPUTS:
% nDeVar = number of decision variables
% pack = details about packing and unpacking the decision variables
% .nTime
% .nState
% .nControl
%
% OUTPUTS:
% gradInfo = details about how to transform gradients
%
nTime = pack.nTime;
nState = pack.nState;
nControl = pack.nControl;
nDecVar = 2 + nState*nTime + nControl*nTime;
zIdx = 1:nDecVar;
gradInfo.nDecVar = nDecVar;
% Trick: unpack the vector of indices 1..nDecVar, so each entry of
% tIdx/xIdx/uIdx is the decision-variable index of that element.
[tIdx, xIdx, uIdx] = unPackDecVar(zIdx,pack);
gradInfo.tIdx = tIdx([1,end]);
gradInfo.xuIdx = [xIdx;uIdx];
%%%% Compute gradients of time:
% alpha = (0..N-1)/(N-1)
% t = alpha*tUpp + (1-alpha)*tLow
% so each grid time depends linearly on the two time decision variables.
alpha = (0:(nTime-1))/(nTime-1);
gradInfo.alpha = [1-alpha; alpha];
if (gradInfo.tIdx(1)~=1 || gradInfo.tIdx(end)~=2)
error('The first two decision variables must be the initial and final time')
end
% d(dt)/d(t0,tF): dt = (tF - t0)/(nTime-1).
gradInfo.dtGrad = [-1; 1]/(nTime-1);
%%%% Compute gradients of state
% xGrad(iState,iTime,:) is a one-hot vector selecting that state's
% decision variable.
gradInfo.xGrad = zeros(nState,nTime,nDecVar);
for iTime=1:nTime
for iState=1:nState
gradInfo.xGrad(iState,iTime,xIdx(iState,iTime)) = 1;
end
end
%%%% For unpacking the boundary constraints and objective:
% Decision-variable indices of (t0, x0, tF, xF), in that order.
gradInfo.bndIdxMap = [tIdx(1); xIdx(:,1); tIdx(end); xIdx(:,end)];
end
%%%% ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ %%%%
function [c, ceq, cGrad, ceqGrad] = grad_collectConstraints(t,x,u,defects, defectsGrad, pathCst, bndCst, gradInfo)
% [c, ceq, cGrad, ceqGrad] = grad_collectConstraints(t,x,u,defects, defectsGrad, pathCst, bndCst, gradInfo)
%
% OptimTraj utility function.
%
% Collects the defects, calls user-defined constraints, and then packs
% everything up into a form that is good for fmincon. Additionally, it
% reshapes and packs up the gradients of these constraints.
%
% INPUTS:
% t = time vector
% x = state matrix
% u = control matrix
% defects = defects matrix
% defectsGrad = gradients of the defects w.r.t. the decision variables
% pathCst = user-defined path constraint function
% bndCst = user-defined boundary constraint function
% gradInfo = index maps from grad_computeInfo
%
% OUTPUTS:
% c = inequality constraint for fmincon
% ceq = equality constraint for fmincon
% cGrad = gradient of c, transposed to fmincon's convention
% ceqGrad = gradient of ceq, transposed to fmincon's convention
%
% Dynamics defects become equality constraints (flattened to a column).
ceq_dyn = reshape(defects,numel(defects),1);
ceq_dynGrad = grad_flattenPathCst(defectsGrad);
%%%% Compute the user-defined constraints:
if isempty(pathCst)
c_path = [];
ceq_path = [];
c_pathGrad = [];
ceq_pathGrad = [];
else
% Path constraints are evaluated at every grid point, then flattened;
% their gradients are first remapped to decision-variable space.
[c_pathRaw, ceq_pathRaw, c_pathGradRaw, ceq_pathGradRaw] = pathCst(t,x,u);
c_path = reshape(c_pathRaw,numel(c_pathRaw),1);
ceq_path = reshape(ceq_pathRaw,numel(ceq_pathRaw),1);
c_pathGrad = grad_flattenPathCst(grad_reshapeContinuous(c_pathGradRaw,gradInfo));
ceq_pathGrad = grad_flattenPathCst(grad_reshapeContinuous(ceq_pathGradRaw,gradInfo));
end
if isempty(bndCst)
c_bnd = [];
ceq_bnd = [];
c_bndGrad = [];
ceq_bndGrad = [];
else
% Boundary constraints see only (t0,x0,tF,xF); their gradients are
% scattered into the full decision-variable vector via bndIdxMap.
t0 = t(1);
tF = t(end);
x0 = x(:,1);
xF = x(:,end);
[c_bnd, ceq_bnd, c_bndGradRaw, ceq_bndGradRaw] = bndCst(t0,x0,tF,xF);
c_bndGrad = grad_reshapeBoundary(c_bndGradRaw,gradInfo);
ceq_bndGrad = grad_reshapeBoundary(ceq_bndGradRaw,gradInfo);
end
%%%% Pack everything up:
% Note the transpose: fmincon expects constraint gradients as
% [nDecVar, nConstraint].
c = [c_path;c_bnd];
ceq = [ceq_dyn; ceq_path; ceq_bnd];
cGrad = [c_pathGrad;c_bndGrad]';
ceqGrad = [ceq_dynGrad; ceq_pathGrad; ceq_bndGrad]';
end
%%%% ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ %%%%
function C = grad_flattenPathCst(CC)
%
% Collapse the first two dimensions of a path-constraint gradient array
% ([nCst, nTime, nDecVar]) into one, so it can be handed to fmincon as a
% 2D matrix. An empty input yields an empty output.
%
if isempty(CC)
C = [];
else
C = reshape(CC, [], size(CC,3));
end
end
%%%% ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ %%%%
function CC = grad_reshapeBoundary(C,gradInfo)
%
% Expand a boundary constraint/objective gradient (columns correspond to
% t0, x0, tF, xF) to span the full set of decision variables, scattering
% the columns to the positions listed in gradInfo.bndIdxMap and leaving
% every other entry zero.
%
nRows = size(C,1);
CC = zeros(nRows, gradInfo.nDecVar);
CC(:,gradInfo.bndIdxMap) = C;
end
%%%% ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ %%%%
function grad = grad_reshapeContinuous(gradRaw,gradInfo)
% grad = grad_reshapeContinuous(gradRaw,gradInfo)
%
% OptimTraj utility function.
%
% This function converts the raw gradients from the user function into
% gradients with respect to the decision variables.
%
% INPUTS:
% gradRaw = [nOutput,nInput,nTime], where input column 1 is the partial
% derivative with respect to time and the remaining columns are the
% partials with respect to (x;u) at that grid point.
%
% OUTPUTS:
% grad = [nOutput,nTime,nDecVar]
%
if isempty(gradRaw)
grad = [];
else
[nOutput, ~, nTime] = size(gradRaw);
grad = zeros(nOutput,nTime,gradInfo.nDecVar);
% First, loop through and deal with time.
% Chain rule: each grid time is a linear blend of (t0,tF) with weights
% gradInfo.alpha, so d/d(t0,tF) = (d/dt) .* alpha.
timeGrad = gradRaw(:,1,:); timeGrad = permute(timeGrad,[1,3,2]);
for iOutput=1:nOutput
A = ([1;1]*timeGrad(iOutput,:)).*gradInfo.alpha;
grad(iOutput,:,gradInfo.tIdx) = permute(A,[3,2,1]);
end
% Now deal with state and control:
% Each (x;u) partial at time iTime maps directly onto that grid point's
% decision variables via gradInfo.xuIdx.
for iOutput=1:nOutput
for iTime=1:nTime
B = gradRaw(iOutput,2:end,iTime);
grad(iOutput,iTime,gradInfo.xuIdx(:,iTime)) = permute(B,[3,1,2]);
end
end
end
end
%%%% ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ %%%%
function [cost, costGrad] = myObjGrad(z,pack,pathObj,bndObj,weights,gradInfo)
%
% This function unpacks the decision variables, sends them to the
% user-defined objective functions, and then returns the final cost
% along with its analytic gradient.
%
% INPUTS:
% z = column vector of decision variables
% pack = details about how to convert decision variables into t,x, and u
% pathObj = user-defined integral objective function (may be empty)
% bndObj = user-defined end-point objective function (may be empty)
% weights = quadrature weights for the integral term
% gradInfo = index maps from grad_computeInfo
%
% OUTPUTS:
% cost = scalar cost for this set of decision variables
% costGrad = [1, nDecVar] gradient of cost
%
%Unpack the decision variables:
[t,x,u] = unPackDecVar(z,pack);
% Time step for integration:
dt = (t(end)-t(1))/(length(t)-1);
dtGrad = gradInfo.dtGrad;
nTime = length(t);
nState = size(x,1);
nControl = size(u,1);
nDecVar = length(z);
% Compute the cost integral along the trajectory
if isempty(pathObj)
integralCost = 0;
% BUG FIX: this was zeros(nState+nControl,1), a column vector of the
% wrong length; adding it to the [1,nDecVar] boundary gradient would
% implicitly expand into a matrix (or error on older MATLAB). The
% gradient of a zero integral cost is a zero row over all decision
% variables.
integralCostGrad = zeros(1,nDecVar);
else
% Objective function integrand and gradients:
[obj, objGradRaw] = pathObj(t,x,u);
nInput = size(objGradRaw,1);
objGradRaw = reshape(objGradRaw,1,nInput,nTime);
objGrad = grad_reshapeContinuous(objGradRaw,gradInfo);
% integral objective function
unScaledIntegral = obj*weights;
integralCost = dt*unScaledIntegral;
% Gradient of integral objective function
% Two terms: the dependence of dt on (t0,tF), and the integrand's own
% dependence on the decision variables.
dtGradTerm = zeros(1,nDecVar);
dtGradTerm(1) = dtGrad(1)*unScaledIntegral;
dtGradTerm(2) = dtGrad(2)*unScaledIntegral;
objGrad = reshape(objGrad,nTime,nDecVar);
integralCostGrad = ...
dtGradTerm + ...
dt*sum(objGrad.*(weights*ones(1,nDecVar)),1);
end
% Compute the cost at the boundaries of the trajectory
if isempty(bndObj)
bndCost = 0;
bndCostGrad = zeros(1,nDecVar);
else
t0 = t(1);
tF = t(end);
x0 = x(:,1);
xF = x(:,end);
[bndCost, bndCostGradRaw] = bndObj(t0,x0,tF,xF);
bndCostGrad = grad_reshapeBoundary(bndCostGradRaw,gradInfo);
end
% Cost function
cost = bndCost + integralCost;
% Gradients
costGrad = bndCostGrad + integralCostGrad;
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function [c, ceq, cGrad, ceqGrad] = myCstGrad(z,pack,dynFun, pathCst, bndCst, defectCst, gradInfo)
%
% This function unpacks the decision variables, computes the defects along
% the trajectory, and then evaluates the user-defined constraint functions
% along with their analytic gradients.
%
% INPUTS:
% z = column vector of decision variables
% pack = details about how to convert decision variables into t,x, and u
% dynFun = user-defined dynamics function
% pathCst = user-defined constraints along the path
% bndCst = user-defined constraints at the boundaries
% defectCst = transcription-specific defect function (gradient-aware form)
% gradInfo = index maps from grad_computeInfo
%
% OUTPUTS:
% c = inequality constraints to be passed to fmincon
% ceq = equality constraints to be passed to fmincon
% cGrad = gradient of c, in fmincon's [nDecVar, nConstraint] convention
% ceqGrad = gradient of ceq, in the same convention
%
%Unpack the decision variables:
[t,x,u] = unPackDecVar(z,pack);
% Time step for integration:
dt = (t(end)-t(1))/(length(t)-1);
dtGrad = gradInfo.dtGrad;
% Gradient of the state with respect to decision variables
xGrad = gradInfo.xGrad;
%%%% Compute defects along the trajectory:
[f, fGradRaw] = dynFun(t,x,u);
% Remap the dynamics gradients to decision-variable space before forming
% the defect constraints and their gradients.
fGrad = grad_reshapeContinuous(fGradRaw,gradInfo);
[defects, defectsGrad] = defectCst(dt,x,f,...
dtGrad, xGrad, fGrad);
% Compute gradients of the user-defined constraints and then pack up:
[c, ceq, cGrad, ceqGrad] = grad_collectConstraints(t,x,u,...
defects, defectsGrad, pathCst, bndCst, gradInfo);
end
|
github
|
xuhuairuogu/OptimTraj-master
|
chebyshev.m
|
.m
|
OptimTraj-master/chebyshev.m
| 10,613 |
utf_8
|
d197906d586572ca06c189288281ded9
|
function soln = chebyshev(problem)
% soln = chebyshev(problem)
%
% This function transcribes a trajectory optimization problem Chebyshev
% orthogonal polynomials for basis functions. This is an orthogonal
% collocation method, where the entire trajectory is represented as a
% single polynomial. It is for problems where the solution can be
% gaurenteed to be smooth to the same degree as the order of the underlying
% polynomial (nColPts-1).
%
% The technique is described in detail in the paper:
%
% " A Chebyshev Technique for Solving Nonlinear Optimal Control Problems"
% ISSS Trans. Automatic Control, 1988
% by: Jacques Vlassenbroeck and Rene Van Dooren
%
% My implementation for computation of the differentiation matrix,
% quadrature rules, and interpolation are based on the following:
%
% "Barycentric Lagrange Interpolation"
% Siam Review, 2004
% Publisher: Society for Industrial and Applied Mathematics
% by: Jean-Paul Berrut and Lloyd N. Trefethen
%
% "Approximation Theory and Approximation Practice"
% Textbook by Lloyd N. Trefethen
%
% "Chebfun" Matlab toolbox
% Website: http://www.chebfun.org/
% by Lloyd N. Trefethen et al.
%
% For details on the input and output, see the help file for optimTraj.m
%
% Method specific parameters:
%
% problem.options.method = 'chebyshev'
% problem.options.chebyshev = struct with method parameters:
% .nColPts = number of collocation points
%
%To make code more readable
G = problem.guess;
B = problem.bounds;
F = problem.func;
Opt = problem.options;
nColPts = Opt.chebyshev.nColPts; %Number of grid points for transcription
% Print out some solver info if desired:
if Opt.verbose > 0
disp(' -> Transcription via Chebyshev orthogonal collocation');
fprintf(' nColPts = %d \n', nColPts);
end
% Compute the parameters for the ORTHogonal polynomial, in this case the
% Chebyshev polynomial roots, quadrature weights, interpolation weights,
% and the differentiation matrix.
try
[orth.xx, orth.ww, orth.vv] = chebpts(nColPts);
catch ME
% NOTE(review): ME is discarded, so whatever error chebpts raised is
% replaced by this missing-dependency message even if the cause was
% something else.
error('Missing dependency: chebfun (http://www.chebfun.org/) ');
end
orth.D = getDifferentiationMatrix(orth.xx,orth.vv);
% Interpolate the guess at the chebyshev-points for transcription:
guess.tSpan = G.time([1,end]);
guess.time = chebpts(nColPts,guess.tSpan)';
guess.state = interp1(G.time', G.state', guess.time')';
guess.control = interp1(G.time', G.control', guess.time')';
[zGuess, pack] = packDecVar(guess.time, guess.state, guess.control);
% Unpack all bounds:
% Interior time entries are placeholders; packDecVar keeps only t(1),t(end).
dummyMatrix = zeros(1,nColPts-2); %This just needs to be the right size
tLow = [B.initialTime.low, dummyMatrix, B.finalTime.low];
xLow = [B.initialState.low, B.state.low*ones(1,nColPts-2), B.finalState.low];
uLow = B.control.low*ones(1,nColPts);
zLow = packDecVar(tLow,xLow,uLow);
tUpp = [B.initialTime.upp, dummyMatrix, B.finalTime.upp];
xUpp = [B.initialState.upp, B.state.upp*ones(1,nColPts-2), B.finalState.upp];
uUpp = B.control.upp*ones(1,nColPts);
zUpp = packDecVar(tUpp,xUpp,uUpp);
%%%% Set up problem for fmincon:
P.objective = @(z)( ...
myObjective(z, pack, F.pathObj, F.bndObj, orth) );
P.nonlcon = @(z)( ...
myConstraint(z, pack, F.dynamics, F.pathCst, F.bndCst, orth) );
P.x0 = zGuess;
P.lb = zLow;
P.ub = zUpp;
P.Aineq = []; P.bineq = [];
P.Aeq = []; P.beq = [];
P.options = Opt.nlpOpt;
P.solver = 'fmincon';
%%%% Call fmincon to solve the non-linear program (NLP)
tic;
[zSoln, objVal,exitFlag,output] = fmincon(P);
[tSoln,xSoln,uSoln] = unPackDecVar(zSoln,pack,orth);
nlpTime = toc;
%%%% Store the results:
soln.grid.time = tSoln;
soln.grid.state = xSoln;
soln.grid.control = uSoln;
%%%% Rescale the points:
% Interpolation uses the barycentric formula on the Chebyshev points
% rescaled to the solved time domain.
dSoln = tSoln([1,end]); %Domain of the final solution
xxSoln = orthScale(orth,dSoln);
soln.interp.state = @(t)( barycentricInterpolate(t', xSoln',xxSoln,orth.vv)' );
soln.interp.control = @(t)( barycentricInterpolate(t', uSoln',xxSoln,orth.vv)' );
soln.info = output;
soln.info.nlpTime = nlpTime;
soln.info.exitFlag = exitFlag;
soln.info.objVal = objVal;
soln.problem = problem; % Return the fully detailed problem struct
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
%%%% SUB FUNCTIONS %%%%
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function [z,pack] = packDecVar(t,x,u)
%
% Collapse the time (t), state (x) and control (u) matrices into a single
% decision-variable vector for the NLP solver.
%
% INPUTS:
% t = [1, nTime] = time vector (grid points)
% x = [nState, nTime] = state vector at each grid point
% u = [nControl, nTime] = control vector at each grid point
%
% OUTPUTS:
% z = column vector of 2 + nTime*(nState+nControl) decision variables
% pack = details about how to convert z back into t,x, and u
% .nTime
% .nState
% .nControl
%
pack.nTime = length(t);
pack.nState = size(x,1);
pack.nControl = size(u,1);
% Layout: [t0; tF; state values (column-major); control values (column-major)]
z = [t(1); t(end); x(:); u(:)];
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function [t,x,u,w] = unPackDecVar(z,pack,orth)
%
% Inverse of packDecVar: expand the decision-variable vector back into the
% time (t), state (x), and control (u) matrices, and also return the
% quadrature weights for the current time domain.
%
% INPUTS:
% z = column vector of 2 + nTime*(nState+nControl) decision variables
% pack = details about how to convert z back into t,x, and u
% .nTime
% .nState
% .nControl
% orth = orthogonal-polynomial data (points, weights, etc.)
%
% OUTPUTS:
% t = [1, nTime] = time vector (grid points)
% x = [nState, nTime] = state vector at each grid point
% u = [nControl, nTime] = control vector at each grid point
% w = [1, nTime] = weights for clenshaw-curtis quadrature
%
nTime = pack.nTime;
nx = pack.nState*nTime;
nu = pack.nControl*nTime;
% Rescale the orthogonal points and weights to the time domain stored in
% the first two decision variables.
[t, w] = orthScale(orth,[z(1),z(2)]);
t = t';
x = reshape(z(2+(1:nx)), pack.nState, nTime);
u = reshape(z(2+nx+(1:nu)), pack.nControl, nTime);
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function cost = myObjective(z,pack,pathObj,bndObj,cheb)
% cost = myObjective(z,pack,pathObj,bndObj,cheb)
%
% Unpacks the decision variables and evaluates the total cost:
% (Clenshaw-Curtis quadrature of the path objective) + (boundary objective).
%
% INPUTS:
%   z = column vector of decision variables
%   pack = struct describing how to unpack z into t, x, and u
%   pathObj = handle to the integral (path) objective, or [] if none
%   bndObj = handle to the boundary-point objective, or [] if none
%   cheb = orthogonal-polynomial data (grid points, weights)
%
% OUTPUTS:
%   cost = scalar cost for this set of decision variables
%
[t,x,u,w] = unPackDecVar(z,pack,cheb);
% Integral (path) term, via Clenshaw-Curtis quadrature:
if isempty(pathObj)
    integralCost = 0;
else
    integralCost = dot(w, pathObj(t,x,u));
end
% Boundary term:
if isempty(bndObj)
    bndCost = 0;
else
    bndCost = bndObj(t(1), x(:,1), t(end), x(:,end));
end
cost = bndCost + integralCost;
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function [c, ceq] = myConstraint(z,pack,dynFun, pathCst, bndCst, orth)
% [c, ceq] = myConstraint(z,pack,dynFun, pathCst, bndCst, orth)
%
% Unpacks the decision variables, computes the dynamics defects at the
% collocation points, and evaluates the user-defined constraints.
%
% INPUTS:
%   z = column vector of decision variables
%   pack = struct describing how to unpack z into t, x, and u
%   dynFun = user-defined dynamics function
%   pathCst = user-defined constraints along the path (may be [])
%   bndCst = user-defined constraints at the boundaries (may be [])
%   orth = orthogonal-polynomial data (points, weights, diff. matrix)
%
% OUTPUTS:
%   c = inequality constraints to be passed to fmincon
%   ceq = equality constraints to be passed to fmincon
%
[t,x,u] = unPackDecVar(z,pack,orth);
%%%% Enforce the dynamics:
% Derivative of the polynomial state trajectory, computed analytically
% with the differentiation matrix scaled to the current time domain:
[~,~,D] = orthScale(orth, t([1,end]));
dxTraj = (D*(x'))';
% Derivative demanded by the system dynamics:
dxModel = dynFun(t,x,u);
% Defect = difference between the two derivatives (must vanish):
defects = dxTraj - dxModel;
%%%% Call user-defined constraints and pack up:
[c, ceq] = collectConstraints(t,x,u,defects, pathCst, bndCst);
end
function [x,w,D] = orthScale(orth,d)
% [x,w,D] = orthScale(orth,d)
%
% Maps canonical orthogonal-polynomial data from [-1,1] onto an
% arbitrary interval d = [low, upp] via an affine change of variables.
%
% INPUTS:
%   orth.xx = points on the canonical domain [-1,1]
%   orth.ww = quadrature weights on [-1,1]
%   orth.D  = differentiation matrix on [-1,1]
%   d = [low, upp] = new domain
%
% OUTPUTS:
%   x = points on the new domain d
%   w = quadrature weights on d (scale with the interval half-width)
%   D = differentiation matrix on d (scale inversely with half-width)
%
halfWidth = 0.5*(d(2) - d(1));
center = 0.5*(d(1) + d(2));
x = center + halfWidth*orth.xx;
if nargout > 1
    w = orth.ww*halfWidth;
end
if nargout > 2
    D = orth.D/halfWidth;
end
end
function D = getDifferentiationMatrix(x,v,d)
% D = getDifferentiationMatrix(x,v,d)
%
% Builds the barycentric differentiation matrix for the interpolation
% points x with barycentric weights v, optionally rescaled to domain d.
%
% INPUTS:
%   x = [n,1] = vector of roots of the orthogonal polynomial of interest
%   v = [n,1] = vector of barycentric weights corresponding to each root
%   d = [1,2] = domain of the polynomial (optional, default [-1,1])
%
% OUTPUTS:
%   D = [n,n] = differentiation matrix such that dy/dx = D*y @ points in x
%
% NOTES:
%   Reference:
%       1) ChebFun  (http://www.chebfun.org/)
%       2) "Barycentric Lagrange Interpolation"   SIAM Review 2004
%           Jean-Paul Berrut and Lloyd N. Trefethen
%
%   Inputs: x and v are typically produced by a call to any of:
%       chebpts, trigpts, legpts, jacpts, lagpts, hermpts, lobpts, radaupts
%
if nargin == 2
    d = [-1,1];
end
n = length(x);
% Off-diagonal entries of the barycentric formula, built all at once:
%   D(i,j) = (v(j)/v(i)) / (x(i) - x(j))
Xdiff = bsxfun(@minus, x(:), x(:)');
Wratio = bsxfun(@rdivide, v(:)', v(:));
D = Wratio./Xdiff;
% Diagonal via the "negative sum trick": each row must sum to zero so
% that the derivative of a constant function is exactly zero.
D(1:n+1:end) = 0;
D(1:n+1:end) = -sum(D,2);
% Rescale from the canonical domain [-1,1] to d:
D = 2*D/(d(2)-d(1));
end
function y = barycentricInterpolate(x,yk,xk,vk)
% y = barycentricInterpolate(x,yk,xk,vk)
%
% Evaluates the interpolating polynomial through (xk, yk) at the query
% points x, using barycentric interpolation — one call to bary() per
% output dimension.
%
% INPUTS:
%   x = [nTime, 1] = vector of points to evaluate the polynomial at
%   yk = [nGrid, nOut] = value of the function to be interpolated at
%       each grid point
%   xk = [nGrid, 1] = roots of the orthogonal polynomial
%   vk = [nGrid, 1] = barycentric interpolation weights
%
% OUTPUTS:
%   y = [nTime, nOut] = value of the function at the desired points
%
% NOTES:
%   xk and yk should be produced by chebfun (help chebpts)
%
nOut = size(yk,2);
y = zeros(length(x), nOut);
for col = 1:nOut
    y(:,col) = bary(x, yk(:,col), xk, vk);
end
end
|
github
|
xuhuairuogu/OptimTraj-master
|
hermiteSimpson.m
|
.m
|
OptimTraj-master/hermiteSimpson.m
| 11,560 |
utf_8
|
690c510dbe95d1ee31b9a2c2fcda28f8
|
function soln = hermiteSimpson(problem)
% soln = hermiteSimpson(problem)
%
% This function transcribes a trajectory optimization problem using the
% Hermite-Simpson (Separated) method for enforcing the dynamics. It can be
% found in chapter four of Betts' book:
%
%   John T. Betts, 2001
%   Practical Methods for Optimal Control Using Nonlinear Programming
%
% For details on the input and output, see the help file for optimTraj.m
%
% Method specific parameters:
%
%   problem.options.method = 'hermiteSimpson'
%   problem.options.hermiteSimpson = struct with method parameters:
%       .nSegment = number of trajectory segments
%
% This transcription method is compatible with analytic gradients. To
% enable this option, set:
%   problem.nlpOpt.GradObj = 'on'
%   problem.nlpOpt.GradConstr = 'on'
%
% Then the user-provided functions must provide gradients. The modified
% function templates are as follows:
%
%         [dx, dxGrad] = dynamics(t,x,u)
%                 dx = [nState, nTime] = dx/dt = derivative of state wrt time
%                 dxGrad = [nState, 1+nx+nu, nTime]
%
%         [dObj, dObjGrad] = pathObj(t,x,u)
%                 dObj = [1, nTime] = integrand from the cost function
%                 dObjGrad = [1+nx+nu, nTime]
%
%         [c, ceq, cGrad, ceqGrad] = pathCst(t,x,u)
%                 c = [nCst, nTime] = column vector of inequality constraints  ( c <= 0 )
%                 ceq = [nCstEq, nTime] = column vector of equality constraints ( c == 0 )
%                 cGrad = [nCst, 1+nx+nu, nTime];
%                 ceqGrad = [nCstEq, 1+nx+nu, nTime];
%
%         [obj, objGrad] = bndObj(t0,x0,tF,xF)
%                 obj = scalar = objective function for boundary points
%                 objGrad = [1+nx+1+nx, 1]
%
%         [c, ceq, cGrad, ceqGrad] = bndCst(t0,x0,tF,xF)
%                 c = [nCst,1] = column vector of inequality constraints  ( c <= 0 )
%                 ceq = [nCstEq,1] = column vector of equality constraints ( c == 0 )
%                 cGrad = [nCst, 1+nx+1+nx];
%                 ceqGrad = [nCstEq, 1+nx+1+nx];
%
% NOTES:
%
%   If analytic gradients are used, then the sparsity pattern is returned
%   in the struct: soln.info.sparsityPattern. View it using spy().
%
% Each segment needs an additional data point in the middle, thus:
nGrid = 2*problem.options.hermiteSimpson.nSegment+1;
% Print out some solver info if desired:
if problem.options.verbose > 0
    fprintf('  -> Transcription via Hermite-Simpson method, nSegment = %d\n',...
        problem.options.hermiteSimpson.nSegment);
end
%%%% Method-specific details to pass along to solver:
% Composite Simpson quadrature weights for integrating the cost function:
% interior midpoints get 4/3, shared knots 2/3, end-points 1/3.
problem.func.weights = (2/3)*ones(nGrid,1);
problem.func.weights(2:2:end) = 4/3;
problem.func.weights([1,end]) = 1/3;
% Hermite-Simpson calculation of defects:
problem.func.defectCst = @computeDefects;
%%%% The key line - solve the problem by direct collocation:
soln = directCollocation(problem);
% Use method-consistent interpolation: cubic Hermite for the state,
% piecewise quadratic for the control.
tSoln = soln.grid.time;
xSoln = soln.grid.state;
uSoln = soln.grid.control;
fSoln = problem.func.dynamics(tSoln,xSoln,uSoln);
soln.interp.state = @(t)( pwPoly3(tSoln,xSoln,fSoln,t) );
soln.interp.control = @(t)(pwPoly2(tSoln,uSoln,t));
% Interpolation for checking collocation constraint along trajectory:
%  collocation constraint = (dynamics) - (derivative of state trajectory)
soln.interp.collCst = @(t)( ...
    problem.func.dynamics(t, soln.interp.state(t), soln.interp.control(t))...
    - pwPoly2(tSoln,fSoln,t) );
% Use multi-segment Simpson quadrature to estimate the absolute local error
% along the trajectory (integral of |collocation error| per segment).
absColErr = @(t)(abs(soln.interp.collCst(t)));
nSegment = problem.options.hermiteSimpson.nSegment;
nState = size(xSoln,1);
quadTol = 1e-12;   %Compute quadrature to this tolerance
soln.info.error = zeros(nState,nSegment);
for i=1:nSegment
    % Segment i spans grid columns (2i-1) to (2i+1); integrate over it:
    idx = 2*i + [-1,1];
    soln.info.error(:,i) = rombergQuadrature(absColErr,tSoln([idx(1), idx(2)]),quadTol);
end
soln.info.maxError = max(max(soln.info.error));
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
%%%% SUB FUNCTIONS %%%%
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function [defects, defectsGrad] = computeDefects(dt,x,f,dtGrad,xGrad,fGrad)
%
% Computes the Hermite-Simpson defect constraints that enforce the
% continuous dynamics of the system along the trajectory.
%
% INPUTS:
%   dt = time step (scalar)
%   x = [nState, nTime] = state at each grid-point along the trajectory
%   f = [nState, nTime] = dynamics of the state along the trajectory
%   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
%   dtGrad = [2,1] = gradient of time step with respect to [t0; tF]
%   xGrad = [nState,nTime,nDecVar] = gradient of trajectory wrt dec vars
%   fGrad = [nState,nTime,nDecVar] = gradient of dynamics wrt dec vars
%
% OUTPUTS:
%   defects = [nState, nTime-1] = error in dynamics along the trajectory
%   defectsGrad = [nState, nTime-1, nDecVars] = gradient of defects
%
nTime = size(x,2);
nState = size(x,1);
% Index sets for each segment: [start knot, midpoint, end knot]
idxA = 1:2:(nTime-1);
idxB = idxA + 1;
idxC = idxA + 2;
xA = x(:,idxA);    xB = x(:,idxB);    xC = x(:,idxC);
fA = f(:,idxA);    fB = f(:,idxB);    fC = f(:,idxC);
% Hermite condition: the midpoint state must interpolate the cubic
cstHermite = xB - (xC+xA)/2 - dt*(fA-fC)/4;
% Simpson condition: the state change matches the integral of the dynamics
cstSimpson = xC - xA - dt*(fC + 4*fB + fA)/3;
% Interleave the two constraint families so the Jacobian stays banded:
defects = zeros(nState,nTime-1);
defects(:,idxA) = cstSimpson;
defects(:,idxB) = cstHermite;
%%%% Gradient Calculations:
if nargout == 2
    xGA = xGrad(:,idxA,:);   xGB = xGrad(:,idxB,:);   xGC = xGrad(:,idxC,:);
    fGA = fGrad(:,idxA,:);   fGB = fGrad(:,idxB,:);   fGC = fGrad(:,idxC,:);
    % Hermite condition: include the dependence of dt on [t0; tF]
    termH = zeros(size(xGB));
    termH(:,:,1) = -dtGrad(1)*(fA-fC)/4;
    termH(:,:,2) = -dtGrad(2)*(fA-fC)/4;
    gradHermite = xGB - (xGC+xGA)/2 + termH + ...
        - dt*(fGA-fGC)/4;
    % Simpson condition: include the dependence of dt on [t0; tF]
    termS = zeros(size(xGC));
    termS(:,:,1) = -dtGrad(1)*(fC + 4*fB + fA)/3;
    termS(:,:,2) = -dtGrad(2)*(fC + 4*fB + fA)/3;
    gradSimpson = xGC - xGA + termS + ...
        - dt*(fGC + 4*fGB + fGA)/3;
    % Interleave the gradients to match the defect ordering:
    defectsGrad = zeros(nState,nTime-1,size(gradHermite,3));
    defectsGrad(:,idxA,:) = gradSimpson;
    defectsGrad(:,idxB,:) = gradHermite;
end
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Functions for interpolation of the control solution
%
function x = pwPoly2(tGrid,xGrid,t)
% x = pwPoly2(tGrid,xGrid,t)
%
% Piece-wise quadratic interpolation of a data set, given the function
% value at the edges and midpoint of each interval of interest.
%
% INPUTS:
%   tGrid = [1, 2*n-1] = time grid, knot idx = 1:2:end
%   xGrid = [m, 2*n-1] = function at each grid point in tGrid
%   t = [1, k] = vector of query times (must be contained within tGrid)
%
% OUTPUTS:
%   x = [m, k] = function value at each query time
%
% NOTES:
%   If t is out of bounds, then all corresponding values for x are
%   replaced with NaN.
%
nGrid = length(tGrid);
if mod(nGrid-1,2)~=0 || nGrid < 3
    error('The number of grid-points must be odd and at least 3');
end
% Sizes:
nSeg = floor((nGrid-1)/2);   % number of quadratic segments
x = zeros(size(xGrid,1), length(t));
% Assign each query time to a segment; bins 1 and nSeg+2 are out of bounds.
[~, bin] = histc(t, [-inf, tGrid(1:2:end), inf]);
% Interpolate each populated segment:
for seg = 1:nSeg
    inSeg = (bin == seg+1);
    if sum(inSeg) > 0
        cols = 2*(seg-1) + [1,2,3];   % [low, mid, upp] columns
        x(:,inSeg) = quadInterp(tGrid(cols), xGrid(:,cols), t(inSeg));
    end
end
% Replace any out-of-bounds queries with NaN
x(:, bin==1 | bin==(nSeg+2)) = nan;
% A query exactly at the final grid point falls in the out-of-bounds bin;
% patch it with the last grid value:
if sum(t==tGrid(end)) > 0
    x(:, t==tGrid(end)) = xGrid(:,end);
end
end
function x = quadInterp(tGrid,xGrid,t)
%
% Evaluates the quadratic interpolant over a single interval.
%
% INPUTS:
%   tGrid = [1, 3] = time grid (lower edge, midpoint, upper edge)
%   xGrid = [m, 3] = function values at the three grid times
%   t = [1, p] = query times, spanned by tGrid
%
% OUTPUTS:
%   x = [m, p] = function at query times
%
% Map the query points onto the canonical domain [-1,1]:
s = 2*(t-tGrid(1))/(tGrid(3)-tGrid(1)) - 1;
% Quadratic coefficients (x = a*s^2 + b*s + c on [-1,1]):
a = 0.5*(xGrid(:,3) + xGrid(:,1)) - xGrid(:,2);
b = 0.5*(xGrid(:,3)-xGrid(:,1));
c = xGrid(:,2);
% Evaluate all output dimensions at once via outer products:
x = a*s.^2 + b*s + c*ones(1,length(s));
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Functions for interpolation of the state solution
%
function x = pwPoly3(tGrid,xGrid,fGrid,t)
% x = pwPoly3(tGrid,xGrid,fGrid,t)
%
% Piece-wise cubic interpolation of a data set, given the function value
% at the lower knot of each segment and the derivative at the lower
% knot, midpoint, and upper knot.
%
% INPUTS:
%   tGrid = [1, 2*n-1] = time grid, knot idx = 1:2:end
%   xGrid = [m, 2*n-1] = function at each grid point in time
%   fGrid = [m, 2*n-1] = derivative at each grid point in time
%   t = [1, k] = vector of query times (must be contained within tGrid)
%
% OUTPUTS:
%   x = [m, k] = function value at each query time
%
% NOTES:
%   If t is out of bounds, then all corresponding values for x are
%   replaced with NaN.
%
nGrid = length(tGrid);
if mod(nGrid-1,2)~=0 || nGrid < 3
    error('The number of grid-points must be odd and at least 3');
end
% Sizes:
nSeg = floor((nGrid-1)/2);   % number of cubic segments
x = zeros(size(xGrid,1), length(t));
% Assign each query time to a segment; bins 1 and nSeg+2 are out of bounds.
[~, bin] = histc(t, [-inf, tGrid(1:2:end), inf]);
% Interpolate each populated segment:
for seg = 1:nSeg
    inSeg = (bin == seg+1);
    if sum(inSeg) > 0
        iLow = 2*(seg-1) + 1;          % knot at the start of this segment
        iMid = iLow + 1;
        iUpp = iLow + 2;
        x(:,inSeg) = cubicInterp( ...
            tGrid(iUpp)-tGrid(iLow), ...               % segment width
            xGrid(:,iLow), ...                         % state at lower knot
            fGrid(:,iLow), fGrid(:,iMid), fGrid(:,iUpp), ...
            t(inSeg) - tGrid(iLow));                   % offset into segment
    end
end
% Replace any out-of-bounds queries with NaN
x(:, bin==1 | bin==(nSeg+2)) = nan;
% A query exactly at the final grid point falls in the out-of-bounds bin;
% patch it with the last grid value:
if sum(t==tGrid(end)) > 0
    x(:, t==tGrid(end)) = xGrid(:,end);
end
end
function x = cubicInterp(h,xLow, fLow, fMid, fUpp,del)
%
% Evaluates the cubic interpolant over a single interval, fitted to the
% state at the lower edge and the derivative at the lower edge,
% midpoint, and upper edge.
%
% INPUTS:
%   h = time step (tUpp-tLow)
%   xLow = function value at tLow
%   fLow = derivative at tLow
%   fMid = derivative at tMid
%   fUpp = derivative at tUpp
%   del = query points on domain [0, h]
%
% OUTPUTS:
%   x = [m, p] = function at query times
%
%%% Expand everything to matching [nx, nt] matrices:
expandRow = ones(1,length(del));
expandCol = ones(length(xLow),1);
X0 = xLow*expandRow;
F0 = fLow*expandRow;
F1 = fMid*expandRow;
F2 = fUpp*expandRow;
T = expandCol*del;
% Cubic coefficients: x(tau) = d + c*tau + b*tau^2 + a*tau^3
a = (2.*(F0 - 2.*F1 + F2))./(3.*h.^2);
b = -(3.*F0 - 4.*F1 + F2)./(2.*h);
c = F0;
d = X0;
% Horner evaluation:
x = d + T.*(c + T.*(b + T.*a));
end
|
github
|
xuhuairuogu/OptimTraj-master
|
trapezoid.m
|
.m
|
OptimTraj-master/trapezoid.m
| 7,774 |
utf_8
|
d24e512cdcb2f48f403da6b41b881bcb
|
function soln = trapezoid(problem)
% soln = trapezoid(problem)
%
% This function transcribes a trajectory optimization problem using the
% trapezoid method for enforcing the dynamics. It can be found in chapter
% four of Betts' book:
%
%   John T. Betts, 2001
%   Practical Methods for Optimal Control Using Nonlinear Programming
%
% For details on the input and output, see the help file for optimTraj.m
%
% Method specific parameters:
%
%   problem.options.method = 'trapezoid'
%   problem.options.trapezoid = struct with method parameters:
%       .nGrid = number of grid points to use for transcription
%
%
% This transcription method is compatible with analytic gradients. To
% enable this option, set:
%   problem.nlpOpt.GradObj = 'on'
%   problem.nlpOpt.GradConstr = 'on'
%
% Then the user-provided functions must provide gradients. The modified
% function templates are as follows:
%
%         [dx, dxGrad] = dynamics(t,x,u)
%                 dx = [nState, nTime] = dx/dt = derivative of state wrt time
%                 dxGrad = [nState, 1+nx+nu, nTime]
%
%         [dObj, dObjGrad] = pathObj(t,x,u)
%                 dObj = [1, nTime] = integrand from the cost function
%                 dObjGrad = [1+nx+nu, nTime]
%
%         [c, ceq, cGrad, ceqGrad] = pathCst(t,x,u)
%                 c = [nCst, nTime] = column vector of inequality constraints  ( c <= 0 )
%                 ceq = [nCstEq, nTime] = column vector of equality constraints ( c == 0 )
%                 cGrad = [nCst, 1+nx+nu, nTime];
%                 ceqGrad = [nCstEq, 1+nx+nu, nTime];
%
%         [obj, objGrad] = bndObj(t0,x0,tF,xF)
%                 obj = scalar = objective function for boundary points
%                 objGrad = [1+nx+1+nx, 1]
%
%         [c, ceq, cGrad, ceqGrad] = bndCst(t0,x0,tF,xF)
%                 c = [nCst,1] = column vector of inequality constraints  ( c <= 0 )
%                 ceq = [nCstEq,1] = column vector of equality constraints ( c == 0 )
%                 cGrad = [nCst, 1+nx+1+nx];
%                 ceqGrad = [nCstEq, 1+nx+1+nx];
%
% NOTES:
%
%   If analytic gradients are used, then the sparsity pattern is returned
%   in the struct: soln.info.sparsityPattern. View it using spy().
%
% Print out some solver info if desired:
nGrid = problem.options.trapezoid.nGrid;
if problem.options.verbose > 0
    fprintf('  -> Transcription via trapezoid method, nGrid = %d\n',nGrid);
end
%%%% Method-specific details to pass along to solver:
% Quadrature weights for trapezoid integration (end-points count half):
problem.func.weights = ones(nGrid,1);
problem.func.weights([1,end]) = 0.5;
% Trapezoid integration calculation of defects:
problem.func.defectCst = @computeDefects;
%%%% The key line - solve the problem by direct collocation:
soln = directCollocation(problem);
% Use piecewise linear interpolation for the control
tSoln = soln.grid.time;
xSoln = soln.grid.state;
uSoln = soln.grid.control;
soln.interp.control = @(t)( interp1(tSoln',uSoln',t')' );
% Use piecewise quadratic interpolation for the state:
fSoln = problem.func.dynamics(tSoln,xSoln,uSoln);
soln.interp.state = @(t)( bSpline2(tSoln,xSoln,fSoln,t) );
% Interpolation for checking collocation constraint along trajectory:
%  collocation constraint = (dynamics) - (derivative of state trajectory)
soln.interp.collCst = @(t)( ...
    problem.func.dynamics(t, soln.interp.state(t), soln.interp.control(t))...
    - interp1(tSoln',fSoln',t')' );
% Use multi-segment Simpson quadrature to estimate the absolute local error
% along the trajectory (integral of |collocation error| per segment).
absColErr = @(t)(abs(soln.interp.collCst(t)));
nSegment = nGrid-1;
nState = size(xSoln,1);
quadTol = 1e-12;   %Compute quadrature to this tolerance
soln.info.error = zeros(nState,nSegment);
for i=1:nSegment
    soln.info.error(:,i) = rombergQuadrature(absColErr,tSoln([i,i+1]),quadTol);
end
soln.info.maxError = max(max(soln.info.error));
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
%%%% SUB FUNCTIONS %%%%
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function [defects, defectsGrad] = computeDefects(dt,x,f,dtGrad,xGrad,fGrad)
%
% Computes the trapezoid-rule defect constraints that enforce the
% continuous dynamics of the system along the trajectory.
%
% INPUTS:
%   dt = time step (scalar)
%   x = [nState, nTime] = state at each grid-point along the trajectory
%   f = [nState, nTime] = dynamics of the state along the trajectory
%   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
%   dtGrad = [2,1] = gradient of time step with respect to [t0; tF]
%   xGrad = [nState,nTime,nDecVar] = gradient of trajectory wrt dec vars
%   fGrad = [nState,nTime,nDecVar] = gradient of dynamics wrt dec vars
%
% OUTPUTS:
%   defects = [nState, nTime-1] = error in dynamics along the trajectory
%   defectsGrad = [nState, nTime-1, nDecVars] = gradient of defects
%
nTime = size(x,2);
iA = 1:(nTime-1);   % lower edge of each interval
iB = 2:nTime;       % upper edge of each interval
fA = f(:,iA);
fB = f(:,iB);
% Trapezoid rule:  x(k+1) - x(k)  =  (dt/2) * ( f(k) + f(k+1) )
defects = x(:,iB) - x(:,iA) - 0.5*dt*(fA+fB);
%%%% Gradient Calculations:
if nargout == 2
    % Contribution from the dependence of dt on [t0; tF]:
    dtTerm = zeros(size(xGrad(:,iB,:)));
    dtTerm(:,:,1) = -0.5*dtGrad(1)*(fA+fB);
    dtTerm(:,:,2) = -0.5*dtGrad(2)*(fA+fB);
    % Chain rule through x and f:
    defectsGrad = xGrad(:,iB,:) - xGrad(:,iA,:) + dtTerm + ...
        - 0.5*dt*(fGrad(:,iA,:)+fGrad(:,iB,:));
end
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function x = bSpline2(tGrid,xGrid,fGrid,t)
% x = bSpline2(tGrid,xGrid,fGrid,t)
%
% Piece-wise quadratic interpolation of a data set. The quadratic
% interpolant is constructed such that the slope matches on both sides
% of each interval, and the function value matches on the lower side of
% the interval.
%
% INPUTS:
%   tGrid = [1, n] = time grid (knot points)
%   xGrid = [m, n] = function at each grid point in tGrid
%   fGrid = [m, n] = derivative at each grid point in tGrid
%   t = [1, k] = vector of query times (must be contained within tGrid)
%
% OUTPUTS:
%   x = [m, k] = function value at each query time
%
% NOTES:
%   If t is out of bounds, then all corresponding values for x are
%   replaced with NaN.
%
[nDim, nKnot] = size(xGrid);
x = zeros(nDim, length(t));
% Assign each query to a knot interval; bins 0 and nKnot+1 (after the
% shift) are out of bounds.
[~, bin] = histc(t, [-inf, tGrid, inf]);
bin = bin - 1;
% Interpolate each populated segment:
for seg = 1:(nKnot-1)
    inSeg = (seg == bin);
    if sum(inSeg) > 0
        x(:,inSeg) = bSpline2Core( ...
            tGrid(seg+1)-tGrid(seg), ...     % interval width
            t(inSeg) - tGrid(seg), ...       % offset into the interval
            xGrid(:,seg), fGrid(:,seg), fGrid(:,seg+1));
    end
end
% Replace any out-of-bounds queries with NaN
x(:, bin==0 | bin==(nKnot+1)) = nan;
% A query exactly at the final grid point falls in the out-of-bounds bin;
% patch it with the last grid value:
if sum(t==tGrid(end)) > 0
    x(:, t==tGrid(end)) = xGrid(:,end);
end
end
function x = bSpline2Core(h,delta,xLow,fLow,fUpp)
%
% Evaluates the quadratic interpolant over a single interval, fitted to
% x(0) = xLow, x'(0) = fLow, and x'(h) = fUpp.
%
% INPUTS:
%   h = width of the interval
%   delta = query offsets into the interval, on [0, h]
%   xLow = function value at the lower bound
%   fLow = derivative at the lower bound
%   fUpp = derivative at the upper bound
%
% OUTPUTS:
%   x = [m, p] = function at query times
%
% Expand everything to matching [m, p] matrices:
expandRow = ones(size(delta));
expandCol = ones(size(xLow));
T = expandCol*delta;
X0 = xLow*expandRow;
F0 = fLow*expandRow;
F1 = fUpp*expandRow;
% Half the curvature coefficient: x(T) = X0 + F0*T + slope2*T^2
slope2 = (0.5/h)*(F1-F0);
x = T.*(T.*slope2 + F0) + X0;
end
|
github
|
xuhuairuogu/OptimTraj-master
|
rungeKutta.m
|
.m
|
OptimTraj-master/rungeKutta.m
| 39,686 |
utf_8
|
23640bdd822c19de616e744a6202c456
|
function soln = rungeKutta(problem)
% soln = rungeKutta(problem)
%
% This function transcribes a trajectory optimization problem using
% multiple shooting, with 4th-order Runge-Kutta integration.
%
% See Betts' book for details on the method
%
% For details on the input and output, see the help file for optimTraj.m
%
% Method specific parameters:
%
%   problem.options.method = 'rungeKutta'
%   problem.options.rungeKutta = struct with method parameters:
%       .nSegment = number of trajectory segments
%       .nSubStep = number of sub-steps to use in each segment
%       .adaptiveDerivativeCheck = 'off' by default. Set to 'on' to enable
%           numerical checks on the analytic gradients, computed using the
%           derivest package, rather than fmincon's internal checks.
%           Derivest is slower, but more accurate than fmincon. Derivest
%           can be downloaded from the Mathworks File Exchange, file id of
%           13490 - Adaptive Robust Numerical Differentiation, John D'Errico
%
%
% NOTES:
%
%   Code for computing analytic gradients of the Runge-Kutta method was
%   contributed by Will Wehner.
%
%   If analytic gradients are used, then the sparsity pattern is returned
%   in the struct: soln.info.sparsityPattern. View it using spy().
%
%
%To make code more readable
G = problem.guess;
B = problem.bounds;
F = problem.func;
Opt = problem.options;
% Figure out grid size. The control is sampled twice per sub-step (edges
% and midpoint, as needed by 4th-order Runge-Kutta); the state is only a
% decision variable at segment boundaries (multiple shooting).
nSegment = Opt.rungeKutta.nSegment;
nSubStep = Opt.rungeKutta.nSubStep;
nGridControl = 2*nSegment*nSubStep + 1;
nGridState = nSegment + 1;
% Print out some solver info if desired:
if Opt.verbose > 0
    fprintf('  -> Transcription via 4th-order Runge-Kutta method \n');
    fprintf('        nSegments = %d \n', nSegment);
    fprintf('        nSubSteps = %d \n', nSubStep);
end
% Interpolate the guess at the transcription grid points for initial guess:
guess.tSpan = G.time([1,end]);
guess.tState = linspace(guess.tSpan(1), guess.tSpan(2), nGridState);
guess.tControl = linspace(guess.tSpan(1), guess.tSpan(2), nGridControl);
guess.state = interp1(G.time', G.state', guess.tState')';
guess.control = interp1(G.time', G.control', guess.tControl')';
[zGuess, pack] = packDecVar(guess.tSpan, guess.state, guess.control);
% Unpack all bounds (interior grid points use the generic state bounds):
tLow = [B.initialTime.low, B.finalTime.low];
xLow = [B.initialState.low, B.state.low*ones(1,nGridState-2), B.finalState.low];
uLow = B.control.low*ones(1,nGridControl);
zLow = packDecVar(tLow,xLow,uLow);
tUpp = [B.initialTime.upp, B.finalTime.upp];
xUpp = [B.initialState.upp, B.state.upp*ones(1,nGridState-2), B.finalState.upp];
uUpp = B.control.upp*ones(1,nGridControl);
zUpp = packDecVar(tUpp,xUpp,uUpp);
%%%% Set up problem for fmincon:
flagGradObj = strcmp(Opt.nlpOpt.GradObj,'on');
flagGradCst = strcmp(Opt.nlpOpt.GradConstr,'on');
if flagGradObj || flagGradCst
    gradInfo = grad_computeInfo(pack);
end
if flagGradObj
    P.objective = @(z)( ...
        myObjGrad(z, pack, F.dynamics, F.pathObj, F.bndObj, gradInfo) );   %Analytic gradients
    % Evaluate once at the guess to capture the gradient sparsity pattern:
    [~, objGradInit] = P.objective(zGuess);
    sparsityPattern.objective = (objGradInit~=0)';
else
    P.objective = @(z)( ...
        myObjective(z, pack, F.dynamics, F.pathObj, F.bndObj) );   %Numerical gradients
end
if flagGradCst
    P.nonlcon = @(z)( ...
        myCstGrad(z, pack, F.dynamics, F.pathObj, F.pathCst, F.bndCst, gradInfo) ); %Analytic gradients
    % Evaluate once at the guess to capture the Jacobian sparsity patterns:
    [~,~,cstIneqInit,cstEqInit] = P.nonlcon(zGuess);
    sparsityPattern.equalityConstraint = (cstEqInit~=0)';
    sparsityPattern.inequalityConstraint = (cstIneqInit~=0)';
else
    P.nonlcon = @(z)( ...
        myConstraint(z, pack, F.dynamics, F.pathObj, F.pathCst, F.bndCst) ); %Numerical gradients
end
% Check analytic gradients with DERIVEST package
if strcmp(Opt.rungeKutta.adaptiveDerivativeCheck,'on')
    if exist('jacobianest','file')
        runGradientCheck(zGuess, pack,F.dynamics, F.pathObj, F.bndObj, F.pathCst, F.bndCst, gradInfo);
        Opt.nlpOpt.DerivativeCheck = [];  %Disable built-in derivative check
    else
        Opt.rungeKutta.adaptiveDerivativeCheck = 'cannot find jacobianest.m';
        disp('Warning: the derivest package is not on search path.');
        disp(' --> Using fmincon''s built-in derivative checks.');
    end
end
% Build the standard fmincon problem struct
P.x0 = zGuess;
P.lb = zLow;
P.ub = zUpp;
P.Aineq = []; P.bineq = [];
P.Aeq = []; P.beq = [];
P.solver = 'fmincon';
P.options = Opt.nlpOpt;
%%%% Call fmincon to solve the non-linear program (NLP)
tic;
[zSoln, objVal,exitFlag,output] = fmincon(P);
[tSpan,~,uSoln] = unPackDecVar(zSoln,pack);
nlpTime = toc;
%%%% Store the results:
% Re-simulate at the solution to recover the full sub-step grid:
[tGrid,xGrid,uGrid] = simulateSystem(zSoln, pack, F.dynamics, F.pathObj);
soln.grid.time = tGrid;
soln.grid.state = xGrid;
soln.grid.control = uGrid;
% Shape-preserving piecewise-cubic ('pchip') interpolation for the control:
tSoln = linspace(tSpan(1),tSpan(2),nGridControl);
soln.interp.control = @(t)( interp1(tSoln', uSoln', t','pchip')' );
% Cubic Hermite spline representation of the state over each substep:
dxGrid = F.dynamics(tGrid,xGrid,uGrid);
xSpline = pwch(tGrid, xGrid, dxGrid);
soln.interp.state = @(t)( ppval(xSpline,t) );
% General information about the optimization run
soln.info = output;
soln.info.nlpTime = nlpTime;
soln.info.exitFlag = exitFlag;
soln.info.objVal = objVal;
if flagGradCst || flagGradObj
    soln.info.sparsityPattern = sparsityPattern;
end
soln.problem = problem;   % Return the fully detailed problem struct
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
%%%% SUB FUNCTIONS %%%%
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function [decVars,pack] = packDecVar(tSpan,state,control)
%
% This function collapses the time (t), state (x)
% and control (u) matricies into a single vector
%
% INPUTS:
%   tSpan = [1, 2] = time bounds
%   state = [nState, nGridState] = state vector at each grid point
%   control = [nControl, nGridControl] = control vector at each grid point
%
% OUTPUTS:
%   decVars = column vector of 2 + nState*nGridState + nControl*nGridControl) decision variables
%   pack = details about how to convert z back into t,x, and u
%       .nState
%       .nGridState
%       .nControl
%       .nGridControl
%       .nSegment, .nSubStep = grid geometry (derived from the sizes)
%       .indt, .indx, .indu = index maps into decVars (see below)
%
% NOTES:
%   nGridControl = 2*nSegment*nSubStep + 1;
%   nGridState = nSegment + 1;
%
%   Layout: decVars interleaves the state and control by segment —
%   [t0; tF; x(:,1); u's for segment 1; x(:,2); u's for segment 2; ...]
%   — so that the constraint Jacobian has a banded structure.
%
[nState, nGridState] = size(state);
[nControl, nGridControl] = size(control);
nSegment = nGridState - 1;
nSubStep = (nGridControl - 1)/(2*nSegment);
xCol = reshape(state, nState*nGridState, 1);
uCol = reshape(control, nControl*nGridControl, 1);
% Linear index over the full decision vector:
indz = 1:numel(control)+numel(state)+numel(tSpan);
% index of time in decVar
indt = 1:2;
% The z index of the first element of each state over time. Consecutive
% state columns are separated by one state column plus the 2*nSubStep
% control samples that belong to that segment:
indtemp = 2 + (1 : (nState + (2*nSubStep)*nControl ) : numel(control)+numel(state));
% remaining state elements at each time (fill down each state column)
indx = repmat(indtemp,nState,1) + cumsum(ones(nState,nGridState),1) - 1;
% index of control in decVar: everything that is not time or state
indu = indz;
indu([indt(:);indx(:)])=[];
indu = reshape(indu,nControl,nGridControl);
% pack up decVars
decVars = zeros(numel(indz),1);
decVars(indt(:),1) = tSpan;
decVars(indx(:),1) = xCol;
decVars(indu(:),1) = uCol;
% pack structure (index maps are reused by unPackDecVar)
pack.nState = nState;
pack.nGridState = nGridState;
pack.nControl = nControl;
pack.nGridControl = nGridControl;
pack.nSegment = nGridState - 1;
pack.nSubStep = (nGridControl-1)/(2*pack.nSegment);
pack.indt = indt;
pack.indx = indx;
pack.indu = indu;
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function [tSpan, state, control] = unPackDecVar(decVars,pack)
%
% Expands the decision-variable vector back into the time span, state
% trajectory, and control trajectory, using the index maps that were
% built by packDecVar.
%
% INPUTS:
%   decVars = column vector of 2 + nState*nGridState + nControl*nGridControl) decision variables
%   pack = details about how to convert decVars back into t, x, and u:
%       .nState
%       .nGridState
%       .nControl
%       .nGridControl
%       .indx, .indu = index maps into decVars
%
% OUTPUTS:
%   tSpan = [1, 2] = time bounds
%   state = [nState, nGridState] = state vector at each grid point
%   control = [nControl, nGridControl] = control vector at each grid point
%
% The first two entries are always the boundary times:
tSpan = [decVars(1), decVars(2)];
% The precomputed index maps pull out the interleaved state and control;
% reshape guarantees the documented [nRows, nTime] matrix orientation.
state = reshape(decVars(pack.indx), pack.nState, pack.nGridState);
control = reshape(decVars(pack.indu), pack.nControl, pack.nGridControl);
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function cost = myObjective(decVars, pack,dynamics, pathObj, bndObj)
%
% Unpacks the decision variables, simulates the system forward, and
% returns the total cost: (integrated path cost) + (boundary cost).
%
% INPUTS:
%   decVars = column vector of decision variables
%   pack = details about how to convert decision variables into t,x, and u
%   dynamics = user-defined dynamics function handle
%   pathObj = user-defined path-objective function (may be [])
%   bndObj = user-defined boundary objective function (may be [])
%
% OUTPUTS:
%   cost = scalar cost for this set of decision variables
%
% All of the real work (integration and path-cost accumulation) happens
% inside simulateSystem:
[t,x,~,~,pathCost] = simulateSystem(decVars, pack, dynamics, pathObj);
% Add the boundary cost, if one was supplied:
if isempty(bndObj)
    bndCost = 0;
else
    bndCost = bndObj(t(1), x(:,1), t(end), x(:,end));
end
cost = bndCost + pathCost;
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function [c, ceq] = myConstraint(decVars, pack, dynamics, pathObj, pathCst, bndCst)
% [c, ceq] = myConstraint(decVars, pack, dynamics, pathObj, pathCst, bndCst)
%
% Evaluates the nonlinear constraints for fmincon: simulates the system
% to obtain the trajectory and the defects between segments, then passes
% everything to the user-defined path and boundary constraint functions.
%
% INPUTS:
% decVars = column vector of decision variables
% pack = details about how to convert decision variables into t,x, and u
% dynamics = user-defined dynamics function handle
% pathObj = user-defined path-objective function
% pathCst = user-defined path-constraint function
% bndCst = user-defined boundary constraint function
%
% OUTPUTS:
% c = non-linear inequality constraint
% ceq = non-linear equality constraint
%
% NOTE:
% - path constraints are satisfied at the start and end of each sub-step
%
[t, x, u, defects] = simulateSystem(decVars, pack, dynamics, pathObj);
% Pack the defects plus the user constraints into fmincon form:
[c, ceq] = collectConstraints(t, x, u, defects, pathCst, bndCst);
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function [t,x,u,defects,pathCost] = simulateSystem(decVars, pack, dynFun, pathObj)
%
% This function does the real work of the transcription method. It
% simulates the system forward in time across each segment of the
% trajectory, computes the integral of the cost function, and then matches
% up the defects between the end of each segment and the start of the next.
%
% INPUTS:
% decVars = column vector of decision variables
% pack = details about how to convert decision variables into t,x, and u
% dynFun = user-defined dynamics function handle
% pathObj = user-defined path-objective function (may be empty)
%
% OUTPUTS:
% t = [1 x nGrid] = time vector for the edges of the sub-step grid
% x = [nState x nGrid] = state vector
% u = [nControl x nGrid] = control vector
% defects = [nState x nSegment] = defect matrix
% pathCost = scalar cost for the path integral
%
% NOTES:
% - nGrid = nSegment*nSubStep+1
% - This function is usually called twice for each combination of
% decision variables: once by the objective function and once by the
% constraint function. To keep the code fast I cache the old values and
% only recompute when the inputs change.
%
%%%% CODE OPTIMIZATION %%%%
%
% Prevents the same exact code from being called twice by caching the
% solution and reusing it when appropriate. The cache lives in global
% variables shared with the next call; it is valid only as long as
% decVars is bit-identical to the cached copy.
%
global RUNGE_KUTTA_t RUNGE_KUTTA_x RUNGE_KUTTA_u
global RUNGE_KUTTA_defects RUNGE_KUTTA_pathCost
global RUNGE_KUTTA_decVars
%
% The cache hits only when the stored decVars matches the new one exactly
% (same length AND same values).
usePreviousValues = false;
if ~isempty(RUNGE_KUTTA_decVars)
if length(RUNGE_KUTTA_decVars) == length(decVars)
if ~any(RUNGE_KUTTA_decVars ~= decVars)
usePreviousValues = true;
end
end
end
%
if usePreviousValues
t = RUNGE_KUTTA_t;
x = RUNGE_KUTTA_x;
u = RUNGE_KUTTA_u;
defects = RUNGE_KUTTA_defects;
pathCost = RUNGE_KUTTA_pathCost;
else
%
%
%%%% END CODE OPTIMIZATION %%%%
[tSpan, state, control] = unPackDecVar(decVars,pack);
nState = pack.nState;
nSegment = pack.nSegment;
nSubStep = pack.nSubStep;
% NOTES:
% The following bit of code is a bit confusing, mostly due to the
% need for vectorization to make things run at a reasonable speed in
% Matlab. Part of the confusion comes because the decision variables
% include the state at the beginning of each segment, but the control
% at the beginning and middle of each substep - thus there are more
% control grid-points than state grid points. The calculations are
% vectorized over segments, but not sub-steps, since the result of
% one sub-step is required for the next.
% time, state, and control at the ends of each substep
nTime = 1+nSegment*nSubStep;
t = linspace(tSpan(1), tSpan(2), nTime);
x = zeros(nState, nTime);
u = control(:,1:2:end); % Control at the endpoints of each segment
uMid = control(:,2:2:end); %Control at the mid-points of each segment
c = zeros(1, nTime-1); %Integral cost for each segment
dt = (t(end)-t(1))/(nTime-1);
idx = 1:nSubStep:(nTime-1); %Indicies for the start of each segment
x(:,[idx,end]) = state; %Fill in the states that we already know
% Classical 4th-order Runge-Kutta, vectorized across all segments at
% once; the loop advances one sub-step at a time.
for iSubStep = 1:nSubStep
% March forward Runge-Kutta step
t0 = t(idx);
x0 = x(:,idx);
k0 = combinedDynamics(t0, x0, u(:,idx), dynFun,pathObj);
k1 = combinedDynamics(t0+0.5*dt, x0 + 0.5*dt*k0(1:nState,:), uMid(:,idx), dynFun,pathObj);
k2 = combinedDynamics(t0+0.5*dt, x0 + 0.5*dt*k1(1:nState,:), uMid(:,idx), dynFun,pathObj);
k3 = combinedDynamics(t0+dt, x0 + dt*k2(1:nState,:), u(:,idx+1), dynFun,pathObj);
z = (dt/6)*(k0 + 2*k1 + 2*k2 + k3); %Change over the sub-step
xNext = x0 + z(1:nState,:); %Next state
c(idx) = z(end,:); %Integral of the cost function over this step
if iSubStep == nSubStep %We've reached the end of the interval
% Compute the defect vector: mismatch between the simulated end
% of each segment and the decision-variable state that starts
% the next segment.
defects = xNext - x(:,idx+1);
else
% Store the state for next step in time
idx = idx+1; % <-- This is important!!
x(:,idx) = xNext;
end
end
pathCost = sum(c); %Sum up the integral cost over each segment
%%%% Cache results to use on the next call to this function.
RUNGE_KUTTA_t = t;
RUNGE_KUTTA_x = x;
RUNGE_KUTTA_u = u;
RUNGE_KUTTA_defects = defects;
RUNGE_KUTTA_pathCost = pathCost;
end
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function dz = combinedDynamics(t,x,u,dynFun,pathObj)
% dz = combinedDynamics(t,x,u,dynFun,pathObj)
%
% Stacks the state derivative and the cost integrand into one vector
% field, so that the state and the running cost can be integrated with
% a single Runge-Kutta pass.
%
% INPUTS:
% t = [1, nTime] = time vector (grid points)
% x = [nState, nTime] = state vector at each grid point
% u = [nControl, nTime] = control vector at each grid point
% dynFun(t,x,u) = dynamics function handle, returns dx/dt
% pathObj(t,x,u) = integral cost function handle (may be empty)
%
% OUTPUTS:
% dz = [dx; dObj] = combined dynamics of state and cost
stateRate = dynFun(t, x, u);
% With no path objective the cost integrand is identically zero.
costRate = zeros(size(t));
if ~isempty(pathObj)
    costRate = pathObj(t, x, u);
end
dz = [stateRate; costRate];
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
%%%% Analytic Gradient Stuff %%%%
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function gradInfo = grad_computeInfo(pack)
%
% This function computes the matrix dimensions and indicies that are used
% to map the gradients from the user functions to the gradients needed by
% fmincon. The key difference is that the gradients in the user functions
% are with respect to their input (t,x,u) or (t0,x0,tF,xF), while the
% gradients for fmincon are with respect to all decision variables.
%
% INPUTS:
% pack = details about packing and unpacking the decision variables
% .nState
% .nGridState
% .nControl
% .nGridControl
% .nSegment
% .nSubStep
%
% OUTPUTS:
% gradInfo = details about how to transform gradients
% .nDecVar = total number of decision variables
% .tIdx = decVar indices of [t0, tF]
% .xIdx = decVar indices of each state grid point
% .uIdx = decVar indices of each control grid point
% .indu = decVar indices of controls at sub-step endpoints
% .indumid = decVar indices of controls at sub-step midpoints
% .bndIdxMap = decVar indices of (t0, x0, tF, xF), for boundary terms
%
%nTime = pack.nTime;
nState = pack.nState;
nGridState = pack.nGridState;
nControl = pack.nControl;
nGridControl = pack.nGridControl;
nDecVar = 2 + nState*nGridState + nControl*nGridControl;
% Trick: unpack the vector [1..nDecVar] so that each entry of the
% resulting t/x/u arrays IS the decVar index it came from.
zIdx = 1:nDecVar;
gradInfo.nDecVar = nDecVar;
[tIdx, xIdx, uIdx] = unPackDecVar(zIdx,pack);
gradInfo.tIdx = tIdx([1,end]);
gradInfo.xIdx = xIdx;
gradInfo.uIdx = uIdx;
nSegment = pack.nSegment;
nSubStep = pack.nSubStep;
% indices of decVars associated with u (odd columns = sub-step endpoints)
indu = 1:2:(1+2*nSegment*nSubStep);
gradInfo.indu = uIdx(:,indu);
% indices of decVars associated with uMid (even columns = midpoints)
indumid = 2:2:(1+2*nSegment*nSubStep);
gradInfo.indumid = uIdx(:,indumid);
%%%% For unpacking the boundary constraints and objective:
gradInfo.bndIdxMap = [tIdx(1); xIdx(:,1); tIdx(end); xIdx(:,end)];
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function [fail] = runGradientCheck(z_test, pack,dynamics, pathObj, bndObj, pathCst, bndCst, gradInfo)
%
% This function tests the analytic gradients of the objective and
% nonlinear constraints with the DERIVEST package (gradest/jacobianest
% by John D'Errico). The finite difference calculations in matlab's
% optimization package were not sufficiently accurate.
%
% INPUTS:
% z_test = decision-variable vector at which to check gradients
% pack, gradInfo = packing / gradient-mapping details
% dynamics, pathObj, bndObj, pathCst, bndCst = user function handles
%
% OUTPUTS:
% fail = 0 (the function errors out on any mismatch rather than
% returning a nonzero flag)
%
GradientCheckTol = 1e-6; %Analytic gradients must match numerical within this bound
fail = 0;
fprintf('\n%s\n','____________________________________________________________')
fprintf('%s\n',' DerivativeCheck Information with DERIVEST Package ')
% analytic gradient
[~, dcost] = myObjGrad(z_test, pack, dynamics, pathObj, bndObj, gradInfo);
% check gradient with derivest package
deriv = gradest(@(z) myObjGrad(z, pack, dynamics, pathObj, bndObj, gradInfo),z_test);
% print largest difference in numerical and analytic gradients
fprintf('\n%s\n','Objective function derivatives:')
fprintf('%s\n','Maximum relative difference between user-supplied')
fprintf('%s %1.5e \n','and finite-difference derivatives = ',max(abs(dcost-deriv')))
if any(abs(dcost-deriv') > GradientCheckTol)
error('Objective gradient did not pass')
end
% analytic nonlinear constraints
[c, ceq,dc, dceq] = myCstGrad(z_test, pack, dynamics, pathObj, pathCst, bndCst, gradInfo);
% check nonlinear inequality constraints with 'jacobianest'
if ~isempty(c)
jac = jacobianest(@(z) myConstraint(z, pack, dynamics, pathObj, pathCst, bndCst),z_test);
% print largest difference in numerical and analytic gradients
fprintf('\n%s\n','Nonlinear inequality constraint function derivatives:')
fprintf('%s\n','Maximum relative difference between user-supplied')
fprintf('%s %1.5e \n','and finite-difference derivatives = ',max(max(abs(dc-jac'))))
if any(any(abs(dc - jac') > GradientCheckTol))
error('Nonlinear inequality constraint did not pass')
end
end
% check nonlinear equality constraints with 'jacobianest'
% (myCstGradCheckEq returns only ceq, since jacobianest expects a
% single output)
if ~isempty(ceq)
jac = jacobianest(@(z) myCstGradCheckEq(z, pack, dynamics, pathObj, pathCst, bndCst),z_test);
% print largest difference in numerical and analytic gradients
fprintf('\n%s\n','Nonlinear equality constraint function derivatives:')
fprintf('%s\n','Maximum relative difference between user-supplied')
fprintf('%s %1.5e \n','and finite-difference derivatives = ',max(max(abs(dceq-jac'))))
if any(any(abs(dceq - jac') > GradientCheckTol))
error('Nonlinear equality constraint did not pass')
end
end
fprintf('\n%s\n','DerivativeCheck successfully passed.')
fprintf('%s\n','____________________________________________________________')
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function ceq = myCstGradCheckEq(decVars, pack, dynamics, pathObj, pathCst, bndCst)
% ceq = myCstGradCheckEq(decVars, pack, dynamics, pathObj, pathCst, bndCst)
%
% Helper for runGradientCheck: evaluates the constraints but returns
% ONLY the equality constraints (ceq), since jacobianest requires a
% function handle with a single output.
[t, x, u, defects] = simulateSystem(decVars, pack, dynamics, pathObj);
[~, ceq] = collectConstraints(t, x, u, defects, pathCst, bndCst);
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function [cost, dcost] = myObjGrad(decVars, pack,dynamics, pathObj, bndObj, gradInfo)
%
% This function unpacks the decision variables, sends them to the
% user-defined objective functions, and then returns the final cost
% along with its analytic gradient.
%
% INPUTS:
% decVars = column vector of decision variables
% pack = details about how to convert decision variables into t,x, and u
% dynamics = user-defined dynamics function handle
% pathObj = user-defined path-objective function
% bndObj = user-defined boundary objective function
% gradInfo = mapping from user-function gradients to decVar gradients
%
% OUTPUTS:
% cost = scalar cost for this set of decision variables
% dcost = gradient of cost w.r.t. decVars
%
% NOTE: gradients are only available for pathCost that depends only on
% input parameters not states.
%
% All of the real work happens inside this function:
[t,x,~,~,pathCost,dxdalpha,dJdalpha] = simSysGrad(decVars, pack, dynamics, pathObj, gradInfo); %#ok<ASGLU>
% dxdalpha is included in outputs to make sure subsequent calls to
% simulateSystem without a change to decVars have access to the correct
% value of dxdalpha - see simSysGrad, in which dxdalpha is cached.
% Compute the cost at the boundaries of the trajectory
if isempty(bndObj)
bndCost = 0;
else
t0 = t(1);
tF = t(end);
x0 = x(:,1);
xF = x(:,end);
bndCost = bndObj(t0,x0,tF,xF);
end
cost = pathCost + bndCost;
% calculate gradient of cost function
if nargout > 1
nState = pack.nState;
nControl = pack.nControl;
nSegment = pack.nSegment;
nSubStep = pack.nSubStep;
nDecVar = 2+nState*(1+nSegment)+nControl*(1+nSegment*nSubStep*2);
% allocate gradient of cost
dcost_pth = zeros(nDecVar,1);
dcost_bnd = zeros(nDecVar,1);
% gradient associated with bound objective
if ~isempty(bndObj)
% bound costs and gradients w.r.t. t0, x0, tF, xF
[~, d_bnd] = bndObj(t0,x0,tF,xF);
% gradients of t0, x0, tF, xF w.r.t. decision parameters (labeled alpha)
dt0_dalpha = zeros(1,nDecVar);
dt0_dalpha(1) = 1; % t0 is always the first decVar
%
% BUG FIX: x0 maps to the FIRST state grid point, xIdx(:,1).
% (Previously this used xIdx(:,end), which put the gradient of the
% boundary cost w.r.t. x0 on the columns belonging to xF; compare
% the correct pattern in collectConstraintsGrad.)
dx0_dalpha = zeros(nState,nDecVar);
dx0_dalpha(1:nState,gradInfo.xIdx(:,1)) = eye(nState);
%
dtF_dalpha = zeros(1,nDecVar);
dtF_dalpha(2) = 1; % tF is always the second decVar
%
dxF_dalpha = zeros(nState,nDecVar);
dxF_dalpha(1:nState,gradInfo.xIdx(:,end)) = eye(nState);
% gradient of bound cost, chain rule through (t0,x0,tF,xF)
dcost_bnd(:) = [dt0_dalpha; dx0_dalpha; dtF_dalpha; dxF_dalpha]' * d_bnd';
end
% gradient associated with path objective (accumulated in simSysGrad)
if ~isempty(pathObj)
dcost_pth = dJdalpha';
end
dcost = dcost_pth + dcost_bnd;
end
end
function [c, ceq, dc, dceq] = myCstGrad(decVars, pack, dynamics, pathObj, pathCst, bndCst, gradInfo)
% [c, ceq, dc, dceq] = myCstGrad(decVars, pack, dynamics, pathObj, pathCst, bndCst, gradInfo)
%
% Evaluates the nonlinear constraints for fmincon, and - when four
% outputs are requested - their analytic gradients with respect to the
% decision variables.
%
% INPUTS:
% decVars = column vector of decision variables
% pack = details about how to convert decision variables into t,x, and u
% dynamics = user-defined dynamics function handle
% pathObj = user-defined path-objective function
% pathCst = user-defined path-constraint function
% bndCst = user-defined boundary constraint function
% gradInfo = mapping from user-function gradients to decVar gradients
%
% OUTPUTS:
% c = non-linear inequality constraint
% ceq = non-linear equality constraint
% dc = gradient of c w.r.t. decVars
% dceq = gradient of ceq w.r.t. decVars
%
% NOTE:
% - path constraints are satisfied at the start and end of each sub-step
%
[t, x, u, defects, pathcost, dxdalpha] = simSysGrad(decVars, pack, dynamics, pathObj, gradInfo); %#ok<ASGLU>
% Only compute the (expensive) gradients when the caller asked for them:
if nargout > 2
    [c, ceq, dc, dceq] = collectConstraintsGrad(t, x, u, ...
        defects, ...
        pathCst, bndCst, pack, gradInfo, dxdalpha);
else
    [c, ceq] = collectConstraints(t, x, u, ...
        defects, ...
        pathCst, bndCst);
end
end
function [c, ceq, dc, dceq] = collectConstraintsGrad(t,x,u,defects, pathCst, bndCst, pack, gradInfo, dxdalpha)
% [c, ceq, dc, dceq] = collectConstraintsGrad(t,x,u,defects, pathCst, bndCst, pack, gradInfo, dxdalpha)
%
% OptimTraj utility function.
%
% Collects the defects, calls user-defined constraints, and then packs
% everything up into a form that is good for fmincon, including the
% analytic gradients of all constraints w.r.t. the decision variables.
%
% INPUTS:
% t = time vector (time at each substep) nTime = 1+nSegment*nSubStep
% x = state matrix (states at each time in t)
% u = control matrix (control at each time in t)
% defects = defects matrix
% pathCst = user-defined path constraint function
% bndCst = user-defined boundary constraint function
% pack = problem dimensions (.nState, .nControl, .nSegment, .nSubStep)
% gradInfo = decision-variable index maps (see grad_computeInfo)
% dxdalpha = partial derivative of state at each substep w.r.t. decVars
%
% OUTPUTS:
% c = inequality constraint for fmincon
% ceq = equality constraint for fmincon
% dc = gradient of c w.r.t. decVars (one column per constraint)
% dceq = gradient of ceq w.r.t. decVars (one column per constraint)
%
% problem dimensions
nState = pack.nState;
nControl = pack.nControl;
nSegment = pack.nSegment;
nSubStep = pack.nSubStep;
nDecVar = 2+nState*(1+nSegment)+nControl*(1+nSegment*nSubStep*2);
%%%% defect constraints
% defect_j = xSim(end of segment j) - x_{j+1}, so the gradient is the
% simulated-state sensitivity minus identity on x_{j+1}'s columns.
ceq_dyn = reshape(defects,numel(defects),1);
dceq_dyn = zeros(nDecVar,length(ceq_dyn));
Inx = eye(nState);
for j = 1:nSegment
rows = gradInfo.xIdx(:,j+1);
cols = (j-1)*nState+(1:nState);
dceq_dyn(:,cols) = dxdalpha{j}(:,:,end)'; % gradient w.r.t. to x_i(+)
dceq_dyn(rows,cols) = -Inx; % gradient w.r.t. to x_i
end
%%%% Compute the user-defined constraints:
%%%% path constraints
if isempty(pathCst)
c_path = [];
ceq_path = [];
dc_path = [];
dceq_path = [];
else
% pathCst must return gradients w.r.t. (t,x,u) as its 3rd/4th outputs
[c_pathRaw, ceq_pathRaw, c_pathGradRaw, ceq_pathGradRaw] = pathCst(t,x,u);
c_path = reshape(c_pathRaw,numel(c_pathRaw),1);
ceq_path = reshape(ceq_pathRaw,numel(ceq_pathRaw),1);
dc_path = zeros(nDecVar,length(c_path));
dceq_path = zeros(nDecVar,length(ceq_path));
% dt/dalpha : gradient of time w.r.t. decVars
dt_dalpha = zeros(1,nDecVar);
nTime = 1+nSegment*nSubStep;
n_time = 0:nTime-1;
% gradients of path constraints
nc = size(c_pathRaw,1); % number path constraints at each time
nceq = size(ceq_pathRaw,1);
for j = 1:(nSegment+1)
for i = 1:nSubStep
% d(t[n])/dalpha: t[n] = t0*(1-n/(nTime-1)) + tF*(n/(nTime-1))
n_time0 = n_time((j-1)*nSubStep+i);
dt_dalpha(1) = (1 - n_time0/(nTime-1));
dt_dalpha(2) = (n_time0/(nTime-1));
%
% state sensitivity: from dxdalpha within a segment; the very
% last grid point is itself a decision variable (identity map)
if j < nSegment+1
dxi_dalpha = dxdalpha{j}(:,:,i);
else
dxi_dalpha = zeros(nState,nDecVar);
cols = gradInfo.xIdx(:,j);
dxi_dalpha(:,cols) = eye(nState);
end
%
% control sensitivity: controls are decision variables (identity)
dui_dalpha = zeros(nControl,nDecVar);
cols = gradInfo.indu(:,(j-1)*nSubStep+i);
dui_dalpha(:,cols) = eye(nControl);
% inequality path constraints (chain rule through (t,x,u))
if nc > 0
cols = (1:nc) + nc*((j-1)*nSubStep+i-1);
dc_path(:,cols) = [dt_dalpha; dxi_dalpha; dui_dalpha]' * c_pathGradRaw(:,:,nSubStep*(j-1)+i)';
end
% equality path constraints
if nceq > 0
cols = (1:nceq) + nceq*((j-1)*nSubStep+i-1);
dceq_path(:,cols) = [dt_dalpha; dxi_dalpha; dui_dalpha]' * ceq_pathGradRaw(:,:,nSubStep*(j-1)+i)';
end
% no need to continue with inner loop: the final "segment" is
% just the single last grid point.
if j == nSegment+1
break;
end
end
end
end
%%%% bound constraints
if isempty(bndCst)
c_bnd = [];
ceq_bnd = [];
dc_bnd = [];
dceq_bnd = [];
else
t0 = t(1);
tF = t(end);
x0 = x(:,1);
xF = x(:,end);
% bound constraints and gradients w.r.t. t0, x0, tF, xF
[c_bnd, ceq_bnd, d_bnd, deq_bnd] = bndCst(t0,x0,tF,xF);
% gradients of t0, x0, tF, xF w.r.t. decision parameters (labeled alpha)
dt0_dalpha = zeros(1,nDecVar);
dt0_dalpha(1) = 1; % t0 is always the first decVar
%
dx0_dalpha = zeros(nState,nDecVar);
cols = gradInfo.xIdx(:,1);
dx0_dalpha(1:nState,cols) = eye(nState);
%
dtF_dalpha = zeros(1,nDecVar);
dtF_dalpha(2) = 1; % tF is always the second decVar
%
dxF_dalpha = zeros(nState,nDecVar);
cols = gradInfo.xIdx(:,end);
dxF_dalpha(1:nState,cols) = eye(nState);
% inequality bound constraints
dc_bnd = [dt0_dalpha; dx0_dalpha; dtF_dalpha; dxF_dalpha]' * d_bnd';
% equality bound constraints
dceq_bnd = [dt0_dalpha; dx0_dalpha; dtF_dalpha; dxF_dalpha]' * deq_bnd';
end
%%%% Pack everything up:
c = [c_path;c_bnd];
ceq = [ceq_dyn; ceq_path; ceq_bnd];
dc = [dc_path, dc_bnd];
dceq = [dceq_dyn, dceq_path, dceq_bnd];
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function [t,x,u,defects,pathCost,dxdalpha,dJdalpha] = simSysGrad(decVars, pack, dynFun, pathObj, gradInfo)
%
% This function does the real work of the transcription method. It
% simulates the system forward in time across each segment of the
% trajectory, computes the integral of the cost function, and then matches
% up the defects between the end of each segment and the start of the next.
% In addition to simulateSystem, it propagates the sensitivities of the
% state (dxdalpha) and the path cost (dJdalpha) w.r.t. the decision
% variables through each Runge-Kutta sub-step.
%
% INPUTS:
% decVars = column vector of decision variables
% pack = details about how to convert decision variables into t,x, and u
% dynFun = user-defined dynamics function handle (must also return its
% jacobian w.r.t. (t,x,u) as a second output)
% pathObj = user-defined path-objective function (with jacobian)
% gradInfo = decision-variable index maps (see grad_computeInfo)
%
% OUTPUTS:
% t = [1 x nGrid] = time vector for the edges of the sub-step grid
% x = [nState x nGrid] = state vector
% u = [nControl x nGrid] = control vector
% defects = [nState x nSegment] = defect matrix
% pathCost = scalar cost for the path integral
% dxdalpha = {nSegment} cell; each [nState x nDecVar x nSubStep+1] =
% sensitivity of the simulated state w.r.t. decVars
% dJdalpha = [1 x nDecVar] = gradient of pathCost w.r.t. decVars
%
% NOTES:
% - nGrid = nSegment*nSubStep+1
% - This function is usually called twice for each combination of
% decision variables: once by the objective function and once by the
% constraint function. To keep the code fast I cache the old values and
% only recompute when the inputs change.
%
%%%% CODE OPTIMIZATION %%%%
%
% Prevents the same exact code from being called twice by caching the
% solution and reusing it when appropriate.
%
global RUNGE_KUTTA_t RUNGE_KUTTA_x RUNGE_KUTTA_u
global RUNGE_KUTTA_defects RUNGE_KUTTA_pathCost
global RUNGE_KUTTA_decVars RUNGE_KUTTA_dxdalpha RUNGE_KUTTA_dJdalpha
%
% Cache hits only when the stored decVars matches exactly.
usePreviousValues = false;
if ~isempty(RUNGE_KUTTA_decVars)
if length(RUNGE_KUTTA_decVars) == length(decVars)
if ~any(RUNGE_KUTTA_decVars ~= decVars)
usePreviousValues = true;
end
end
end
%
if usePreviousValues
t = RUNGE_KUTTA_t;
x = RUNGE_KUTTA_x;
u = RUNGE_KUTTA_u;
defects = RUNGE_KUTTA_defects;
pathCost = RUNGE_KUTTA_pathCost;
dxdalpha = RUNGE_KUTTA_dxdalpha;
dJdalpha = RUNGE_KUTTA_dJdalpha;
else
%
%
%%%% END CODE OPTIMIZATION %%%%
[tSpan, state, control] = unPackDecVar(decVars,pack);
nState = pack.nState;
nControl = pack.nControl;
nSegment = pack.nSegment;
nSubStep = pack.nSubStep;
% NOTES:
% The following bit of code is a bit confusing, mostly due to the
% need for vectorization to make things run at a reasonable speed in
% Matlab. Part of the confusion comes because the decision variables
% include the state at the beginning of each segment, but the control
% at the beginning and middle of each substep - thus there are more
% control grid-points than state grid points. The calculations are
% vectorized over segments, but not sub-steps, since the result of
% one sub-step is required for the next.
% time, state, and control at the ends of each substep
nTime = 1+nSegment*nSubStep;
t = linspace(tSpan(1), tSpan(2), nTime);
x = zeros(nState, nTime);
u = control(:,1:2:end); % Control at the endpoints of each segment
uMid = control(:,2:2:end); %Control at the mid-points of each segment
c = zeros(1, nTime-1); %Integral cost for each segment
dt = (t(end)-t(1))/(nTime-1);
idx = 1:nSubStep:(nTime-1); %Indicies for the start of each segment
x(:,[idx,end]) = state; %Fill in the states that we already know
% VARIABLES for analytic gradient evaluations.
% size of decicion parameters (2 for time), nstate*(nSegment+1), ...
% dxdalpha = partial derivative of state w.r.t. decVars (alpha)
nalpha = 2 + nState*(1+nSegment) + nControl*(1+2*nSubStep*nSegment);
dxdalpha = cell(1,nSegment);
for i = 1:nSegment
dxdalpha{i} = zeros(nState,nalpha,nSubStep+1);
% segment start state IS a decision variable -> identity sensitivity
cols = gradInfo.xIdx(:,i);
dxdalpha{i}(:,cols,1) = eye(nState);
end
% dT/dalpha for T = tF - t0 (used for the dt sensitivity terms below)
dTdalpha = zeros(1,nalpha); dTdalpha(1:2) = [-1,1];
dt_dalpha = zeros(1,nalpha);
n_time = 0:nTime-1;
% gradient of path cost
dJdalpha = zeros(1,nalpha);
for iSubStep = 1:nSubStep
% March forward Runge-Kutta step
t0 = t(idx);
x0 = x(:,idx);
%------------------------------------------
% Code for calculating dxdalpha (partial derivative of state w.r.t.
% the descision parameters): dxdalpha = nstate x nalpha
% assume nargout <=5 when using finite difference calculation for
% gradients in which case dxdalpha is unnecessary.
% Gradient of time w.r.t. decVars
% ------------------------------------------------------------
% dt = (tF-t0)/(nTime-1)
% t = t0 + n*dt
% t = t0 + n*(tF-t0)/(nTime-1)
% t = t0*(1-n/(nTime-1)) + tF*(n/(nTime-1))
%
% alpha = [t0, tF, x0, x1, ..., xN, u0, uM0, u1, ..., uN]
% dt/dalpha = [1 - n/(nTime-1), n/(nTime-1), 0, 0, ... 0]
% ------------------------------------------------------------
n_time0 = n_time(idx);
% Each combinedDynGrad call returns both the stacked rates and their
% jacobians w.r.t. (t,x,u) at every segment.
[k0, dk0] = combinedDynGrad(t0, x0, u(:,idx), dynFun,pathObj);
[k1, dk1] = combinedDynGrad(t0+0.5*dt, x0 + 0.5*dt*k0(1:nState,:), uMid(:,idx), dynFun,pathObj);
[k2, dk2] = combinedDynGrad(t0+0.5*dt, x0 + 0.5*dt*k1(1:nState,:), uMid(:,idx), dynFun,pathObj);
[k3, dk3] = combinedDynGrad(t0+dt, x0 + dt*k2(1:nState,:), u(:,idx+1), dynFun,pathObj);
z = (dt/6)*(k0 + 2*k1 + 2*k2 + k3); %Change over the sub-step
% Chain rule through each RK stage, per segment. The 1/(nTime-1)
% terms account for dt itself depending on (t0,tF).
for j = 1:nSegment
% d(t[n])/dalpha
dt_dalpha(1) = (1 - n_time0(j)/(nTime-1));
dt_dalpha(2) = (n_time0(j)/(nTime-1));
% du[n]/dalpha
du_dalpha = zeros(nControl,nalpha);
du_dalpha(:,gradInfo.indu(:,idx(j))) = eye(nControl);
% duMid[n]/dalpha
duMid_dalpha = zeros(nControl,nalpha);
duMid_dalpha(:,gradInfo.indumid(:,idx(j))) = eye(nControl);
% du[n+1]/dalpha
du1_dalpha = zeros(nControl,nalpha);
du1_dalpha(:,gradInfo.indu(:,idx(j)+1)) = eye(nControl);
% dk0/dalpha
dk0da = dk0(:,:,j) * [dt_dalpha; dxdalpha{j}(:,:,iSubStep); du_dalpha];
% dk1/dalpha
dk1da = dk1(:,:,j) * [dt_dalpha + 0.5/(nTime-1)*dTdalpha; dxdalpha{j}(:,:,iSubStep) + 0.5*dt*dk0da(1:nState,:) + 0.5/(nTime-1)*k0(1:nState,j)*dTdalpha; duMid_dalpha];
% dk2/dalpha
dk2da = dk2(:,:,j) * [dt_dalpha + 0.5/(nTime-1)*dTdalpha; dxdalpha{j}(:,:,iSubStep) + 0.5*dt*dk1da(1:nState,:) + 0.5/(nTime-1)*k1(1:nState,j)*dTdalpha; duMid_dalpha];
% dk3/dalpha
dk3da = dk3(:,:,j) * [dt_dalpha + 1/(nTime-1)*dTdalpha; dxdalpha{j}(:,:,iSubStep) + dt*dk2da(1:nState,:) + 1/(nTime-1)*k2(1:nState,j)*dTdalpha; du1_dalpha];
dz = (dt/6)*(dk0da + 2*dk1da + 2*dk2da + dk3da)...
+ 1/(6*(nTime-1))*(k0(:,j)+2*k1(:,j)+2*k2(:,j)+k3(:,j))*dTdalpha;
% update dxdalpha
dxdalpha{j}(:,:,iSubStep+1) = dxdalpha{j}(:,:,iSubStep) + dz(1:nState,:);
% update dJdalpha (last row of dz is the cost-integrand sensitivity)
dJdalpha = dJdalpha + dz(nState+1,:);
end
xNext = x0 + z(1:nState,:); %Next state
c(idx) = z(end,:); %Integral of the cost function over this step
if iSubStep == nSubStep %We've reached the end of the interval
% Compute the defect vector:
defects = xNext - x(:,idx+1);
else
% Store the state for next step in time
idx = idx+1; % <-- This is important!!
x(:,idx) = xNext;
end
end
pathCost = sum(c); %Sum up the integral cost over each segment
%%%% Cache results to use on the next call to this function.
RUNGE_KUTTA_t = t;
RUNGE_KUTTA_x = x;
RUNGE_KUTTA_u = u;
RUNGE_KUTTA_defects = defects;
RUNGE_KUTTA_pathCost = pathCost;
RUNGE_KUTTA_dxdalpha = dxdalpha;
RUNGE_KUTTA_dJdalpha = dJdalpha;
end
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function [dz, J] = combinedDynGrad(t,x,u,dynFun,pathObj)
% [dz, J] = combinedDynGrad(t,x,u,dynFun,pathObj)
%
% Stacks the dynamics and the cost integrand together, along with their
% jacobians, so that state and cost can be integrated - and their
% sensitivities propagated - in a single pass.
%
% INPUTS:
% t = [1, nTime] = time vector (grid points)
% x = [nState, nTime] = state vector at each grid point
% u = [nControl, nTime] = control vector at each grid point
% dynFun(t,x,u) = dynamics function handle; second output is its
% jacobian w.r.t. (t,x,u)
% pathObj(t,x,u) = integral cost function handle (may be empty)
%
% OUTPUTS:
% dz = [dx; dObj] = combined dynamics of state and cost
% J = [nState+1, 1+nState+nControl, nTime] = combined jacobian of
% dynamics and objective w.r.t. (t,x,u)
nx = size(x, 1);
nu = size(u, 1);
nt = length(t);
[xRate, xJac] = dynFun(t, x, u);
if isempty(pathObj)
    % No path objective: zero integrand with a zero jacobian row.
    cRate = zeros(size(t));
    cJac = zeros(1, 1+nx+nu, nt);
else
    [cRate, cJac] = pathObj(t, x, u);
    cJac = reshape(cJac, 1, 1+nx+nu, nt);
end
dz = [xRate; cRate];
J = cat(1, xJac, cJac);
end
|
github
|
xuhuairuogu/OptimTraj-master
|
getDefaultOptions.m
|
.m
|
OptimTraj-master/getDefaultOptions.m
| 8,511 |
UNKNOWN
|
b0e50d99e831c558cf728ae9bb423345
|
function problem = getDefaultOptions(problem)
% problem = getDefaultOptions(problem)
%
% This function fills in any blank entries in the problem.options struct.
% It is designed to be called from inside of optimTraj.m, and not by the
% user.
%
% INPUTS:
% problem = problem struct; .options may be missing, partially filled,
% or a struct array (one element per mesh-refinement iteration)
%
% OUTPUTS:
% problem = same struct with problem.options fully populated with
% defaults for the top-level fields, the fmincon options
% (nlpOpt), and the method-specific parameter structs
%
%%%% Top-level default options:
OPT.method = 'trapezoid';
OPT.verbose = 2;
OPT.defaultAccuracy = 'medium';
%%%% Basic setup
% ensure that options is not empty
if ~isfield(problem,'options')
problem.options.method = OPT.method;
end
opt = problem.options;
% Loop over each options struct and fill in top-level options
% (a field counts as "unset" if it is either missing or empty)
for i=1:length(opt)
if ~isfield(opt(i),'method')
opt(i).method = OPT.method;
elseif isempty(opt(i).method)
opt(i).method = OPT.method;
end
if ~isfield(opt(i),'verbose')
opt(i).verbose = OPT.verbose;
elseif isempty(opt(i).verbose)
opt(i).verbose = OPT.verbose;
end
if ~isfield(opt(i),'defaultAccuracy')
opt(i).defaultAccuracy = OPT.defaultAccuracy;
elseif isempty(opt(i).defaultAccuracy)
opt(i).defaultAccuracy = OPT.defaultAccuracy;
end
end
% Figure out basic problem size:
nState = size(problem.guess.state,1);
nControl = size(problem.guess.control,1);
% Loop over opt and fill in nlpOpt struct:
for i=1:length(opt)
% Map the integer verbosity level onto fmincon's Display setting:
switch opt(i).verbose
case 0
NLP_display = 'notify';
case 1
NLP_display = 'final-detailed';
case 2
NLP_display = 'iter';
case 3
NLP_display = 'iter-detailed';
otherwise
error('Invalid value for options.verbose');
end
% Default solver tolerances / iteration budgets, scaled with problem size:
switch opt(i).defaultAccuracy
case 'low'
OPT.nlpOpt = optimset(...
'Display',NLP_display,...
'TolFun',1e-4,...
'MaxIter',200,...
'MaxFunEvals',1e4*(nState+nControl));
case 'medium'
OPT.nlpOpt = optimset(...
'Display',NLP_display,...
'TolFun',1e-6,...
'MaxIter',400,...
'MaxFunEvals',5e4*(nState+nControl));
case 'high'
OPT.nlpOpt = optimset(...
'Display',NLP_display,...
'TolFun',1e-8,...
'MaxIter',800,...
'MaxFunEvals',1e5*(nState+nControl));
otherwise
error('Invalid value for options.defaultAccuracy')
end
% Merge user-supplied nlpOpt fields over the defaults, warning on
% any field the defaults do not define:
if isfield(opt(i),'nlpOpt')
if isstruct(opt(i).nlpOpt) && ~isempty(opt(i).nlpOpt)
names = fieldnames(opt(i).nlpOpt);
for j=1:length(names)
if ~isfield(OPT.nlpOpt,names{j})
disp(['WARNING: options.nlpOpt.' names{j} ' is not a valid option']);
else
OPT.nlpOpt.(names{j}) = opt(i).nlpOpt.(names{j});
end
end
end
end
opt(i).nlpOpt = OPT.nlpOpt;
end
% Check ChebFun dependency:
% (falls back to 'trapezoid' if the Chebfun toolbox is not installed)
missingChebFun = false;
for i=1:length(opt)
if strcmp(opt(i).method,'chebyshev')
try
chebpts(3); %Test call to chebfun
catch ME %#ok<NASGU>
missingChebFun = true;
opt(i).method = 'trapezoid'; %Force default method
end
end
end
if missingChebFun
warning('''chebyshev'' method requires the Chebfun toolbox');
disp(' --> Install Chebfun toolbox: (http://www.chebfun.org/)');
disp(' --> Running with default method instead (''trapezoid'')');
end
% Fill in method-specific paramters:
for i=1:length(opt)
OPT_method = opt(i).method;
switch OPT_method
case 'trapezoid'
OPT.trapezoid = defaults_trapezoid(opt(i).defaultAccuracy);
case 'hermiteSimpson'
OPT.hermiteSimpson = defaults_hermiteSimpson(opt(i).defaultAccuracy);
case 'chebyshev'
OPT.chebyshev = defaults_chebyshev(opt(i).defaultAccuracy);
case 'multiCheb'
OPT.multiCheb = defaults_multiCheb(opt(i).defaultAccuracy);
case 'rungeKutta'
OPT.rungeKutta = defaults_rungeKutta(opt(i).defaultAccuracy);
case 'gpops'
OPT.gpops = defaults_gpops(opt(i).defaultAccuracy);
otherwise
error('Invalid value for options.method');
end
% Merge user-supplied method-specific fields over the defaults,
% same warn-on-unknown-field pattern as for nlpOpt above:
if isfield(opt(i),OPT_method)
if isstruct(opt(i).(OPT_method)) && ~isempty(opt(i).(OPT_method))
names = fieldnames(opt(i).(OPT_method));
for j=1:length(names)
if ~isfield(OPT.(OPT_method),names{j})
disp(['WARNING: options.' OPT_method '.' names{j} ' is not a valid option']);
else
OPT.(OPT_method).(names{j}) = opt(i).(OPT_method).(names{j});
end
end
end
end
opt(i).(OPT_method) = OPT.(OPT_method);
end
problem.options = opt;
end
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
% Method-specific parameters %
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
function OPT_trapezoid = defaults_trapezoid(accuracy)
% Default parameters for the 'trapezoid' transcription method, chosen
% by the requested accuracy level ('low' | 'medium' | 'high').
switch accuracy
    case 'low'
        nGrid = 12;
    case 'medium'
        nGrid = 30;
    case 'high'
        nGrid = 60;
    otherwise
        error('Invalid value for options.defaultAccuracy')
end
OPT_trapezoid.nGrid = nGrid;
OPT_trapezoid.adaptiveDerivativeCheck = 'off';
end
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
function OPT_hermiteSimpson = defaults_hermiteSimpson(accuracy)
% Default parameters for the 'hermiteSimpson' transcription method,
% chosen by the requested accuracy level ('low' | 'medium' | 'high').
switch accuracy
    case 'low'
        nSegment = 10;
    case 'medium'
        nSegment = 20;
    case 'high'
        nSegment = 40;
    otherwise
        error('Invalid value for options.defaultAccuracy')
end
OPT_hermiteSimpson.nSegment = nSegment;
OPT_hermiteSimpson.adaptiveDerivativeCheck = 'off';
end
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
function OPT_chebyshev = defaults_chebyshev(accuracy)
% Default parameters for the 'chebyshev' transcription method, chosen
% by the requested accuracy level ('low' | 'medium' | 'high').
switch accuracy
    case 'low'
        nColPts = 9;
    case 'medium'
        nColPts = 13;
    case 'high'
        nColPts = 23;
    otherwise
        error('Invalid value for options.defaultAccuracy')
end
OPT_chebyshev.nColPts = nColPts;
end
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
function OPT_multiCheb = defaults_multiCheb(accuracy)
% Default parameters for the 'multiCheb' transcription method, chosen
% by the requested accuracy level ('low' | 'medium' | 'high').
switch accuracy
    case 'low'
        nColPts = 6;
        nSegment = 3;
    case 'medium'
        nColPts = 8;
        nSegment = 6;
    case 'high'
        nColPts = 8;
        nSegment = 12;
    otherwise
        error('Invalid value for options.defaultAccuracy')
end
OPT_multiCheb.nColPts = nColPts;
OPT_multiCheb.nSegment = nSegment;
end
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
function OPT_rungeKutta = defaults_rungeKutta(accuracy)
% Build the default solver options for the Runge-Kutta (multiple
% shooting) transcription method. The accuracy level selects the segment
% count and the number of sub-steps per segment.
if strcmp(accuracy,'low')
    nSegment = 10;
    nSubStep = 2;
elseif strcmp(accuracy,'medium')
    nSegment = 20;
    nSubStep = 2;
elseif strcmp(accuracy,'high')
    nSegment = 20;
    nSubStep = 4;
else
    error('Invalid value for options.defaultAccuracy')
end
OPT_rungeKutta.nSegment = nSegment;
OPT_rungeKutta.nSubStep = nSubStep;
OPT_rungeKutta.adaptiveDerivativeCheck = 'off';
end
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
function OPT_gpops = defaults_gpops(accuracy)
% Default options struct for the GPOPS2 transcription back-end. Most
% fields map directly onto the GPOPS2 "setup" struct; the accuracy level
% only adjusts the mesh-refinement tolerance and iteration budget.
OPT_gpops.bounds.phase.integral.lower = -inf;
OPT_gpops.bounds.phase.integral.upper = inf;
OPT_gpops.guess.phase.integral = 0;
OPT_gpops.name = 'OptimTraj_GPOPS';
OPT_gpops.auxdata = [];
OPT_gpops.nlp.solver = 'ipopt'; % {'ipopt','snopt'}
OPT_gpops.derivatives.dependencies = 'full'; % {'full','sparse','sparseNaN'}
OPT_gpops.derivatives.supplier = 'sparseCD'; % {'sparseCD','adigator'}
OPT_gpops.derivatives.derivativelevel = 'first'; % {'first','second'}
OPT_gpops.mesh.method = 'hp-PattersonRao';
OPT_gpops.method = 'RPM-Integration';
OPT_gpops.mesh.phase.colpoints = 10*ones(1,10); % collocation points per mesh interval
OPT_gpops.mesh.phase.fraction = ones(1,10)/10; % fraction of the phase in each interval
OPT_gpops.scales.method = 'none'; % {'none','automatic-hybridUpdate','automatic-bounds'}
% Accuracy sets the mesh tolerance and how many refinement passes to run:
switch accuracy
case 'low'
OPT_gpops.mesh.tolerance = 1e-2;
OPT_gpops.mesh.maxiterations = 0;
case 'medium'
OPT_gpops.mesh.tolerance = 1e-3;
OPT_gpops.mesh.maxiterations = 1;
case 'high'
OPT_gpops.mesh.tolerance = 1e-4;
OPT_gpops.mesh.maxiterations = 3;
otherwise
error('Invalid value for options.defaultAccuracy')
end
end
|
github
|
xuhuairuogu/OptimTraj-master
|
gpopsWrapper.m
|
.m
|
OptimTraj-master/gpopsWrapper.m
| 5,777 |
utf_8
|
fb8e22a03bfa72046ab9bf5458b31b1f
|
function soln = gpopsWrapper(problem)
% soln = gpopsWrapper(problem)
%
% This function is a wrapper that converts the standard input for optimTraj
% into a call to GPOPS2, a commercially available transcription software
% for matlab. You can purchase and download it at http://www.gpops2.com/
%
% GPOPS2 implements an adaptive transcription method - it adjusts both the
% number of trajectory segments and the order of the interpolating
% polynomial in each segment. Many GPOPS features are available in OptimTraj,
% but not all. Notably, OptimTraj cannot solve multi-phase problems.
%
% Set any special GPOPS options by storing the 'setup' struct in the
% problem.options.gpops struct.
%
% If using SNOPT, be careful about any constant terms in your constraints.
% When using numerical gradients, SNOPT drops any constant terms in your
% constraints, which is why it has non-zero bounds. This is exactly the
% opposite of the convention that FMINCON uses, where all constraint bounds
% must be zero. If your constraints have non-zero bounds, and you would
% like to use GPOPS with SNOPT as the solver, then manually set the fields
% in problem.gpops.phase.bounds.path and problem.gpops.eventgroup to
% include these bounds, and then remove them from the constraint function.
%
% Print out some solver info if desired:
if problem.options.verbose > 0
disp('Transcription using GPOPS2');
end
% Copy the problem specification.
% NOTE: OptimTraj stores trajectory data as [dim, nTime], while GPOPS2
% expects [nTime, dim] -- hence the transposes throughout this function.
setup = problem.options.gpops;
setup.bounds.phase.initialtime.lower = problem.bounds.initialTime.low';
setup.bounds.phase.initialtime.upper = problem.bounds.initialTime.upp';
setup.bounds.phase.finaltime.lower = problem.bounds.finalTime.low';
setup.bounds.phase.finaltime.upper = problem.bounds.finalTime.upp';
setup.bounds.phase.initialstate.lower = problem.bounds.initialState.low';
setup.bounds.phase.initialstate.upper = problem.bounds.initialState.upp';
setup.bounds.phase.finalstate.lower = problem.bounds.finalState.low';
setup.bounds.phase.finalstate.upper = problem.bounds.finalState.upp';
setup.bounds.phase.state.lower = problem.bounds.state.low';
setup.bounds.phase.state.upper = problem.bounds.state.upp';
setup.bounds.phase.control.lower = problem.bounds.control.low';
setup.bounds.phase.control.upper = problem.bounds.control.upp';
setup.guess.phase.time = problem.guess.time';
setup.guess.phase.state = problem.guess.state';
setup.guess.phase.control = problem.guess.control';
% Configure bounds on the path constraints.
% The constraint dimensions are discovered by evaluating the user's
% constraint function once on the initial guess. User-supplied bounds in
% setup.bounds.phase.path take precedence.
if ~isempty(problem.func.pathCst)
if ~isfield(setup.bounds.phase, 'path')
[cTest, ceqTest] = problem.func.pathCst(...
problem.guess.time,problem.guess.state,problem.guess.control);
nc = size(cTest,1);
nceq = size(ceqTest,1);
setup.bounds.phase.path.lower = [-inf(1,nc), zeros(1,nceq)];
setup.bounds.phase.path.upper = zeros(1,nc+nceq);
end
end
% Configure bounds on the endpoint constraints (same discovery scheme):
if ~isempty(problem.func.bndCst)
if ~isfield(setup.bounds, 'eventgroup')
t0 = problem.guess.time(1); tF = problem.guess.time(end);
x0 = problem.guess.state(:,1); xF = problem.guess.state(:,end);
[cTest, ceqTest] = problem.func.bndCst(t0, x0,tF,xF);
nc = size(cTest,1);
nceq = size(ceqTest,1);
setup.bounds.eventgroup.lower = [-inf(1,nc), zeros(1,nceq)];
setup.bounds.eventgroup.upper = zeros(1,nc+nceq);
end
end
% Adapt the OptimTraj user functions to the GPOPS2 calling convention:
F = problem.func;
setup.functions.continuous = @(input)( gpops_continuous(input,F.dynamics,F.pathObj,F.pathCst) );
setup.functions.endpoint = @(input)( gpops_endpoint(input,F.bndObj,F.bndCst) );
%%%% KEY LINE: Solve the optimization problem with GPOPS II
output = gpops2(setup);
% Pack up the results (transpose back into OptimTraj's [dim, nTime]):
soln.grid.time = output.result.solution.phase.time';
soln.grid.state = output.result.solution.phase.state';
soln.grid.control = output.result.solution.phase.control';
% Dense interpolated solution from GPOPS, wrapped for user queries;
% queries outside the time span return nan:
tSoln = output.result.interpsolution.phase.time';
xSoln = output.result.interpsolution.phase.state';
uSoln = output.result.interpsolution.phase.control';
soln.interp.state = @(t)( interp1(tSoln',xSoln',t','pchip',nan)' );
soln.interp.control = @(t)( interp1(tSoln',uSoln',t','pchip',nan)' );
soln.info.nlpTime = output.totaltime;
soln.info.objVal = output.result.objective;
soln.info.gpops.meshcounts = output.meshcounts;
soln.info.gpops.result.maxerror = output.result.maxerror;
soln.info.gpops.result.nlpinfo = output.result.nlpinfo;
soln.info.gpops.result.setup = output.result.setup;
soln.problem = problem;
soln.problem.options.nlpOpt = []; % did not use the fmincon options
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
%%%% SUB FUNCTIONS %%%%
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function output = gpops_endpoint(input,bndObj,bndCst)
%
% Endpoint function for GPOPS2: evaluates the total objective (running
% cost integral plus optional boundary objective) and the optional
% boundary constraints at the initial and final points of the phase.
%
tInit = input.phase.initialtime;
tFinal = input.phase.finaltime;
xInit = input.phase.initialstate';
xFinal = input.phase.finalstate';
% Objective = running-cost integral, plus the user boundary term if given:
objVal = input.phase.integral;
if ~isempty(bndObj)
    objVal = objVal + bndObj(tInit,xInit,tFinal,xFinal);
end
output.objective = objVal;
% Boundary constraints, packed into a single GPOPS2 event group:
if ~isempty(bndCst)
    [cIneq, cEq] = bndCst(tInit,xInit,tFinal,xFinal);
    output.eventgroup.event = [cIneq;cEq]';
end
end
function output = gpops_continuous(input,dynamics,pathObj,pathCst)
%
% Continuous function for GPOPS2: evaluates the system dynamics, the
% path-objective integrand, and (optionally) the path constraints along
% the trajectory grid. Data is transposed between the GPOPS2 convention
% [nTime, dim] and the OptimTraj convention [dim, nTime].
%
time = input.phase.time';
state = input.phase.state';
ctrl = input.phase.control';
output.dynamics = dynamics(time,state,ctrl)';
output.integrand = pathObj(time,state,ctrl)';
if ~isempty(pathCst)
    [cIneq, cEq] = pathCst(time,state,ctrl);
    output.path = [cIneq;cEq]';
end
end
|
github
|
xuhuairuogu/OptimTraj-master
|
inputValidation.m
|
.m
|
OptimTraj-master/inputValidation.m
| 4,319 |
utf_8
|
394cd18a2f88465b4d3adbfe71561f8c
|
function problem = inputValidation(problem)
%
% This function runs through the problem struct and sets any missing fields
% to the default value. If a mandatory field is missing, then it throws an
% error.
%
% INPUTS:
%   problem = a partially completed problem struct
%
% OUTPUTS:
%   problem = a complete problem struct, with validated fields
%
%%%% Check the function handles:
if ~isfield(problem,'func')
error('Field ''func'' cannot be omitted from ''problem''');
else
if ~isfield(problem.func,'dynamics')
error('Field ''dynamics'' cannot be omitted from ''problem.func'''); end
% Optional user functions default to empty ( = "not provided"):
if ~isfield(problem.func,'pathObj'), problem.func.pathObj = []; end
if ~isfield(problem.func,'bndObj'), problem.func.bndObj = []; end
if ~isfield(problem.func,'pathCst'), problem.func.pathCst = []; end
if ~isfield(problem.func,'bndCst'), problem.func.bndCst = []; end
end
%%%% Check the initial guess (also compute nState and nControl):
if ~isfield(problem, 'guess')
error('Field ''guess'' cannot be omitted from ''problem''');
else
if ~isfield(problem.guess,'time')
error('Field ''time'' cannot be omitted from ''problem.guess'''); end
if ~isfield(problem.guess, 'state')
error('Field ''state'' cannot be omitted from ''problem.guess'''); end
if ~isfield(problem.guess, 'control')
error('Field ''control'' cannot be omitted from ''problem.guess'''); end
% Compute the size of the time, state, and control based on guess
[checkOne, nTime] = size(problem.guess.time);
[nState, checkTimeState] = size(problem.guess.state);
[nControl, checkTimeControl] = size(problem.guess.control);
if nTime < 2 || checkOne ~= 1
error('guess.time must have dimensions of [1, nTime], where nTime > 1');
end
if checkTimeState ~= nTime
error('guess.state must have dimensions of [nState, nTime]');
end
if checkTimeControl ~= nTime
error('guess.control must have dimensions of [nControl, nTime]');
end
end
%%%% Check the problem bounds:
% Missing fields default to empty; checkLowUpp then fills each empty
% bound with +/- inf bounds of the correct size.
% BUG FIX: previously, when problem.bounds was absent the fields were set
% to plain empty arrays and checkLowUpp was never called, so the returned
% bounds lacked the .low/.upp fields that downstream code requires.
if ~isfield(problem,'bounds')
problem.bounds = struct();
end
if ~isfield(problem.bounds,'initialTime')
problem.bounds.initialTime = []; end
problem.bounds.initialTime = ...
checkLowUpp(problem.bounds.initialTime,1,1,'initialTime');
if ~isfield(problem.bounds,'finalTime')
problem.bounds.finalTime = []; end
problem.bounds.finalTime = ...
checkLowUpp(problem.bounds.finalTime,1,1,'finalTime');
if ~isfield(problem.bounds,'state')
problem.bounds.state = []; end
problem.bounds.state = ...
checkLowUpp(problem.bounds.state,nState,1,'state');
if ~isfield(problem.bounds,'initialState')
problem.bounds.initialState = []; end
problem.bounds.initialState = ...
checkLowUpp(problem.bounds.initialState,nState,1,'initialState');
if ~isfield(problem.bounds,'finalState')
problem.bounds.finalState = []; end
problem.bounds.finalState = ...
checkLowUpp(problem.bounds.finalState,nState,1,'finalState');
if ~isfield(problem.bounds,'control')
problem.bounds.control = []; end
problem.bounds.control = ...
checkLowUpp(problem.bounds.control,nControl,1,'control');
end
function input = checkLowUpp(input,nRow,nCol,name)
%
% Validates that input.low and input.upp both have size [nRow, nCol],
% filling in -inf/+inf defaults for whichever bound field is missing,
% and checking that the upper bound dominates the lower bound.
%
if ~isfield(input,'low')
    input.low = -inf(nRow,nCol);
end
if ~isfield(input,'upp')
    input.upp = inf(nRow,nCol);
end
sizeText = ['[' num2str(nRow) ', ' num2str(nCol) ']'];
if ~isequal(size(input.low), [nRow, nCol])
    error(['problem.bounds.' name ...
        '.low must have size = ' sizeText]);
end
if ~isequal(size(input.upp), [nRow, nCol])
    error(['problem.bounds.' name ...
        '.upp must have size = ' sizeText]);
end
if any(any(input.upp - input.low < 0))
    error(...
        ['problem.bounds.' name '.upp must be >= problem.bounds.' name '.low!']);
end
end
|
github
|
xuhuairuogu/OptimTraj-master
|
multiCheb.m
|
.m
|
OptimTraj-master/multiCheb.m
| 21,236 |
utf_8
|
f3b52105bdd4fc219954b4149df07295
|
function soln = multiCheb(problem)
% soln = multiCheb(problem)
%
% DEPRECATED
%
%
% *************************************************************************
% This file is no longer used, and is preserved for reference only. The
% numerical methods for connecting segments are not the most stable,
% particularly for low-order polynomials. This file will later be replaced
% with HP orthogonal collocation, based on Legendre polynomials.
% *************************************************************************
%
%
% This function transcribes a trajectory optimization problem Chebyshev
% orthogonal polynomials for basis functions. This is an orthogonal
% collocation method. This method is similar to Chebyshev, except that
% here I break the trajectory into several segments, rather than just one.
%
% The technique is similar to the one described in detail in the paper:
%
%   " A Chebyshev Technique for Solving Nonlinear Optimal Control Problems"
%   ISSS Trans. Automatic Control, 1988
%   by:  Jacques Vlassenbroeck  and  Rene Van Dooren
%
% My implementation for computation of the differentiation matrix,
% quadrature rules, and interpolation are based on the following:
%
%   "Barycentric Lagrange Interpolation"
%   Siam Review, 2004
%   Publisher: Society for Industrial and Applied Mathematics
%   by:  Jean-Paul Berrut  and  Lloyd N. Trefethen
%
%   "Approximation Theory and Approximation Practice"
%   Textbook by Lloyd N. Trefethen
%
%   "Chebfun"  Matlab toolbox
%   Website:  http://www.chebfun.org/
%   by  Lloyd N. Trefethen   et al.
%
% For details on the input and output, see the help file for optimTraj.m
%
% Method specific parameters:
%
%   problem.options.method = 'multiCheb'
%   problem.options.multiCheb = struct with method parameters:
%       .nColPts = number of collocation points in each trajectory segment
%       .nSegment = number of segments to break the trajectory into
%
%
% *************************************************************************
% DEPRECATED
% *************************************************************************
%
%To make code more readable
G = problem.guess;
B = problem.bounds;
F = problem.func;
Opt = problem.options;
nColPts = Opt.multiCheb.nColPts;  %Number of collocation points in each segment
nSegment = Opt.multiCheb.nSegment;  %Number of segments to break the trajectory into
% Print out some solver info if desired:
if Opt.verbose > 0
disp('  -> Transcription via Multiple-segment Chebyshev orthogonal collocation');
disp('    ');
end
% This method seems to fail if a low-order polynomial is used.
% It gives reasonable solutions for medium-high order polynomials
if nColPts < 6
disp('    WARNING: using fewer than six collocation points per interval can lead to numerical problems!');
end
% Chebyshev points and weights on the default domain
[xx,ww] = chebyshevPoints(nColPts,[-1,1]);
cheb.xx = xx;
cheb.ww = ww;
cheb.nSegment = nSegment;
cheb.nColPts = nColPts;
% Interpolate the guess at the chebyshev-points for transcription:
guess.tSpan = G.time([1,end]);
guess.time = getMultiChebTime(cheb,guess.tSpan);
guess.state = interp1(G.time', G.state', guess.time')';
guess.control = interp1(G.time', G.control', guess.time')';
[zGuess, pack] = packDecVar(guess.time, guess.state, guess.control);
% Unpack all bounds:
% (interior grid points use the generic state bounds; the first and last
% points use the dedicated initial/final state bounds)
nGrid = nSegment*nColPts;
tLow = getMultiChebTime(cheb,[B.initialTime.low, B.finalTime.low]);
xLow = [B.initialState.low, B.state.low*ones(1,nGrid-2), B.finalState.low];
uLow = B.control.low*ones(1,nGrid);
zLow = packDecVar(tLow,xLow,uLow);
tUpp = getMultiChebTime(cheb,[B.initialTime.upp, B.finalTime.upp]);
xUpp = [B.initialState.upp, B.state.upp*ones(1,nGrid-2), B.finalState.upp];
uUpp = B.control.upp*ones(1,nGrid);
zUpp = packDecVar(tUpp,xUpp,uUpp);
%%%% Set up problem for fmincon:
P.objective = @(z)( ...
myObjective(z, pack, F.pathObj, F.bndObj, cheb) );
P.nonlcon = @(z)( ...
myConstraint(z, pack, F.dynamics, F.pathCst, F.bndCst, cheb) );
P.x0 = zGuess;
P.lb = zLow;
P.ub = zUpp;
P.Aineq = []; P.bineq = [];
P.Aeq = []; P.beq = [];
P.options = Opt.nlpOpt;
P.solver = 'fmincon';
%%%% Call fmincon to solve the non-linear program (NLP)
tic;
[zSoln, objVal,exitFlag,output] = fmincon(P);
nlpTime = toc;
% Post-process: strip duplicated segment-boundary points and build the
% piecewise-chebyshev interpolants for the user:
soln = formatTrajectory(zSoln, pack, cheb);
soln.info = output;
soln.info.nlpTime = nlpTime;
soln.info.exitFlag = exitFlag;
soln.info.objVal = objVal;
soln.problem = problem;  % Return the fully detailed problem struct
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
%%%% SUB FUNCTIONS %%%%
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function [z,pack] = packDecVar(t,x,u)
%
% Collapse the time (t), state (x) and control (u) matrices into a single
% column vector of decision variables for the NLP solver.
%
% INPUTS:
%   t = [1, nTime] = time vector (grid points)
%   x = [nState, nTime] = state vector at each grid point
%   u = [nControl, nTime] = control vector at each grid point
%
% OUTPUTS:
%   z = column vector of 2 + nTime*(nState+nControl) decision variables
%   pack = struct recording the dimensions needed to invert this packing
%       .nTime
%       .nState
%       .nControl
%
pack.nTime = length(t);
pack.nState = size(x,1);
pack.nControl = size(u,1);
% Only the end-point times are decision variables; the interior grid
% times are reconstructed from the chebyshev spacing when unpacking.
z = [ t(1); ...
      t(end); ...
      reshape(x, pack.nState*pack.nTime, 1); ...
      reshape(u, pack.nControl*pack.nTime, 1) ];
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function [t,x,u,w] = unPackDecVar(z,pack,cheb)
%
% Invert packDecVar: recover the time (t), state (x), control (u), and
% clenshaw-curtis quadrature weights (w) from the decision vector z.
%
% INPUTS:
%   z = column vector of 2 + nTime*(nState+nControl) decision variables
%   pack = dimension struct produced by packDecVar
%       .nTime
%       .nState
%       .nControl
%   cheb = chebyshev grid description (points, weights, segment count)
%
% OUTPUTS:
%   t = [1, nTime] = time vector (grid points)
%   x = [nState, nTime] = state vector at each grid point
%   u = [nControl, nTime] = control vector at each grid point
%   w = [1, nTime] = weights for clenshaw-curtis quadrature
%
nTime = pack.nTime;
nx = pack.nState*nTime;
nu = pack.nControl*nTime;
% The first two entries of z are the trajectory start and end times; the
% full time grid (and quadrature weights) follow from the chebyshev rule:
[t, w] = getMultiChebTime(cheb,[z(1),z(2)]);
% The remaining entries are the state then the control, column-major:
x = reshape(z(3:(2+nx)), pack.nState, nTime);
u = reshape(z((3+nx):(2+nx+nu)), pack.nControl, nTime);
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function [time, weights] = getMultiChebTime(cheb,tSpan)
%
% This function computes the time grid for a trajectory that is made up of
% a series of chebyshev orthogonal polynomials.
%
% INPUTS:
%   cheb = struct of information about the chebyshev basis functions
%       .xx = chebyshev points, on the domain [-1,1]
%       .ww = chebyshev weights, on the domain [-1,1]
%       .nSegment = number of trajectory segments
%   tSpan = [tInitial, tFinal]
%
% OUTPUTS:
%   time = [timeSegment_1, timeSegment_2, ... timeSegment_nSegment];
%   weights = clenshaw-curtis quadrature weights matching each time entry
%
% NOTES:
%   This function will return duplicate times at the boundary to each
%   segment, so that the dynamics of each segment can easily be solved
%   independently. These redundant points must be removed before returning
%   the output to the user.
%
%   For example, time should look something like:
%       time = [0, 1, 2, 3,   3, 4, 5, 6,   6, 7, 8, 9];
%
d = [0, (tSpan(2)-tSpan(1))/cheb.nSegment];  %Domain for the scaled points
[x, w] = chebyshevScalePoints(cheb.xx,cheb.ww,d);  %Scaled points
offset = linspace(tSpan(1),tSpan(2),cheb.nSegment+1);  %Starting time for each segment
% Each column of this matrix is one segment's grid: the scaled chebyshev
% points shifted by that segment's starting time.
time = x'*ones(1,cheb.nSegment) + ones(cheb.nColPts,1)*offset(1:(end-1));
nGrid = numel(time);
time = reshape(time,1,nGrid);
% Every segment has identical quadrature weights (equal segment widths):
weights = reshape(w'*ones(1,cheb.nSegment),1,nGrid);
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function [x,w] = chebyshevScalePoints(xx,ww,d)
% [x,w] = chebyshevScalePoints(xx,ww,d)
%
% Map chebyshev points and quadrature weights from the canonical domain
% [-1,1] onto an arbitrary interval via the affine change of variables.
%
% INPUTS:
%   xx = chebyshev points on the domain [-1,1]
%   ww = chebyshev weights on the domain [-1,1]
%   d = [low, upp] = new domain
%
% OUTPUTS:
%   x = chebyshev points on the new domain d
%   w = chebyshev weights on the new domain d
%
width = d(2) - d(1);
x = (width*xx + (d(1) + d(2)))/2;
w = (width/2)*ww;
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function cost = myObjective(z,pack,pathObj,bndObj,cheb)
%
% Unpack the decision variables, evaluate the user-defined objective
% terms, and return the total scalar cost.
%
% INPUTS:
%   z = column vector of decision variables
%   pack = details about how to convert decision variables into t,x, and u
%   pathObj = user-defined integral objective function (or empty)
%   bndObj = user-defined end-point objective function (or empty)
%   cheb = chebyshev grid description
%
% OUTPUTS:
%   cost = scalar cost for this set of decision variables
%
[t,x,u,w] = unPackDecVar(z,pack,cheb);
% Integral (running) cost, via clenshaw-curtis quadrature:
if isempty(pathObj)
    integralCost = 0;
else
    integralCost = dot(w, pathObj(t,x,u));
end
% Boundary (end-point) cost:
if isempty(bndObj)
    bndCost = 0;
else
    bndCost = bndObj(t(1), x(:,1), t(end), x(:,end));
end
cost = bndCost + integralCost;
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function [c, ceq] = myConstraint(z,pack,dynFun, pathCst, bndCst, cheb)
%
% This function unpacks the decision variables, computes the defects along
% the trajectory, and then evaluates the user-defined constraint functions.
%
% INPUTS:
%   z = column vector of decision variables
%   pack = details about how to convert decision variables into t,x, and u
%   dynFun = user-defined dynamics function
%   pathCst = user-defined constraints along the path
%   bndCst = user-defined constraints at the boundaries
%
% OUTPUTS:
%   c = inequality constraints to be passed to fmincon
%   ceq = equality constraints to be passed to fmincon
%
[t,x,u] = unPackDecVar(z,pack,cheb);
nSegment = cheb.nSegment;
%%%% Enforce the dynamics:
% Analytic differentiation of the trajectory at chebyshev points:
% (all segments share one differentiation matrix, since every segment has
% the same width and the same number of collocation points)
domain = [0, (t(end)-t(1))/nSegment];  %Domain for the scaled points
xTemplate = chebyshevScalePoints(cheb.xx,cheb.ww,domain);  %Scaled points
D = chebyshevDifferentiationMatrix(xTemplate);
dxFun = zeros(size(x));
idx = 1:cheb.nColPts;
for i=1:nSegment  %Loop over each segment of the trajectory
dxFun(:,idx) = (D*x(:,idx)')';
idx = idx + cheb.nColPts;
end
% Derivative, according to the dynamics function:
dxDyn = dynFun(t,x,u);
% Add a constraint that both versions of the derivative must match.
% This ensures that the dynamics inside of each segment are correct.
dxError = dxFun - dxDyn;
% Add an additional defect that makes the state at the end of one
% segment match the state at the beginning of the next segment.
% (the grid duplicates boundary points, so these are adjacent columns)
idxLow = cheb.nColPts*(1:(nSegment-1));
idxUpp = idxLow + 1;
stitchState = x(:,idxLow)-x(:,idxUpp);
stitchControl = u(:,idxLow)-u(:,idxUpp);  %Also need continuous control
defects = [...
reshape(dxError, numel(dxError),1);
reshape(stitchState, numel(stitchState),1);
reshape(stitchControl, numel(stitchControl),1)];
%%%% Call user-defined constraints and pack up:
[c, ceq] = collectConstraints(t,x,u, defects, pathCst, bndCst);
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function soln = formatTrajectory(zSoln, pack, cheb)
%
% This function formats the result of the trajectory optimization so that
% it is easy to use for plotting and analysis by the user.
%
% INPUTS:
%   zSoln = solution decision vector returned by fmincon
%   pack = dimension struct produced by packDecVar
%   cheb = chebyshev grid description
%
% OUTPUTS:
%   soln.grid = de-duplicated time/state/control grids
%   soln.interp = function handles for querying the piecewise-chebyshev
%       interpolants of the state and control
%
[tSoln,xSoln,uSoln] = unPackDecVar(zSoln,pack,cheb);
nSegment = cheb.nSegment;
nColPts = cheb.nColPts;
%%%% Need to remove the duplicate data points between segments:
% The two copies of each boundary point are averaged (they should agree
% up to the stitching-defect tolerance), then the second copy is dropped.
idxLow = nColPts*(1:(nSegment-1));
idxUpp = idxLow + 1;
tSoln(idxUpp) = [];
xSoln(:,idxLow) = 0.5*(xSoln(:,idxLow)+xSoln(:,idxUpp));
xSoln(:,idxUpp) = [];
uSoln(:,idxLow) = 0.5*(uSoln(:,idxLow)+uSoln(:,idxUpp));
uSoln(:,idxUpp) = [];
%%%% Store the results:
soln.grid.time = tSoln;
soln.grid.state = xSoln;
soln.grid.control = uSoln;
%%%% Set up interpolating of the chebyshev trajectory:
% idxKnot marks the segment boundaries in the de-duplicated grid:
idxKnot = (nColPts-1)*(1:(nSegment-1)) + 1;
soln.interp.state = @(t)( chebyshevMultiInterpolate(xSoln,tSoln,idxKnot,t) );
soln.interp.control = @(t)( chebyshevMultiInterpolate(uSoln,tSoln,idxKnot,t) );
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function [x, w] = chebyshevPoints(n,d)
%[x, w] = chebyshevPoints(n,d)
%
% This function is a light-weight version of the function: chebpts.m
% written by Lloyd Trefethen as part of his Chebyshev Polynomial matlab
% toolbox: chebyfun, which can be downloaded from:
% http://www2.maths.ox.ac.uk/chebfun/download/
%
% The algorithm for computing the quadrature weights is also from
% trefethen's toolbox, with the citation:
% Jörg Waldvogel, "Fast construction of the Fejér and Clenshaw-Curtis
% quadrature rules", BIT Numerical Mathematics 43 (1), p. 001-018 (2004).
% http://www2.maths.ox.ac.uk/chebfun/and_beyond/programme/slides/wald.pdf
%
% Slight modifications made by Matthew Kelly
% October 27, 2013
% Cornell University
%
% This function returns the n chebyshev points, over the interval d. Error
% checking has not been included on the inputs.
%
% INPUTS:
%   n = [1x1] the desired number of chebyshev points
%   d = [1x2] domain of the polynomial. Default = [-1,1]
%
% OUTPUTS:  (2nd-kind chebyshev points and weights)
%   x = [1xn] the n chebyshev points over the interval d
%   w = [1xn] the n chebyshev weights for Clenshaw-Curtis quadrature
%
if nargin < 2
d = [-1,1];  % Default domain
end
% BUG FIX: the original n==1 special case returned x = 0 without scaling
% to the domain d, and never assigned w, so requesting weights for a
% single point caused an error (it also referenced an undefined d when
% called with one argument). A single point now sits at the midpoint of
% the domain and carries the full quadrature weight (the domain width).
if n == 1
x = sum(d)/2;
w = diff(d);
return
end
%Compute the chebyshev points on the domain [-1,1]:
m = n-1;
x = sin(pi*(-m:2:m)/(2*m));  % Chebyshev points
%Rescale to the requested domain (identity when d = [-1,1]):
x = (diff(d)*x + sum(d))/2;
%Check if weights are needed:
if nargout==2
w = chebyshevWeights(n)*diff(d)/2;
end
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function w = chebyshevWeights(n) % 2nd-kind Chebyshev weights
% Computes the n Clenshaw-Curtis quadrature weights for the 2nd-kind
% chebyshev points on [-1,1], using Waldvogel's FFT-based construction:
%
% Jörg Waldvogel, "Fast construction of the Fejér and Clenshaw-Curtis
% quadrature rules", BIT Numerical Mathematics 43 (1), p. 001-018 (2004).
% http://www2.maths.ox.ac.uk/chebfun/and_beyond/programme/slides/wald.pdf
if n == 1
w = 2;  % a single point carries the full weight of the interval [-1,1]
else
% new
n = n-1;
u0 = 1/(n^2-1+mod(n,2));  % Boundary weights
L = 0:n-1; r = 2./(1-4*min(L,n-L).^2);  % Auxiliary vectors
% NOTE(review): the ifft input is real; in exact arithmetic the result
% is real as well, but numerically it may carry a tiny imaginary part.
% Confirm whether real() should be applied here.
w = [ifft(r-u0) u0];  % C-C weights
end
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function D = chebyshevDifferentiationMatrix(x)
%
% Computes the chebyshev differentiation matrix for the node vector x,
% using the barycentric-weight formulation.
%
% INPUTS:
%   x = [1xn] vector of chebyshev nodes
%
% OUTPUTS:
%   D = [nxn] differentiation matrix
%
% NOTES:
%
%   Example usage:  Df = (D*f')';
%   where f = [nState x nPoints] values at each chebyshev node
%
n = length(x);
%Get the weight vector (barycentric weights for 2nd-kind chebyshev
%points: alternating sign, halved at the end points):
w = ones(1,n);
w(2:2:n) = -1;
w([1,end]) = w([1,end])/2;
%First, compute the weighting matrix:
W = (1./w)'*w;
%Next, compute the matrix with inverse of the node distance
X = zeros(n);
for i=1:n
idx = (i+1):n;
X(i,idx) = 1./(x(i)-x(idx));
end
%Use the property that this matrix is anti-symmetric
X = X - X';
%Compute the i~=j case:
D = W.*X;
%Deal with the i=j case: each diagonal entry is the negated row sum of
%the off-diagonal entries, so that every row of D sums to zero.
D = D - diag(sum(D,2));
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function Df = chebyshevDerivative(f,x)
%Df = chebyshevDerivative(f,x)
%
% FUNCTION:
%   This function computes the derivative of the chebyshev interpolant
%   at each of the chebyshev nodes.
%
% INPUTS:
%   f = [nState x nPoints] values at each chebyshev node
%   x = chebyshev points
%
% OUTPUTS:
%   Df = the derivative of the chebyshev interpolant at each chebyshev node
%
% NOTES:
%   The derivative at each node is computed by multiplying f by a
%   differentiation matrix. This matrix is [nPoints x nPoints]. If f is a
%   very large order interpolant then computing this matrix may max out the
%   memory available to matlab.
%
%   NOTE(review): this function returns a single output, but
%   chebyshevInterpolate requests [Df, DDf] and [Df, DDf, DDDf] from it,
%   and passes the 2-element domain d where this function expects the full
%   vector of chebyshev nodes. Those call sites appear broken -- confirm
%   before relying on the derivative outputs of chebyshevInterpolate.
%
D = chebyshevDifferentiationMatrix(x);
%Apply the differentiation matrix
Df = (D*f')';
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function y = chebyshevMultiInterpolate(yData,tData,idxKnot,t)
%
% This function is a wrapper for chebyshevInterpolate that handles the
% piece-wise chebyshev polynomial trajectory
%
% INPUTS:
%   yData = [nY x nGrid] data values at the grid points
%   tData = [1 x nGrid] grid times
%   idxKnot = indices into tData marking the segment boundaries
%   t = [1 x nt] query times
%
% OUTPUTS:
%   y = [nY x nt] interpolated values at the query times
%
% All points are considered valid, so extend edge bins:
Tbins = [-inf, tData(idxKnot), inf];
%Figure out which bins each query is in:
% NOTE(review): histc is deprecated in newer MATLAB releases in favor of
% histcounts/discretize -- confirm the supported MATLAB versions.
[~, idx] = histc(t,Tbins);
% Loop over each segment of the trajectory:
ny = size(yData,1);
nt = length(t);
gridIdx = [1, idxKnot, length(tData)];
nSegment = length(gridIdx)-1;
y = zeros(ny,nt);
for i=1:nSegment
if sum(idx==i)>0 % Then there are points to evaluate here!
% Interpolate this segment's data over its own time span:
y(:,(idx==i)) = chebyshevInterpolate(...
yData(:,gridIdx(i):gridIdx(i+1)),...
t(idx==i),...
tData(gridIdx([i,i+1])) );
end
end
end
%%%%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%%%%
function [y, Dy, DDy, DDDy] = chebyshevInterpolate(f,t,d)
%[y, Dy, DDy, DDDy] = chebyshevInterpolate(f,t,d)
%
% This function uses barycentric interpolation to evaluate a chebyshev
% polynomial. Technical details are taken from the book: Approximation
% Theory and Approximation Practice by Lloyd Trefethen. The core equation
% that I use can be found as Theorem 5.2 in this book, and is called
% Barycentric Interpolation.
%
% Written by Matthew Kelly
% October 27, 2013
%   Updated: November 8, 2013
% Cornell University
%
% This function computes the value of the chebyshev polynomial that is
% defined by the vector f, at the inputs t. It can also be used to return
% the first and second derivatives of y with respect to t.
%
% INPUTS:
%   f = [KxN] value of the chebyshev polynomial at each of the chebyshev
%       points (these can be computed using chebyshevPoints.m). Each row
%       represents a single set of chebyshev node values for a single
%       state.
%   t = [1xM] vector of inputs to be evaluated (monotonically increasing)
%   d = [1x2] vector specifying the domain of the polynomial
%
% OUTPUTS:
%   y = [KxM] the value of the interpolant at each point in t
%   Dy = first derivative of y with respect to t
%   DDy = second derivative of y with respect to t
%   DDDy = third derivative of y with respect to t
%
% What is happening here, in plain english:
%
%   There are several Chebyshev points (or nodes) that are spread out
%   across the interval d using a special grid spacing. We will call these
%   points x. For each of the points in x, there is a corresponding value
%   of the chebyshev function, we'll call it f.
%
%   Let's say that we want to find the value of the approximation for some
%   input that is not in x. This can be done using a interpolation of the
%   values in f:
%                   y = c(t,x).*f
%
%   Note that the weighting terms are a function of both the desired input
%   and the chebyshev points. It turns out that there is a more stable and
%   efficient way to calculate this interpolation, which is roughly of the
%   form:
%                   y = (k(t,x).*f)/sum(k(t,x))
%
%   This code evaluates k(t,x) which it then uses to evaluate and return y.
%
%
% NOTE - The base algorithm (described above) is not defined for points in
% t that are close to the chebyshev nodes in x. If a point in t matches a
% point in x, then an additional calculation is required. If a point in t
% is very close to a gridpoint, then there is a slight loss of accuracy,
% which is more pronounced.
%
%Check to see if the t vector is on the proper domain:
idxBndFail = t<min(d) | t>max(d);
%Get the chebyshev points
[k,n] = size(f);
x = chebyshevPoints(n,d);
ONE1 = ones(k,1);
ONE2 = ones(1,length(t));
%Loop through each chebyshev node.
%Accumulate the barycentric numerator and denominator; the alternating
%sign and the halved end-point terms are the barycentric weights for
%2nd-kind chebyshev points.
num = zeros(k,length(t));
den = zeros(k,length(t));
for i=1:n
val = ONE1*(1./(t-x(i)));
if mod(i,2)==1, val=-val; end;
if i==1 || i==n
num = num + 0.5*(f(:,i)*ONE2).*val;
den = den + 0.5*(val);
else
num = num + (f(:,i)*ONE2).*val;
den = den + val;
end
end
%compute the solution:
y = num./den;
%Check for any values that were too close to nodes and correct them
%(a query exactly at a node produces 0/0 = NaN above; fall back to
%linear interpolation of the node values for those columns)
nanIdx = isnan(y);
if sum(sum(nanIdx))>0
nanRowIdx = max(nanIdx,[],1);
y(:,nanRowIdx) = interp1(x',f',t(nanRowIdx)')';
end
%%%% Replace any out-of-bound queries with NaN:
y(:,idxBndFail) = nan;
%%%% Derivative Calculations %%%%
% NOTE(review): the branches below pass the 2-element domain d to
% chebyshevDerivative, which expects the full node vector, and the
% nargout==3 / nargout==4 branches request multiple outputs from a
% function that declares only one. These branches appear broken -- confirm
% before using the derivative outputs. (This file is deprecated.)
if nargout == 2
Df = chebyshevDerivative(f,d);
Dy = chebyshevInterpolate(Df,t,d);
Dy(:,idxBndFail) = nan;
elseif nargout == 3
[Df, DDf] = chebyshevDerivative(f,d);
Dy = chebyshevInterpolate(Df,t,d);
DDy = chebyshevInterpolate(DDf,t,d);
Dy(:,idxBndFail) = nan;
DDy(:,idxBndFail) = nan;
elseif nargout == 4
[Df, DDf, DDDf] = chebyshevDerivative(f,d);
Dy = chebyshevInterpolate(Df,t,d);
DDy = chebyshevInterpolate(DDf,t,d);
DDDy = chebyshevInterpolate(DDDf,t,d);
Dy(:,idxBndFail) = nan;
DDy(:,idxBndFail) = nan;
DDDy(:,idxBndFail) = nan;
end
end
|
github
|
xuhuairuogu/OptimTraj-master
|
drawCartPoleAnim.m
|
.m
|
OptimTraj-master/demo/cartPole/drawCartPoleAnim.m
| 2,133 |
utf_8
|
2334402558a3114d7f969148319c70cd
|
function drawCartPoleAnim(~,p,xLow, xUpp, yLow, yUpp)
% drawCartPoleAnim(~,p,xLow, xUpp, yLow, yUpp)
%
% Draws a single animation frame of the cart-pole system into the current
% figure: the rail, the cart body, the pendulum rod, and the bob/hinge.
%
% INPUTS:
%   ~ = time stamp (unused in this frame-drawing routine)
%   p = [4,1] = [p1;p2] = cart position stacked on pole-tip position
%   xLow, xUpp, yLow, yUpp = scalar bounds for the plot window (before padding)
%
clf; hold on;
Cart_Width = 0.15;
Cart_Height = 0.05;
% Split the stacked state into cart position (p1) and pole tip (p2):
p1 = p(1:2,:);
p2 = p(3:4,:);
Pole_Width = 4;  %pixels
%%%% Figure out the window size (pad by a fraction of the cart size):
xLow = xLow - 0.7*Cart_Width;
xUpp = xUpp + 0.7*Cart_Width;
yLow = yLow - 0.7*Cart_Height;
yUpp = yUpp + 0.7*Cart_Height;
Limits = [xLow,xUpp,yLow,yUpp];
%%%% Get color map for the figure
% map = colormap;
% tMap = linspace(t(1),t(end),size(map,1))';
%%%% Plot Rails (drawn at the height of the cart's lower edge)
plot([Limits(1) Limits(2)],-0.5*Cart_Height*[1,1],'k-','LineWidth',2)
%%%% Draw the trace of the pendulum tip (continuously vary color)
% nTime = length(t);
% for i=1:(nTime-1)
%     idx = i:(i+1);
%     x = p2(1,idx);
%     y = p2(2,idx);
%     c = interp1(tMap,map,mean(t(idx)));
%     plot(x,y,'Color',c);
% end
%%%% Compute the frames for plotting:
% tFrame = linspace(t(1), t(end), nFrame);
% cart = interp1(t',p1',tFrame')';
% pole = interp1(t',p2',tFrame')';
cart = p1;
pole = p2;
% for i = 1:nFrame
% Compute color:
color = [0.2,0.7,0.1];  %interp1(tMap,map,tFrame(i));
%Plot Cart (rectangle centered horizontally on the cart position)
x = cart(1) - 0.5*Cart_Width;
y = -0.5*Cart_Height;
w = Cart_Width;
h = Cart_Height;
hCart = rectangle('Position',[x,y,w,h],'LineWidth',2);
set(hCart,'FaceColor',color);
set(hCart,'EdgeColor',0.8*color);
%Plot Pendulum (rod from cart hinge to pole tip)
Rod_X = [cart(1), pole(1)];
Rod_Y = [cart(2), pole(2)];
plot(Rod_X,Rod_Y,'k-','LineWidth',Pole_Width,'Color',color)
%Plot Bob and hinge (drawn last so they sit on top of the rod)
plot(pole(1),pole(2),'k.','MarkerSize',40,'Color',color)
plot(cart(1),cart(2),'k.','MarkerSize',60,'Color',color)
% end
%These commands keep the window from automatically rescaling in funny ways.
axis(Limits);
axis('equal');
axis manual;
axis off;
end
function [xLow, xUpp, yLow, yUpp] = getBounds(p1,p2)
%
% Computes the axis-aligned bounding box that contains every point in
% both p1 and p2 (each given as [2,n] = [x;y] point sets).
%
allPts = [p1, p2];
xRange = [min(allPts(1,:)), max(allPts(1,:))];
yRange = [min(allPts(2,:)), max(allPts(2,:))];
xLow = xRange(1);
xUpp = xRange(2);
yLow = yRange(1);
yUpp = yRange(2);
end
|
github
|
xuhuairuogu/OptimTraj-master
|
drawCartPoleTraj.m
|
.m
|
OptimTraj-master/demo/cartPole/drawCartPoleTraj.m
| 2,226 |
utf_8
|
d998353b28a3858bf2e12e289f80f3a0
|
function drawCartPoleTraj(t,p1,p2,nFrame)
% drawCartPoleTraj(t,p1,p2,nFrame)
%
% Draws the full cart-pole trajectory into the current figure: the rail,
% the continuously-colored trace of the pendulum tip, and nFrame "freeze
% frames" of the cart/pole, colored by the time at which they occur.
%
% INPUTS:
%   t = [1,n] = time stamp for the data in p1 and p2
%   p1 = [2,n] = [x;y] = position of center of the cart
%   p2 = [2,n] = [x;y] = position of tip of the pendulum
%   nFrame = scalar integer = number of "freeze" frames to display
%
clf; hold on;
Cart_Width = 0.15;
Cart_Height = 0.05;
Pole_Width = 4;  %pixels
%%%% Figure out the window size (bounding box of data, padded by cart size):
[xLow, xUpp, yLow, yUpp] = getBounds(p1,p2);
xLow = xLow - 0.7*Cart_Width;
xUpp = xUpp + 0.7*Cart_Width;
yLow = yLow - 0.7*Cart_Height;
yUpp = yUpp + 0.7*Cart_Height;
Limits = [xLow,xUpp,yLow,yUpp];
%%%% Get color map for the figure (tMap maps trajectory time -> colormap row)
map = colormap;
tMap = linspace(t(1),t(end),size(map,1))';
%%%% Plot Rails
plot([Limits(1) Limits(2)],-0.5*Cart_Height*[1,1],'k-','LineWidth',2)
%%%% Draw the trace of the pendulum tip (continuously vary color)
nTime = length(t);
for i=1:(nTime-1)
    idx = i:(i+1);
    x = p2(1,idx);
    y = p2(2,idx);
    c = interp1(tMap,map,mean(t(idx)));  % color for the midpoint time of this segment
    plot(x,y,'Color',c);
end
%%%% Compute the frames for plotting (uniformly spaced in time):
tFrame = linspace(t(1), t(end), nFrame);
cart = interp1(t',p1',tFrame')';
pole = interp1(t',p2',tFrame')';
for i = 1:nFrame
    % Compute color:
    color = interp1(tMap,map,tFrame(i));
    %Plot Cart
    x = cart(1,i) - 0.5*Cart_Width;
    y = -0.5*Cart_Height;
    w = Cart_Width;
    h = Cart_Height;
    hCart = rectangle('Position',[x,y,w,h],'LineWidth',2);
    set(hCart,'FaceColor',color);
    set(hCart,'EdgeColor',0.8*color);
    %Plot Pendulum
    Rod_X = [cart(1,i), pole(1,i)];
    Rod_Y = [cart(2,i), pole(2,i)];
    plot(Rod_X,Rod_Y,'k-','LineWidth',Pole_Width,'Color',color)
    %Plot Bob and hinge (drawn last so they sit on top of the rod)
    plot(pole(1,i),pole(2,i),'k.','MarkerSize',40,'Color',color)
    plot(cart(1,i),cart(2,i),'k.','MarkerSize',60,'Color',color)
end
%These commands keep the window from automatically rescaling in funny ways.
axis(Limits);
axis('equal');
axis manual;
axis off;
end
function [xLow, xUpp, yLow, yUpp] = getBounds(p1,p2)
%
% Returns the extremes (bounding box) of the combined point sets p1 and
% p2, where each is a [2,n] = [x;y] array of positions.
%
pts = [p1, p2];
xs = pts(1,:);
ys = pts(2,:);
xLow = min(xs);
xUpp = max(xs);
yLow = min(ys);
yUpp = max(ys);
end
|
github
|
xuhuairuogu/OptimTraj-master
|
Derive_Equations.m
|
.m
|
OptimTraj-master/demo/fiveLinkBiped/Derive_Equations.m
| 22,822 |
utf_8
|
db9aaefe0015ed46a21528cd1f049d49
|
function Derive_Equations()
%%%% Derive Equations - Five Link Biped Model %%%%
%
% This function derives the equations of motion, as well as some other useful
% equations (kinematics, contact forces, ...) for the five-link biped
% model. Each section writes its results to an auto-generated MATLAB
% function file (autoGen_*.m) via matlabFunction.
%
%
% Nomenclature:
%
% - There are five links, which will be numbered starting with "1" for the
% stance leg tibia, increasing as the links are farther from the base joint,
% and ending with "5" for the swing leg tibia.
% 1 - stance leg tibia (lower leg)
% 2 - stance leg femur (upper leg)
% 3 - torso
% 4 - swing leg femur
% 5 - swing leg tibia
%
% - This script uses absolute angles, which are represented with "q". All
% angles use positive convention, with the zero angle corresponding to a
% vertically aligned link configuration. [q] = [0] has the torso balanced
% upright, with both legs fully extended straight below it.
%
% - Derivatives with respect to time are notated by prepending a "d". For
% example the rate of change in an absolute angle is "dq" and angular
% acceleration would be "ddq"
%
% - Joint positions are given with "P", center of mass positions are "G"
%
clc; clear;
disp('Creating variables and derivatives...')
%%%% Absolute orientation (angle) of each link
q1 = sym('q1', 'real');
q2 = sym('q2','real');
q3 = sym('q3','real');
q4 = sym('q4','real');
q5 = sym('q5','real');
%%%% Absolute angular rate of each link
dq1 = sym('dq1','real');
dq2 = sym('dq2','real');
dq3 = sym('dq3','real');
dq4 = sym('dq4','real');
dq5 = sym('dq5','real');
%%%% Absolute angular acceleration of each link
ddq1 = sym('ddq1','real');
ddq2 = sym('ddq2','real');
ddq3 = sym('ddq3','real');
ddq4 = sym('ddq4','real');
ddq5 = sym('ddq5','real');
%%%% Torques at each joint
u1 = sym('u1','real'); %Stance foot
u2 = sym('u2','real'); %Stance knee
u3 = sym('u3','real'); %Stance hip
u4 = sym('u4','real'); %Swing hip
u5 = sym('u5','real'); %Swing knee
%%%% Mass of each link
m1 = sym('m1','real');
m2 = sym('m2','real');
m3 = sym('m3','real');
m4 = sym('m4','real');
m5 = sym('m5','real');
%%%% Distance between parent joint and link center of mass
c1 = sym('c1','real');
c2 = sym('c2','real');
c3 = sym('c3','real');
c4 = sym('c4','real');
c5 = sym('c5','real');
%%%% Length of each link
l1 = sym('l1','real');
l2 = sym('l2','real');
l3 = sym('l3','real');
l4 = sym('l4','real');
l5 = sym('l5','real');
%%%% Moment of inertia of each link about its own center of mass
I1 = sym('I1','real');
I2 = sym('I2','real');
I3 = sym('I3','real');
I4 = sym('I4','real');
I5 = sym('I5','real');
g = sym('g','real'); % Gravity
Fx = sym('Fx','real'); %Horizontal contact force at stance foot
Fy = sym('Fy','real'); %Vertical contact force at stance foot
empty = sym('empty','real'); %Used for vectorization, user should pass a vector of zeros
t = sym('t','real');  %dummy continuous time
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
%                 Set up coordinate system and unit vectors               %
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
i = sym([1;0]);   %Horizontal axis
j = sym([0;1]);   %Vertical axis
e1 = cos(q1)*(j) + sin(q1)*(-i);  %unit vector from P0 -> P1, (contact point to stance knee)
e2 = cos(q2)*(j) + sin(q2)*(-i);  %unit vector from P1 -> P2, (stance knee to hip)
e3 = cos(q3)*(j) + sin(q3)*(-i);  %unit vector from P2 -> P3, (hip to shoulders);
e4 = -cos(q4)*(j) - sin(q4)*(-i);  %unit vector from P2 -> P4, (hip to swing knee);
e5 = -cos(q5)*(j) - sin(q5)*(-i);  %unit vector from P4 -> P5, (swing knee to swing foot);
P0 = 0*i + 0*j;   %stance foot = Contact point = origin
P1 = P0 + l1*e1;  %stance knee
P2 = P1 + l2*e2;  %hip
P3 = P2 + l3*e3;  %shoulders
P4 = P2 + l4*e4;  %swing knee
P5 = P4 + l5*e5;  %swing foot
G1 = P1 - c1*e1;  % CoM stance leg tibia
G2 = P2 - c2*e2;  % CoM stance leg femur
G3 = P3 - c3*e3;  % CoM torso
G4 = P2 + c4*e4;  % CoM swing leg femur
G5 = P4 + c5*e5;  % CoM swing leg tibia
G = (m1*G1 + m2*G2 + m3*G3 + m4*G4 + m5*G5)/(m1+m2+m3+m4+m5);   %Center of mass for entire robot
%%%% Define a function for doing '2d' cross product: dot(a x b, k)
cross2d = @(a,b)(a(1)*b(2) - a(2)*b(1));
%%%% Weight of each link:
w1 = -m1*g*j;
w2 = -m2*g*j;
w3 = -m3*g*j;
w4 = -m4*g*j;
w5 = -m5*g*j;
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
%                              Derivatives                                %
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
q = [q1;q2;q3;q4;q5];
dq = [dq1;dq2;dq3;dq4;dq5];
ddq = [ddq1;ddq2;ddq3;ddq4;ddq5];
u = [u1;u2;u3;u4;u5];
z = [t;q;dq;u];  % time-varying vector of inputs
% Neat trick to compute derivatives using the chain rule
derivative = @(in)( jacobian(in,[q;dq])*[dq;ddq] );
% Velocity of the swing foot (used for step constraints)
dP5 = derivative(P5);
% Compute derivatives for the CoM of each link:
dG1 = derivative(G1);  ddG1 = derivative(dG1);
dG2 = derivative(G2);  ddG2 = derivative(dG2);
dG3 = derivative(G3);  ddG3 = derivative(dG3);
dG4 = derivative(G4);  ddG4 = derivative(dG4);
dG5 = derivative(G5);  ddG5 = derivative(dG5);
dG = derivative(G);  ddG = derivative(dG);
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
%                          Calculations:                                  %
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
% Each nested function below shares this workspace and writes one or more
% autoGen_*.m files to disk.
singleStanceDynamics();
objectiveFunctions();
heelStrikeDynamics();
mechanicalEnergy();
contactForces();
kinematics();
disp('Done!');
%
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
%                        Single-Stance Dynamics                           %
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
% I solve the dynamics here by carefully selecting angular momentum balance
% (AMB) equations about each joint, working my way out the kinematic tree
% from the root.
    function singleStanceDynamics()
        disp('Deriving single stance dynamics...')
        %%%% AMB - entire system @ P0
        eqnTorque0 = ...
            cross2d(G1-P0,w1) + ...
            cross2d(G2-P0,w2) + ...
            cross2d(G3-P0,w3) + ...
            cross2d(G4-P0,w4) + ...
            cross2d(G5-P0,w5) + ...
            u1;
        eqnInertia0 = ...
            cross2d(G1-P0,m1*ddG1) + ddq1*I1 + ...
            cross2d(G2-P0,m2*ddG2) + ddq2*I2 + ...
            cross2d(G3-P0,m3*ddG3) + ddq3*I3 + ...
            cross2d(G4-P0,m4*ddG4) + ddq4*I4 + ...
            cross2d(G5-P0,m5*ddG5) + ddq5*I5;
        %%%% AMB - swing leg, torso, stance femur @ stance knee
        eqnTorque1 = ...
            cross2d(G2-P1,w2) + ...
            cross2d(G3-P1,w3) + ...
            cross2d(G4-P1,w4) + ...
            cross2d(G5-P1,w5) + ...
            u2;
        eqnInertia1 = ...
            cross2d(G2-P1,m2*ddG2) + ddq2*I2 + ...
            cross2d(G3-P1,m3*ddG3) + ddq3*I3 + ...
            cross2d(G4-P1,m4*ddG4) + ddq4*I4 + ...
            cross2d(G5-P1,m5*ddG5) + ddq5*I5 ;
        %%%% AMB - swing leg, torso @ hip
        eqnTorque2 = ...
            cross2d(G3-P2,w3) + ...
            cross2d(G4-P2,w4) + ...
            cross2d(G5-P2,w5) + ...
            u3;
        eqnInertia2 = ...
            cross2d(G3-P2,m3*ddG3) + ddq3*I3 + ...
            cross2d(G4-P2,m4*ddG4) + ddq4*I4 + ...
            cross2d(G5-P2,m5*ddG5) + ddq5*I5 ;
        %%%% AMB - swing leg @ hip
        eqnTorque3 = ...
            cross2d(G4-P2,w4) + ...
            cross2d(G5-P2,w5) + ...
            u4;
        eqnInertia3 = ...
            cross2d(G4-P2,m4*ddG4) + ddq4*I4 + ...
            cross2d(G5-P2,m5*ddG5) + ddq5*I5 ;
        %%%% AMB - swing tibia @ swing knee
        eqnTorque4 = ...
            cross2d(G5-P4,w5) + ...
            u5;
        eqnInertia4 = ...
            cross2d(G5-P4,m5*ddG5) + ddq5*I5 ;
        %%%% Collect and solve equations:
        eqns = [...
            eqnTorque0 - eqnInertia0;
            eqnTorque1 - eqnInertia1;
            eqnTorque2 - eqnInertia2;
            eqnTorque3 - eqnInertia3;
            eqnTorque4 - eqnInertia4];
        [MM, FF] = equationsToMatrix(eqns,ddq);  % ddq = MM\ff;
        %%%% Compute gradients:
        [m, mi, mz, mzi, mzd] = computeGradients(MM,z,empty);
        [f, fi, fz, fzi, fzd] = computeGradients(FF,z,empty);
        % Write function file:
        matlabFunction(m, mi, f, fi,...   %dynamics
            mz, mzi, mzd, fz, fzi, fzd,...  %gradients
            'file','autoGen_dynSs.m',...
            'vars',{...
            'q1','q2','q3','q4','q5',...
            'dq1','dq2','dq3','dq4','dq5',...
            'u1','u2','u3','u4','u5',...
            'm1','m2','m3','m4','m5',...
            'I1','I2','I3','I4','I5',...
            'l1','l2','l3','l4',...
            'c1','c2','c3','c4','c5',...
            'g','empty'});
    end
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
%                       Objective Functions                               %
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
    function objectiveFunctions()
        %%%% Torque-squared objective function
        F = u1*u1 + u2*u2 + u3*u3 + u4*u4 + u5*u5;
        [f, ~, fz, fzi, ~] = computeGradients(F,z,empty);
        matlabFunction(f,fz,fzi,...
            'file','autoGen_obj_torqueSquared.m',...
            'vars',{'u1','u2','u3','u4','u5'});
    end
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
%                      Heel-Strike Dynamics                               %
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
    function heelStrikeDynamics()
        disp('Deriving heel-strike dynamics...')
        %%%% Notes:
        %   xF - heelStrike(xI) --> constraint --> 0
        %   xF - collision(footSwap(xI));
        %
        % Angles before heel-strike:
        q1m = sym('q1m','real');
        q2m = sym('q2m','real');
        q3m = sym('q3m','real');
        q4m = sym('q4m','real');
        q5m = sym('q5m','real');
        qm = [q1m;q2m;q3m;q4m;q5m];
        % Angles after heel-strike
        q1p = sym('q1p','real');
        q2p = sym('q2p','real');
        q3p = sym('q3p','real');
        q4p = sym('q4p','real');
        q5p = sym('q5p','real');
        qp = [q1p;q2p;q3p;q4p;q5p];
        % Angular rates before heel-strike:
        dq1m = sym('dq1m','real');
        dq2m = sym('dq2m','real');
        dq3m = sym('dq3m','real');
        dq4m = sym('dq4m','real');
        dq5m = sym('dq5m','real');
        dqm = [dq1m;dq2m;dq3m;dq4m;dq5m];
        % Angular rates after heel-strike
        dq1p = sym('dq1p','real');
        dq2p = sym('dq2p','real');
        dq3p = sym('dq3p','real');
        dq4p = sym('dq4p','real');
        dq5p = sym('dq5p','real');
        dqp = [dq1p;dq2p;dq3p;dq4p;dq5p];
        % Compute kinematics before heel-strike:
        inVars = {'q1','q2','q3','q4','q5','dq1','dq2','dq3','dq4','dq5'};
        outVarsM = {'q1m','q2m','q3m','q4m','q5m','dq1m','dq2m','dq3m','dq4m','dq5m'};
        % P0m = subs(P0,inVars,outVarsM);
        P1m = subs(P1,inVars,outVarsM);
        P2m = subs(P2,inVars,outVarsM);
        % P3m = subs(P3,inVars,outVarsM);
        P4m = subs(P4,inVars,outVarsM);
        P5m = subs(P5,inVars,outVarsM);
        dP5m = subs(dP5,inVars,outVarsM);
        G1m = subs(G1,inVars,outVarsM);
        G2m = subs(G2,inVars,outVarsM);
        G3m = subs(G3,inVars,outVarsM);
        G4m = subs(G4,inVars,outVarsM);
        G5m = subs(G5,inVars,outVarsM);
        dG1m = subs(dG1,inVars,outVarsM);
        dG2m = subs(dG2,inVars,outVarsM);
        dG3m = subs(dG3,inVars,outVarsM);
        dG4m = subs(dG4,inVars,outVarsM);
        dG5m = subs(dG5,inVars,outVarsM);
        % Compute kinematics after heel-strike:
        outVarsP = {'q1p','q2p','q3p','q4p','q5p','dq1p','dq2p','dq3p','dq4p','dq5p'};
        P0p = subs(P0,inVars,outVarsP);
        P1p = subs(P1,inVars,outVarsP);
        P2p = subs(P2,inVars,outVarsP);
        % P3p = subs(P3,inVars,outVarsP);
        P4p = subs(P4,inVars,outVarsP);
        % P5p = subs(P5,inVars,outVarsP);
        dP5p = subs(dP5,inVars,outVarsP);
        G1p = subs(G1,inVars,outVarsP);
        G2p = subs(G2,inVars,outVarsP);
        G3p = subs(G3,inVars,outVarsP);
        G4p = subs(G4,inVars,outVarsP);
        G5p = subs(G5,inVars,outVarsP);
        dG1p = subs(dG1,inVars,outVarsP);
        dG2p = subs(dG2,inVars,outVarsP);
        dG3p = subs(dG3,inVars,outVarsP);
        dG4p = subs(dG4,inVars,outVarsP);
        dG5p = subs(dG5,inVars,outVarsP);
        %%%% AMB - entire system @ New stance foot
        eqnHs0m = ...  %Before collision
            cross2d(G1m-P5m,m1*dG1m) + dq1m*I1 + ...
            cross2d(G2m-P5m,m2*dG2m) + dq2m*I2 + ...
            cross2d(G3m-P5m,m3*dG3m) + dq3m*I3 + ...
            cross2d(G4m-P5m,m4*dG4m) + dq4m*I4 + ...
            cross2d(G5m-P5m,m5*dG5m) + dq5m*I5;
        eqnHs0 = ...  %After collision
            cross2d(G1p-P0p,m1*dG1p) + dq1p*I1 + ...
            cross2d(G2p-P0p,m2*dG2p) + dq2p*I2 + ...
            cross2d(G3p-P0p,m3*dG3p) + dq3p*I3 + ...
            cross2d(G4p-P0p,m4*dG4p) + dq4p*I4 + ...
            cross2d(G5p-P0p,m5*dG5p) + dq5p*I5;
        %%%% AMB - new swing leg, torso, stance femur @ stance knee
        eqnHs1m = ...  %Before collision
            cross2d(G1m-P4m,m1*dG1m) + dq1m*I1 + ...
            cross2d(G2m-P4m,m2*dG2m) + dq2m*I2 + ...
            cross2d(G3m-P4m,m3*dG3m) + dq3m*I3 + ...
            cross2d(G4m-P4m,m4*dG4m) + dq4m*I4;
        eqnHs1 = ...  %After collision
            cross2d(G2p-P1p,m2*dG2p) + dq2p*I2 + ...
            cross2d(G3p-P1p,m3*dG3p) + dq3p*I3 + ...
            cross2d(G4p-P1p,m4*dG4p) + dq4p*I4 + ...
            cross2d(G5p-P1p,m5*dG5p) + dq5p*I5;
        %%%% AMB - swing leg, torso @ new hip
        eqnHs2m = ...  %Before collision
            cross2d(G3m-P2m,m3*dG3m) + dq3m*I3 + ...
            cross2d(G2m-P2m,m2*dG2m) + dq2m*I2 + ...
            cross2d(G1m-P2m,m1*dG1m) + dq1m*I1;
        eqnHs2 = ...  %After collision
            cross2d(G3p-P2p,m3*dG3p) + dq3p*I3 + ...
            cross2d(G4p-P2p,m4*dG4p) + dq4p*I4 + ...
            cross2d(G5p-P2p,m5*dG5p) + dq5p*I5;
        %%%% AMB - swing leg @ new hip
        eqnHs3m = ...  %Before collision
            cross2d(G1m-P2m,m1*dG1m) + dq1m*I1 + ...
            cross2d(G2m-P2m,m2*dG2m) + dq2m*I2;
        eqnHs3 = ...  %After collision
            cross2d(G4p-P2p,m4*dG4p) + dq4p*I4 + ...
            cross2d(G5p-P2p,m5*dG5p) + dq5p*I5;
        %%%% AMB - swing tibia @ new swing knee
        eqnHs4m = ...  %Before collision
            cross2d(G1m-P1m,m1*dG1m) + dq1m*I1;
        eqnHs4 = ...  %After collision
            cross2d(G5p-P4p,m5*dG5p) + dq5p*I5;
        %%%% Collect and solve equations:
        eqnHs = [...
            eqnHs0m - eqnHs0;
            eqnHs1m - eqnHs1;
            eqnHs2m - eqnHs2;
            eqnHs3m - eqnHs3;
            eqnHs4m - eqnHs4];
        [MM, FF] = equationsToMatrix(eqnHs,dqp);
        %%%% Compute gradients:
        tp = sym('tp','real'); %Initial trajectory time
        tm = sym('tm','real'); %Final trajectory time
        zBnd = [tp;qp;dqp;tm;qm;dqm];
        [m, mi, mz, mzi, mzd] = computeGradients(MM,zBnd,empty);
        [f, fi, fz, fzi, fzd] = computeGradients(FF,zBnd,empty);
        % Heel-strike
        matlabFunction(m, mi, f, fi,...   %dynamics
            mz, mzi, mzd, fz, fzi, fzd,...  %gradients
            'file','autoGen_cst_heelStrike.m',...
            'vars',{...
            'q1p','q2p','q3p','q4p','q5p',...
            'q1m','q2m','q3m','q4m','q5m',...
            'dq1m','dq2m','dq3m','dq4m','dq5m',...
            'm1','m2','m3','m4','m5',...
            'I1','I2','I3','I4','I5',...
            'l1','l2','l3','l4','l5',...
            'c1','c2','c3','c4','c5','empty'});
        % Collision velocity of the swing foot:
        cst = [-dP5p(2); dP5m(2)];   %Swing foot velocity before and after collision (negative sign is intentional, since output is constrained to be negative);
        cstJac = jacobian(cst,zBnd);  %Gradient
        matlabFunction(cst, cstJac,...
            'file','autoGen_cst_footVel.m',...
            'vars',{...
            'q1p','q2p','q4p','q5p',...
            'q1m','q2m','q4m','q5m',...
            'dq1p','dq2p','dq4p','dq5p',...
            'dq1m','dq2m','dq4m','dq5m',...
            'l1','l2','l4','l5'});
        % Step length and height constraint:
        stepLength = sym('stepLength','real');
        ceq = [P5m(1)-stepLength; P5m(2)];
        ceqJac = jacobian(ceq,zBnd);  %Gradient
        matlabFunction(ceq, ceqJac,...
            'file','autoGen_cst_steplength.m',...
            'vars',{...
            'q1m','q2m','q4m','q5m',...
            'l1','l2','l4','l5','stepLength'});
    end
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
%                       Mechanical Energy                                 %
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
    function mechanicalEnergy()
        disp('Deriving mechanical energy...')
        %%%% Energy:
        KineticEnergy = ...
            0.5*m1*dot(dG1,dG1) + 0.5*I1*dq1^2 + ...
            0.5*m2*dot(dG2,dG2) + 0.5*I2*dq2^2 + ...
            0.5*m3*dot(dG3,dG3) + 0.5*I3*dq3^2 + ...
            0.5*m4*dot(dG4,dG4) + 0.5*I4*dq4^2 + ...
            0.5*m5*dot(dG5,dG5) + 0.5*I5*dq5^2;
        PotentialEnergy = ...
            m1*g*G1(2) + ...
            m2*g*G2(2) + ...
            m3*g*G3(2) + ...
            m4*g*G4(2) + ...
            m5*g*G5(2);
        matlabFunction(KineticEnergy, PotentialEnergy,...
            'file','autoGen_energy.m',...
            'vars',{...
            'q1','q2','q3','q4','q5',...
            'dq1','dq2','dq3','dq4','dq5',...
            'm1','m2','m3','m4','m5',...
            'I1','I2','I3','I4','I5',...
            'l1','l2','l3','l4',...
            'c1','c2','c3','c4','c5',...
            'g'},...
            'outputs',{'KE','PE'});
    end
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
%                        Contact Forces                                   %
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
    function contactForces()
        %%%% Contact Forces (from linear momentum balance on whole robot):
        eqnForce5 = w1 + w2 + w3 + w4 + w5 + Fx*i + Fy*j;
        eqnInertia5 = (m1+m2+m3+m4+m5)*ddG;
        [AA,bb] = equationsToMatrix(eqnForce5-eqnInertia5,[Fx;Fy]);
        ContactForces = AA\bb;
        matlabFunction(ContactForces(1),ContactForces(2),...
            'file','autoGen_contactForce.m',...
            'vars',{...
            'q1','q2','q3','q4','q5',...
            'dq1','dq2','dq3','dq4','dq5',...
            'ddq1','ddq2','ddq3','ddq4','ddq5',...
            'm1','m2','m3','m4','m5',...
            'l1','l2','l3','l4',...
            'c1','c2','c3','c4','c5',...
            'g'},...
            'outputs',{'Fx','Fy'});
    end
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
%                    Write Kinematics Files                               %
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
    function kinematics()
        disp('Writing kinematics files...')
        P = [P1; P2; P3; P4; P5];
        Gvec = [G1; G2; G3; G4; G5];
        % Used for plotting and animation
        matlabFunction(P,Gvec,'file','autoGen_getPoints.m',...
            'vars',{...
            'q1','q2','q3','q4','q5',...
            'l1','l2','l3','l4','l5',...
            'c1','c2','c3','c4','c5'},...
            'outputs',{'P','Gvec'});
    end
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Helper Functions %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function [m, mi, mz, mzi, dim] = computeGradients(M,z,empty)
%
% This function computes the gradients of a matrix M with respect to the
% variables in z, and then returns both the matrix and its gradient as
% column vectors of their non-zero elements, along with the linear indicies
% to unpack them. It also simplifies m and mz.
%
% INPUTS:
%   M = [na, nb] = symbolic matrix
%   z = [nc, 1] = symbolic vector
%   empty = symbolic variable used to pad constant terms so the generated
%           code vectorizes (see vectorizeHack; caller passes zeros for it)
%
% OUTPUTS:
%   m = [nd, 1] = symbolic vector of non-zero elements in M
%   mi = [nd, 1] = linear indicies to map m --> [na,nb] matrix
%   mz = [ne, 1] = symbolic vector of non-zero elements in Mz
%   mzi = [ne, 1] = linear indicies to map mz --> [na,nb,nc] array
%   dim = [3,1] = [na,nb,nc] = dimensions of 3d version of mz
%
[na, nb] = size(M);
nc = size(z,1);
M = simplify(M);
mz2 = jacobian(M(:),z);  %Compute jacobian of M, by first reshaping M to be a column vector
mz3 = reshape(mz2,na,nb,nc); %Expand back out to a three-dimensional array
mz3 = simplify(mz3);
% Extract non-zero elements to a column vector:
mi = find(M);
m = M(mi);
mzi = find(mz3);
mz = mz3(mzi); mz = mz(:);  %Collapse to a column vector
dim = [na,nb,nc];
% Pad any constant terms with "empty" to permit vectorization:
m = vectorizeHack(m, z, empty);
mz = vectorizeHack(mz, z, empty);
end
function x = vectorizeHack(x, z, empty)
%
% This function searches for any elements of x that are not dependent on
% any element of z. In this case, the automatically generated code will
% fail to vectorize properly. One solution is to add an array of zeros
% (empty) to the element.
%
% x = column vector of symbolic expressions
% z = column vector of symbolic variables
% empty = symbolic variable, which the user will set equal to zero.
%
% Compute dependencies
g = jacobian(x,z);
% Check for rows of x with no dependence on z (i.e. an all-zero jacobian row)
[n,m] = size(g);
idxConst = true(n,1);
for i=1:n
    for j=1:m
        if ~isequal(sym(0),g(i,j))
            % Found a dependency -- this row is not constant:
            idxConst(i) = false;
            break;
        end
    end
end
% Add empty to those entries
x(idxConst) = x(idxConst) + empty;
end
|
github
|
xuhuairuogu/OptimTraj-master
|
dirColGrad.m
|
.m
|
OptimTraj-master/demo/fiveLinkBiped/costOfTransport/dirColGrad.m
| 11,673 |
utf_8
|
f7fd60b58db9ceade9467b4c0c3233f9
|
function soln = dirColGrad(P, problem)
% soln = dirColGrad(P, problem)
%
% OptimTraj utility function - Direct Collocation with Gradients
%
% This function is core function that is called to run the transcription
% for both the "trapezoid" and the "hermiteSimpson" methods when they are
% running analytic gradients.
%
% NOTE(review): the two branches below reference `pack`, `gradInfo`, and
% `weights`, which are never defined in this function. If either branch is
% taken, this code will error on an undefined variable. Presumably the
% transcription setup that builds these was removed or is expected to be
% done by the caller -- confirm before relying on these branches.
%
F = problem.func;
if isempty(P.objective)
    P.objective = @(z)( ...
        grad_objective(z, pack, F.pathObj, F.bndObj, gradInfo, weights) );   %Analytic gradients
end
if isempty(P.constraint)
    % NOTE(review): this checks P.constraint but assigns P.nonlcon --
    % verify which field name the fmincon problem struct is expected to use.
    P.nonlcon = @(z)( ...
        myCstGrad(z, pack, F.dynamics, F.pathCst, F.bndCst, F.defectCst, gradInfo) ); %Analytic gradients
end
%%%% Call fmincon to solve the non-linear program (NLP)
tic;
[zSoln, objVal,exitFlag,output] = fmincon(P);
[tSoln,xSoln,uSoln] = unPackDecVar(zSoln,pack);
nlpTime = toc;
%%%% Store the results:
soln.grid.time = tSoln;
soln.grid.state = xSoln;
soln.grid.control = uSoln;
soln.info = output;
soln.info.nlpTime = nlpTime;
soln.info.exitFlag = exitFlag;
soln.info.objVal = objVal;
soln.problem = problem;   % Return the fully detailed problem struct
end
%%%% ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ %%%%
function [z,pack] = packDecVar(t,x,u)
%
% Collapse the time (t), state (x), and control (u) matrices into a single
% column vector of decision variables.
%
% INPUTS:
%   t = [1, nTime] = time vector (grid points)
%   x = [nState, nTime] = state vector at each grid point
%   u = [nControl, nTime] = control vector at each grid point
%
% OUTPUTS:
%   z = column vector of 2 + nTime*(nState+nControl) decision variables
%   pack = details about how to convert z back into t,x, and u
%       .nTime
%       .nState
%       .nControl
%
[nState, nTime] = size(x);
nControl = size(u,1);
% Only the two endpoint times are decision variables; interior grid
% points are implied by uniform spacing. States and controls are
% flattened column-major (time-major blocks).
z = [t(1); t(end); x(:); u(:)];
pack.nTime = nTime;
pack.nState = nState;
pack.nControl = nControl;
end
%%%% ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ %%%%
function [t,x,u] = unPackDecVar(z,pack)
%
% Recover the time (t), state (x), and control (u) matrices from the
% decision-variable vector produced by packDecVar.
%
% INPUTS:
%   z = column vector of 2 + nTime*(nState+nControl) decision variables
%   pack = details about how to convert z back into t,x, and u
%       .nTime
%       .nState
%       .nControl
%
% OUTPUTS:
%   t = [1, nTime] = time vector (uniformly spaced grid points)
%   x = [nState, nTime] = state vector at each grid point
%   u = [nControl, nTime] = control vector at each grid point
%
nT = pack.nTime;
nS = pack.nState;
nC = pack.nControl;
% The first two entries of z are the endpoint times:
t = linspace(z(1), z(2), nT);
% The state block immediately follows the two times:
xEnd = 2 + nS*nT;
x = reshape(z(3:xEnd), nS, nT);
% The control block is the remainder:
u = reshape(z((xEnd+1):(xEnd + nC*nT)), nC, nT);
end
%%%% ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ %%%%
function gradInfo = grad_computeInfo(pack)
%
% This function computes the matrix dimensions and indicies that are used
% to map the gradients from the user functions to the gradients needed by
% fmincon. The key difference is that the gradients in the user functions
% are with respect to their input (t,x,u) or (t0,x0,tF,xF), while the
% gradients for fmincon are with respect to all decision variables.
%
% INPUTS:
%   pack = details about packing and unpacking the decision variables
%       .nTime
%       .nState
%       .nControl
%
% OUTPUTS:
%   gradInfo = details about how to transform gradients
%       .nDecVar = total number of decision variables
%       .tIdx = [1,2] = indices of the two time decision variables
%       .xuIdx = [nState+nControl, nTime] = decision-variable index of
%                each state/control entry at each grid point
%       .alpha = [2, nTime] = weights mapping [tLow;tUpp] to each t(k)
%       .xGrad, .bndIdxMap = helpers for state and boundary gradients
%
nTime = pack.nTime;
nState = pack.nState;
nControl = pack.nControl;
nDecVar = 2 + nState*nTime + nControl*nTime;
zIdx = 1:nDecVar;
gradInfo.nDecVar = nDecVar;
% Unpack the index vector as if it were a decision vector. Note that the
% "time" output is linspace(1,2,nTime), so its interior entries are
% fractional -- only the two (exact) endpoints are used below.
[tIdx, xIdx, uIdx] = unPackDecVar(zIdx,pack);
gradInfo.tIdx = tIdx([1,end]);
gradInfo.xuIdx = [xIdx;uIdx];
%%%% Compute gradients of time:
% alpha = (0..N-1)/(N-1)
% t = alpha*tUpp + (1-alpha)*tLow
alpha = (0:(nTime-1))/(nTime-1);
gradInfo.alpha = [1-alpha; alpha];
%%%% Compute gradients of state
gradInfo.xGrad = zeros(nState,nTime,nDecVar);
for iTime=1:nTime
    for iState=1:nState
        gradInfo.xGrad(iState,iTime,xIdx(iState,iTime)) = 1;
    end
end
%%%% For unpacking the boundary constraints and objective:
gradInfo.bndIdxMap = [tIdx(1); xIdx(:,1); tIdx(end); xIdx(:,end)];
end
%%%% ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ %%%%
function [dt, dtGrad] = grad_timeStep(t,gradInfo)
%
% OptimTraj utility function
%
% Computes the (uniform) time step and its gradient with respect to the
% decision variables. Only the two endpoint times affect dt.
%
% dt = [1,1]
% dtGrad = [1,nz]
%
nSeg = length(t) - 1;   % number of segments between grid points
dt = (t(end) - t(1))/nSeg;
dtGrad = zeros(1, gradInfo.nDecVar);
% d(dt)/d(t0) and d(dt)/d(tF):
dtGrad(gradInfo.tIdx(1)) = -1/nSeg;
dtGrad(gradInfo.tIdx(2)) =  1/nSeg;
end
%%%% ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ %%%%
function [c, ceq, cGrad, ceqGrad] = grad_collectConstraints(t,x,u,defects, defectsGrad, pathCst, bndCst, gradInfo)
% [c, ceq, cGrad, ceqGrad] = grad_collectConstraints(t,x,u,defects, defectsGrad, pathCst, bndCst, gradInfo)
%
% OptimTraj utility function.
%
% Collects the defects, calls user-defined constraints, and then packs
% everything up into a form that is good for fmincon. Additionally, it
% reshapes and packs up the gradients of these constraints.
%
% INPUTS:
%   t = time vector
%   x = state matrix
%   u = control matrix
%   defects = defects matrix
%   defectsGrad = gradient of the defects w.r.t. decision variables
%   pathCst = user-defined path constraint function (or empty)
%   bndCst = user-defined boundary constraint function (or empty)
%   gradInfo = struct from grad_computeInfo, used to expand gradients
%
% OUTPUTS:
%   c = inequality constraint for fmincon
%   ceq = equality constraint for fmincon
%   cGrad = gradient of c, transposed to [nDecVar, nCst] for fmincon
%   ceqGrad = gradient of ceq, transposed to [nDecVar, nCstEq] for fmincon
%
ceq_dyn = reshape(defects,numel(defects),1);
ceq_dynGrad = grad_flattenPathCst(defectsGrad);
%%%% Compute the user-defined constraints:
if isempty(pathCst)
    c_path = [];
    ceq_path = [];
    c_pathGrad = [];
    ceq_pathGrad = [];
else
    [c_path, ceq_path, c_pathGradRaw, ceq_pathGradRaw] = pathCst(t,x,u);
    c_pathGrad = grad_flattenPathCst(grad_reshapeContinuous(c_pathGradRaw,gradInfo));
    ceq_pathGrad = grad_flattenPathCst(grad_reshapeContinuous(ceq_pathGradRaw,gradInfo));
end
if isempty(bndCst)
    c_bnd = [];
    ceq_bnd = [];
    c_bndGrad = [];
    ceq_bndGrad = [];
else
    t0 = t(1);
    tF = t(end);
    x0 = x(:,1);
    xF = x(:,end);
    [c_bnd, ceq_bnd, c_bndGradRaw, ceq_bndGradRaw] = bndCst(t0,x0,tF,xF);
    % Boundary gradients are given w.r.t. (t0,x0,tF,xF); expand them to
    % span the full decision vector:
    c_bndGrad = grad_reshapeBoundary(c_bndGradRaw,gradInfo);
    ceq_bndGrad = grad_reshapeBoundary(ceq_bndGradRaw,gradInfo);
end
%%%% Pack everything up:
c = [c_path;c_bnd];
ceq = [ceq_dyn; ceq_path; ceq_bnd];
% fmincon expects constraint gradients with one column per constraint,
% hence the transposes:
cGrad = [c_pathGrad;c_bndGrad]';
ceqGrad = [ceq_dynGrad; ceq_pathGrad; ceq_bndGrad]';
end
%%%% ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ %%%%
function C = grad_flattenPathCst(CC)
%
% Reshape a path-constraint gradient array so that its first two
% dimensions collapse into one, producing the 2D matrix that fmincon
% consumes. An empty input passes through as empty.
%
if isempty(CC)
    C = [];
    return;
end
sz = size(CC);
C = reshape(CC, sz(1)*sz(2), size(CC,3));
end
%%%% ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ %%%%
function CC = grad_reshapeBoundary(C,gradInfo)
%
% Expand a boundary constraint/objective gradient from the user (given
% only with respect to (t0,x0,tF,xF)) so that it spans the full set of
% decision variables; all other columns are zero.
%
nRows = size(C,1);
CC = zeros(nRows, gradInfo.nDecVar);
CC(:, gradInfo.bndIdxMap) = C;   % scatter into the boundary-variable columns
end
%%%% ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ %%%%
function grad = grad_reshapeContinuous(gradRaw,gradInfo)
% grad = grad_reshapeContinuous(gradRaw,gradInfo)
%
% OptimTraj utility function.
%
% This function converts the raw gradients from the user function into
% gradients with respect to the decision variables.
%
% INPUTS:
%   gradRaw = [nOutput, nInput, nTime] = user gradient w.r.t. (t,x,u),
%             where column 1 is the time derivative and columns 2:end are
%             the state/control derivatives at each grid point
%   gradInfo = struct from grad_computeInfo (alpha, tIdx, xuIdx, nDecVar)
%
% OUTPUTS:
%   grad = [nOutput,nTime,nDecVar]
%
if isempty(gradRaw)
    grad = [];
else
    [nOutput, ~, nTime] = size(gradRaw);
    grad = zeros(nOutput,nTime,gradInfo.nDecVar);
    % First, loop through and deal with time. The time derivative at each
    % grid point is split between the two endpoint-time decision variables
    % using the interpolation weights in gradInfo.alpha.
    timeGrad = gradRaw(:,1,:); timeGrad = permute(timeGrad,[1,3,2]);
    for iOutput=1:nOutput
        A = ([1;1]*timeGrad(iOutput,:)).*gradInfo.alpha;
        grad(iOutput,:,gradInfo.tIdx) = permute(A,[3,2,1]);
    end
    % Now deal with state and control: each grid point's derivatives map
    % directly to that grid point's decision-variable slots.
    for iOutput=1:nOutput
        for iTime=1:nTime
            B = gradRaw(iOutput,2:end,iTime);
            grad(iOutput,iTime,gradInfo.xuIdx(:,iTime)) = permute(B,[3,1,2]);
        end
    end
end
end
%%%% ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ %%%%
function [cost, costGrad] = grad_objective(z,pack,pathObj,bndObj,gradInfo,weights)
%
% This function unpacks the decision variables, sends them to the
% user-defined objective functions, and then returns the final cost and
% its gradient with respect to the decision variables.
%
% INPUTS:
%   z = column vector of decision variables
%   pack = details about how to convert decision variables into t,x, and u
%   pathObj = user-defined integral objective function (may be empty)
%   bndObj = user-defined end-point objective function (may be empty)
%   gradInfo = struct of index maps for assembling gradients
%   weights = quadrature weights for integrating the path objective
%
% OUTPUTS:
%   cost = scalar cost for this set of decision variables
%   costGrad = [1, nDecVar] = gradient of cost with respect to z
%

%Unpack the decision variables:
[t,x,u] = unPackDecVar(z,pack);

% Time step for integration:
[dt, dtGrad] = grad_timeStep(t, gradInfo);
nTime = length(t);
nDecVar = length(z);

% Compute the cost integral along the trajectory
if isempty(pathObj)
    integralCost = 0;
    % BUG FIX: the gradient of a constant (zero) integral cost is a
    % [1,nDecVar] row of zeros. The previous code used
    % zeros(nState+nControl,1), which has the wrong shape and orientation
    % and corrupts the sum with bndCostGrad at the bottom of this function.
    integralCostGrad = zeros(1,nDecVar);
else
    % Objective function integrand and gradients:
    [obj, objGradRaw] = pathObj(t,x,u);
    nInput = size(objGradRaw,1);
    objGradRaw = reshape(objGradRaw,1,nInput,nTime);
    objGrad = grad_reshapeContinuous(objGradRaw,gradInfo);

    % Integration by quadrature:
    integralCost = dt*obj*weights; % Integration

    % Gradient of integral objective function. Product rule: the time step
    % dt also depends on the boundary-time decision variables (via dtGrad).
    objGrad = reshape(objGrad,nTime,nDecVar);
    integralCostGrad = ...
        dtGrad*(obj*weights) + ...
        dt*sum(objGrad.*(weights*ones(1,nDecVar)),1);
end

% Compute the cost at the boundaries of the trajectory
if isempty(bndObj)
    bndCost = 0;
    bndCostGrad = zeros(1,nDecVar);
else
    t0 = t(1);
    tF = t(end);
    x0 = x(:,1);
    xF = x(:,end);
    [bndCost, bndCostGradRaw] = bndObj(t0,x0,tF,xF);
    bndCostGrad = grad_reshapeBoundary(bndCostGradRaw,gradInfo);
end

% Cost function
cost = bndCost + integralCost;

% Gradients
costGrad = bndCostGrad + integralCostGrad;
end
%%%% ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ %%%%
function [c, ceq, cGrad, ceqGrad] = myCstGrad(z,pack,dynFun, pathCst, bndCst, defectCst, gradInfo)
%
% This function unpacks the decision variables, computes the defects along
% the trajectory, and then evaluates the user-defined constraint functions,
% returning analytic gradients alongside the constraint values.
%
% INPUTS:
%   z = column vector of decision variables
%   pack = details about how to convert decision variables into t,x, and u
%   dynFun = user-defined dynamics function
%   pathCst = user-defined constraints along the path
%   bndCst = user-defined constraints at the boundaries
%   defectCst = method-specific defect-constraint function
%   gradInfo = struct of index maps for assembling gradients
%
% OUTPUTS:
%   c = inequality constraints to be passed to fmincon
%   ceq = equality constraints to be passed to fmincon
%   cGrad = gradient of c w.r.t. the decision variables
%   ceqGrad = gradient of ceq w.r.t. the decision variables
%

%Unpack the decision variables:
[t,x,u] = unPackDecVar(z,pack);

% Time step for integration:
[dt, dtGrad] = grad_timeStep(t,gradInfo);

%%%% Compute defects along the trajectory:
[f, fGradRaw] = dynFun(t,x,u);
fGrad = grad_reshapeContinuous(fGradRaw,gradInfo);

% NOTE(review): xGrad is not defined anywhere in this function, nor is it
% returned by unPackDecVar above -- as written, the next line errors at
% runtime. Presumably the gradient of the state w.r.t. the decision
% variables should be computed (or read from gradInfo) before this call;
% TODO confirm against the upstream OptimTraj source.
[defects, defectsGrad] = defectCst(dt,x,f,dtGrad,xGrad,fGrad,gradInfo);

% Compute gradients of the user-defined constraints and then pack up:
[c, ceq, cGrad, ceqGrad] = grad_collectConstraints(t,x,u,...
    defects, defectsGrad, pathCst, bndCst, gradInfo);
end
|
github
|
xuhuairuogu/OptimTraj-master
|
Derive_Equations.m
|
.m
|
OptimTraj-master/demo/fiveLinkBiped/costOfTransport/Derive_Equations.m
| 27,136 |
utf_8
|
2ee06d2549cae61acad48475153b4214
|
function Derive_Equations()
%%%% Derive Equations - Five Link Biped Model %%%%
%
% This function derives the equations of motion, as well as some other useful
% equations (kinematics, contact forces, ...) for the five-link biped
% model.
%
% This version of the code includes a few more complicated features for
% dealing with difficult cost functions. In particular, it adds 10 slack
% variables to compute the abs(power) term in the cost function, and the
% primary control is the derivative of torque, rather than torque itself.
% This allows for regularization by the derivative of the input.
%
%
% Nomenclature:
%
% - There are five links, which will be numbered starting with "1" for the
% stance leg tibia, increasing as the links are farther from the base joint,
% and ending with "5" for the swing leg tibia.
%    1 - stance leg tibia (lower leg)
%    2 - stance leg femur  (upper leg)
%    3 - torso
%    4 - swing leg femur
%    5 - swing leg tibia
%
% - This script uses absolute angles, which are represented with "q". All
% angles use positive convention, with the zero angle corresponding to a
% vertically aligned link configuration. [q] = [0] has the torso balanced
% upright, with both legs fully extended straight below it.
%
% - Derivatives with respect to time are notated by prepending a "d". For
% example the rate of change in an absolute angle is "dq" and angular
% acceleration would be "ddq"
%
% - Joint positions are given with "P", center of mass positions are "G"
%

clc; clear;
disp('Creating variables and derivatives...')

%%%% Absolute orientation (angle) of each link
q1 = sym('q1', 'real');
q2 = sym('q2','real');
q3 = sym('q3','real');
q4 = sym('q4','real');
q5 = sym('q5','real');

%%%% Absolute angular rate of each link
dq1 = sym('dq1','real');
dq2 = sym('dq2','real');
dq3 = sym('dq3','real');
dq4 = sym('dq4','real');
dq5 = sym('dq5','real');

%%%% Absolute angular acceleration of each link
ddq1 = sym('ddq1','real');
ddq2 = sym('ddq2','real');
ddq3 = sym('ddq3','real');
ddq4 = sym('ddq4','real');
ddq5 = sym('ddq5','real');

%%%% Torques at each joint
u1 = sym('u1','real'); %Stance foot
u2 = sym('u2','real'); %Stance knee
u3 = sym('u3','real'); %Stance hip
u4 = sym('u4','real'); %Swing hip
u5 = sym('u5','real'); %Swing knee

%%%% Torque rates at each joint (the primary control in this formulation)
du1 = sym('du1','real'); %Stance foot
du2 = sym('du2','real'); %Stance knee
du3 = sym('du3','real'); %Stance hip
du4 = sym('du4','real'); %Swing hip
du5 = sym('du5','real'); %Swing knee

%%%% Slack variables -- negative component of power
sn1 = sym('sn1','real'); %Stance foot
sn2 = sym('sn2','real'); %Stance knee
sn3 = sym('sn3','real'); %Stance hip
sn4 = sym('sn4','real'); %Swing hip
sn5 = sym('sn5','real'); %Swing knee

%%%% Slack variables -- positive component of power
sp1 = sym('sp1','real'); %Stance foot
sp2 = sym('sp2','real'); %Stance knee
sp3 = sym('sp3','real'); %Stance hip
sp4 = sym('sp4','real'); %Swing hip
sp5 = sym('sp5','real'); %Swing knee

%%%% Mass of each link
m1 = sym('m1','real');
m2 = sym('m2','real');
m3 = sym('m3','real');
m4 = sym('m4','real');
m5 = sym('m5','real');

%%%% Distance between parent joint and link center of mass
c1 = sym('c1','real');
c2 = sym('c2','real');
c3 = sym('c3','real');
c4 = sym('c4','real');
c5 = sym('c5','real');

%%%% Length of each link
l1 = sym('l1','real');
l2 = sym('l2','real');
l3 = sym('l3','real');
l4 = sym('l4','real');
l5 = sym('l5','real');

%%%% Moment of inertia of each link about its own center of mass
I1 = sym('I1','real');
I2 = sym('I2','real');
I3 = sym('I3','real');
I4 = sym('I4','real');
I5 = sym('I5','real');

g = sym('g','real');   % Gravity
Fx = sym('Fx','real'); %Horizontal contact force at stance foot
Fy = sym('Fy','real'); %Vertical contact force at stance foot
empty = sym('empty','real'); %Used for vectorization, user should pass a vector of zeros
t = sym('t','real');   %dummy continuous time

%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
%                 Set up coordinate system and unit vectors                %
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%

i = sym([1;0]); %Horizontal axis
j = sym([0;1]); %Vertical axis

% Note the sign convention: e1..e3 point "up" the chain for zero angle,
% while e4, e5 (swing leg) are negated so that the swing leg hangs down.
e1 = cos(q1)*(j) + sin(q1)*(-i);   %unit vector from P0 -> P1, (contact point to stance knee)
e2 = cos(q2)*(j) + sin(q2)*(-i);   %unit vector from P1 -> P2, (stance knee to hip)
e3 = cos(q3)*(j) + sin(q3)*(-i);   %unit vector from P2 -> P3, (hip to shoulders);
e4 = -cos(q4)*(j) - sin(q4)*(-i);  %unit vector from P2 -> P4, (hip to swing knee);
e5 = -cos(q5)*(j) - sin(q5)*(-i);  %unit vector from P4 -> P5, (swing knee to swing foot);

P0 = 0*i + 0*j;   %stance foot = Contact point = origin
P1 = P0 + l1*e1;  %stance knee
P2 = P1 + l2*e2;  %hip
P3 = P2 + l3*e3;  %shoulders
P4 = P2 + l4*e4;  %swing knee
P5 = P4 + l5*e5;  %swing foot

G1 = P1 - c1*e1;  % CoM stance leg tibia
G2 = P2 - c2*e2;  % CoM stance leg femur
G3 = P3 - c3*e3;  % CoM torso
G4 = P2 + c4*e4;  % CoM swing leg femur
G5 = P4 + c5*e5;  % CoM swing leg tibia
G = (m1*G1 + m2*G2 + m3*G3 + m4*G4 + m5*G5)/(m1+m2+m3+m4+m5); %Center of mass for entire robot

%%%% Define a function for doing '2d' cross product: dot(a x b, k)
cross2d = @(a,b)(a(1)*b(2) - a(2)*b(1));

%%%% Weight of each link:
w1 = -m1*g*j;
w2 = -m2*g*j;
w3 = -m3*g*j;
w4 = -m4*g*j;
w5 = -m5*g*j;

%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
%                              Derivatives                                 %
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
q = [q1;q2;q3;q4;q5];
dq = [dq1;dq2;dq3;dq4;dq5];
ddq = [ddq1;ddq2;ddq3;ddq4;ddq5];
u = [u1;u2;u3;u4;u5];
du = [du1;du2;du3;du4;du5];
sn = [sn1;sn2;sn3;sn4;sn5];
sp = [sp1;sp2;sp3;sp4;sp5];
z = [t;q;dq;u;du;sn;sp];   % time-varying vector of inputs

% Neat trick to compute derivatives using the chain rule:
% d(in)/dt = d(in)/d[q;dq;u] * d[q;dq;u]/dt
derivative = @(in)( jacobian(in,[q;dq;u])*[dq;ddq;du] );

% Velocity of the swing foot (used for step constraints)
dP5 = derivative(P5);

% Compute derivatives for the CoM of each link:
dG1 = derivative(G1);  ddG1 = derivative(dG1);
dG2 = derivative(G2);  ddG2 = derivative(dG2);
dG3 = derivative(G3);  ddG3 = derivative(dG3);
dG4 = derivative(G4);  ddG4 = derivative(dG4);
dG5 = derivative(G5);  ddG5 = derivative(dG5);
dG = derivative(G);    ddG = derivative(dG);

%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
%                             Calculations:                                %
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%

% Each nested function below writes one or more auto-generated files.
singleStanceDynamics();
objectiveFunctions();
heelStrikeDynamics();
mechanicalEnergy();
contactForces();
kinematics();

disp('Done!');

%
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
%                        Single-Stance Dynamics                            %
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
% I solve the dynamics here by carefully selecting angular momentum balance
% equations about each joint, working my way out the kinematic tree from
% the root.
function singleStanceDynamics()
    % Derive the single-stance dynamics as a linear system MM(q)*ddq = FF
    % by writing one angular momentum balance (AMB) equation about each
    % joint, working outward along the kinematic tree, then export the
    % system (with sparse gradients) to autoGen_dynSs.m.
    disp('Deriving single stance dynamics...')

    %%%% AMB - entire system @ P0
    eqnTorque0 = ...
        cross2d(G1-P0,w1) + ...
        cross2d(G2-P0,w2) + ...
        cross2d(G3-P0,w3) + ...
        cross2d(G4-P0,w4) + ...
        cross2d(G5-P0,w5) + ...
        u1;

    eqnInertia0 = ...
        cross2d(G1-P0,m1*ddG1) + ddq1*I1 + ...
        cross2d(G2-P0,m2*ddG2) + ddq2*I2 + ...
        cross2d(G3-P0,m3*ddG3) + ddq3*I3 + ...
        cross2d(G4-P0,m4*ddG4) + ddq4*I4 + ...
        cross2d(G5-P0,m5*ddG5) + ddq5*I5;

    %%%% AMB - swing leg, torso, stance femur @ stance knee
    eqnTorque1 = ...
        cross2d(G2-P1,w2) + ...
        cross2d(G3-P1,w3) + ...
        cross2d(G4-P1,w4) + ...
        cross2d(G5-P1,w5) + ...
        u2;

    eqnInertia1 = ...
        cross2d(G2-P1,m2*ddG2) + ddq2*I2 + ...
        cross2d(G3-P1,m3*ddG3) + ddq3*I3 + ...
        cross2d(G4-P1,m4*ddG4) + ddq4*I4 + ...
        cross2d(G5-P1,m5*ddG5) + ddq5*I5 ;

    %%%% AMB - swing leg, torso @ hip
    eqnTorque2 = ...
        cross2d(G3-P2,w3) + ...
        cross2d(G4-P2,w4) + ...
        cross2d(G5-P2,w5) + ...
        u3;

    eqnInertia2 = ...
        cross2d(G3-P2,m3*ddG3) + ddq3*I3 + ...
        cross2d(G4-P2,m4*ddG4) + ddq4*I4 + ...
        cross2d(G5-P2,m5*ddG5) + ddq5*I5 ;

    %%%% AMB - swing leg @ hip
    eqnTorque3 = ...
        cross2d(G4-P2,w4) + ...
        cross2d(G5-P2,w5) + ...
        u4;

    eqnInertia3 = ...
        cross2d(G4-P2,m4*ddG4) + ddq4*I4 + ...
        cross2d(G5-P2,m5*ddG5) + ddq5*I5 ;

    %%%% AMB - swing tibia @ swing knee
    eqnTorque4 = ...
        cross2d(G5-P4,w5) + ...
        u5;

    eqnInertia4 = ...
        cross2d(G5-P4,m5*ddG5) + ddq5*I5 ;

    %%%% Collect and solve equations:
    eqns = [...
        eqnTorque0 - eqnInertia0;
        eqnTorque1 - eqnInertia1;
        eqnTorque2 - eqnInertia2;
        eqnTorque3 - eqnInertia3;
        eqnTorque4 - eqnInertia4];
    [MM, FF] = equationsToMatrix(eqns,ddq);   % ddq = MM\ff;

    %%%% Compute gradients:
    [m, mi, mz, mzi, mzd] = computeGradients(MM,z,empty);
    [f, fi, fz, fzi, fzd] = computeGradients(FF,z,empty);

    % Write function file. (l5 is absent from the argument list: the swing
    % foot position P5 never enters these equations.)
    matlabFunction(m, mi, f, fi,...   %dynamics
        mz, mzi, mzd, fz, fzi, fzd,...  %gradients
        'file','autoGen_dynSs.m',...
        'vars',{...
        'q1','q2','q3','q4','q5',...
        'dq1','dq2','dq3','dq4','dq5',...
        'u1','u2','u3','u4','u5',...
        'm1','m2','m3','m4','m5',...
        'I1','I2','I3','I4','I5',...
        'l1','l2','l3','l4',...
        'c1','c2','c3','c4','c5',...
        'g','empty'});
end
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
% Objective Functions %
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
function objectiveFunctions()
    % Build the slack-variable power constraints, the cost-of-transport
    % objective, and the swing-foot clearance constraint, and export each
    % (with gradients) to an auto-generated file.

    % Joint rates (relative rate across each joint, from absolute rates):
    v1 = dq1;        % joint rate 1
    v2 = dq2-dq1;    % joint rate 2
    v3 = dq3-dq2;    % joint rate 3
    v4 = dq4-dq3;    % joint rate 4
    v5 = dq5-dq4;    % joint rate 5

    % Compute the power used by each joint
    pow1 = v1*u1;   %Power used by joint 1
    pow2 = v2*u2;   %Power used by joint 2
    pow3 = v3*u3;   %Power used by joint 3
    pow4 = v4*u4;   %Power used by joint 4
    pow5 = v5*u5;   %Power used by joint 5

    % Constraint on the slack variables: power = sp - sn at each joint,
    % so that sn + sp can stand in for abs(power) in the objective.
    slackCst = [...
        pow1 - (sp1 - sn1);
        pow2 - (sp2 - sn2);
        pow3 - (sp3 - sn3);
        pow4 - (sp4 - sn4);
        pow5 - (sp5 - sn5)];

    % Gradients of the constraint on slack variables:
    [c, ~, cz, czi, ~] = computeGradients(slackCst,z,empty);

    matlabFunction(c,cz,czi,...
        'file','autoGen_cst_costOfTransport.m',...
        'vars',{...
        'dq1','dq2','dq3','dq4','dq5',...
        'u1','u2','u3','u4','u5',...
        'sn1','sn2','sn3','sn4','sn5',...
        'sp1','sp2','sp3','sp4','sp5','empty'});

    % abs(power) via the slack variables (separately weighted components):
    gammaNeg = sym('gammaNeg','real');   %weight on the negative-power slack sum
    gammaPos = sym('gammaPos','real');   %weight on the positive-power slack sum
    absPower = gammaNeg*(sn1 + sn2 + sn3 + sn4 + sn5) + ...
        gammaPos*(sp1 + sp2 + sp3 + sp4 + sp5);

    % Cost of Transport: (energy used)/(weight * distance), plus smoothing
    weight = (m1+m2+m3+m4+m5)*g;
    stepLength = sym('stepLength','real');
    alpha = sym('alpha','real');   %Torque-squared smoothing parameter
    beta = sym('beta','real');     %Torque-rate-squared smoothing parameter
    F = absPower/(weight*stepLength) + ...
        alpha*(u1^2 + u2^2 + u3^2 + u4^2 + u5^2) + ...
        beta*(du1^2 + du2^2 + du3^2 + du4^2 + du5^2);

    [f, ~, fz, fzi, ~] = computeGradients(F,z,empty);

    matlabFunction(f,fz,fzi,...
        'file','autoGen_obj_costOfTransport.m',...
        'vars',{...
        'm1','m2','m3','m4','m5',...
        'u1','u2','u3','u4','u5',...
        'du1','du2','du3','du4','du5',...
        'sn1','sn2','sn3','sn4','sn5', ...
        'sp1','sp2','sp3','sp4','sp5',...
        'g','stepLength','gammaNeg','gammaPos','alpha','beta','empty'});

    % Swing foot height: keep the foot above a parabolic clearance profile
    % that is stepHeight at mid-stride and zero at x = +/- stepLength.
    stepHeight = sym('stepHeight','real');
    yFoot = P5(2);
    xFoot = P5(1);
    yMin = stepHeight*(1 - (xFoot/stepLength)^2);
    yCst = yMin - yFoot;   %Must be negative

    [y, ~, yz, yzi, ~] = computeGradients(yCst,z,empty);

    % NOTE(review): there is no comma after 'l5' below; inside a cell array
    % the whitespace still separates the elements, so this parses as
    % {...,'l5','stepLength','stepHeight'} -- benign, but worth confirming.
    matlabFunction(y,yz,yzi,...
        'file','autoGen_cst_swingFootHeight.m',...
        'vars',{...
        'q1','q2','q4','q5',...
        'l1','l2','l4','l5'...
        'stepLength','stepHeight'});
end
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
% Heel-Strike Dynamics %
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
function heelStrikeDynamics()
    % Derive the impulsive heel-strike (collision) map. Angular momentum is
    % conserved about the new contact point and about each successive joint
    % across the collision, giving a linear system relating the
    % post-collision rates (p suffix) to the pre-collision state (m
    % suffix). Also exports boundary constraints on foot collision
    % velocity and on step length/height.
    disp('Deriving heel-strike dynamics...')

    %%%% Notes:
    %   xF - heelStrike(xI) --> constraint --> 0
    %   xF - collision(footSwap(xI));
    %

    % Angles before heel-strike:
    q1m = sym('q1m','real');
    q2m = sym('q2m','real');
    q3m = sym('q3m','real');
    q4m = sym('q4m','real');
    q5m = sym('q5m','real');
    qm = [q1m;q2m;q3m;q4m;q5m];

    % Angles after heel-strike
    q1p = sym('q1p','real');
    q2p = sym('q2p','real');
    q3p = sym('q3p','real');
    q4p = sym('q4p','real');
    q5p = sym('q5p','real');
    qp = [q1p;q2p;q3p;q4p;q5p];

    % Angular rates before heel-strike:
    dq1m = sym('dq1m','real');
    dq2m = sym('dq2m','real');
    dq3m = sym('dq3m','real');
    dq4m = sym('dq4m','real');
    dq5m = sym('dq5m','real');
    dqm = [dq1m;dq2m;dq3m;dq4m;dq5m];

    % Angular rates after heel-strike
    dq1p = sym('dq1p','real');
    dq2p = sym('dq2p','real');
    dq3p = sym('dq3p','real');
    dq4p = sym('dq4p','real');
    dq5p = sym('dq5p','real');
    dqp = [dq1p;dq2p;dq3p;dq4p;dq5p];

    % torque before heel-strike:
    u1m = sym('u1m','real');
    u2m = sym('u2m','real');
    u3m = sym('u3m','real');
    u4m = sym('u4m','real');
    u5m = sym('u5m','real');
    um = [u1m;u2m;u3m;u4m;u5m];

    % torque after heel-strike
    u1p = sym('u1p','real');
    u2p = sym('u2p','real');
    u3p = sym('u3p','real');
    u4p = sym('u4p','real');
    u5p = sym('u5p','real');
    up = [u1p;u2p;u3p;u4p;u5p];

    % Compute kinematics before heel-strike (substitute the "m" symbols
    % into the single-stance expressions from the parent workspace):
    inVars = {'q1','q2','q3','q4','q5','dq1','dq2','dq3','dq4','dq5'};
    outVarsM = {'q1m','q2m','q3m','q4m','q5m','dq1m','dq2m','dq3m','dq4m','dq5m'};
    % P0m = subs(P0,inVars,outVarsM);
    P1m = subs(P1,inVars,outVarsM);
    P2m = subs(P2,inVars,outVarsM);
    % P3m = subs(P3,inVars,outVarsM);
    P4m = subs(P4,inVars,outVarsM);
    P5m = subs(P5,inVars,outVarsM);
    dP5m = subs(dP5,inVars,outVarsM);
    G1m = subs(G1,inVars,outVarsM);
    G2m = subs(G2,inVars,outVarsM);
    G3m = subs(G3,inVars,outVarsM);
    G4m = subs(G4,inVars,outVarsM);
    G5m = subs(G5,inVars,outVarsM);
    dG1m = subs(dG1,inVars,outVarsM);
    dG2m = subs(dG2,inVars,outVarsM);
    dG3m = subs(dG3,inVars,outVarsM);
    dG4m = subs(dG4,inVars,outVarsM);
    dG5m = subs(dG5,inVars,outVarsM);

    % Compute kinematics after heel-strike (legs have swapped roles, so
    % the "p" expressions are in the new stance frame):
    outVarsP = {'q1p','q2p','q3p','q4p','q5p','dq1p','dq2p','dq3p','dq4p','dq5p'};
    P0p = subs(P0,inVars,outVarsP);
    P1p = subs(P1,inVars,outVarsP);
    P2p = subs(P2,inVars,outVarsP);
    % P3p = subs(P3,inVars,outVarsP);
    P4p = subs(P4,inVars,outVarsP);
    % P5p = subs(P5,inVars,outVarsP);
    dP5p = subs(dP5,inVars,outVarsP);
    G1p = subs(G1,inVars,outVarsP);
    G2p = subs(G2,inVars,outVarsP);
    G3p = subs(G3,inVars,outVarsP);
    G4p = subs(G4,inVars,outVarsP);
    G5p = subs(G5,inVars,outVarsP);
    dG1p = subs(dG1,inVars,outVarsP);
    dG2p = subs(dG2,inVars,outVarsP);
    dG3p = subs(dG3,inVars,outVarsP);
    dG4p = subs(dG4,inVars,outVarsP);
    dG5p = subs(dG5,inVars,outVarsP);

    %%%% AMB - entire system @ New stance foot (= old swing foot P5m)
    eqnHs0m = ...   %Before collision
        cross2d(G1m-P5m,m1*dG1m) + dq1m*I1 + ...
        cross2d(G2m-P5m,m2*dG2m) + dq2m*I2 + ...
        cross2d(G3m-P5m,m3*dG3m) + dq3m*I3 + ...
        cross2d(G4m-P5m,m4*dG4m) + dq4m*I4 + ...
        cross2d(G5m-P5m,m5*dG5m) + dq5m*I5;
    eqnHs0 = ...    %After collision
        cross2d(G1p-P0p,m1*dG1p) + dq1p*I1 + ...
        cross2d(G2p-P0p,m2*dG2p) + dq2p*I2 + ...
        cross2d(G3p-P0p,m3*dG3p) + dq3p*I3 + ...
        cross2d(G4p-P0p,m4*dG4p) + dq4p*I4 + ...
        cross2d(G5p-P0p,m5*dG5p) + dq5p*I5;

    %%%% AMB - new swing leg, torso, stance femur @ stance knee
    eqnHs1m = ...   %Before collision
        cross2d(G1m-P4m,m1*dG1m) + dq1m*I1 + ...
        cross2d(G2m-P4m,m2*dG2m) + dq2m*I2 + ...
        cross2d(G3m-P4m,m3*dG3m) + dq3m*I3 + ...
        cross2d(G4m-P4m,m4*dG4m) + dq4m*I4;
    eqnHs1 = ...    %After collision
        cross2d(G2p-P1p,m2*dG2p) + dq2p*I2 + ...
        cross2d(G3p-P1p,m3*dG3p) + dq3p*I3 + ...
        cross2d(G4p-P1p,m4*dG4p) + dq4p*I4 + ...
        cross2d(G5p-P1p,m5*dG5p) + dq5p*I5;

    %%%% AMB - swing leg, torso @ new hip
    eqnHs2m = ...   %Before collision
        cross2d(G3m-P2m,m3*dG3m) + dq3m*I3 + ...
        cross2d(G2m-P2m,m2*dG2m) + dq2m*I2 + ...
        cross2d(G1m-P2m,m1*dG1m) + dq1m*I1;
    eqnHs2 = ...    %After collision
        cross2d(G3p-P2p,m3*dG3p) + dq3p*I3 + ...
        cross2d(G4p-P2p,m4*dG4p) + dq4p*I4 + ...
        cross2d(G5p-P2p,m5*dG5p) + dq5p*I5;

    %%%% AMB - swing leg @ new hip
    eqnHs3m = ...   %Before collision
        cross2d(G1m-P2m,m1*dG1m) + dq1m*I1 + ...
        cross2d(G2m-P2m,m2*dG2m) + dq2m*I2;
    eqnHs3 = ...    %After collision
        cross2d(G4p-P2p,m4*dG4p) + dq4p*I4 + ...
        cross2d(G5p-P2p,m5*dG5p) + dq5p*I5;

    %%%% AMB - swing tibia @ new swing knee
    eqnHs4m = ...   %Before collision
        cross2d(G1m-P1m,m1*dG1m) + dq1m*I1;
    eqnHs4 = ...    %After collision
        cross2d(G5p-P4p,m5*dG5p) + dq5p*I5;

    %%%% Collect and solve equations (linear in the post-collision rates):
    eqnHs = [...
        eqnHs0m - eqnHs0;
        eqnHs1m - eqnHs1;
        eqnHs2m - eqnHs2;
        eqnHs3m - eqnHs3;
        eqnHs4m - eqnHs4];
    [MM, FF] = equationsToMatrix(eqnHs,dqp);

    %%%% Compute gradients:
    tp = sym('tp','real');   %Initial trajectory time
    tm = sym('tm','real');   %Final trajectory time
    zBnd = [tp;qp;dqp;up;tm;qm;dqm;um];
    [m, mi, mz, mzi, mzd] = computeGradients(MM,zBnd,empty);
    [f, fi, fz, fzi, fzd] = computeGradients(FF,zBnd,empty);

    % Heel-strike
    matlabFunction(m, mi, f, fi,...    %dynamics
        mz, mzi, mzd, fz, fzi, fzd,...   %gradients
        'file','autoGen_cst_heelStrike.m',...
        'vars',{...
        'q1p','q2p','q3p','q4p','q5p',...
        'q1m','q2m','q3m','q4m','q5m',...
        'dq1m','dq2m','dq3m','dq4m','dq5m',...
        'm1','m2','m3','m4','m5',...
        'I1','I2','I3','I4','I5',...
        'l1','l2','l3','l4','l5',...
        'c1','c2','c3','c4','c5','empty'});

    % Collision velocity of the swing foot:
    cst = [-dP5p(2); dP5m(2)];   %Swing foot velocity before and after collision (negative sign is intentional, since output is constrained to be negative);
    cstJac = jacobian(cst,zBnd); %Gradient

    matlabFunction(cst, cstJac,...
        'file','autoGen_cst_footVel.m',...
        'vars',{...
        'q1p','q2p','q4p','q5p',...
        'q1m','q2m','q4m','q5m',...
        'dq1p','dq2p','dq4p','dq5p',...
        'dq1m','dq2m','dq4m','dq5m',...
        'l1','l2','l4','l5'});

    % Step length and height constraint: pre-collision swing foot must be
    % exactly stepLength ahead and on the ground.
    stepLength = sym('stepLength','real');
    ceq = [P5m(1)-stepLength; P5m(2)];
    ceqJac = jacobian(ceq,zBnd); %Gradient

    matlabFunction(ceq, ceqJac,...
        'file','autoGen_cst_steplength.m',...
        'vars',{...
        'q1m','q2m','q4m','q5m',...
        'l1','l2','l4','l5','stepLength'});
end
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
% Mechanical Energy %
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
function mechanicalEnergy()
    % Compute the total kinetic and potential energy of the robot and
    % export them to autoGen_energy.m.
    disp('Deriving mechanical energy...')

    %%%% Energy:
    KineticEnergy = ...
        0.5*m1*dot(dG1,dG1) + 0.5*I1*dq1^2 + ...
        0.5*m2*dot(dG2,dG2) + 0.5*I2*dq2^2 + ...
        0.5*m3*dot(dG3,dG3) + 0.5*I3*dq3^2 + ...
        0.5*m4*dot(dG4,dG4) + 0.5*I4*dq4^2 + ...
        0.5*m5*dot(dG5,dG5) + 0.5*I5*dq5^2;
    PotentialEnergy = ...
        m1*g*G1(2) + ...
        m2*g*G2(2) + ...
        m3*g*G3(2) + ...
        m4*g*G4(2) + ...
        m5*g*G5(2);

    % (l5 is absent from the argument list: no link CoM depends on it.)
    matlabFunction(KineticEnergy, PotentialEnergy,...
        'file','autoGen_energy.m',...
        'vars',{...
        'q1','q2','q3','q4','q5',...
        'dq1','dq2','dq3','dq4','dq5',...
        'm1','m2','m3','m4','m5',...
        'I1','I2','I3','I4','I5',...
        'l1','l2','l3','l4',...
        'c1','c2','c3','c4','c5',...
        'g'},...
        'outputs',{'KE','PE'});
end
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
% Contact Forces %
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
function contactForces()
    % Solve the whole-robot force balance for the ground-reaction forces
    % (Fx, Fy) at the stance foot and export to autoGen_contactForce.m.

    %%%% Contact Forces:
    eqnForce5 = w1 + w2 + w3 + w4 + w5 + Fx*i + Fy*j;
    eqnInertia5 = (m1+m2+m3+m4+m5)*ddG;   % total mass times CoM acceleration
    [AA,bb] = equationsToMatrix(eqnForce5-eqnInertia5,[Fx;Fy]);
    ContactForces = AA\bb;

    matlabFunction(ContactForces(1),ContactForces(2),...
        'file','autoGen_contactForce.m',...
        'vars',{...
        'q1','q2','q3','q4','q5',...
        'dq1','dq2','dq3','dq4','dq5',...
        'ddq1','ddq2','ddq3','ddq4','ddq5',...
        'm1','m2','m3','m4','m5',...
        'l1','l2','l3','l4',...
        'c1','c2','c3','c4','c5',...
        'g'},...
        'outputs',{'Fx','Fy'});
end
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
% Write Kinematics Files %
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%
function kinematics()
    % Export joint positions (P) and link CoM positions (Gvec) to
    % autoGen_getPoints.m, for plotting and animation.
    disp('Writing kinematics files...')
    P = [P1; P2; P3; P4; P5];
    Gvec = [G1; G2; G3; G4; G5];

    % Used for plotting and animation
    matlabFunction(P,Gvec,'file','autoGen_getPoints.m',...
        'vars',{...
        'q1','q2','q3','q4','q5',...
        'l1','l2','l3','l4','l5',...
        'c1','c2','c3','c4','c5'},...
        'outputs',{'P','Gvec'});
end
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Helper Functions %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function [m, mi, mz, mzi, dim] = computeGradients(M,z,empty)
%
% This function computes the gradients of a matrix M with respect to the
% variables in z, and then returns both the matrix and its gradient as
% column vectors of their non-zero elements, along with the linear indicies
% to unpack them. It also simplifies m and mz.
%
% INPUTS:
%   M = [na, nb] = symbolic matrix
%   z = [nc, 1] = symbolic vector
%   empty = symbolic variable used to pad constant terms (see vectorizeHack)
%
% OUTPUTS:
%   m = [nd, 1] = symbolic vector of non-zero elements in M
%   mi = [nd, 1] = linear indicies to map m --> [na,nb] matrix
%   mz = [ne, 1] = symbolic vector of non-zero elements in Mz
%   mzi = [ne, 1] = linear indicies to map mz --> [na,nb,nc] array
%   dim = [3,1] = [na,nb,nc] = dimensions of 3d version of mz
%       (bound to the name "mzd" at the call sites above)
%
[na, nb] = size(M);
nc = size(z,1);
M = simplify(M);

% Compute jacobian of M, by first reshaping M to be a column vector
mz2 = jacobian(M(:),z);
% Expand back out to a three-dimensional array [na, nb, nc]
mz3 = reshape(mz2,na,nb,nc);
mz3 = simplify(mz3);

% Extract non-zero elements to a column vector:
mi = find(M);
m = M(mi);
mzi = find(mz3);
mz = mz3(mzi); mz = mz(:);   %Collapse to a column vector
dim = [na,nb,nc];

% Pad any constant terms with "empty" to permit vectorization:
m = vectorizeHack(m, z, empty);
mz = vectorizeHack(mz, z, empty);
end
function x = vectorizeHack(x, z, empty)
%
% This function searches for any elements of x that are not dependent on
% any element of z. In this case, the automatically generated code will
% fail to vectorize properly. One solution is to add an array of zeros
% (empty) to the element.
%
% x = column vector of symbolic expressions
% z = column vector of symbolic variables
% empty = symbolic variable, which the user will set equal to zero
%

% Compute dependencies
g = jacobian(x,z);

% Check for rows of x with no dependence on z: a row is constant when
% every entry of its jacobian row is symbolically zero.
[n,m] = size(g);
idxConst = true(n,1);
for i=1:n
    for j=1:m
        if ~isequal(sym(0),g(i,j))
            idxConst(i) = false;
            break;
        end
    end
end

% Add empty to those entries
x(idxConst) = x(idxConst) + empty;
end
|
github
|
oklachumi/octave-in-communications-master
|
PAPR_of_Chu.m
|
.m
|
octave-in-communications-master/PAPR_of_Chu.m
| 1,199 |
utf_8
|
d82b0d1ed0a6f586e6c3a6bd1ca432c2
|
% PAPR_of_Chu.m
% Plot a Chu (Zadoff-Chu) sequence in the time domain, with and without
% IFFT oversampling, and report its PAPR.
% NOTE(review): "clear,close,clc all" runs three separate commands and
% only clc receives the 'all' argument -- presumably
% "clear all; close all; clc" was intended. TODO confirm before changing.
clear,close,clc all
function [xt, time] = IFFT_oversampling(X,N,L)
% Convert an N-point frequency-domain block X to a time-domain signal,
% optionally oversampled by an integer factor L (default 1). The spectrum
% is split at N/2 and zero-padded in the middle so that the oversampled
% waveform interpolates the Nyquist-rate samples.
%
% xt   = time-domain samples, length N*L (scaled by L)
% time = sample instants, normalized to one symbol duration
if nargin < 3
    L = 1;
end
nFFT = N*L;
T = 1/nFFT;
time = [0:T:1-T];
X = reshape(X,1,[]);   % force a row vector (no conjugation)
paddedX = [X(1:N/2) zeros(1,nFFT-N) X(N/2+1:end)];
xt = L*ifft(paddedX, nFFT);
endfunction
function [PAPR_dB, AvgP_dB, PeakP_dB] = PAPR(x)
% Peak-to-average power ratio of the (possibly complex) signal x.
%
% PAPR_dB  : PAPR [dB]
% AvgP_dB  : average power [dB]
% PeakP_dB : maximum power [dB]
instPower = real(x).^2 + imag(x).^2;   % instantaneous power per sample
peakPower = max(instPower);
avgPower = sum(instPower)/length(x);
PeakP_dB = 10*log10(peakPower);
AvgP_dB = 10*log10(avgPower);
PAPR_dB = 10*log10(peakPower/avgPower);
endfunction
N = 16; % 16 point IFFT
L = 4;  % oversampling factor
i = [0:N-1];
k = 3;  % gcd(k,N) = 1
% Frequency-domain Chu (Zadoff-Chu) sequence: X(i) = exp(j*pi*k*i^2/N)
X = exp(j*k*pi/N*(i.*i));

% PAPR at the Nyquist rate (L = 1) and with L-times oversampling:
[x,time] = IFFT_oversampling(X,N);
PAPRdB = PAPR(x);
[x_os,time_os] = IFFT_oversampling(X,N,L);
PAPRdB_os = PAPR(x_os);

% Left plot: time-domain samples in the complex plane, compared against a
% reference circle of radius 0.25.
subplot(121)
plot(x,'ro');
hold on;
plot(x_os,'k*');
axis([-0.4 0.4 -0.4 0.4]);
%axis('equal');
plot(0.25*exp(j*pi/180*[0:359]))   % circle with radius 0.25

% Right plot: signal envelope with and without oversampling.
subplot(122)
plot(time,abs(x),'ro', time_os,abs(x_os),'k:*');
title('IFFT(X_i(k)), k=3, N=16, L=1, 4');
ylabel('|IFFT(X_i(k))|');
xlabel('Time (normalized by symbol duration)');
legend('L = 1','L = 4');

% Unterminated on purpose: echoes both PAPR values [dB] to the console.
PAPRdB_without_and_with_oversampling=[PAPRdB PAPRdB_os]
|
github
|
oklachumi/octave-in-communications-master
|
plot_OFDM_CCDF.m
|
.m
|
octave-in-communications-master/plot_OFDM_CCDF.m
| 1,786 |
utf_8
|
86c687d2b4ba44f0f1216e615b5357cc
|
% plot_OFDM_CCDF.m -- empirical vs. theoretical CCDF of OFDM-signal PAPR.
% NOTE(review): "clear,close,clc all" runs three separate commands; only
% clc receives 'all' -- presumably "clear all; close all; clc" was meant.
clear,close,clc all
% CCDF of OFDM Signal
% (The mapper() helper below is kept commented out for reference; the
% script calls pskmod directly instead.)
%function [mod_object] = mapper(b,N)
%% If N is given, it generates a block of N random 2^b-PSK/QAM modulated symbols.
%% Otherwise, it generates a block of 2^b-PSK/QAM modulated symbols for [0:2^b-1].
%
%M=2^b; % Modulation order or Alphabet (Symbol) size
%if b==1
%  Mod='BPSK';
%  A=1;
%  mod_object=pskmod(N,2);
%elseif b==2
%  Mod='QPSK';
%  A=1;
%  mod_object=pskmod(N,4,pi/4,'gray');
%else
%  Mod=[num2str(2^b) 'QAM'];
%  Es=1;
%  A=sqrt(3/2/(M-1)*Es);
%  mod_object=qammod(N,M);
%end
%endfunction
function [PAPR_dB, AvgP_dB, PeakP_dB] = PAPR(x)
% Peak-to-average power ratio of the (possibly complex) signal x.
%
% PAPR_dB  : PAPR [dB]
% AvgP_dB  : average power [dB]
% PeakP_dB : maximum power [dB]
instPower = real(x).^2 + imag(x).^2;   % instantaneous power per sample
peakPower = max(instPower);
avgPower = sum(instPower)/length(x);
PeakP_dB = 10*log10(peakPower);
AvgP_dB = 10*log10(avgPower);
PAPR_dB = 10*log10(peakPower/avgPower);
endfunction
Ns = 2.^[6:10];     % FFT sizes to sweep: 64, 128, ..., 1024
b=2;                % bits per symbol (QPSK)
M=2^b;
Nblk = 1e3;         % OFDM blocks simulated per FFT size
zdBs = [4:0.1:10];  % x-axis: PAPR threshold values [dB]
N_zdBs = length(zdBs);
% Theoretical CCDF of OFDM PAPR for N subcarriers
CCDF_formula=inline('1-((1-exp(-z.^2)).^N)','N','z');
for n = 1:length(Ns) % n = 1:5
    N=Ns(n); % N = 64, 128,..., 1024
    x=zeros(Nblk,N); % 1000*N
    sqN=sqrt(N);
    for k = 1:Nblk
        % NOTE(review): randint is from the legacy Communications Toolbox;
        % newer releases replace it with randi([0 M-1],1,N) -- confirm the
        % target environment before running.
        XX = randint(1,N,M);
        X = pskmod(XX,M,pi/4,'gray');
        x(k,:) = ifft(X,N)*sqN;   % IFFT scaled by sqrt(N)
        CFx(k) = PAPR(x(k,:));    % PAPR sample for the empirical CCDF
    end
    %s2 = mean(mean(abs(x)))^2/(pi/2);
    CCDF_theoretical=CCDF_formula(N,10.^(zdBs/20)); % Complementary CDF
    % Empirical CCDF: fraction of blocks whose PAPR exceeds each threshold
    for i = 1:N_zdBs
        CCDF_simulated(i) = sum(CFx>zdBs(i))/Nblk;
    end
    semilogy(zdBs,CCDF_theoretical,'k-');
    hold on;
    grid on;
    semilogy(zdBs(1:3:end),CCDF_simulated(1:3:end),'k:*');
end
axis([zdBs([1 end]) 1e-2 1]);
title('OFDM system with N-point FFT');
xlabel('PAPR0 [dB]');
ylabel('CCDF=Probability(PAPR>PAPR0)');
legend('Theoretical','Simulated');
|
github
|
oklachumi/octave-in-communications-master
|
QPSK_in_AWGN_Rayleigh_fading_channel.m
|
.m
|
octave-in-communications-master/QPSK_in_AWGN_Rayleigh_fading_channel.m
| 3,426 |
utf_8
|
5cffd1d2f9d462307ba27e5ae80a8a73
|
clear,clc,close all

function [h]=rayleigh(fd,t)
% Generate a single-path flat Rayleigh fading channel using the improved
% Jakes (sum-of-sinusoids) model from:
%   IEEE Commu letters, Vol.6, NO.6, JUNE 2002
%
% Inputs:
%   fd : maximum Doppler shift of the channel [Hz]
%   t  : sampling time sequence of the signal, sample spacing in [s]
% Output:
%   h  : complex time series of the Rayleigh channel gain (same length as t)

% Assumed number of incident waves
N=40;
wm=2*pi*fd;   % maximum Doppler shift in [rad/s]
% Number of incident waves (oscillators) per quadrant
N0=N/4;
% Real-part accumulator of the channel function
Tc=zeros(1,length(t));
% Imaginary-part accumulator of the channel function
Ts=zeros(1,length(t));
% Power-normalization coefficient
P_nor=sqrt(1/N0);
% Uniformly distributed random phase distinguishing the individual paths
theta=2*pi*rand(1,1)-pi;
for i=1:N0
    % Arrival angle of the i-th incident wave
    alfa(i)=(2*pi*i-pi+theta)/N;
    % Random phases, uniform in (-pi,pi), for each oscillator
    fi_tc=2*pi*rand(1,1)-pi;
    fi_ts=2*pi*rand(1,1)-pi;
    % Accumulate the impulse-response (sum-of-sinusoids) terms
    Tc=Tc+cos(cos(alfa(i))*wm*t+fi_tc);
    Ts=Ts+cos(sin(alfa(i))*wm*t+fi_ts);
end;
% Apply the normalization coefficient to obtain the channel function
h=P_nor*(Tc+j*Ts);
endfunction
function [DATA]=intdump(IN,num)
% Integrate-and-dump receive filter: average each consecutive group of
% num samples of IN into one output value.
%
% IN   = input sample vector (length must be a multiple of num)
% num  = samples per symbol
% DATA = row vector of per-symbol averages
outIdx = 0;
for startIdx = 1:num:length(IN)
    outIdx = outIdx + 1;
    DATA(outIdx) = mean(IN(startIdx:startIdx+num-1));
end
% return DATA
endfunction
nsamp = 8;        % samples per rectangular-pulse symbol
numsymb = 10000;  % symbols transmitted per SNR point
ts=1/(numsymb*nsamp);       % sample period
t=(0:numsymb*nsamp-1)*ts;   % sample time vector
M=4;                        % QPSK
SNR=-3:3;                   % SNR sweep [dB]
grayencod=[0 1 3 2];        % manual Gray map (unused; pskmod's 'gray' option is used instead)
for i=1:length(SNR)
    msg=randsrc(1,numsymb,[0:3]);                   % generate source symbols
    %msg_gr=grayencod(msg+1);                       % manual Gray mapping (superseded by pskmod 'gray')
    msg_tx=pskmod(msg,M,pi/M,'gray');               % QPSK modulation
    msg_tx=msg_tx(ones(nsamp,1),:)(:).';            % rectpulse shaping (repeat each symbol nsamp times)
    h=rayleigh(10,t);                               % generate Rayleigh fading (fd = 10 Hz)
    msg_tx_rale=h.*msg_tx;                          % pass the signal through the Rayleigh channel
    msg_rx=awgn(msg_tx,SNR(i),'measured');          % AWGN channel only
    msg_rx_rale=awgn(msg_tx_rale,SNR(i),'measured');%Rayleigh + AWGN
    msg_rx=intdump(msg_rx,nsamp);                   % rectpulse shaping integral&dump
    msg_rx_rale=intdump(msg_rx_rale,nsamp);         % rectpulse shaping integral&dump
    msg_demod=pskdemod(msg_rx,M,pi/M,'gray');       % QPSK demodulation
    msg_demod_rale=pskdemod(msg_rx_rale,M,pi/M,'gray'); % QPSK demodulation
    [errorBit BER(i)]      =biterr(msg,msg_demod,log2(M));   % bit error rate
    [errorBit_rale BER_rale(i)]=biterr(msg,msg_demod_rale,log2(M));
    [errorSym SER(i)]      =symerr(msg,msg_demod);           % symbol error rate
    [errorSym_rale SER_rale(i)]=symerr(msg,msg_demod_rale);
end

% Constellation plots for the AWGN-only and Rayleigh+AWGN cases:
scatterplot(msg_tx);
title('AWGN Tx constellation');
xlabel('I');
ylabel('Q');
scatterplot(msg_rx);
title('AWGN Rx constellation');
xlabel('I');
ylabel('Q');
scatterplot(msg_tx_rale);
title('AWGN+Rayleigh Tx constellation');
xlabel('I');
ylabel('Q');
scatterplot(msg_rx_rale);
title('AWGN+Rayleigh Rx constellation');
xlabel('I');
ylabel('Q');

% BER/SER curves vs SNR for both channels:
figure
semilogy(SNR,BER,'-ro',SNR,SER,'-r*',SNR,BER_rale,'-b+',SNR,SER_rale,'-b^');
legend('AWGN\_BER','AWGN\_SER','Rayleigh+AWGN\_BER','Rayleigh+AWGN\_SER','location','southwest');
title('QPSK in AWGN+Rayleigh fading channel capability');
xlabel('SNR(dB)');
ylabel('BER and SER');
|
github
|
oklachumi/octave-in-communications-master
|
plot_PL_IEEE80216d.m
|
.m
|
octave-in-communications-master/plot_PL_IEEE80216d.m
| 2,880 |
utf_8
|
b3f064f66d4c27f0305c2ec30034956b
|
clear,close,clc all

function PL = PL_IEEE80216d(fc,d,type,htx,hrx,corr_fact,mod)
% IEEE 802.16d path-loss model.
% Input
%   fc       : carrier frequency [Hz]
%   d        : distance(s) between base and terminal [m] (scalar or vector)
%   type     : selects terrain type 'A', 'B', or 'C'
%   htx      : height of transmitter [m]
%   hrx      : height of receiver [m]
%   corr_fact: if shadowing exists, set to 'ATnT' or 'Okumura'. Otherwise, 'NO'
%   mod      : set to 'mod' to get the modified IEEE 802.16d model
% output
%   PL       : path loss [dB], same length as d

Mod = 'UNMOD';
% The trailing 'mod' flag may be passed in place of any omitted optional
% argument; each nargin level therefore checks whether its last argument
% starts with 'm' and, if so, treats it as the mod flag and restores that
% parameter to its default value.
if nargin > 6
    Mod = upper(mod);
end
if nargin == 6 && corr_fact(1) == 'm'
    Mod = 'MOD';
    corr_fact = 'NO';
elseif nargin < 6
    corr_fact = 'NO';
    if nargin == 5 && hrx(1) == 'm'
        Mod = 'MOD';
        hrx = 2;
    elseif nargin < 5
        hrx = 2;
        if nargin == 4 && htx(1) == 'm'
            Mod = 'MOD';
            htx = 30;
        elseif nargin < 4
            htx = 30;
            if nargin == 3 && type(1) == 'm'
                Mod = 'MOD';
                type = 'A';
            elseif nargin < 3
                type = 'A';
            end
        end
    end
end

d0 = 100;   % reference distance [m]
Type = upper(type);
% NOTE(review): the char comparisons below assume type is a single
% character; a multi-character string would error on ~= -- TODO confirm.
if Type ~= 'A' && Type ~= 'B' && Type ~= 'C'
    disp('Error: The selected type is not supported');
    return;
end

% Frequency and receive-height correction factors for the shadowing model:
switch upper(corr_fact)
    case 'ATNT'
        Cf = 6*log10(fc/2e9);
        C_Rx = -10.8*log10(hrx/2);
    case 'OKUMURA'
        Cf = 6*log10(fc/2e9);
        if hrx <= 3
            C_Rx = -10*log10(hrx/3);
        else
            C_Rx = -20*log10(hrx/3);
        end
    case 'NO'
        Cf = 0;
        C_Rx = 0;
end

% Terrain-dependent coefficients of the path-loss exponent:
if Type == 'A'
    a = 4.6;
    b = 0.0075;
    c = 12.6;
elseif Type == 'B'
    a = 4;
    b = 0.0065;
    c = 17.1;
else
    a = 3.6;
    b = 0.005;
    c = 20;
end

lamda = 299792458/fc;       % wavelength [m]
gamma = a-b*htx+c/htx;      % path-loss exponent
d0_pr = d0;
if Mod(1) == 'M'
    % Modified model: shift the reference distance by the correction terms
    d0_pr = d0*10^-((Cf+C_Rx)/(10*gamma));
end
A = 20*log10(4*pi*d0_pr/lamda)+Cf+C_Rx;   % intercept at the reference distance

for k = 1:length(d)
    if d(k) > d0_pr
        % NOTE(review): the log term divides by d0 rather than d0_pr even
        % in the modified model -- TODO confirm against the intended
        % modified-model definition.
        PL(k) = A + 10*gamma*log10(d(k)/d0);
    else
        % Inside the reference distance: free-space path loss
        PL(k) = -10*log10((lamda/(4*pi*d(k)))^2);
    end
end
end
% plot_PL_IEEE80216d.m
% Driver script: compares the standard and modified IEEE 802.16d path-loss
% curves (terrain type 'A', AT&T shadowing correction) for two Tx/Rx
% antenna-height combinations over 1..1000 m.
fc = 2e9;
htx = [30 30];
hrx = [2 10];
distance = [1:1000];
for k = 1:2
y_IEEE16d(k,:) = PL_IEEE80216d(fc,distance,'A',htx(k),hrx(k),'atnt');
y_MIEEE16d(k,:) = PL_IEEE80216d(fc,distance,'A',htx(k),hrx(k),'atnt','mod');
end
subplot(121)
semilogx(distance,y_IEEE16d(1,:),'k:','linewidth',1.5), hold on
semilogx(distance,y_IEEE16d(2,:),'k-','linewidth',1.5), grid on
title(['IEEE 802.16d Path loss Models, f_c=',num2str(fc/1e6),'MHz'])
axis([1 1000 10 150]), xlabel('Distance[m]'), ylabel('Pathloss[dB]')
% NOTE(review): trailing 2 is the legacy legend-position argument
legend('h_{Tx}=30m, h_{Rx}=2m','h_{Tx}=30m, h_{Rx}=10m',2)
subplot(122)
semilogx(distance,y_MIEEE16d(1,:),'k:','linewidth',1.5), hold on
semilogx(distance,y_MIEEE16d(2,:),'k-','linewidth',1.5), grid on
title(['Modified IEEE 802.16d Path loss Models, f_c=', num2str(fc/1e6), 'MHz'])
axis([1 1000 10 150]), xlabel('Distance[m]'), ylabel('Pathloss[dB]')
legend('h_{Tx}=30m, h_{Rx}=2m','h_{Tx}=30m, h_{Rx}=10m',2)
|
github
|
oklachumi/octave-in-communications-master
|
PDF_of_clipped_and_filtered_OFDM_signal.m
|
.m
|
octave-in-communications-master/PDF_of_clipped_and_filtered_OFDM_signal.m
| 4,563 |
utf_8
|
2713e233c01416a58e4126ccb305b50f
|
% PDF_of_clipped_and_filtered_OFDM_signal.m
% QPSK/OFDM system for analyzing the performance of clipping and filtering technique
clear,close,clc all
function [x_clipped,sigma] = clipping(x,CL,sigma)
% Clip the magnitude of a (possibly complex) signal at CL*sigma,
% preserving the phase of each clipped sample.
% CL : clipping ratio (relative to sigma)
% sigma: RMS value of x; estimated from x itself when not supplied
if nargin < 3
    dev = x - mean(x);
    sigma = sqrt((dev*dev')/length(x));   % dev*dev' = sum(|dev|.^2)
end
limit = CL*sigma;                          % absolute clipping level
over = abs(x) > limit;                     % samples exceeding the level
x_clipped = x;
x_clipped(over) = limit*x_clipped(over)./abs(x_clipped(over));
endfunction
function [xt,time] = IFFT_oversampling(X,N,L)
% Time-domain signal from N-point spectrum X, oversampled by factor L
% (zero-padding in the middle of the spectrum). time spans one symbol,
% normalized to [0,1).
if nargin < 3, L = 1; end
Ntot = N*L;
T = 1/Ntot;
time = 0:T:1-T;
row = reshape(X,1,[]);                               % force row vector
padded = [row(1:N/2), zeros(1,Ntot-N), row(N/2+1:end)];
xt = L*ifft(padded, Ntot);                           % L compensates ifft's 1/Ntot
endfunction
function y = add_CP(x,Ncp)
% Prepend a cyclic prefix: copy the last Ncp columns of x to the front.
tail = x(:,end-Ncp+1:end);
y = [tail, x];
endfunction
% Main script: QPSK/OFDM transmit chain with passband clipping and a
% band-pass filter; plots PDFs and power spectra before/after each stage.
% NOTE(review): randint/remez are legacy (Octave communications package);
% newer MATLAB uses randi/firpm.
CR = 1.2; % Clipping Ratio
b = 2; % QPSK 2^b bits
N = 128; % FFT size
Ncp = 32; % CP size
fs = 1e6; % sampling freq
L = 8; % over sampling factor
Tsym = 1/(fs/N); % OFDM symbol duration
Ts = 1/(fs*L); % sampling period
fc = 2e6;
wc = 2*pi*fc; % Carrier frequency
t = [0:Ts:2*Tsym-Ts]/Tsym; % time vector
t0 = t((N/2-Ncp)*L); % t(256)
f = [0:fs/(N*2):L*fs-fs/(N*2)]-L*fs/2;
Fs = 8;
Norder = 104;
dens = 20; % Sampling frequency, Order, and Density factor of filter
FF = [0 1.4 1.5 2.5 2.6 Fs/2]; % Stopband/Passband/Stopband frequency edge vector
WW = [6 1 6]; % Stopband/Passband/Stopband weight vector
h = remez(Norder,FF/(Fs/2),[0 0 1 1 0 0],WW,"bandpass",dens); % BPF coefficients
XX = randint(1,N,4);
X = pskmod(XX,4,pi/4,'gray');
% X = mapper(b,N);
X(1) = 0; % QPSK modulation
x = IFFT_oversampling(X,N,L); % IFFT and oversampling
x_b = add_CP(x,Ncp*L); % Add CP
x_b_os = [zeros(1,(N/2-Ncp)*L), x_b, zeros(1,N*L/2)]; % Oversampling 256(0) 256(CP) 1024(data) 512(0)
x_p = sqrt(2)*real(x_b_os.*exp(j*2*wc*t)); % From baseband to passband
x_p_c = clipping(x_p,CR); % clip at CR*sigma
X_p_c_f= fft(filter(h,1,x_p_c)); % clip then band-pass filter
x_p_c_f = ifft(X_p_c_f);
x_b_c_f = sqrt(2)*x_p_c_f.*exp(-j*2*wc*t); % From passband to baseband
figure(1); clf
nn = (N/2-Ncp)*L + [1:N*L]; % 257~1280
nn1 = N/2*L + [-Ncp*L+1:0]; % 257~512
nn2 = N/2*L + [0:N*L]; % 512~1536
subplot(221)
plot(t(nn1)-t0, abs(x_b_os(nn1)),'b:'); % CP
hold on;
plot(t(nn2)-t0, abs(x_b_os(nn2)),'k-'); % BB signal
axis([t([nn1(1) nn2(end)])-t0 0 max(abs(x_b_os))]);
title(['Baseband signal, with CP']);
xlabel('t (normalized by symbol duration)');
ylabel('abs(x''[m])');
subplot(223) % baseband
XdB_p_os = 20*log10(abs(fft(x_b_os)));
plot(f,fftshift(XdB_p_os)-max(XdB_p_os),'k');
xlabel('frequency [Hz]');
ylabel('PSD [dB]');
axis([f([1 end]) -100 0]);
subplot(222)
% [counts,centers] = hist(___): counts per bin, bin-center values
% bar(centers,counts) draws the histogram
[pdf_x_p,bin] = hist(x_p(nn),50);
bar(bin,pdf_x_p/sum(pdf_x_p),'k');
xlabel('x');
ylabel('pdf');
title(['Unclipped passband signal']);
subplot(224) % passband
XdB_p = 20*log10(abs(fft(x_p)));
plot(f,fftshift(XdB_p)-max(XdB_p),'k');
xlabel('frequency [Hz]');
ylabel('PSD [dB]');
axis([f([1 end]) -100 0]);
figure(2); clf
subplot(221)
[pdf_x_p_c,bin] = hist(x_p_c(nn),50);
bar(bin,pdf_x_p_c/sum(pdf_x_p_c),'k');
title(['Clipped passband signal, CR=' num2str(CR)]);
xlabel('x');
ylabel('pdf');
subplot(223)
XdB_p_c = 20*log10(abs(fft(x_p_c)));
plot(f,fftshift(XdB_p_c)-max(XdB_p_c),'k');
xlabel('frequency [Hz]');
ylabel('PSD [dB]');
axis([f([1 end]) -100 0]);
subplot(222)
[pdf_x_p_c_f,bin] = hist(real(x_p_c_f(nn)),50);
bar(bin,pdf_x_p_c_f/sum(pdf_x_p_c_f),'k');
title(['Passband signal after clipping and filtering, CR=' num2str(CR)]);
xlabel('x');
ylabel('pdf');
axis([min(bin) max(bin) min(pdf_x_p_c_f/sum(pdf_x_p_c_f)) max(pdf_x_p_c_f/sum(pdf_x_p_c_f))]);
subplot(224)
XdB_p_c_f = 20*log10(abs(X_p_c_f));
plot(f,fftshift(XdB_p_c_f)-max(XdB_p_c_f),'k');
xlabel('frequency [Hz]'); ylabel('PSD [dB]');
axis([f([1 end]) -100 0]);
figure(3); clf
subplot(221)
stem(h,'k');
xlabel('tap');
ylabel('Filter coefficient h[n]');
axis([1, length(h), min(h), max(h)]);
subplot(222)
HdB = 20*log10(abs(fft(h,length(X_p_c_f))));
plot(f,fftshift(HdB),'k');
xlabel('frequency [Hz]');
ylabel('Filter freq response H [dB]');
axis([f([1 end]) -100 0]);
subplot(223)
[pdf_x_p_c_f,bin] = hist(abs(x_b_c_f(nn)),50);
bar(bin,pdf_x_p_c_f/sum(pdf_x_p_c_f),'k');
title(['Baseband signal after clipping and filtering, CR=' num2str(CR)]);
xlabel('|x|');
ylabel('pdf');
axis([min(bin) max(bin) min(pdf_x_p_c_f/sum(pdf_x_p_c_f)) max(pdf_x_p_c_f/sum(pdf_x_p_c_f))]);
subplot(224)
XdB_b_c_f = 20*log10(abs(fft(x_b_c_f)));
plot(f,fftshift(XdB_b_c_f)-max(XdB_b_c_f),'k');
xlabel('frequency [Hz]');
ylabel('PSD [dB]');
axis([f([1 end]) -100 0]);
|
github
|
oklachumi/octave-in-communications-master
|
PAPR_of_preamble.m
|
.m
|
octave-in-communications-master/PAPR_of_preamble.m
| 1,102 |
utf_8
|
b1c544e31056421e9b09e4d44a0260b0
|
% PAPR_of_preamble.m
clear,close,clc all
function [xt, time] = IFFT_oversampling(X,N,L)
% Oversampled IFFT: zero-pads the middle of the N-point spectrum X by a
% factor L and returns the L*ifft so amplitudes match the L=1 case.
% time is one symbol period normalized to [0,1).
if nargin < 3, L = 1; end
Ntot = N*L;
T = 1/Ntot;
time = 0:T:1-T;
row = reshape(X,1,[]);                               % ensure row vector
padded = [row(1:N/2), zeros(1,Ntot-N), row(N/2+1:end)];
xt = L*ifft(padded, Ntot);
endfunction
function [PAPR_dB, AvgP_dB, PeakP_dB] = PAPR(x)
% Peak-to-average power ratio of a complex signal.
% PAPR_dB : PAPR [dB]
% AvgP_dB : average power [dB]
% PeakP_dB : maximum instantaneous power [dB]
p = abs(x).^2;                 % instantaneous power |x|^2
PeakP_dB = 10*log10(max(p));
AvgP_dB = 10*log10(mean(p));
PAPR_dB = PeakP_dB - AvgP_dB;  % ratio in dB is a difference of dB values
endfunction
% Main script: PAPR of the 114 IEEE 802.16e (WiBro) preamble symbols,
% with (L=4) and without (L=1) oversampling.
% NOTE(review): reads Preamble_sym*.dat from a Windows-style relative path;
% run from the directory containing the Wibro-Preamble folder.
N = 1024;
L = 4;
Npreamble = 114;
n = 0:Npreamble-1;
for i = 1:Npreamble
X = load(['.\\Wibro-Preamble\\Preamble_sym' num2str(i-1) '.dat']);
X = X(:,1);
X = sign(X); % BPSK-like: keep only the sign of each subcarrier value
X = fftshift(X);
x = IFFT_oversampling(X,N);
PAPRdB(i) = PAPR(x);
x_os = IFFT_oversampling(X,N,L);
PAPRdB_os(i) = PAPR(x_os);
end
plot(n,PAPRdB,'-ro', n,PAPRdB_os,':*');
title('PAPR of IEEE 802.16e preamble without and with oversampling');
ylabel('|IFFT(X_i(k))|');
xlabel('Preamble index [0-113]');
legend('L = 1','L = 4', 'location','southeast');
|
github
|
oklachumi/octave-in-communications-master
|
QPSK_in_AWGN_channel.m
|
.m
|
octave-in-communications-master/QPSK_in_AWGN_channel.m
| 1,735 |
utf_8
|
2e3246c3bbb4f7c8adaee7b40d1c1141
|
clear,close,clc all
function [DATA]=intdump(IN,num)
% Integrate-and-dump: average each consecutive group of num samples.
% Assumes length(IN) is a multiple of num (errors otherwise, as before).
% Returns a row vector of group means.
DATA = mean(reshape(IN(:), num, []), 1);
end
% Main script: QPSK over an AWGN channel; measures BER/SER per SNR point
% and plots constellations and error-rate curves.
M=4; %QPSK: 4-symbol alphabet
nsamp=8;
numsymb=1e5; %symbols transmitted per SNR point
SNR=-3:3;
grayencod=[0 1 3 2]; %Gray code mapping (unused; pskmod's 'gray' handles it)
for i=1:length(SNR)
msg=randsrc(1,numsymb,[0:3]); %generate source symbols
%msg_gr=grayencod(msg+1); %manual Gray mapping (superseded by pskmod 'gray')
msg_tx=pskmod(msg,M,pi/M,'gray'); %QPSK modulation
msg_tx=msg_tx(ones(nsamp,1),:)(:).'; %rectpulse shaping
msg_rx=awgn(msg_tx,SNR(i),'measured'); %pass through AWGN channel
msg_rx=intdump(msg_rx,nsamp); %rectpulse shaping integral&dump
msg_demod=pskdemod(msg_rx,M,pi/M,'gray'); %QPSK demodulation
%[dummy graydecod]=sort(grayencod); %inverse Gray mapping
%graydecod=graydecod-1; %inverse Gray mapping
%msg_demod=graydecod(msg_demod+1); %inverse Gray mapping
[errorBit BER(i)]=biterr(msg,msg_demod,log2(M));%compute BER
[errorSym SER(i)]=symerr(msg,msg_demod); %compute SER
endfor
scatterplot(msg_tx); %transmit constellation
title('Tx constellation');
xlabel('I');
ylabel('Q');
scatterplot(msg_rx); %receive constellation
title('Rx constellation');
xlabel('I');
ylabel('Q');
figure
semilogy(SNR,BER,'-ro',SNR,SER,'-r*'); %BER and SER versus SNR
legend('BER','SER');
title('QPSK in AWGN channel');
xlabel('SNR(dB)');
ylabel('BER and SER');
|
github
|
oklachumi/octave-in-communications-master
|
Jakes_model_rayleigh_channel.m
|
.m
|
octave-in-communications-master/Jakes_model_rayleigh_channel.m
| 1,591 |
utf_8
|
67afe767aa7538cb873dbda73173b49b
|
clear,clc,close all
function [h]=rayleigh(fd,t)
%Generates a single-path flat Rayleigh fading channel using the improved
%Jakes (sum-of-sinusoids) model.
%Reference: IEEE Communications Letters, Vol.6, No.6, June 2002
%Inputs:
% fd: maximum Doppler shift of the channel [Hz]
% t: sampling time vector of the signal [s]
% h: output complex Rayleigh channel gain sequence (one value per t sample)
%number of assumed incident waves
N=40;
wm=2*pi*fd;
%incident waves (oscillators) per quadrant
N0=N/4;
%real part of the channel function
Tc=zeros(1,length(t));
%imaginary part of the channel function
Ts=zeros(1,length(t));
%power normalization factor
P_nor=sqrt(1/N0);
%uniform random phase distinguishing the paths
theta=2*pi*rand(1,1)-pi;
for ii=1:N0
%angle of arrival of the ii-th incident wave
alfa(ii)=(2*pi*ii-pi+theta)/N;
%per-oscillator random phases, uniform on (-pi,pi)
fi_tc=2*pi*rand(1,1)-pi;
fi_ts=2*pi*rand(1,1)-pi;
%accumulate the impulse-response components
Tc=Tc+cos(cos(alfa(ii))*wm*t+fi_tc);
Ts=Ts+cos(sin(alfa(ii))*wm*t+fi_ts);
end;
%apply the normalization factor to obtain the channel gain
h=P_nor*(Tc+j*Ts);
endfunction
% Driver: plot received power vs time for two Doppler spreads (10/20 Hz).
fd=10; %Doppler shift [Hz]
ts=1/1000; %channel sampling period [s]
t=0:ts:1;
h1=rayleigh(fd,t);
fd=20;
h2=rayleigh(fd,t);
subplot(2,1,1),plot(20*log10(abs(h1(1:1/ts))));
title('fd =10Hz power vs t');
xlabel('time');ylabel('power');
subplot(2,1,2),plot(20*log10(abs(h2(1:1/ts))));
title('fd=20Hz power vs t');
xlabel('time');ylabel('power');
|
github
|
oklachumi/octave-in-communications-master
|
plot_PL_Hata.m
|
.m
|
octave-in-communications-master/plot_PL_Hata.m
| 1,374 |
utf_8
|
e41a473a50cc5d7735c9a7d9df6a8b20
|
clear,close,clc all
function PL = PL_Hata(fc,d,htx,hrx,Etype)
% Hata empirical path-loss model.
% Input
%  fc    : carrier frequency [Hz]
%  d     : base-to-mobile distance [m]; scalar or vector
%  htx   : transmitter antenna height [m]
%  hrx   : receiver antenna height [m]
%  Etype : environment type ('urban','suburban','open'); default 'urban'
% Output
%  PL    : path loss [dB]
if nargin < 5
    Etype = 'URBAN';
end
fMHz = fc/(1e6);                 % Hata formulas are stated in MHz
% Mobile-antenna correction factor (large-city form, selected by band)
if fMHz >= 150 && fMHz <= 200
    a_hrx = 8.29*(log10(1.54*hrx))^2 - 1.1;
elseif fMHz > 200
    a_hrx = 3.2*(log10(11.75*hrx))^2 - 4.97;
else
    a_hrx = 0.8+(1.1*log10(fMHz)-0.7)*hrx-1.56*log10(fMHz);
end
% Basic urban path loss; distance converted from m to km
PL = 69.55+26.16*log10(fMHz)-13.82*log10(htx)-a_hrx...
     +(44.9-6.55*log10(htx))*log10(d/1000);
% Environment adjustment ('U' falls through with no adjustment)
switch upper(Etype(1))
    case 'S'
        PL = PL-2*(log10(fMHz/28))^2-5.4;
    case 'O'
        PL = PL+(18.33-4.78*log10(fMHz))*log10(fMHz)-40.97;
end
end
% plot_PL_Hata.m
% Driver script: Hata path loss vs distance for urban/suburban/open
% environments at 1.5 GHz.
fc = 1.5e9;
htx = 30;
hrx = 2;
distance = [1:2:31].^2; % log-spaced-ish sample points: 1..961 m
y_urban = PL_Hata(fc,distance,htx,hrx,'urban');
y_suburban = PL_Hata(fc,distance,htx,hrx,'suburban');
y_open = PL_Hata(fc,distance,htx,hrx,'open');
semilogx(distance,y_urban,'k-s', distance,y_suburban,'k-o', distance,y_open,'k-^');
grid on
axis([1 1000 40 110]);
title(['Hata PL model, f_c=', num2str(fc/1e6), 'MHz']);
xlabel('Distance [m]');
ylabel('Path loss [dB]');
legend('urban','suburban','open area','location','northwest');
|
github
|
oklachumi/octave-in-communications-master
|
single_carrier_PAPR.m
|
.m
|
octave-in-communications-master/single_carrier_PAPR.m
| 2,222 |
utf_8
|
02eebda1febb897b92ff1d81a373970b
|
clear,close,clc all
function [s,time] = modulation(x,Ts,Nos,Fc)
% modulation(X,1,32,1)
% Ts : Sampling period
% Nos: Oversampling factor
% Fc : Carrier frequency
Nx = length(x); % 4
offset = 0;
if nargin < 5
scale = 1;
T = Ts/Nos; % Scale and Oversampling period for Baseband
else
scale = sqrt(2);
T=1/Fc/2/Nos; % Scale and Oversampling period for Passband
end
t_Ts = [0:T:Ts-T];
time = [0:T:Nx*Ts-T]; % One sampling interval and whole interval
tmp = 2*pi*Fc*t_Ts+offset;
len_Ts = length(t_Ts); % 8
cos_wct = cos(tmp)*scale;
sin_wct = sin(tmp)*scale;
for n = 1:Nx
s((n-1)*len_Ts+1:n*len_Ts) = real(x(n))*cos_wct-imag(x(n))*sin_wct;
end
endfunction
function [PAPR_dB, AvgP_dB, PeakP_dB] = PAPR(x)
Nx = length(x);
xI = real(x);
xQ = imag(x);
Power = xI.*xI + xQ.*xQ;
PeakP = max(Power);
PeakP_dB = 10*log10(PeakP);
AvgP = sum(Power)/Nx;
AvgP_dB = 10*log10(AvgP);
PAPR_dB = 10*log10(PeakP/AvgP);
endfunction
%single_carrier_PAPR.m
figure(1); clf
Ts=1; % Sampling period
L=8; % Oversampling factor
Fc=1; % Carrier frequency
b=2;
M=2^b; % Modulation order or Alphabet size
XX = randint(1,4,M); % 1 3 2 2
X = pskmod(XX,M,pi/4,'gray'); % M-PSK/QAM symbol for [0:M-1]
L_=L*4; % Oversampling factor to make it look like continuous-time
[xt_pass_,time_] = modulation(X,Ts,L_,Fc); % Continuous-time
[xt_pass,time] = modulation(X,Ts,L,Fc); % L times oversampling
for i_s=1:M
xt_base(L*(i_s-1)+1:L*i_s) = X(i_s)*ones(1,L);
end
PAPR_dB_base = PAPR(xt_base);
subplot(311); stem(time,real(xt_base),'b'); hold on; ylabel('S_{I}');
subplot(312); stem(time,imag(xt_base),'b'); hold on; ylabel('S_{Q}');
subplot(313); stem(time,abs(xt_base).^2,'b'); hold on;
title(['PAPR = ' num2str(round(PAPR_dB_base*100)/100) 'dB']);
xlabel('samples'); ylabel('|S_{I}(n)|^{2}+|S_{Q}(n)|^{2}');
figure(2); clf
PAPR_dB_pass = PAPR(xt_pass);
subplot(211); stem(time,xt_pass,'r'); hold on;
plot(time_,xt_pass_,'k'); ylabel('S(n)');
subplot(212); stem(time,xt_pass.*xt_pass,'r'); hold on;
plot(time_,xt_pass_.*xt_pass_,'k');
title(['PAPR = ' num2str(round(PAPR_dB_pass*100)/100) 'dB']);
xlabel('samples'); ylabel('|S(n)|^{2}');
% PAPRs of baseband/passband signals
PAPRs_of_baseband_passband_signals=[PAPR_dB_base; PAPR_dB_pass]
|
github
|
oklachumi/octave-in-communications-master
|
channel_estimation.m
|
.m
|
octave-in-communications-master/channel_estimation.m
| 7,685 |
utf_8
|
9874b818bb7db1192271c19ca57690ae
|
%channel_estimation.m
% for LS/DFT Channel Estimation with linear/spline interpolation
clear,close,clc all;
function H_LS = LS_CE(Y,Xp,pilot_loc,Nfft,Nps,int_opt)
% Least-squares channel estimation over all Nfft subcarriers.
% Inputs:
%  Y         = frequency-domain received signal
%  Xp        = transmitted pilot symbols
%  pilot_loc = pilot subcarrier indices
%  Nfft      = FFT size
%  Nps       = pilot spacing
%  int_opt   = 'linear' or 'spline' interpolation between pilots
% Output:
%  H_LS      = LS channel estimate, length Nfft
Np = Nfft/Nps;                                % number of pilots
pilot_est = Y(pilot_loc(1:Np))./Xp(1:Np);     % per-pilot LS estimate Y/X
if lower(int_opt(1)) == 'l'
    method = 'linear';
else
    method = 'spline';
end
% Spread the pilot estimates over every subcarrier
H_LS = interpolate(pilot_est,pilot_loc,Nfft,method);
endfunction
function H_MMSE = MMSE_CE(Y,Xp,pilot_loc,Nfft,Nps,h,SNR)
% function H_MMSE = MMSE_CE(Y,Xp,pilot_loc,Nfft,Nps,h,ts,SNR)
% MMSE channel estimation function.
% Refines the per-pilot LS estimate using the channel's frequency
% correlation (derived from the RMS delay spread of h) and the SNR.
% Inputs:
% Y = Frequency-domain received signal
% Xp = Pilot signal
% pilot_loc = Pilot location
% Nfft = FFT size
% Nps = Pilot spacing
% h = Channel impulse response (time domain, used for delay statistics)
% ts = Sampling time (not used in this variant; delays are in samples)
% SNR = Signal-to-Noise Ratio[dB]
% output:
% H_MMSE = MMSE channel estimate, length Nfft
%H = fft(h,N);
snr = 10^(SNR*0.1); % dB -> linear
Np = Nfft/Nps; % # of pilot
k = 1:Np;
H_tilde = Y(1,pilot_loc(k))./Xp(k); % LS estimate at the pilots
k = 0:length(h)-1; % tap delays in samples; k_ts = k*ts;
hh = h*h'; % total channel power
tmp = h.*conj(h).*k; % tmp = h.*conj(h).*k_ts;
r = sum(tmp)/hh; % mean delay
r2 = tmp*k.'/hh; % second moment; r2 = tmp*k_ts.'/hh; A.' = transpose(A)
tau_rms = sqrt(r2-r^2); % rms delay spread
df = 1/Nfft; % subcarrier spacing (normalized); 1/(ts*Nfft);
j2pi_tau_df = j*2*pi*tau_rms*df;
% Frequency-correlation model r_f[dk] = 1/(1 + j*2*pi*tau_rms*df*dk)
% (exponential power-delay-profile assumption)
K1 = repmat([0:Nfft-1].',1,Np); % K1: Nfft*Np, each row: 0:Nfft-1
K2 = repmat([0:Np-1],Nfft,1); % K2: Nfft*Np, each row: 0:Np-1
rf = 1./(1+j2pi_tau_df*(K1-K2*Nps)); % cross-correlation: all carriers vs pilots
K3 = repmat([0:Np-1].',1,Np); % K3: Np*Np, each column: 0:Np-1
K4 = repmat([0:Np-1],Np,1); % K4: Np*Np, each column: 0:Np-1
rf2 = 1./(1+j2pi_tau_df*Nps*(K3-K4)); % auto-correlation among pilots
Rhp = rf;
Rpp = rf2 + eye(length(H_tilde),length(H_tilde))/snr; % add noise term on diagonal
H_MMSE = transpose(Rhp*inv(Rpp)*H_tilde.'); % Wiener (MMSE) filtering of LS estimates
endfunction
function H_interpolated = interpolate(H_est,pilot_loc,Nfft,method)
% Interpolate pilot-position channel estimates onto all Nfft subcarriers.
% Input: H_est     = channel estimates at the pilot positions
%        pilot_loc = pilot subcarrier indices
%        Nfft      = FFT size
%        method    = 'linear'/'spline'
% Output: H_interpolated = channel estimate at subcarriers 1:Nfft
%
% When the pilot grid does not reach a band edge, a virtual pilot is
% linearly extrapolated at carrier 1 and/or Nfft first.
if pilot_loc(1) > 1
    s = (H_est(2)-H_est(1))/(pilot_loc(2)-pilot_loc(1));
    H_est = [H_est(1)-s*(pilot_loc(1)-1), H_est];
    pilot_loc = [1, pilot_loc];
end
if pilot_loc(end) < Nfft
    s = (H_est(end)-H_est(end-1))/(pilot_loc(end)-pilot_loc(end-1));
    H_est = [H_est, H_est(end)+s*(Nfft-pilot_loc(end))];
    pilot_loc = [pilot_loc, Nfft];
end
if lower(method(1)) == 'l'
    H_interpolated = interp1(pilot_loc,H_est,1:Nfft);
else
    H_interpolated = interp1(pilot_loc,H_est,1:Nfft,'spline');
end
endfunction
% Main script: OFDM over a random 2-tap channel; compares LS (linear/spline)
% and MMSE channel estimation, each with and without DFT-based denoising.
figure(1); clf;
figure(2); clf;
Nfft = 32;
Ng = Nfft/8; % Ng = Add CP = 4
Nofdm = Nfft+Ng;
Nsym = 100;
Nps = 4; % Pilot spacing
Np = Nfft/Nps; % Numbers of pilots per OFDM symbol
Nd = Nfft-Np; % Numbers of datas per OFDM symbol
Nbps = 4;
M = 2^Nbps; % Constellation size (Nbps bits per modulated symbol)
Es = 1;
A = sqrt(3/2/(M-1)*Es); % Signal energy and QAM normalization factor
%fs = 10e6; ts = 1/fs; % Sampling frequency and Sampling period
SNRs = [0:3:30];
sq2 = sqrt(2);
for i = 1:length(SNRs)
SNR = SNRs(i);
rand('seed',1);
randn('seed',1);
MSE = zeros(1,6);
nose = 0; % Number_of_symbol_errors
for nsym = 1:Nsym
Xp = 2*(randn(1,Np)>0)-1; % Pilot sequence generation: randn -1 and 1
%Data = ((2*(randn(1,Nd)>0)-1) + j*(2*(randn(1,Nd)>0)-1))/sq2; % QPSK modulation
msgint = randint(1,Nfft-Np,M); % bit generation
Data = qammod(msgint,M)*A;
%Data = modulate(mod_object, msgint); Data = modnorm(Data,'avpow',1)*Data; % normalization
ip = 0;
pilot_loc = [];
for k = 1:Nfft % insert pilots and data at fixed frequency-domain positions
if mod(k,Nps) == 1
X(k) = Xp(floor(k/Nps)+1);
pilot_loc = [pilot_loc k];
ip = ip+1;
else
X(k) = Data(k-ip); % ip counts pilots inserted so far in this OFDM symbol
end
end
x = ifft(X,Nfft); % IFFT
xt = [x(Nfft-Ng+1:Nfft) x]; % Add CP
h = [(randn+j*randn) (randn+j*randn)/2]; % generates a (2-tap) channel
H = fft(h,Nfft);
channel_length = length(h); % True channel and its time-domain length
H_power_dB = 10*log10(abs(H.*conj(H))); % True channel power in dB
y_channel = conv(xt, h); % Channel path (convolution)
sig_pow = mean(y_channel.*conj(y_channel));
%y_aw(1,1:Nofdm) = y(1,1:Nofdm) + ...
% sqrt((10.^(-SNR/10))*sig_pow/2)*(randn(1,Nofdm)+j*randn(1,Nofdm)); % Add noise(AWGN)
yt = awgn(y_channel,SNR,'measured');
y = yt(Ng+1:Nofdm); % Remove CP
Y = fft(y); % FFT
for m = 1:3
if m == 1
H_est = LS_CE(Y,Xp,pilot_loc,Nfft,Nps,'linear');
method = 'LS-linear'; % LS estimation with linear interpolation
elseif m == 2
H_est = LS_CE(Y,Xp,pilot_loc,Nfft,Nps,'spline');
method = 'LS-spline'; % LS estimation with spline interpolation
else
H_est = MMSE_CE(Y,Xp,pilot_loc,Nfft,Nps,h,SNR);
method='MMSE'; % MMSE estimation
end
H_est_power_dB = 10*log10(abs(H_est.*conj(H_est)));
% DFT-based refinement: keep only the first channel_length taps
h_est = ifft(H_est);
h_DFT = h_est(1:channel_length);
H_DFT = fft(h_DFT,Nfft); % DFT-based channel estimation
H_DFT_power_dB = 10*log10(abs(H_DFT.*conj(H_DFT)));
if nsym == 1
figure(1);
% 319+2*m / 320+2*m encode subplot(3,2,p) positions for method m
subplot(319+2*m); plot(H_power_dB,'b','linewidth',1); grid on; hold on;
plot(H_est_power_dB,'r:+','Markersize',4,'linewidth',1);
axis([0 32 -20 10]); title(method);
xlabel('Subcarrier Index'); ylabel('Power [dB]');
legend('True Channel',method,4); set(gca,'fontsize',10);
subplot(320+2*m); plot(H_power_dB,'b','linewidth',1); grid on; hold on;
plot(H_DFT_power_dB,'r:+','Markersize',4,'linewidth',1);
axis([0 32 -20 10]); title([method ' with DFT']);
xlabel('Subcarrier Index'); ylabel('Power [dB]');
legend('True Channel',[method ' with DFT'],4); set(gca,'fontsize',10);
end
MSE(m) = MSE(m) + (H-H_est)*(H-H_est)';
MSE(m+3) = MSE(m+3) + (H-H_DFT)*(H-H_DFT)';
end
Y_eq = Y./H_est; % one-tap equalization with the last (MMSE) estimate
if nsym >= Nsym-10
figure(2);
subplot(121);
plot(Y,'.','Markersize',10);
title(['Before channel compensation']);
%axis([-3 3 -3 3]); axis('equal'); set(gca,'fontsize',10);
hold on;
subplot(122);
plot(Y_eq,'.','Markersize',10);
title(['After channel compensation']);
%axis([-3 3 -3 3]); axis('equal'); set(gca,'fontsize',10);
hold on;
end
ip = 0;
for k = 1:Nfft
if mod(k,Nps) == 1
ip = ip+1;
else
Data_extracted(k-ip) = Y_eq(k);
end
end
msg_detected = qamdemod(Data_extracted/A,M);
nose = nose + sum(msg_detected~=msgint);
end
MSEs(i,:) = MSE/(Nfft*Nsym);
end
Number_of_symbol_errors = nose
figure(3); clf;
% NOTE(review): the y axis actually shows estimation MSE, not BER
semilogy(SNRs',MSEs(:,1),'-x', SNRs',MSEs(:,3),'-o');
xlabel('SNR [dB]'); ylabel('BER');
legend('LS-linear','MMSE');
fprintf('MSE of LS-linear/LS-spline/MMSE Channel Estimation = %6.4e/%6.4e/%6.4e\n',MSEs(end,1:3));
fprintf('MSE of LS-linear/LS-spline/MMSE Channel Estimation with DFT = %6.4e/%6.4e/%6.4e\n',MSEs(end,4:6));
|
github
|
oklachumi/octave-in-communications-master
|
plot_Ray_Ric_channel.m
|
.m
|
octave-in-communications-master/plot_Ray_Ric_channel.m
| 1,079 |
utf_8
|
2d6a6159a58356dbb1271f082e0894c4
|
% plot_Ray_Ric_channel.m
clear,close,clc all
function H = Ray_model(L)
% Rayleigh fading channel: L i.i.d. complex Gaussian CN(0,1) samples
% (unit average power).
% Input : L : # of channel realizations
% Output: H : 1-by-L complex channel vector
H = complex(randn(1,L), randn(1,L))/sqrt(2);
endfunction
function H=Ric_model(K_dB,L)
% Rician fading channel: deterministic LOS component plus Rayleigh scatter,
% with LOS/scatter power ratio K (given in dB). Unit total average power.
% Input:
%  K_dB : K factor [dB]
%  L    : # of channel realizations
% Output:
%  H    : 1-by-L complex channel vector
K = 10^(K_dB/10);
los = sqrt(K/(K+1));        % amplitude of the fixed LOS term
scat = sqrt(1/(K+1));       % scaling of the diffuse (Rayleigh) term
H = los + scat*Ray_model(L);
endfunction
% Driver: histogram of |H| for Rayleigh and Rician channels (K = -40/15/30 dB).
N = 200000;
level = 30; % number of histogram bins
K_dB = [-40 15 30];
Rayleigh_ch = zeros(1,N);
Rician_ch = zeros(2,N);
color = ['k'];
line = ['-'];
marker = ['s','o','^','*'];
% Rayleigh model
% [counts,centers] = hist(___)
Rayleigh_ch = Ray_model(N);
[temp,x] = hist(abs(Rayleigh_ch(1,:)),level);
plot(x,temp,['k-' marker(1)]);
hold on
% Rician model
for i = 1:length(K_dB);
Rician_ch(i,:) = Ric_model(K_dB(i),N);
[temp x] = hist(abs(Rician_ch(i,:)),level);
plot(x,temp,['k-' marker(i+1)]);
end
xlabel('x');
ylabel('Occurance');
legend('Rayleigh','Rician, K = -40 dB','Rician, K = 15 dB','Rician, K = 30 dB');
|
github
|
mortezamg63/Edge-Detection-Back-Propagation-ANN-master
|
Edge_Detection_BP_ANN.m
|
.m
|
Edge-Detection-Back-Propagation-ANN-master/Edge_Detection_BP_ANN.m
| 3,190 |
utf_8
|
6c8f0d26c3cc8c98cac0a119e727a81e
|
function [re,V,W,V0,W0]=Edge_Detection_BP_ANN(addressImage)
% Edge detection via a small back-propagation network (4-12-4) trained on
% all 16 binary 2x2 patterns, then slid over the binarized input image.
% Input : addressImage - path of the image file to process
% Outputs: V,W   - trained input-hidden / hidden-output weights
%          V0,W0 - shared biases for the hidden / output layers
% NOTE(review): output 're' is never assigned; requesting it at the call
% site will raise an error.
Pattern=16; % all 2x2 binary patterns
Epoch=500;
Nx=4; % input units (2x2 window, flattened)
Pz=12; % hidden units
My=4; % output units (per-pixel edge decision)
Alfa=0.15; % learning rate
s=[1 1 1 1;1 1 1 0;1 1 0 1;1 1 0 0;1 0 1 1;1 0 1 0;1 0 0 1;1 0 0 0;0 1 1 1;0 1 1 0;0 1 0 1;0 1 0 0;0 0 1 1;0 0 1 0;0 0 0 1;0 0 0 0];
t=[1 1 1 1;1 1 1 1;1 1 1 1;1 1 0 0;1 1 1 1;1 0 1 0;1 0 0 1;1 0 0 1;1 1 1 1;0 1 1 0;0 1 0 1;0 1 1 0;0 0 1 1;0 1 1 0;1 0 0 1;1 1 1 1];
[x,y]=size(t);
h=zeros(x,y);
% Random initialization in [-0.5, 0.5); scalar shared biases
V=rand(Nx,Pz)-.5;
W=rand(Pz,My)-.5;
V0=rand();
W0=rand();
for E=1:Epoch
for P=1:Pattern
X=s(P,:);
% Forward pass: hidden layer (bipolar sigmoid)
for j=1:Pz
Sigma=0;
for i=1:Nx
Sigma=Sigma+X(i)*V(i,j);
end
Zin(j)=V0+Sigma;
Z(j)=SigmoidBp(Zin(j));
end
% Forward pass: output layer (identity activation)
for k=1:My
Sigma=0;
for j=1:Pz
Sigma=Sigma+Z(j)*W(j,k);
end
Yin(k)=W0+Sigma;
Y(k)=(Yin(k));
end
% Backward pass: output-layer deltas and weight/bias updates
for k=1:My
delta_k(k)=(t(P,k)-Y(k)')*PBipSigPrim(Yin(k),1);
for j=1:Pz
Delta_W(j,k)=Alfa*delta_k(k)*Z(j);
end
Delta_W0(k)=Alfa*delta_k(k);
W0=W0+Delta_W0(k);
end
% Backward pass: hidden-layer deltas and weight/bias updates
for j=1:Pz
Sigma=0;
for k=1:My
Sigma=Sigma+delta_k(k)*W(j,k);
end
delta_in_j(j)=Sigma;
delta_j(j)=delta_in_j(j)*PBipSigPrim(Zin(j),1);
for i=1:Nx
Delta_V(i,j)=Alfa*delta_j(j)*X(i);
end
Delta_V0(j)=Alfa*delta_j(j);
V0=V0+Delta_V0(j);
end
% Apply accumulated weight updates
for k=1:My
for j=1:Pz
W(j,k)=W(j,k)+Delta_W(j,k);
end
end
for j=1:Pz
for i=1:Nx
V(i,j)=V(i,j)+Delta_V(i,j);
end
end
end
end
% Inference: slide a 2x2 window over the binarized image and mark edges
mainimage=imread(addressImage);
bwimage=im2bw(mainimage);
[rows,cols]=size(bwimage);
image=ones(rows,cols); % NOTE(review): shadows the built-in image()
changeimage=zeros(rows,cols);
for i=1:1:rows-1
for j=1:1:cols-1
mask=bwimage(i:i+1,j:j+1);
inp_NN=[mask(1,1) mask(1,2) mask(2,1) mask(2,2)];
Z=SigmoidBp(inp_NN*V+V0);
Y=round(Z*W+W0);
% Write each of the 4 outputs back, but never overwrite a 0 (edge) pixel
if(image(i,j) ~=0)
image(i,j)=Y(1);
changeimage(i,j)=Y(1);
end
if(image(i,j+1)~=0)
image(i,j+1)=Y(2);
changeimage(i,j+1)=Y(2);
end
if(image(i+1,j)~=0)
image(i+1,j)=Y(3);
changeimage(i+1,j)=Y(3);
end
if(image(i+1,j+1)~=0)
image(i+1,j+1)=Y(4);
changeimage(i+1,j+1)=Y(4);
end
end
end
% NOTE(review): subplot layouts (1,2,1) and (2,2,2) are inconsistent
subplot(1,2,1);
imshow(mainimage);
subplot(2,2,2);
imshow(bwimage);
figure;imshow(image);
end
function out = PBipSigPrim(input,mode)
% Derivative of the bipolar sigmoid f: f'(x) = 0.5*(1+f(x)).*(1-f(x)).
% mode==1 : input is the pre-activation x (f is applied first)
% otherwise: input is already the activation f(x)
if mode == 1
    f = PBipSig(input);
else
    f = input;
end
out = 0.5*((1+f).*(1-f));
end
function out = SigmoidBp(input)
% Bipolar sigmoid in (-1,1); 2/(1+exp(-x))-1 is identically tanh(x/2).
out = tanh(input/2);
end
function out = PBipSig(input)
% Bipolar sigmoid in (-1,1); 2/(1+exp(-x))-1 is identically tanh(x/2).
out = tanh(input/2);
end
|
github
|
rjanalik/HPC_2017-master
|
writeMeshToVTKFile.m
|
.m
|
HPC_2017-master/Assignment7/meshpart/writeMeshToVTKFile.m
| 2,183 |
utf_8
|
f31e74ce79aa9d048e601b3e892f79fa
|
function writeMeshToVTKFile(prefix, ElementList, PointList, ElementParams, PointParams, type)
% Write an unstructured mesh plus optional per-cell/per-point scalars to a
% legacy-ASCII VTK file named <prefix>.vtk.
% Inputs:
%   prefix        - output file name without the .vtk extension
%   ElementList   - numberOfElements x verticesPerElement connectivity
%   PointList     - numberOfPoints x 3 coordinates
%   ElementParams - per-element scalars (column 1 written), may be empty
%   PointParams   - per-point scalars (column 1 written), may be empty
%   type          - VTK cell-type id written for every element
numberOfPoints = size(PointList, 1);
numberOfElements = size(ElementList, 1);
numberOfVertices = size(ElementList, 2);
% Open <prefix>.vtk for writing and emit the legacy VTK header.
% NOTE(review): "Version 5.10" is an unusual legacy-header version string;
% most writers emit 2.0/3.0 - confirm downstream readers accept it.
filename = strcat(prefix, '.vtk');
fprintf('writting mesh file %s\n', filename);
fout = fopen(filename, 'w');
fprintf(fout,'# vtk DataFile Version 5.10\n');
fprintf(fout,'Hexahedral mesh with data\n');
fprintf(fout,'ASCII\n');
fprintf(fout,'DATASET UNSTRUCTURED_GRID\n');
fprintf(fout,'POINTS %d float\n', numberOfPoints);
% now write the PointList
% -----------------------
for i = 1:numberOfPoints
x_i = PointList(i,:);
fprintf(fout,'%25.16e %25.16e %25.16e\n', x_i(1), x_i(2), x_i(3));
end
fprintf(fout,'\n');
% CELLS section: each row is "<nVertices> v1 v2 ...", 0-based indices
entries = (numberOfVertices+1)*numberOfElements;
fprintf(fout,'CELLS %d %d\n', numberOfElements, entries);
first_number = min(ElementList(:)); % shift so the smallest index becomes 0
for e = 1:numberOfElements
v_e = ElementList(e, :);
v_e = v_e - first_number;
fprintf(fout,'%d ', numberOfVertices);
for i=1:numberOfVertices
fprintf(fout,'%d ', v_e(i));
end
fprintf(fout, '\n');
end
fprintf(fout,'\n');
% Same VTK cell type for every element
fprintf(fout,'CELL_TYPES %d\n', numberOfElements);
for e = 1:numberOfElements
fprintf(fout,'%d\n', type);
end
fprintf(fout,'\n');
% Optional per-cell scalar data (first column only)
nsets = size(ElementParams,2);
if (nsets > 0)
fprintf(fout,'CELL_DATA %d\n', numberOfElements);
fprintf(fout,'SCALARS Surface float 1\n');
fprintf(fout,'LOOKUP_TABLE default\n');
for n = 1:numberOfElements
fprintf(fout,'%25.16e\n', ElementParams(n,1));
end
end
% Optional per-point scalar data (first column only)
nsets = size(PointParams,2);
if (nsets > 0)
fprintf(fout,'POINT_DATA %d\n', numberOfPoints);
fprintf(fout,'SCALARS NodePartitioning float 1\n');
fprintf(fout,'LOOKUP_TABLE default\n');
for n = 1:numberOfPoints
fprintf(fout,'%25.16e\n', PointParams(n,1));
end
end
fclose(fout);
end
|
github
|
rspurney/TuxNet-master
|
RTPSTAR_MAIN.m
|
.m
|
TuxNet-master/TuxNet-MATLAB/RTP-STAR/RTPSTAR_MAIN.m
| 9,737 |
utf_8
|
c7454da4c48a4b501e3d60bd9e55d43c
|
%Run RTP-STAR a certain number of times. Each time, we
%save the final network that is created. At the end, we count the number of
%times each of the edges appears in a network and only keep edges over a
%certain proportion.
%
%GENIE3 code reference: Huynh-Thu V. A., Irrthum A., Wehenkel L., and Geurts P.
%Inferring regulatory networks from expression data using tree-based methods.
%PLoS ONE, 5(9):e12776, 2010. Original code available at
%https://github.com/vahuynh/GENIE3.
%
%Parameters:
%numiters: number of times to run RTP-STAR and combine results. default
%value = 1 (optional). We recommend up to 100 iterations if you are using
%spatial clustering as the first step is pseudorandom. If you are using
%temporal clustering or a user-provided list of clusters, there is not as much randomness,
%so we recommend <10 iterations (even 1 in most cases will suffice)
%
%maxprop: proportion of edges to keep. default value = 0.33 (1/3 of edges).
%This is only used if numiters >1 (optional)
%
%genes_file: .csv or .txt file that contains list of DE genes in 1 column
%(REQUIRED)
%
%expression_file: .csv or .txt file that contains the expression data for
%GRN inference. See below comments for proper formatting (REQUIRED)
%
%clustering_file: .csv or .txt file that contains the clustering data for
%GRN inference. See below comments for proper formatting (optional)
%
%time_file: .csv or .txt file that contains the timecourse data for
%GRN inference. See below comments for proper formatting (optional)
%
%symbol_file: .csv or .txt file that contains genes (column 1) and their
%known gene ID symbols (column 2) (optional)
%
%connecthubs: allows you to connect the hubs of each cluster, where hubs
%are defined as the node(s) with the most output edges in each cluster.
%Default is true (optional)
%
%clusteringseed: if you are clustering, a seed that you can set for the
%clustering. Otherwise, the clustering is different every time.
%Default is no seed (different clustering every time). (optional)
%
%clustering_type: variable denoting if you are using spatial or temporal
%clustering. Use "S" for spatial and "T" for temporal. Default is "S".
%(optional)
%
%usepresetclusters: set to "true" if you would like to upload your own
%cluster file to use. NOTE: function assumes your clustering file name is clusters.csv
%and has column 1 with gene names, column 2 whatever, and column 3 with cluster numbers.
%Default is false. (optional)
%
%presetclustersfile: file where your clusters are if you are uploading them
%(optional). Default is clusters.csv
%
%output_file: the name of the file where you want to write results.
%Default name is biograph_final.txt
%
%Note that, using default settings, all results files will be saved to your
%current folder on MATLAB. Please check that you are in the correct folder
%before starting the pipeline.
%
%The final text file can be imported into software such as Cytoscape for
%network visualization.
%
%Expression data should be formatted as:
%Rows are genes, columns are experiments
%Column 1: genes
%Column 2: Indicator variable (1=TF, 0=non-TF)
%Columns 3 to X: expression data with biological replicates separate (you
%can use just means if you choose to)
%
%Clustering data should be formatted as:
%Rows are genes, columns are experiments
%Column 1: genes
%Columns 2 to X: mean gene expression data
%Note that clustering data are optional
%
%Time course data should be formatted as:
%Rows are genes, columns are experiments
%Column 1: genes
%Columns 2 to X: mean gene expression data
%Note that time course data are optional
%
%Author:
%Natalie M. Clark
%Email: [email protected]
%Last updated: March 18, 2019
function RTPSTAR_MAIN(numiters, maxprop, genes_file, expression_file, clustering_file, timecourse_file, symbol_file, connecthubs, clusteringseed, clustering_type, usepresetclusters, presetclustersfile, output_file)
%RTPSTAR_MAIN Driver for the RTP-STAR regression-tree GRN inference pipeline.
%   Runs regression_tree_pipeline numiters times and, when numiters > 1,
%   keeps the consensus edges: those present in at least maxprop of the
%   iterations, relaxed so that at least 2*(number of TFs) edges survive
%   when possible. Output edge weights are the fraction of iterations each
%   edge appeared in; results are written to output_file. See the header
%   comments at the top of this file for the full parameter descriptions.

%check if variables exist and if not insert default values
if ~exist('numiters', 'var') || isempty(numiters)
    numiters = 1;
end
if ~exist('maxprop', 'var') || isempty(maxprop)
    maxprop = 0.33;
end
if ~exist('connecthubs', 'var') || isempty(connecthubs)
    connecthubs = true;
end
if ~exist('clusteringseed', 'var') || isempty(clusteringseed)
    clusteringseed = [];
end
if ~exist('clustering_type', 'var') || isempty(clustering_type)
    clustering_type = "S";
end
if ~exist('usepresetclusters', 'var') || isempty(usepresetclusters)
    usepresetclusters = false;
end
if ~exist('presetclustersfile', 'var') || isempty(presetclustersfile)
    presetclustersfile = 'clusters.csv';
end
if ~exist('output_file', 'var') || isempty(output_file)
    output_file = 'biograph_final.txt';
end

%read in DE genes and keep only their rows of the expression table
DE_genes = readtable(genes_file);
expression = readtable(expression_file);
expression = expression(ismember(expression{:,1},DE_genes{:,1}),:);

%minimum number of edges to keep in the consensus network
%default is 2*number of TFs (column 2 is the TF indicator variable)
%this is only used if the maxprop threshold is too restrictive
threshold = 2*sum(expression{:,2}==1);

%read in clustering data (optional) and keep only DE genes
if ~exist('clustering_file', 'var') || isempty(clustering_file)
    isclustering = false;
    clustering = [];
else
    clustering = readtable(clustering_file);
    clustering = clustering(ismember(clustering{:,1},DE_genes{:,1}),:);
    isclustering = true;
end

%read in time course data (optional) and keep only DE genes
if ~exist('timecourse_file', 'var') || isempty(timecourse_file)
    istimecourse = false;
    timecourse = [];
else
    timecourse = readtable(timecourse_file);
    timecourse = timecourse(ismember(timecourse{:,1},DE_genes{:,1}),:);
    istimecourse = true;
end

%read in gene symbols; if none given, fall back to the gene IDs themselves
if exist('symbol_file','var') && ~isempty(symbol_file)
    symbol = readtable(symbol_file);
else
    symbol = table(DE_genes{:,1},DE_genes{:,1});
end

%single iteration: run the pipeline once, writing directly to output_file
if numiters==1 && usepresetclusters==false
    clusterfile = 'clusters.csv';
    regression_tree_pipeline(expression,timecourse,clustering,symbol,...
        connecthubs, clusteringseed, isclustering, clustering_type, usepresetclusters, istimecourse,...
        [],clusterfile,[],[],output_file);
%single iteration with user-supplied clusters: load clusters from file
elseif numiters==1 && usepresetclusters==true
    isclustering = true;
    clusterfile = presetclustersfile;
    regression_tree_pipeline(expression,timecourse,clustering,symbol,...
        connecthubs, clusteringseed, isclustering, clustering_type, usepresetclusters, istimecourse,...
        [],clusterfile,[],[],output_file);
%multiple iterations with user-supplied clusters: one biograph file per run
elseif usepresetclusters==true
    isclustering = true;
    clusterfile = presetclustersfile;
    for i = 1:numiters
        graphfile = strcat('biograph',int2str(i),'.txt');
        regression_tree_pipeline(expression,timecourse,clustering,symbol,...
            connecthubs, clusteringseed, isclustering, clustering_type, usepresetclusters, istimecourse,...
            [],clusterfile,[],[],graphfile);
    end
%multiple iterations, clustering the genes fresh each time
else
    for i = 1:numiters
        clusterfile = strcat('clusters',int2str(i),'.csv');
        graphfile = strcat('biograph',int2str(i),'.txt');
        regression_tree_pipeline(expression,timecourse,clustering,symbol,...
            connecthubs, clusteringseed, isclustering, clustering_type, [], istimecourse,...
            [],clusterfile,[],[],graphfile);
    end
end

%for multiple iterations, build the consensus network from the run files
if numiters>1
    %read in all the files and collect the unique edges across iterations
    for i = 1:numiters
        if i == 1
            edges = readtable(strcat('biograph',int2str(i),'.txt'),'ReadVariableNames',false,'Delimiter','\n');
            edges = table2array(edges);
        else
            newedges = readtable(strcat('biograph',int2str(i),'.txt'),'ReadVariableNames',false,'Delimiter','\n');
            newedges = table2array(newedges);
            edges = [edges; newedges];
        end
    end
    edges = unique(edges);
    %for each iteration file, count how many runs each edge appears in
    for i = 1:numiters
        network = readtable(strcat('biograph',int2str(i),'.txt'),'ReadVariableNames',false,'Delimiter','\n');
        network = table2array(network);
        presentedges = ismember(edges,network);
        if i == 1
            number = double(presentedges);
        else
            number = number+double(presentedges);
        end
    end
    proportions = table(edges, number);
    %sort edges by how often they appeared across iterations
    finaledges = sortrows(proportions,2,'descend');
    %guard: the TF-based threshold can exceed the number of unique edges,
    %which would make finaledges(threshold,2) an out-of-range index
    effthreshold = max(1, min(threshold, height(finaledges)));
    %if the number of edges with proportion >= maxprop is greater than the
    %threshold number of edges, keep all edges with proportion >= maxprop;
    %otherwise keep the threshold number of edges even though some of these
    %edges may have proportion < maxprop
    if table2array(finaledges(effthreshold,2)) < maxprop*numiters
        finaledges = finaledges(table2array(finaledges(:,2))>=table2array(finaledges(effthreshold,2)),:);
    else
        finaledges = finaledges(table2array(finaledges(:,2))>=maxprop*numiters,:);
    end
    %convert appearance counts to weights (fraction of iterations)
    finaledges = table(finaledges{:,1},table2array(finaledges(:,2))./numiters,...
        'VariableNames',{'Edge','Weight'});
    %print edges to file
    writetable(finaledges,output_file,'Delimiter',' ','QuoteStrings',false,'WriteVariableNames',false);
end
end
|
github
|
rspurney/TuxNet-master
|
regression_tree_pipeline.m
|
.m
|
TuxNet-master/TuxNet-MATLAB/RTP-STAR/regression_tree_pipeline.m
| 8,001 |
utf_8
|
b91841cf59814ad47c4117bd37a184b4
|
%NOTE: This requires the Statistics and Machine Learning and Bioinformatics
%toolboxes.
%This file runs the regression tree pipeline for GRN inference. Genes are
%first clustered (if applicable) and then networks for each cluster are
%inferred. If there are multiple clusters, clusters are connected using the
%same inference algorithm. The final network is printed to a textfile that
%can be uploaded into Cytoscape for network visualization.
%
%Parameters:
%expression_data: MATLAB table that contains the expression data, see below
%comments for proper formating.
%
%time_data: if you are using timecourse for directionality, MATLAB table
%that contains time course data, see below comments for proper formatting
%
%clustering_data: if you are clustering, MATLAB table that contains
%clustering data, see below comments for proper formatting.
%
%clusteringseed: if you are clustering, a seed that you can set for the
%clustering. Otherwise, the clustering is different every time.
%Default is no seed (different clustering every time).
%
%symbol: MATLAB table that contains known symbols for some genes
%
%isclustering: boolean variable (true/false) that indicates if you wish
%to cluster your genes before inferring networks. Default is true.
%
%clustering_type: variable denoting if you are using spatial or temporal
%clustering. Use "S" for spatial and "T" for temporal. Default is "S".
%
%usepresetclusters: boolean variable (true/false) that indicates if you
%wish to upload your own cluster file to use. If you use this option, the
%cluster file MUST be named filename_cluster, and the cluster numbers must be in column 3.
%
%istimecourse:boolean variable (true/false) that indicates if you wish
%to use a timecourse for directionality. Default is true.
%
%maxclusters:the maximum number of clusters to evaluate
%using Silhouette index. Default is floor(p/10 + 5) where p is the number
%of genes.
%
%filename_cluster: the name of the file where you want to write
%clustering results. Make sure to indicate the extension (file.txt, file.csv):
%otherwise, it defaults to a text file. Default name is clusters.csv
%
%timethreshold: fold change cutoff to use in directionality algorithm.
%Default is 1.25
%
%edgenumber: vector of 3 values that represent the thresholds to be used
%during the inference. The 3 values represent the multiplier to be used on
%the number of edges kept for a low, medium, and high number of TFs.
%
%connecthubs: allows you to connect the hubs of each cluster, where hubs
%are defined as the node(s) with the most output edges in each cluster. Default is false if left unspecified
%
%filename_results: the name of the file(s) where you want to write results.
%Default name is biograph.txt. If clustering, files will be indexed based
%on cluster number. If clustering, your final network will be your chosen
%filename+final.txt.
%
%Note that, using default settings, all results files will be saved to your
%current folder on MATLAB. Please check that you are in the correct folder
%before starting the pipeline.
%
%
%The final text file can be imported into software such as Cytoscape for
%network visualization.
%
%Clustering data should be formatted as:
%Rows are genes, columns are experiments
%Column 1: AGI numbers
%Columns 2 to X: mean gene expression data
%
%Inference data should be formatted as:
%Rows are genes, columns are experiments
%Column 1: AGI numbers
%Column 2: binary indicator variable, 1 if known TF function, 0 if not
%Columns 3 to X: expression data with biological replicates separate (you
%can use just means if you choose to)
%
%Time course data should be formatted as:
%Rows are genes, columns are experiments
%Column 1: AGI numbers
%Columns 2 to X: mean gene expression data
%Note that time course data are optional
%
%Author:
%Natalie M. Clark
%Email: [email protected]
%Last updated: March 18, 2019
function regression_tree_pipeline(expression_data, time_data, clustering_data, symbol, connecthubs, clusteringseed, isclustering, clustering_type, usepresetclusters, istimecourse, maxclusters, filename_cluster, timethreshold, edgenumber, filename_results)
%REGRESSION_TREE_PIPELINE Cluster genes (optionally) and infer a GRN per cluster.
%   Clusters the genes (or loads preset clusters from filename_cluster),
%   infers a network for each cluster with run_regressiontree, optionally
%   connects the cluster hub nodes, and writes the de-duplicated edge list
%   to filename_results. See the header comments at the top of this file
%   for the full parameter descriptions.

%check if parameters exist
%if not, set defaults
if ~exist('isclustering', 'var') || isempty(isclustering)
    isclustering = true;
end
if ~exist('clustering_type', 'var') || isempty(clustering_type)
    clustering_type = "S";
end
if ~exist('usepresetclusters', 'var') || isempty(usepresetclusters)
    usepresetclusters = false;
end
if ~exist('istimecourse', 'var') || isempty(istimecourse)
    istimecourse = true;
end
if ~exist('filename_cluster', 'var') || isempty(filename_cluster)
    filename_cluster = 'clusters.csv';
end
if ~exist('timethreshold', 'var') || isempty(timethreshold)
    timethreshold = 1.25;
end
if ~exist('edgenumber', 'var') || isempty(edgenumber)
    edgenumber = [0.5,1.5,2];
end
if ~exist('connecthubs', 'var') || isempty(connecthubs)
    connecthubs = false;
end
if ~exist('filename_results', 'var') || isempty(filename_results)
    filename_results = 'biograph.txt';
end
%check if maxclusters has been defined
%if not, leave it empty so the clustering function applies its own default
if ~exist('maxclusters', 'var')
    maxclusters = [];
end

if isclustering
    %check if using preset clusters
    if ~usepresetclusters
        %cluster the genes; an empty seed means "different clustering every
        %time" and is handled inside clustering()
        %(bug fix: the original tested isempty() on a possibly-undefined
        %variable with &&, and passed an extra [] argument to clustering(),
        %shifting all later arguments out of position)
        if ~exist('clusteringseed', 'var') || isempty(clusteringseed)
            [numclusters] = clustering(clustering_data,clustering_type,symbol,maxclusters,filename_cluster,[]);
        else
            [numclusters] = clustering(clustering_data,clustering_type,symbol,maxclusters,filename_cluster,clusteringseed);
        end
    elseif usepresetclusters
        %load user-supplied clusters; cluster numbers are in column 3
        myclusters = readtable(filename_cluster);
        numclusters = max(myclusters{:,3});
    end
end

%run inference step for each cluster
%if not clustering, we just run once on the whole gene set
if ~exist('numclusters', 'var') || isempty(numclusters)
    [~,~,bg2,~] = run_regressiontree(expression_data,time_data,[],symbol, istimecourse,[],[],timethreshold,edgenumber);
    biograph_to_text(bg2,istimecourse,filename_results);
    %remove duplicate edges from the final file
    final_table = readtable(char(filename_results),'ReadVariableNames',false,'Delimiter','\n');
    unique_edges = unique(table2cell(final_table));
    writetable(cell2table(unique_edges),char(filename_results),'WriteVariableNames',false)
else
    %one run per cluster; every cluster appends to the same results file
    currentindex = 1;
    for i = 1:numclusters
        [~,~,bg2,clusterhub] = run_regressiontree(expression_data,time_data,filename_cluster,symbol,istimecourse,[],i,timethreshold,edgenumber);
        %store node(s) with most output edges for each cluster
        if numel(clusterhub) == 1
            clusterhub_vec(1,currentindex) = clusterhub;
            currentindex = currentindex + 1;
        elseif numel(clusterhub) > 1
            clusterhub_vec(1,currentindex:(currentindex+numel(clusterhub)-1)) = clusterhub;
            currentindex = currentindex+numel(clusterhub);
        end
        %print this cluster's edges (if any) to the shared results file
        if ~isempty(bg2)
            biograph_to_text(bg2,istimecourse,char(filename_results))
        end
    end
    %connect the clusters through their hub nodes, if applicable
    if connecthubs
        [~,~,bg2,~] = run_regressiontree(expression_data,time_data,[],symbol,istimecourse,clusterhub_vec,[],timethreshold,edgenumber);
        %print results
        biograph_to_text(bg2,istimecourse,char(filename_results));
    end
    %there may be duplicate edges in the final file, so remove them
    final_table = readtable(char(filename_results),'ReadVariableNames',false,'Delimiter','\n');
    unique_edges = unique(table2cell(final_table));
    writetable(cell2table(unique_edges),char(filename_results),'WriteVariableNames',false)
end
end
|
github
|
rspurney/TuxNet-master
|
run_regressiontree.m
|
.m
|
TuxNet-master/TuxNet-MATLAB/RTP-STAR/run_regressiontree.m
| 11,435 |
utf_8
|
2934174b0588911e0be852269964fc1a
|
%Construct GRN using regression tree algorithm on an excel file
%Infers directionality using time course data
%
%Parameters:
%expression_data: MATLAB table that contains the expression data
%
%clusterfile: file that contains the genes and which cluster they are in
%
%symbol: MATLAB table that contains known symbols for some genes
%
%istimecourse:boolean variable (true/false) that indicates if you wish
%to use a timecourse for directionality. Default is true.
%
%selectedgenes: this allows you to run the network for only certain genes.
%This should be a cell array of the gene names as strings. This is not a
%required parameter and is not recommended for use outside of the pipeline
%file.
%
%clusternum: the cluster # you would like to run. Default is 1
%
%timethreshold: fold change cutoff to use in directionality algorithm.
%Default is 1.25
%
%edgenumber: vector of 3 values that represent the thresholds to be used
%during the inference. The 3 values represent the multiplier to be used on
%the number of edges kept for a low, medium, and high number of TFs.
%
%Returns
%results: matrix with weights of edges from gene i to gene j
%
%threshold: weight cutoff used in final network
%
%bg2: final biograph
%
%clusterhub: the gene to be used to connect the cluster
%
%Assumes the expression data is formatted as:
%Rows are genes, columns are experiments
%Column 1: AGI numbers
%Column 2: binary indicator variable, 1 if known TF function, 0 if not
%Columns 3 to X: expression data
%Columns X+1 to Y: time course data for directionality
%
%Author:
%Natalie M. Clark
%Email: [email protected]
%Last updated: March 18, 2019
function [results,threshold,bg2,mostout] = run_regressiontree(expression_data,time_data, clusterfile,symbol, istimecourse,selectedgenes,clusternum,timethreshold,edgenumber)
%RUN_REGRESSIONTREE Infer a (per-cluster) GRN with GENIE3 regression trees.
%   Returns the full GENIE3 weight matrix (results), the edge-weight cutoff
%   actually applied (threshold), the thresholded biograph (bg2), and the
%   node(s) with the most outgoing edges (mostout), used later to connect
%   clusters together.
%check if parameters exist
%if not, set defaults
if ~exist('istimecourse', 'var') || isempty(istimecourse)
istimecourse = true;
end
if ~exist('clusternum', 'var') || isempty(clusternum)
clusternum = 1;
end
if ~exist('timethreshold', 'var') || isempty(timethreshold)
timethreshold = 1.25;
end
if ~exist('edgenumber', 'var') || isempty(edgenumber)
edgenumber = [0.5,1.5,2];
end
%get AGI numbers
AGIs = table2cell(expression_data(:,1));
%get TF information
isTF = table2array(expression_data(:,2));
%convert tables to matrix without gene names
matrix = table2array(expression_data(:,3:size(expression_data,2)));
if istimecourse
timecourse = table2array(time_data(:,2:size(time_data,2)));
end
%if we use clustering, use only genes in the applicable cluster
if exist('clusterfile', 'var') && ~isempty(clusterfile)
%read in the clusters (cluster numbers are in column 3)
clustertable = readtable(clusterfile);
clusters = clustertable(:,3);
%get genes in cluster i
clusteredgenes = clustertable(find(table2array(clusters)==clusternum),1);
%find where those genes are in the expression data
%this accounts for if the genes are not in the same order
clusteredgenesindices = find(ismember(AGIs,table2array(clusteredgenes)));
%select only the clustered genes we want to use
matrix = matrix(clusteredgenesindices(:),:);
if istimecourse
timecourse = timecourse(clusteredgenesindices(:),:);
end
isTF = isTF(clusteredgenesindices(:));
%get symbols for the genes
symbols = table2cell(symbol(:,1:2));
genes = cell(size(clusteredgenes,1),1);
isinfile = find(ismember(clusteredgenes{:,:},symbols(:,1)));
notinfile = find(~ismember(clusteredgenes{:,:},symbols(:,1)));
for i = 1:length(isinfile)
index = isinfile(i);
name_index = find(ismember(symbols(:,1),clusteredgenes{index,:}));
%if symbol is already in the list, this will throw an error
%so if the symbol is already in use, don't use it
if i > 1 && ismember(symbols(name_index,2),genes(~cellfun(@isempty,genes)))
%NOTE(review): clusteredgenes{index,:} extracts a char array; assigning it
%into the cell array genes with () looks like it would error at runtime --
%genes{index} = ... may be intended. Confirm with a clustered run.
genes(index) = clusteredgenes{index,:};
else
genes(index) = symbols(name_index,2);
end
end
for i = 1:length(notinfile)
index = notinfile(i);
genes(index) = clusteredgenes{index,:};
end
else
%if not clustering, still get symbols
symbols = table2cell(symbol(:,1:2));
genes = cell(length(AGIs),1);
isinfile = find(ismember(AGIs,symbols(:,1)));
notinfile = find(~ismember(AGIs,symbols(:,1)));
for i = 1:length(isinfile)
index = isinfile(i);
name_index = find(ismember(symbols(:,1),AGIs(index)));
%if symbol is already in the list, this will throw an error
%so if the symbol is already in use, don't use it
if i > 1 && ismember(symbols(name_index,2),genes(~cellfun(@isempty,genes)))
genes(index) = AGIs(index);
else
genes(index) = symbols(name_index,2);
end
end
for i = 1:length(notinfile)
index = notinfile(i);
genes(index) = AGIs(index);
end
end
%if no TFs in the cluster, skip
%if only 1 gene, skip
%(all outputs are returned empty so callers can detect the skip)
if length(isTF) == 1 || sum(isTF) == 0
results=[];
bg2=[];
threshold=[];
mostout=[];
return
end
%if we are only using certain genes, only use those genes
%(used by the pipeline to connect cluster hubs)
if exist('selectedgenes', 'var') && ~isempty(selectedgenes)
selectedgenesindices = find(ismember(genes,selectedgenes));
%select only the genes we want to use
matrix = matrix(selectedgenesindices(:),:);
if istimecourse
timecourse = timecourse(selectedgenesindices(:),:);
end
genes = genes(selectedgenesindices(:));
isTF = isTF(selectedgenesindices(:));
end
%transpose the matrix
%regression tree algorithm expects rows are experiments, columns are genes
matrix = matrix';
%get which genes are TFs (candidate regulators for GENIE3)
input_vec = find(isTF);
%run GENIE3
%(Random Forests, 'sqrt' candidate regulators per split, 10000 trees)
results = genie3(matrix,input_vec, 'RF', 'sqrt', 10000);
%make biograph
bg1 = biograph(results,genes);
%get weights for the edges
edges = bg1.Edges;
%if there are no edges, we can skip the rest
%(mostout is a cell with an empty string as a sentinel value)
if isempty(edges)
threshold = 0;
bg2 = bg1;
mostout = {''};
else
%remove low weights from results
for i = 1:length(edges)
edgeweights(i) = edges(i).Weight;
end
%sort the weights
weights_sorted = sort(edgeweights,'descend');
%determine the threshold
%if there are only two nodes, we keep 1 edge
if length(genes) <= 2
numtokeep = 1;
%if there are 3 nodes, we keep 5 edges
elseif length(genes) == 3
numtokeep = 5;
%for all other edges, use predetermined threshold
else
%set thresholds based on the fraction of genes that are TFs,
%scaled by the edgenumber multipliers (low/medium/high TF count)
if sum(isTF)/length(genes) < 0.25
numtokeep = floor(edgenumber(1)*length(genes));
elseif sum(isTF)/length(genes) >= 0.25 && sum(isTF)/length(genes)<0.5
numtokeep = floor(edgenumber(2)*length(genes));
else
numtokeep = floor(edgenumber(3)*length(genes));
end
end
%if the threshold is greater than the total number of edges, keep them all
if numtokeep > numel(weights_sorted)
threshold = 0;
else
threshold = weights_sorted(numtokeep);
end
%set anything below the threshold to zero so the edge does not appear
%in the biograph
cmatrix = results;
for i = 1:size(results,1)
for j = 1:size(results,2)
if results(i,j) < threshold
cmatrix(i,j) = 0;
end
end
end
%build final biograph
bg2 = biograph(cmatrix,genes);
%use timecourse for directionality, if applicable
if istimecourse
for i = 1:size(results,1)
for j = 1:size(results,2)
%checks that there is an interaction between gene A and gene B
if cmatrix(i,j) ~= 0
%this counts the # of time points that indicate repression
repsum = 0;
%this counts the # of time points that indicate activation
actsum = 0;
%compare gene A's change over (k,k+1) with gene B's lagged
%change over (k+1,k+2)
for k = 1:size(timecourse,2)-2
%if gene A goes up, and gene B goes down, it's repression
%if gene A goes down, and gene B goes up, it's also
%repression
%fold change must be at least timethreshold to be significant
%NOTE(review): decreases are tested with "< timethreshold" rather than
%"< 1/timethreshold", so any ratio below timethreshold (including a flat
%1.0) counts as a decrease -- confirm this is the intended definition
if timecourse(i,k+1)/timecourse(i,k) > timethreshold && timecourse(j,k+2)/timecourse(j,k+1) < timethreshold
repsum = repsum+1;
elseif timecourse(i,k+1)/timecourse(i,k) < timethreshold && timecourse(j,k+2)/timecourse(j,k+1) > timethreshold
repsum = repsum+1;
%if gene A goes up, and gene B goes up, it's activation
%if gene A goes down, and gene B goes down, it's also
%repression
%fold change must be at least timethreshold to be significant
elseif timecourse(i,k+1)/timecourse(i,k) > timethreshold && timecourse(j,k+2)/timecourse(j,k+1) > timethreshold
actsum = actsum+1;
elseif timecourse(i,k+1)/timecourse(i,k) < timethreshold && timecourse(j,k+2)/timecourse(j,k+1) < timethreshold
actsum = actsum+1;
end
end
%we check number of columns-2 time points
%and see if the majority are repression or activation
%use a strict majority
%(k retains its final loop value, size(timecourse,2)-2, here)
if repsum > floor(k/2)
%if repression, turn the edge red
edge = getedgesbynodeid(bg2, genes{i}, genes{j});
edge.LineColor = [1.0 0 0];
elseif actsum > floor(k/2)
%if activation, turn the edge green
edge = getedgesbynodeid(bg2, genes{i}, genes{j});
edge.LineColor = [0 1.0 0];
end
end
end
end
end
%view the final biograph with directionality
%view(bg2)
%get stats (number of nodes, number of edges, etc)
%get(bg2)
%determine the node with the most output edges
edges = bg2.edges;
for i = 1:length(edges)
%get all of the regulators for each edge
%(edge IDs have the form 'source -> target')
current_edge = edges(i).ID;
edge_split = strsplit(current_edge,' -> ');
regulators(i) = edge_split(:,1);
end
%find the regulator names
regulator_names = unique(regulators);
%for each regulator, count the number of times it appears
for i = 1:length(regulator_names)
repsum=0;
for j = 1:length(regulators)
if strcmp(char(regulators(j)),char(regulator_names(i))) == 1
repsum = repsum+1;
end
end
totals(i)=repsum;
end
%return the node with the most regulation
%if there is a tie, keep both
maxedges = max(totals);
bestregulators = find(totals==maxedges);
mostout = regulator_names(bestregulators);
end
end
|
github
|
rspurney/TuxNet-master
|
biograph_to_text.m
|
.m
|
TuxNet-master/TuxNet-MATLAB/RTP-STAR/biograph_to_text.m
| 2,422 |
utf_8
|
f5d5af76d7c9b9a4629338931504b7dd
|
%Write biograph results to .txt file for cytoscape
%
%Parameters:
%bg2 is a biograph produced by regression tree algorithm
%
%istimecourse:boolean variable (true/false) that indicates if a timecourse
%was used for directionality. Default is true.
%
%filename is the name of the file where you want to write results.
%Default name is biograph.txt
%
%Author:
%Natalie M. Clark
%Email: [email protected]
%Last updated: March 18, 2019
function biograph_to_text(bg2,istimecourse,filename)
%BIOGRAPH_TO_TEXT Append the edges of a biograph to a text file for Cytoscape.
%   Each edge is written as 'source VERB target', where VERB is 'inhibits'
%   for red edges, 'activates' for green edges, and 'regulates' otherwise
%   (or always, when no timecourse was used for directionality).
%
%   bg2          - biograph produced by the regression tree algorithm
%   istimecourse - true if edge colors encode directionality (default true)
%   filename     - output file, opened in append mode (default 'biograph.txt')

%fill in defaults for missing/empty arguments
if ~exist('istimecourse', 'var') || isempty(istimecourse)
    istimecourse = true;
end
if ~exist('filename', 'var') || isempty(filename)
    filename = 'biograph.txt';
end

%edge IDs ('source -> target') and line colors (activation/repression)
edgeIDs = get(bg2.edges,'ID');
edgeColors = get(bg2.edges,'LineColor');

%get() returns a bare char/vector for a single edge but cell arrays for
%several; normalize to cell arrays so one loop handles every case
if isempty(edgeIDs)
    edgeIDs = {};
    edgeColors = {};
elseif ~iscell(edgeIDs)
    edgeIDs = {edgeIDs};
    edgeColors = {edgeColors};
end

%append one line per edge, choosing the verb from the edge color:
%red = inhibits, green = activates, anything else (or no timecourse) = regulates
fileID = fopen(filename,'a');
for k = 1:numel(edgeIDs)
    nodes = strsplit(edgeIDs{k}, ' -> ');
    if istimecourse && isequal(edgeColors{k},[1.0 0 0])
        verb = 'inhibits';
    elseif istimecourse && isequal(edgeColors{k},[0 1.0 0])
        verb = 'activates';
    else
        verb = 'regulates';
    end
    fprintf(fileID,'%s %s %s\n',nodes{1},verb,nodes{2});
end
fclose(fileID);
end
|
github
|
rspurney/TuxNet-master
|
clustering.m
|
.m
|
TuxNet-master/TuxNet-MATLAB/RTP-STAR/clustering.m
| 4,325 |
utf_8
|
d772e98fea22725afa8377c495b3bd16
|
%Determine appropriate number of clusters for gene expression data
%Saves the clusters to a file
%
%Parameters:
%clustering_data: MATLAB table that contains
%clustering data, see below comments for proper formatting.
%
%clustering_type: variable denoting if you are using spatial or temporal
%clustering. Use "S" for spatial and "T" for temporal. Default is "S".
%
%symbol: MATLAB table that contains known symbols for some genes
%
%maxclusters: the maximum number of clusters to evaluate
%using Silhouette index. Default is floor(number of genes/10 + 5).
%
%filetowrite: the name of the file where you want to write results. Make
%sure to indicate the extension (file.txt, file.csv): otherwise, it
%defaults to a text file. Default name is clusters.csv
%
%clusteringseed: a seed that you can set for the clustering. Otherwise,
%the clustering is different every time. Default is no seed (different
%clustering every time).
%
%Assumes the excel file is formatted as:
%Rows are genes, columns are experiments
%Column 1: AGI numbers
%Columns 2 to X: mean gene expression data
%
%Returns:
%numclusters: the optimal number of clusters
%
%s: the silhouette values for each gene. A silhouette value close to 1
%indicates that gene is highly similar to all other genes in cluster
%
%Author:
%Natalie M. Clark
%Email: [email protected]
%Last updated: March 18, 2019
function [numclusters] = clustering(clustering_data,clustering_type,symbol, maxclusters,filetowrite,clusteringseed)
%CLUSTERING Cluster normalized expression profiles and save clusters to file.
%   Chooses the number of clusters by silhouette index over the range
%   minclusters:maxclusters, using k-means for spatial ("S") data and
%   hierarchical (linkage) clustering otherwise, then writes a table of
%   (AGI, gene symbol, cluster number) to filetowrite.
%check if parameters exist
%if not, set defaults
if ~exist('filetowrite', 'var') || isempty(filetowrite)
filetowrite = 'clusters.csv';
end
%if we are using a seed, set it
%otherwise, shuffle the seed
if exist('clusteringseed', 'var') && ~isempty(clusteringseed)
rng(clusteringseed);
else
rng('shuffle');
end
%if maxclusters was not set, define it as floor(numgenes/10 + 5)
numgenes = size(clustering_data,1);
if ~exist('maxclusters', 'var') || isempty(maxclusters)
maxclusters = floor(numgenes/10+5);
end
%lower end of the candidate cluster range
%NOTE(review): when numgenes < 10 this yields minclusters = 0, and a Klist
%starting at 0 would be invalid for evalclusters -- verify callers always
%supply at least 10 genes
if floor(numgenes/10)<=5
minclusters = floor(numgenes/10);
else
minclusters = floor(numgenes/10-5);
end
if minclusters > maxclusters
minclusters = max([1, maxclusters-10]);
end
%set gene names
%if there is no gene name, use the AGI number
%NOTE(review): unlike run_regressiontree, duplicate symbols are not
%deduplicated here -- confirm symbol files never map two AGIs to one name
AGIs = table2cell(clustering_data(:,1));
symbols = table2cell(symbol(:,1:2));
genes = cell(length(AGIs),1);
isinfile = find(ismember(AGIs,symbols(:,1)));
notinfile = find(~ismember(AGIs,symbols(:,1)));
for i = 1:length(isinfile)
index = isinfile(i);
name_index = find(ismember(symbols(:,1),AGIs(index)));
genes(index) = symbols(name_index,2);
end
for i = 1:length(notinfile)
index = notinfile(i);
genes(index) = AGIs(index);
end
%convert table to matrix without gene names
matrix = table2array(clustering_data(:,2:size(clustering_data,2)));
%Normalization: standardize each gene's profile to zero mean, unit variance
M = mean(matrix,2); % compute the mean of each expression pattern
S = std(matrix,0,2); % compute the standard deviation of each expression pattern
M_mat = M*ones(1,size(matrix,2));
S_mat = S*ones(1,size(matrix,2));
clust_data_norm = (matrix-M_mat)./S_mat; %dividing by std gives us unit var (rather than std)
%if spatial, perform k-means
%if temporal, perform hierarchical clustering
if clustering_type == "S"
%determine optimal number of clusters based on silhouette index
eva = evalclusters(clust_data_norm,'kmeans','silhouette', 'Klist', minclusters:maxclusters);
%perform clustering and check silhouette index
%want an index close to 1 for all genes
numclusters = eva.OptimalK;
clust = kmeans(clust_data_norm,numclusters);
%this will display the silhouette figure
%[s,~] = silhouette(clust_data_norm,clust);
else
%determine optimal number of clusters based on silhouette index
eva = evalclusters(clust_data_norm,'linkage','silhouette', 'Klist', minclusters:maxclusters);
%perform clustering and check silhouette index
%want an index close to 1 for all genes
numclusters = eva.OptimalK;
link = linkage(clust_data_norm);
clust = cluster(link,'maxclust',numclusters);
end
%save clusters to file: columns are AGI, symbol, cluster number
clustering_data = table(AGIs,genes,clust);
writetable(clustering_data,filetowrite)
end
|
github
|
rspurney/TuxNet-master
|
init_mart.m
|
.m
|
TuxNet-master/TuxNet-MATLAB/RTP-STAR/GENIE3_MATLAB/RT/init_mart.m
| 455 |
utf_8
|
72ba513ec83e0baceb8d3368a14313a5
|
function rtensparam=init_mart(compl,mu)
%INIT_MART Parameter structure for MART (boosted regression tree ensembles).
%   rtensparam = init_mart()        uses 5 splits per tree and shrinkage 0.2
%   rtensparam = init_mart(compl)   uses compl splits per tree
%   rtensparam = init_mart(compl,mu) additionally sets the shrinkage to mu

%ensemble-level settings: 500 boosting terms, boosting mode, no bootstrap
rtensparam.nbterms=500;
rtensparam.mart=1;
%shrinkage (learning rate): caller-supplied mu, otherwise 0.2
if nargin>1
    rtensparam.martmu=mu;
else
    rtensparam.martmu=0.2;
end
rtensparam.bootstrap=0;
%per-tree settings: grow to purity, keep predictions, best-first growth
treeparam.nmin=1;
treeparam.varmin=0;
treeparam.savepred=1;
treeparam.bestfirst=1;
%tree complexity: caller-supplied number of splits, otherwise 5
if nargin>0
    treeparam.maxnbsplits=compl;
else
    treeparam.maxnbsplits=5;
end
treeparam.extratrees=0;
rtensparam.rtparam=treeparam;
|
github
|
rspurney/TuxNet-master
|
cvpredict.m
|
.m
|
TuxNet-master/TuxNet-MATLAB/RTP-STAR/GENIE3_MATLAB/RT/cvpredict.m
| 1,271 |
utf_8
|
f74f77189c401edfc7728a3ead4408d4
|
function [YPRED]=cvpredict(X,Y,cvparam,bl_learn,bl_param,verbose)
% Estimate out-of-fold predictions by k-fold cross-validation
% parameters:
% X, Y: input matrix (rows = samples) and output matrix
% bl_learn: the learning function (e.g. 'rtenslearn_c')
% bl_param: the parameters (for example the output of init_extra_trees())
% cvparam: cross-validation parameters
% cvparam.nbfolds: number of folds
% cvparam.initseed: a random seed, to reproduce always the same splitting
% cvparam.verbosebl: 1 to display messages from the base learner
% verbose: 1 to print progress for each fold
% returns YPRED: predictions for every sample, made by the model trained
% on the folds that did not contain it
N=size(X,1);
nbfolds=cvparam.nbfolds;
% seed the legacy RNG for a reproducible fold split; silently ignored on
% MATLAB releases where rand('twister',...) is unavailable or no seed is set
try
rand('twister',cvparam.initseed);
catch
end
% default verbosebl to 0 when the caller did not set it
try
cvparam.verbosebl;
catch
cvparam.verbosebl=0;
end
% random permutation of the sample indices before splitting into folds
ro=randperm(N);
cvsize=round(N/nbfolds);
YPRED=zeros(N,size(Y,2));
for t=1:nbfolds
if (verbose)
fprintf('Fold %d...\n',t);
end
if (t==nbfolds) % last fold: take all remaining samples (handles N not divisible by nbfolds)
cvts=int32(ro(((t-1)*cvsize+1):N));
cvls=int32(ro(1:((t-1)*cvsize)));
else
cvts=int32(ro(((t-1)*cvsize+1):(t*cvsize)));
cvls=int32(ro([[1:((t-1)*cvsize)],[(t*cvsize+1):N]]));
end
if (verbose)
fprintf('Size LS=%d, TS=%d...\n',length(cvls),length(cvts));
end
% train on the learning sample (cvls) and predict the held-out rows (cvts)
YPRED(cvts,:)=feval(bl_learn,X,Y,cvls,[],bl_param,X(cvts,:),cvparam.verbosebl);
end
|
github
|
rspurney/TuxNet-master
|
rtenspred.m
|
.m
|
TuxNet-master/TuxNet-MATLAB/RTP-STAR/GENIE3_MATLAB/RT/rtenspred.m
| 1,795 |
utf_8
|
a259e929ae10723171d5a1fa6b8e9254
|
function [YTS]=rtenspred(treeensemble,XTSA)
% Make predictions with an ensemble of (multiple output) regression trees
% inputs:
% treeensemble: an ensemble output by the function rtenslearn_c
% XTSA: inputs for the test cases
% Output:
% YTS: predictions for the test cases (weight-summed over all trees)
verbose=0;
% the per-tree predictor below reads the test inputs and the current tree
% through globals rather than arguments
global XTS;
global tree;
XTS=XTSA;
Nts=size(XTS,1);
T=length(treeensemble.trees);
YTS=0;
% accumulate the weighted prediction of every tree in the ensemble
for t=1:T
if (verbose)
fprintf('t=%d\n',t);
end
tree=treeensemble.trees(t);
YTS=YTS+tree.weight*rtpred();
end
function [YTS]=rtpred()
% Predict with a single multiple-output regression tree
% (reads the current tree and test inputs XTS from globals; see rtenspred)
% Output:
% YTS: output predictions for the test cases
global assignednodets
global XTS
global tree
Nts=size(XTS,1);
assignednodets=zeros(Nts,1);
verbose=0;
YTS=zeros(Nts,size(tree.predictions,2));
if (verbose)
fprintf('computation of indexes\n');
end
% route every test row down the tree to its leaf, starting at the root
getleafts(1,1:Nts);
if (verbose)
fprintf('computation of predictions\n');
end
% look up the stored leaf prediction for each test row
for i=1:Nts
YTS(i,:)=tree.predictions(tree.indexprediction(assignednodets(i)),:);
end
function getleafts(currentnode,currentrows)
% Recursively partition currentrows over the subtree rooted at currentnode
% and record in assignednodets which leaf each row ends up in
global assignednodets
global XTS
global tree
testattribute=tree.testattribute(currentnode);
if testattribute==0 % a leaf (no test attribute): assign rows to this node
assignednodets(currentrows)=currentnode;
else
% internal node: rows below the threshold go left, the rest go right
testthreshold=tree.testthreshold(currentnode);
leftind=(XTS(currentrows,testattribute)<testthreshold);
rightind=~leftind;
getleafts(tree.children(currentnode,1),currentrows(leftind));
getleafts(tree.children(currentnode,2),currentrows(rightind));
end
|
github
|
rspurney/TuxNet-master
|
init_bagging.m
|
.m
|
TuxNet-master/TuxNet-MATLAB/RTP-STAR/GENIE3_MATLAB/RT/init_bagging.m
| 250 |
utf_8
|
790cc604d5e748f4643ac219ac2fbb68
|
function rtensparam=init_bagging()
% INIT_BAGGING Default parameter structure for a bagged regression-tree ensemble.
% Returns rtensparam with ensemble-level settings and a nested rtparam struct
% holding the single-tree growing parameters.
% (Fix: removed a duplicated 'rtparam.savepred=1;' assignment.)
rtensparam.nbterms=100;   % number of trees in the ensemble
rtensparam.bootstrap=1;   % bagging: each tree is grown on a bootstrap sample
rtparam.nmin=1;           % minimum node size before splitting stops
rtparam.varmin=0;         % minimum variance required to attempt a split
rtparam.savepred=1;       % store per-leaf predictions with the tree
rtparam.bestfirst=0;      % grow depth-first rather than best-first
rtparam.extratrees=0;     % no Extra-Trees randomization
rtensparam.rtparam=rtparam;
|
github
|
rspurney/TuxNet-master
|
init_rf.m
|
.m
|
TuxNet-master/TuxNet-MATLAB/RTP-STAR/GENIE3_MATLAB/RT/init_rf.m
| 376 |
utf_8
|
ecff6eddea689a6452d990fc34d9fdfd
|
function rtensparam=init_rf(k)
% INIT_RF Default parameter structure for a Random Forests ensemble.
% Optional input:
%   k: number of candidate features tested at each split (the Random
%      Forests K). When omitted, the default K is chosen automatically
%      (adjustdefaultk=1).
% (Fix: removed a duplicated 'rtparam.savepred=1;' assignment.)
rtensparam.nbterms=100;   % number of trees in the ensemble
rtensparam.bootstrap=1;   % Random Forests grows each tree on a bootstrap sample
rtparam.nmin=2;           % minimum node size before splitting stops
rtparam.varmin=0;         % minimum variance required to attempt a split
rtparam.savepred=1;       % store per-leaf predictions with the tree
rtparam.bestfirst=0;      % grow depth-first rather than best-first
rtparam.rf=1;             % enable random feature subset selection at each node
rtparam.extratrees=0;     % no Extra-Trees randomization
if nargin>0
rtparam.adjustdefaultk=0;
rtparam.extratreesk=k;
else
rtparam.adjustdefaultk=1;
end
rtensparam.rtparam=rtparam;
|
github
|
rspurney/TuxNet-master
|
init_extra_trees.m
|
.m
|
TuxNet-master/TuxNet-MATLAB/RTP-STAR/GENIE3_MATLAB/RT/init_extra_trees.m
| 369 |
utf_8
|
03de82598e002348504fb03b50b0e52b
|
function rtensparam=init_extra_trees(k)
% INIT_EXTRA_TREES Default parameter structure for an Extra-Trees ensemble.
% Optional input:
%   k: number of random splits evaluated at each node (the Extra-Trees K).
%      When omitted, the default K is chosen automatically (adjustdefaultk=1).
% (Fix: removed a duplicated 'rtparam.savepred=1;' assignment.)
rtensparam.nbterms=100;   % number of trees in the ensemble
rtensparam.bootstrap=0;   % Extra-Trees uses the full learning sample, no bootstrap
rtparam.nmin=1;           % minimum node size before splitting stops
rtparam.varmin=0;         % minimum variance required to attempt a split
rtparam.savepred=1;       % store per-leaf predictions with the tree
rtparam.bestfirst=0;      % grow depth-first rather than best-first
rtparam.extratrees=1;     % enable Extra-Trees randomized split selection
if nargin>0
rtparam.adjustdefaultk=0;
rtparam.extratreesk=k;
else
rtparam.adjustdefaultk=1;
end
rtensparam.rtparam=rtparam;
|
github
|
rspurney/TuxNet-master
|
compute_rtens_variable_importance.m
|
.m
|
TuxNet-master/TuxNet-MATLAB/RTP-STAR/GENIE3_MATLAB/RT/compute_rtens_variable_importance.m
| 2,760 |
utf_8
|
b9bdf4e86da94f196544fdf6626b784f
|
function [VI]=compute_rtens_variable_importance(treeensemble,XTS,YTS)
% COMPUTE_RTENS_VARIABLE_IMPORTANCE Variable importances from an ensemble of
% (multiple output) regression trees.
% Inputs:
%   treeensemble: ensemble output by the function rtenslearn_c
%   XTS: input data on which to estimate the importances
%   YTS: output data on which to estimate the importances (XTS and YTS have
%        the same number of rows)
% Output:
%   VI: matrix whose element (i,j) is the importance of feature i for output j,
%       averaged over the trees of the ensemble
Nts=size(XTS,1);
Natt=size(XTS,2);
Nout=size(YTS,2);
VI=zeros(Natt,Nout);
% Fix: use length() instead of size(...,1). For a row struct-array of trees
% size(...,1) is 1 and all but one tree would be silently skipped; length()
% handles both orientations and matches how rtenspred iterates the ensemble.
T=length(treeensemble.trees);
for t=1:T
VI=VI+compute_rt_variable_importance(treeensemble.trees(t),XTS,YTS);
fprintf('.');
end
fprintf('\n');
% Average the per-tree importances.
VI=VI/T;
function [VI]=compute_rt_variable_importance(tree,XTS,YTS)
% Importance contribution of a single tree: every internal node credits the
% total variance reduction achieved by its split to the feature it tests.
Nts=size(XTS,1);
Natt=size(XTS,2);
Nout=size(YTS,2);
VI=zeros(Natt,Nout);
totalvar=zeros(tree.nodenumber,Nout);
assignednode=ones(Nts,1);
% Depth-first traversal using an explicit stack of open nodes.
opennodes = zeros(tree.nodenumber,1);
opennodes(1)=1;
indexopennodes=1;
% Node "total variance" = (number of rows in node) * biased variance of Y.
totalvar(1,:)=Nts*var(YTS,1,1);
while(indexopennodes > 0)
currentnode=opennodes(indexopennodes);
noderows = find(assignednode==currentnode);
Nnode = length(noderows);
testattribute=tree.testattribute(currentnode);
if (testattribute==0 || Nnode==0) % a leaf, or no sample reaches it: nothing to credit
indexopennodes=indexopennodes-1;
else
% Split the test sample between the two children.
leftind=XTS(noderows,tree.testattribute(currentnode))<tree.testthreshold(currentnode);
rightind=~leftind;
leftnode=tree.children(currentnode,1);
rightnode=tree.children(currentnode,2);
assignednode(noderows(leftind)) = leftnode;
assignednode(noderows(rightind)) = rightnode;
% Compute left/right statistics.
Nleft=sum(leftind);
Nright=sum(rightind);
totalvar(leftnode,:)=Nleft*var(YTS(noderows(leftind),:),1,1);
totalvar(rightnode,:)=Nright*var(YTS(noderows(rightind),:),1,1);
if (Nleft~=0 && Nright~=0)
% Add the variance reduction of this split to the tested variable.
TV=(totalvar(currentnode,:)-totalvar(leftnode,:)-totalvar(rightnode,:));
VI(testattribute,1:Nout)=VI(testattribute,1:Nout)+TV;
end
% Pop the current node and push the non-empty children.
indexopennodes=indexopennodes-1;
if (Nleft~=0)
indexopennodes=indexopennodes+1;
opennodes(indexopennodes)=leftnode;
end
if (Nright~=0)
indexopennodes=indexopennodes+1;
opennodes(indexopennodes)=rightnode;
end
end
end
|
github
|
rspurney/TuxNet-master
|
init_single_rt.m
|
.m
|
TuxNet-master/TuxNet-MATLAB/RTP-STAR/GENIE3_MATLAB/RT/init_single_rt.m
| 228 |
utf_8
|
90d7705e4633ed17fe3b8ec55eb02ffc
|
function rtensparam=init_single_rt()
% INIT_SINGLE_RT Parameter structure for growing a single, fully-developed
% regression tree (an "ensemble" of one tree, no bootstrap resampling).
rtparam = struct('nmin',1, ...        % split nodes down to single cases
                 'varmin',0, ...      % no variance-based stopping
                 'savepred',1, ...    % keep per-leaf predictions
                 'bestfirst',0, ...   % depth-first growth
                 'extratrees',0);     % no Extra-Trees randomization
rtensparam = struct('nbterms',1, 'bootstrap',0, 'rtparam',rtparam);
|
github
|
plantsgo/Metrics-master
|
quadraticWeightedKappa.m
|
.m
|
Metrics-master/MATLAB/metrics/quadraticWeightedKappa.m
| 1,841 |
utf_8
|
1f008d25efe57b152118d3985102f6ac
|
function score = quadraticWeightedKappa(actual, predicted, minRating, maxRating)
%QUADRATICWEIGHTEDKAPPA Calculates the quadratic weighted kappa
% quadraticWeightedKappa calculates the quadratic weighted kappa
% value, which is a measure of inter-rater agreement between two raters
% that provide discrete numeric ratings. Potential values range from -1
% (representing complete disagreement) to 1 (representing complete
% agreement). A kappa value of 0 is expected if all agreement is due to
% chance.
%
% quadraticWeightedKappa(actual, predicted): the ratings should be
% integers, and it is assumed that they contain the complete range of
% possible ratings. For example, if the rating scale varies from 0-3,
% then the data must contain a 0 and a 3.
%
% quadraticWeightedKappa(actual, predicted, minRating, maxRating), where
% minRating is the minimum possible rating and maxRating is the maximum
% possible rating (both must be supplied together).
%
% Author: Ben Hamner ([email protected])
X = [actual(:) predicted(:)];
% Fix: this guard was 'nargin==3', which with this 4-argument signature
% could never use both bounds -- an explicitly supplied minRating/maxRating
% pair (nargin==4) fell through to the else branch and was ignored.
if nargin == 4
    M = confusionMatrix(X, minRating, maxRating);
else
    M = confusionMatrix(X);
end
[dx,dy] = meshgrid(1:size(M,1),1:size(M,2));
% (max - min) replaces the Statistics Toolbox function range() so the
% metric works with base MATLAB only; the value is identical.
d = (dx-dy).^2 / (max(dx(:))-min(dx(:))).^2;
% Expected agreement matrix under independence, normalized by total count.
E = sum(M,2)*sum(M,1);E = E / sum(M(:));
score = 1 -(sum(d(:).*M(:))/sum(M(:))) / (sum(d(:).*E(:))/sum(E(:)));
function M = confusionMatrix(X, minRating, maxRating)
%CONFUSIONMATRIX Calculates the confusion matrix between two raters
% With one argument, the rating range is inferred from the data; with
% three arguments it is minRating:maxRating.
if nargin==3
    u = minRating:maxRating;
else
    u = min(X(:)):max(X(:));
end
nU = length(u);
M = zeros(nU);
for i=1:nU
    for j=1:nU
        % Count rows where rater 1 gave u(i) AND rater 2 gave u(j).
        M(i,j) = sum( (X(:,1)==u(i)) + (X(:,2)==u(j)) == 2);
    end
end
|
github
|
plantsgo/Metrics-master
|
auc.m
|
.m
|
Metrics-master/MATLAB/metrics/auc.m
| 1,139 |
utf_8
|
4d20c857e7b3755f9d0c3cbfd41f2f87
|
function auc = auc(category,posterior)
% auc = scoreAUC(category,posterior)
%
% Area under the ROC curve for a set of posterior predictions against
% binary labels. Currently limited to two classes.
%
% posterior: n*1 matrix of posterior probabilities for class 1
% category: n*1 matrix of categories {0,1}
% auc: Area under the curve
%
% Author: Ben Hamner ([email protected])
%
% Algorithm found in
% A Simple Generalisation of the Area Under the ROC
% Curve for Multiple Class Classification Problems
% David Hand and Robert Till
% http://www.springerlink.com/content/nn141j42838n7u21/fulltext.pdf
% Prefer the Statistics Toolbox tiedrank when available; otherwise fall
% back to the local implementation below.
if exist('tiedrank')
    ranks = tiedrank(posterior);
else
    ranks = tiedrank_metrics(posterior);
end
npos = sum(category==1);
nneg = sum(category<1);
% Rank-sum (Mann-Whitney) formulation of the AUC.
auc = (sum(ranks(category==1)) - npos*(npos+1)/2) / (nneg*npos);
function r = tiedrank_metrics(x)
% TIEDRANK_METRICS Ranks of x with ties replaced by their average rank.
[~,order] = sort(x);
r = zeros(size(x));
start = 1;
% Walk the sorted order; whenever a run of equal values ends (or the data
% runs out), assign every member of the run its average 1-based rank.
for pos = 2:numel(order)+1
    if pos > numel(order) || x(order(pos)) ~= x(order(start))
        r(order(start:pos-1)) = (start + pos - 1)/2;
        start = pos;
    end
end
|
github
|
plantsgo/Metrics-master
|
testRMSLE.m
|
.m
|
Metrics-master/MATLAB/metrics/test/testRMSLE.m
| 439 |
utf_8
|
e0b4e675710831cc277e5702221b1396
|
function testRMSLE()
%TESTRMSLE Unit tests for root mean squared log error (rmsle)
%
%   Author: Ben Hamner ([email protected])
fprintf('Testing RMSLE ...');
check(exp(2)-1, exp(1)-1, 1);
check([0 .5 1 1.5 2], [0 .5 1 1.5 2], 0);
check([1 2;3 exp(1)-1], [1 2;3 exp(2)-1], 0.5);
fprintf('tests passed\n');
function check(actual, prediction, expected_score)
%CHECK Asserts rmsle(actual, prediction) equals expected_score (within eps).
assert(abs(expected_score - rmsle(actual, prediction)) < eps);
|
github
|
plantsgo/Metrics-master
|
testMSLE.m
|
.m
|
Metrics-master/MATLAB/metrics/test/testMSLE.m
| 436 |
utf_8
|
02b54a49a8de0b8602a20259c5235838
|
function testMSLE()
%TESTMSLE Unit tests for mean squared log error (msle)
%
%   Author: Ben Hamner ([email protected])
fprintf('Testing MSLE ...');
check(exp(2)-1, exp(1)-1, 1);
check([0 .5 1 1.5 2], [0 .5 1 1.5 2], 0);
check([1 2;3 exp(1)-1], [1 2;3 exp(2)-1], 0.25);
fprintf('tests passed\n');
function check(actual, prediction, expected_score)
%CHECK Asserts msle(actual, prediction) equals expected_score (within eps).
assert(abs(expected_score - msle(actual, prediction)) < eps);
|
github
|
plantsgo/Metrics-master
|
testGini.m
|
.m
|
Metrics-master/MATLAB/metrics/test/testGini.m
| 735 |
utf_8
|
1cc0a13705cddc1e7944b4300c978afb
|
function testGini()
%TESTGINI Unit tests for the (unnormalized) Gini coefficient
%
%   Author: Ben Hamner ([email protected])
fprintf('Testing gini ...');
check(1:3, [10 20 30], 1/9);
check(1:3, [30 20 10], -1/9);
check([2,1,4,3], [0,0,2,1], 0.125);
check([0,20,40,0,10], [40,40,10,5,5], 0);
check([40,0,20,0,10], [1000000 40 40 5 5], 0.17142857);
check([40 20 10 0 0], [40 20 10 0 0], 0.28571429);
check([1 1 0 1], [0.86 0.26 0.52 0.32], -0.04166667);
fprintf('tests passed\n');
function check(actual, prediction, expected_score)
%CHECK Asserts gini(actual, prediction) matches expected_score to 1e-8,
% printing both values on mismatch to aid debugging.
score = gini(actual, prediction);
if abs(expected_score-score) >= 1e-8
    fprintf('\nExpected: %0.12f Predicted: %0.12f\n', expected_score, score);
end
assert(abs(expected_score-score) < 1e-8);
|
github
|
plantsgo/Metrics-master
|
testNormalizedGini.m
|
.m
|
Metrics-master/MATLAB/metrics/test/testNormalizedGini.m
| 747 |
utf_8
|
15cb844c475bdb87cf1ebfd97c4c5748
|
function testNormalizedGini()
%TESTNORMALIZEDGINI Unit tests for the normalized Gini coefficient
%
%   Author: Ben Hamner ([email protected])
fprintf('Testing gini ...');
check(1:3, [10 20 30], 1);
check(1:3, [30 20 10], -1);
check([2,1,4,3], [0,0,2,1], 1);
check([0,20,40,0,10], [40,40,10,5,5], 0);
check([40,0,20,0,10], [1000000 40 40 5 5], 0.6);
check([40 20 10 0 0], [40 20 10 0 0], 1.0);
check([1 1 0 1], [0.86 0.26 0.52 0.32], -1/3);
fprintf('tests passed\n');
function check(actual, prediction, expected_score)
%CHECK Asserts normalizedGini(actual, prediction) matches expected_score to
% 1e-8, printing both values on mismatch to aid debugging.
score = normalizedGini(actual, prediction);
if abs(expected_score-score) >= 1e-8
    fprintf('\nExpected: %0.12f Predicted: %0.12f\n', expected_score, score);
end
assert(abs(expected_score-score) < 1e-8);
|
github
|
plantsgo/Metrics-master
|
testAveragePrecisionAtK.m
|
.m
|
Metrics-master/MATLAB/metrics/test/testAveragePrecisionAtK.m
| 686 |
utf_8
|
e58b7d28b8ce1d44d7904468ec0b3008
|
function testAveragePrecisionAtK()
%TESTAVERAGEPRECISIONATK Unit tests for AP@K (averagePrecisionAtK)
%
%   Author: Ben Hamner ([email protected])
fprintf('Testing averagePrecisionAtK ...');
% Default k: predicting the five actual items first scores a perfect 1.
assert(abs(1 - averagePrecisionAtK(1:5, 1:10)) < eps);
check(1:5, [6 4 7 1 2], 2, 0.25);
check(1:5, [1 1 1 1 1], 5, 0.2);
check(1:100, [1:20 200:600], 20, 1);
check([1 3], 1:5, 3, 5/6);
check([1 2 3], [1 1 1], 3, 1/3);
check([1 2 3], [1 2 1], 3, 2/3);
fprintf('tests passed\n');
function check(actual, prediction, k, expected_score)
%CHECK Asserts averagePrecisionAtK(actual, prediction, k) == expected_score.
assert(abs(expected_score - averagePrecisionAtK(actual, prediction, k)) < eps);
|
github
|
plantsgo/Metrics-master
|
testMeanAveragePrecisionAtK.m
|
.m
|
Metrics-master/MATLAB/metrics/test/testMeanAveragePrecisionAtK.m
| 742 |
utf_8
|
e5166acb03fe3dbe8e0b561cd9a98bbe
|
function testMeanAveragePrecisionAtK()
%TESTMEANAVERAGEPRECISIONATK Unit tests for MAP@K (meanAveragePrecisionAtK)
%
%   Author: Ben Hamner ([email protected])
fprintf('Testing meanAveragePrecisionAtK ...');
% Default k over two queries: mean of a perfect and a 2/3-correct ranking.
assert(abs(5/6 - meanAveragePrecisionAtK({(1:5) [1 2 3]}, {(1:10) [1 2 4:11 3]})) < eps);
check({1:4}, {1:4}, 3, 1.0);
check({[1 3 4] [1 2 4] [1 3]}, {1:5 1:5 1:5}, 3, 0.685185185185185);
check({1:5 1:5}, {[6 4 7 1 2] [1 1 1 1 1]}, 5, 0.26);
check({[1 3] 1:3 1:3}, {1:5 [1 1 1] [1 2 1]}, 3, 11/18);
fprintf('tests passed\n');
function check(actual, prediction, k, expected_score)
%CHECK Asserts meanAveragePrecisionAtK(actual, prediction, k) == expected_score.
assert(abs(expected_score - meanAveragePrecisionAtK(actual, prediction, k)) < eps);
|
github
|
plantsgo/Metrics-master
|
testLogLoss.m
|
.m
|
Metrics-master/MATLAB/metrics/test/testLogLoss.m
| 479 |
utf_8
|
9dac22b7fa7cb76b1bc75193533f31d5
|
function testLogLoss()
%TESTLOGLOSS Unit tests for logLoss
%
%   Author: Ben Hamner ([email protected])
fprintf('Testing logLoss ...');
check([1 1 1 0 0 0], [.5 .1 .01 .9 .75 .001], 1.881797068998267);
check([1 1 1 0 0 0], [1 1 1 0 0 0], 0);
% A fully confident wrong prediction must produce an infinite loss.
assert(logLoss([1 1 0 0], [1 0 0 0]) == Inf);
fprintf('tests passed\n');
function check(actual, prediction, expected_score)
%CHECK Asserts logLoss(actual, prediction) equals expected_score (within eps).
assert(abs(expected_score - logLoss(actual, prediction)) < eps);
|
github
|
plantsgo/Metrics-master
|
testClassificationError.m
|
.m
|
Metrics-master/MATLAB/metrics/test/testClassificationError.m
| 610 |
utf_8
|
56bb56a872843828c64ef8bdf58e2395
|
function testClassificationError()
%TESTCLASSIFICATIONERROR Unit tests for classificationError
%
%   Author: Ben Hamner ([email protected])
fprintf('Testing classificationError ...');
check([1 1 1 0 0 0], [1 1 1 0 0 0], 0);
check([1 1 1 0 0 0], [1 1 1 1 0 0], 1/6);
check([1 2;3 4], [1 2;3 3], 1/4);
check({'cat' 'dog' 'bird'}, {'cat' 'dog' 'fish'}, 1/3);
check({'cat' 'dog' 'bird'}, {'caat' 'doog' 'afish'}, 1);
fprintf('tests passed\n');
function check(actual, predicted, expected_score)
%CHECK Asserts classificationError(actual, predicted) == expected_score.
assert(abs(expected_score - classificationError(actual, predicted)) < eps);
|
github
|
marthawhite/reverse-prediction-master
|
RegressionSemi.m
|
.m
|
reverse-prediction-master/algs/RegressionSemi.m
| 6,321 |
utf_8
|
8807200d21eadf0e71d1da2414a2ce78
|
function [Z, W, U, flag] = RegressionSemi(Xl, Yl, Xu, opts)
% REGRESSION_SEMI solves the alternating reverse prediction problem
% approach for the general unconstrained semisupervised setting:
%
% min_{Z,U} loss_fcn(Xl, Yl*U)/tl + mu*loss_fcn(Xu,ZU)/tu + beta*tr(UU^T)
%
%
%% Inputs:
% Xl: labeled input data
% Yl: labeled targets
% Xu: unlabeled data; if empty, then performs supervised reverse regression
% opts: options to optimization e.g. transfer function.
% See DEFAULT below for all possible options and their
% default values.
%
%% Outputs:
% Z: imputed targets for the unlabeled data
% W: forward model recovered from the completed data (only if requested)
% U: reverse model
% flag: 0 on success; optimizer flag or -1 on timeout otherwise
%
% Fix in this revision: when merging constraint structures in loss_U, the
% Aeq/beq and Apd/bpd branches previously assigned to .A/.b (copy-paste),
% clobbering the inequality constraints; they now set the correct fields.
%
% author: Martha White, University of Alberta, 2012
if nargin < 3
error('RegressionSemi requires at least Xl, Yl and Xu.');
end
DEFAULTS.mu = 0.1;
DEFAULTS.beta = 0.1; % Regularization parameter; if 0, no regularization
DEFAULTS.lambda = 0; % Instance weights; if lambda = 0, no instance weighting
% else, mu is overriden. If lambda = -1, then do norm cut
% weighting else used given instance weights
DEFAULTS.kernel = @kernel_noop;
DEFAULTS.transfer = 'Euclidean';
DEFAULTS.regularizer = @regularizer; % no kernel regularizer in backwards U, because kernel not used on Y
DEFAULTS.TOL = 1e-4;
DEFAULTS.maxiter = 500;
DEFAULTS.maxtime = 500;
DEFAULTS.numrestarts = 3;
DEFAULTS.epsilon = 1e-5;
DEFAULTS.lbfgs_params = struct('TOL', 1e-5, 'maxiter', 200);
%DEFAULTS.optimizer = @(fcn,xinit,params)(fminunc(fcn,xinit,optimset('GradObj','on', 'MaxFunEvals',10000)));
DEFAULTS.optimizer = @(fcn,xinit,params)(fmin_LBFGS(fcn,xinit,params));
DEFAULTS.verbose = 0; % 0 or 1: nothing, 2: print out optimization statements
DEFAULTS.compute_lots = 0; % if 1, then increases maxtime and maxiter
if nargin < 4
opts = DEFAULTS;
else
opts = getOptions(opts, DEFAULTS);
end
if opts.compute_lots
opts.maxtime = 600;
opts.numrestarts = 6;
end
% Obtain loss functions and potential for given transfer
[forloss,revloss,D,f,f_inv] = getLoss(opts.transfer);
if opts.verbose > 1, fprintf(1,'\nRegressionSemi-%s -> Starting...\n\n', opts.transfer); end
tl = size(Xl,1);
tu = size(Xu,1);
k = size(Yl,2);
X = [Xl;Xu];
U = [];
Z = [];
if ~isempty(Xu)
% Initialize Z with labeled data
[Z,W,U,flag] = Regression(Xl,Yl,[],opts);
Z = f(opts.kernel(Xu,Xl)*W);
Xu = opts.kernel(Xu,X);
end
Xl = opts.kernel(Xl,X);
X = opts.kernel(X,X);
n = size(X,2);
if isempty(U)
%U = randn(k, n);
% Initialize with the least-squares approximation U = pinv(Yl)*f(Xl)
U = pinv(Yl)*f(Xl);
end
fval_prev = loss_U(U(:));
starttime = cputime;
anealtimes = 0;
maxaneals = 3;
fvals = [];
Zvals = [];
for iter = 1:opts.maxiter
% Optimize U first
[Uvec,fval,flag] = opts.optimizer(@loss_U,U(:), opts.lbfgs_params);
U = reshape(Uvec,[k n]);
if flag
warning(['Regression_Semi-%s - > Optimization for U returned with' ...
'flag %g\n'], opts.transfer, flag);
end
% Optimize Z second
if ~isempty(Xu)
[Zvec,fval,flag] = opts.optimizer(@loss_Z,Z(:),opts.lbfgs_params);
Z = reshape(Zvec,[tu k]);
if flag
warning('Optimization for Z returned with flag %g and for transfer %s\n', flag, opts.transfer);
end
end
fval = loss_U(U(:));
if (fval_prev < fval)
warning(['Regression_Semi-%s -> Alternation increased function' ...
' value from %g to %g\n'], opts.transfer, fval_prev,fval);
end
timeout = (cputime-starttime > opts.maxtime);
if (abs(fval_prev-fval) < opts.TOL || timeout)
fvals = [fvals fval];
Zvals = [Zvals {Z}];
if anealtimes > maxaneals || timeout
% Keep the best Z seen across annealing restarts
[m,ind] = min(fvals);
Z = Zvals{ind};
fval = fvals(ind);
if timeout
fprintf(1,'RegressionSemi-%s -> Hit MAXTIME = %g\n', opts.transfer, opts.maxtime);
flag = -1;
end
break;
else
% perturb the current solution
Z = Z + randn(size(Z));
anealtimes = anealtimes + 1;
end
end
fval_prev = fval;
% Print out progress
if opts.verbose > 1 && mod(iter,ceil(opts.maxiter/100.0)) == 0, fprintf(1,'%g,', cputime-starttime); end
end
if opts.verbose > 1, fprintf(1,'\n\n'); end
if (iter == opts.maxiter)
warning('Regression_Semi-%s -> Optimization reached maxiter = %u', opts.transfer, opts.maxiter);
end
% Obtain forward model W
if nargout > 1
[~,W,~] = Regression(X,[Yl;Z],[],opts);
end
% Loss fcn for Z
function [f,g,constraint_opts] = loss_Z(Z)
Zmat = reshape(Z,[tu k]);
if nargout < 2
f = revloss(Xu,Zmat,U,1);
elseif nargout < 3 || nargout(revloss) == 2
[f,g] = revloss(Xu,Zmat,U,1);
g = g(:);
constraint_opts = [];
else
[f,g,constraint_opts] = revloss(Xu,Zmat,U,1);
g = g(:);
end
end
% Loss fcn for U
function [f,g,constraint_opts] = loss_U(U)
Umat = reshape(U,[k n]);
if nargout < 2
f1 = revloss(Xl,Yl,Umat,2);
f2 = revloss(Xu,Z,Umat,2);
f3 = opts.regularizer(Umat);
elseif nargout < 3 || nargout(revloss) == 2
[f1,g1] = revloss(Xl,Yl,Umat,2);
[f2,g2] = revloss(Xu,Z,Umat,2);
[f3,g3] = opts.regularizer(Umat);
g = g1+opts.mu*g2 + opts.beta*g3;
g = g(:);
constraint_opts = [];
else
[f1,g1,constraint_opts1] = revloss(Xl,Yl,Umat,2);
[f2,g2,constraint_opts2] = revloss(Xu,Z,Umat,2);
[f3,g3] = opts.regularizer(Umat);
g = g1+opts.mu*g2 + opts.beta*g3;
g = g(:);
constraint_opts = [];
if isfield(constraint_opts1, 'A')
constraint_opts.A = [constraint_opts1.A; constraint_opts2.A];
constraint_opts.b = [constraint_opts1.b; constraint_opts2.b];
end
if isfield(constraint_opts1, 'Aeq')
% Fixed: previously assigned to .A/.b, overwriting the inequality
% constraints instead of setting the equality constraints.
constraint_opts.Aeq = [constraint_opts1.Aeq; constraint_opts2.Aeq];
constraint_opts.beq = [constraint_opts1.beq; constraint_opts2.beq];
end
if isfield(constraint_opts1, 'Apd')
% Fixed: previously assigned to .A/.b, overwriting the inequality
% constraints instead of setting the positive-definite constraints.
constraint_opts.Apd = [constraint_opts1.Apd; constraint_opts2.Apd];
constraint_opts.bpd = [constraint_opts1.bpd; constraint_opts2.bpd];
end
end
f = f1+opts.mu*f2 + opts.beta*f3;
end
function [f,g] = regularizer(Umat)
% Frobenius-norm regularizer: tr(U U^T)/2, gradient U.
f = trace(Umat*Umat')/2;
g = Umat;
end
end
|
github
|
marthawhite/reverse-prediction-master
|
recoverForwardModelSemi.m
|
.m
|
reverse-prediction-master/algs/recoverForwardModelSemi.m
| 1,273 |
utf_8
|
24ad08a30c0191cb7268c0a5191a4ad4
|
function [W,U,Y,Xhat] = recoverForwardModelSemi(X,f,f_inv,CA,Lfor,tl,kernel)
% RECOVERFORWARDMODELSEMI Recover a reverse model from a clustering result.
% Inputs:
%   X: input data (one row per case; first tl rows are labeled)
%   f, f_inv: transfer function and its inverse
%   CA: the clustering algorithm (called with no arguments, returns [Y,M])
%   Lfor: the forward loss function (only used by the commented-out code below)
%   tl: number of labeled rows at the top of X
%   kernel: if provided, the forward model would be learned on the kernel
%           (currently unused -- forward-model recovery is disabled below)
% Outputs:
%   W: forward model -- currently always [] (see note below)
%   U: reverse model, U = f(M)
%   Y: rounded cluster assignments for the unlabeled rows only
%   Xhat: reconstruction f_inv(Y*U) of the inputs
t = size(X,1);
n = size(X,2);
% Step 1: Compute Y and M
[Y,M] = CA();
k = size(Y,2);
%Y = roundY(Y);
%[match, P] = align(Ytrain,Y);
%Y = roundY(Y*P);
% Step 2: Get reverse model U
U = f(M);
Xhat = f_inv(Y*U);
% Step 3: Solve for forward model
% Use regularization
% IGNORE FORWWARD MODEL FOR NOW
W = [];
%beta = 1e-5;
%if exist('kernel','var') && ~isempty(kernel)
% K = kernel(X,X);
% Wsize = [t,k];
% [W, obj] = fmin_LBFGS(@vecloss_kernel,randn(Wsize(1)*Wsize(2),1));
%else
% Wsize = [n,k];
% [W, obj] = fmin_LBFGS(@vecloss,randn(Wsize(1)*Wsize(2),1));
%end
%W = unvec(W,k);
% return Y only for unlabeled data
Y = roundY(Y);
Y = Y((tl+1):end,:);
% NOTE(review): the two nested functions below are only reachable from the
% commented-out Step 3 above. They reference 'beta' (and, for the kernel
% variant, 'K'), which are defined only inside that commented-out code, so
% re-enabling Step 3 requires uncommenting those definitions as well.
function [f,g] = vecloss(W)
% Regularized forward loss on raw inputs X; W is passed vectorized.
denom = t;
[f1,g1] = Lfor(X,unvec(W,k),Y);
f2 = beta*(W'*W)/(2*denom);
g2 = beta*W/denom;
f = f1+f2;
g = g1(:) + g2;
end
function [f,g] = vecloss_kernel(W)
% Regularized forward loss in the kernelized setting (uses K = kernel(X,X)).
Wmat = unvec(W,k);
denom = t;
[f1,g1] = Lfor(K,Wmat,Y);
f2 = beta*trace(Wmat*Wmat'*K)/(2*denom);
g2 = beta*K*Wmat/denom;
f = f1+f2;
g = g1 + g2;
g = g(:);
end
end
|
github
|
marthawhite/reverse-prediction-master
|
Regression.m
|
.m
|
reverse-prediction-master/algs/Regression.m
| 3,928 |
utf_8
|
1136384940bc98326fdc31f7ee28390e
|
function [Z, W, U, flag] = Regression(Xl, Yl, Xu, opts)
% REGRESSION solves the labelled reverse prediction problem
% approach for constrained supervised prediction
%
% min_{U} loss_fcn(Xl, Yl*U)/tl + beta*tr(UU^T)
% Z = f(Xu*W)
%
% Note: Currently does not return a reverse model, U, returns U = [].
%
%% Inputs:
% Xl: labeled input data
% Yl: labeled targets
% Xu: unlabeled data
% opts: options to optimization e.g. transfer function.
% See DEFAULT below for all possible options and their
% default values.
%
%% Outputs:
% Z: predictions f(Xu*W) for the unlabeled data ([] when Xu is empty)
% W: forward model
% U: always [] (reverse model not computed here)
% flag: optimizer return flag (0 on success)
%
% author: Martha White, University of Alberta, 2012
if nargin < 3
error('Regression requires at least Xl, Yl and Xu.');
end
DEFAULTS.beta = 0.1; % Regularization parameter; if zero, no regularization
DEFAULTS.lambda = 0; % Instance weights; if lambda = 0, no instance weighting
% else, mu is overriden. If lambda = -1, then do norm cut
% weighting else used given instance weights
DEFAULTS.kernel = @kernel_noop;
DEFAULTS.transfer = 'Euclidean';
DEFAULTS.regularizer = @regularizer;
DEFAULTS.TOL = 1e-8;
DEFAULTS.MAX_ITERS = 1000;
DEFAULTS.MAX_TIME = 500;
DEFAULTS.epsilon = 1e-5;
DEFAULTS.lbfgs_params = struct('TOL', 1e-8, 'maxiter', 100);
DEFAULTS.optimizer = @(fcn,xinit,lbfgs_opts)(fmin_LBFGS(fcn,xinit,lbfgs_opts));
%DEFAULTS.optimizer = @(fcn,xinit,params)(fminunc(fcn,xinit,optimset('GradObj','on', 'MaxFunEvals',10000)));
DEFAULTS.verbose = 0; % 0 or 1: nothing, 2: print out optimization statements
if nargin < 4
opts = DEFAULTS;
else
opts = getOptions(opts, DEFAULTS);
end
tl = size(Xl,1);
k = size(Yl,2);
% Obtain loss functions and potential for given transfer
% (D is unused here; only the forward/reverse losses and f/f_inv are needed)
[forloss,revloss,D,f,f_inv] = getLoss(opts.transfer);
X = Xl;
Xl = opts.kernel(Xl,X);
n = size(Xl,2);
% If have actual kernel, make it the kernel regularizer
%if ~isequal(opts.kernel,@kernel_noop)
% opts.regularizer = @kernel_regularizer;
%end
% Obtain forward model W
% Initilaize randomly
%W = rand(n,k);
% Initialize with approximation XW = f^{-1}(Y) gives W = pinv(X)f^{-1}(Y)
W = pinv(Xl)*f_inv(Yl);
% Use the constrained objective only when the forward loss also reports
% constraint information (more than 2 outputs).
if nargout(forloss) > 2
[Wvec,fval,flag] = opts.optimizer(@loss_W_constrained,W(:),opts.lbfgs_params);
else
[Wvec,fval,flag] = opts.optimizer(@loss_W,W(:),opts.lbfgs_params);
end
W = reshape(Wvec,[n k]);
if flag
warning('Regression -> Optimization for W returned with flag %g with transfer %s', flag, opts.transfer);
end
% Usually do not need U for supervised case; Xu might also sometimes be empty
Z = []; U = [];
if ~isempty(Xu)
Xu = opts.kernel(Xu,X);
Z = f(Xu*W);
end
% Loss fcn for W
function [f,g] = loss_W(W)
% Unconstrained forward loss plus regularizer; W arrives vectorized.
Wmat = reshape(W,[n k]);
if nargout > 1
[f,g] = forloss(Xl,Wmat,Yl);
[f,g] = addRegularizerBoth(Wmat,f,g);
else
f = forloss(Xl,Wmat,Yl);
f = addRegularizerf(Wmat,f);
end
end
function [f,g,constraint_opts] = loss_W_constrained(W)
% Same as loss_W but also propagates the loss's constraint structure.
Wmat = reshape(W,[n k]);
if nargout >= 3
[f,g,constraint_opts] = forloss(Xl,Wmat,Yl);
[f,g] = addRegularizerBoth(Wmat,f,g);
elseif nargout > 1
[f,g] = forloss(Xl,Wmat,Yl);
[f,g] = addRegularizerBoth(Wmat,f,g);
else
f = forloss(Xl,Wmat,Yl);
f = addRegularizerf(Wmat,f);
end
end
% Adds regularizers and linearizes gradient
function [f,g] = addRegularizerBoth(Wmat,f,g)
if opts.beta ~= 0
[f2,g2] = opts.regularizer(Wmat);
f = f+opts.beta*f2;
g = g+opts.beta*g2;
end
g = g(:);
end
function f = addRegularizerf(Wmat,f)
% Function-value-only variant of addRegularizerBoth.
if opts.beta ~= 0
f2 = opts.regularizer(Wmat);
f = f+opts.beta*f2;
end
end
function [f,g] = regularizer(Wmat)
% Frobenius-norm regularizer: tr(W W^T)/2, gradient W.
f = trace(Wmat*Wmat')/2;
if nargout > 1
g = Wmat;
end
end
function [f,g] = kernel_regularizer(Wmat)
% Kernelized regularizer tr(W W^T K)/2; only used if the commented-out
% kernel check above is re-enabled.
f = trace(Wmat*Wmat'*Xl')/2;
if nargout > 1
g = Xl*Wmat;
end
end
end
|
github
|
marthawhite/reverse-prediction-master
|
trg.m
|
.m
|
reverse-prediction-master/algs/competitors/trg.m
| 2,691 |
utf_8
|
42939ede8840f01326383d5522f0b27d
|
function [Z,flag] = trg(Xl, Yl, Xu, opts)
% TRG implements Cortes' transductive regression algorithm
% phi is the feature vector on training examples X_m and for
% the testing examples (unlabeled) Xu
% [Optional] If K is not provided or K==0, does primal solution
% Note that a model W is produced, but it is only applicable to
% transductively label Xu, so it is not returned.
%
% Inputs:
%   Xl, Yl: labeled inputs and targets
%   Xu: unlabeled inputs to label transductively
%   opts: optional struct; see DEFAULTS below
% Outputs:
%   Z: transductive predictions Xu*W for the unlabeled data
%   flag: always 0 (kept for interface compatibility with other methods)
%
% author: Martha White, University of Alberta, 2012
DEFAULTS.kernel = @kernel_noop; % No kernel by default
DEFAULTS.C1 = 5;
DEFAULTS.C2 = 5;
DEFAULTS.lambda = 1e-3;
if nargin < 4
opts = DEFAULTS;
else
opts = getOptions(opts, DEFAULTS);
end
flag = 0;
K = opts.kernel;
% Compute r using approach from TRG paper
% NOTE(review): distances here are inner products, not Euclidean distances;
% presumably this follows the TRG paper's neighborhood-radius heuristic --
% confirm against the reference before changing.
tl = size(Yl,1);
n = size(Xl,2);
distances = Xu*[Xl;Xu]';
num_min = min(max(ceil(tl*3/100), 5), tl);
[y,i] = sort(abs(distances));
% 'i' from the sort is immediately reused as a loop index in local_krg
% below; the sort indices themselves are never needed.
r = max(max(i(:, num_min)),1);
%r = 10;
Yu = local_krg(Xl, Yl, Xu, K, r);
% If no kernel function K, do primal solution
if isempty(K) || isequal(K, @kernel_noop)
W = get_primal_w(Xl, Yl, Xu, Yu);
else
W = get_dual_w(Xl, Yl, Xu, Yu);
end
% NOTE(review): U is assigned but never used or returned.
U = [];
Z = Xu*W;
if (isnan(W))
fprintf(1, 'trg -> Values too large when multiplying matrices. Solution was a Nan\n');
end
%----------------------------------------------------------------
% Transductive regression function: dual and primal
function W = get_primal_w(Xl, Yl, Xu, Yu)
% Primal closed-form solution combining labeled and locally-estimated
% unlabeled targets, weighted by C1 (labeled) and C2 (unlabeled).
C_m = opts.C1;
C_u = opts.C2;
N = size(Xl, 2);
W = inv(eye(N) + C_m*Xl'*Xl + C_u*Xu'*Xu)*(C_m*Xl'*Yl + C_u*Xu'*Yu);
end
function W = get_dual_w(Xl, Yl, Xu, Yu)
% Dual (kernelized) closed-form solution over the stacked data.
C_m = opts.C1;
C_u = opts.C2;
m = size(Xl, 1);
u = size(Xu, 1);
M_x = [sqrt(C_m)*Xl' sqrt(C_u)*Xu'];
M_y = [sqrt(C_m)*Yl; sqrt(C_u)*Yu];
K_matrix = M_x'*M_x;
W = M_x*inv(opts.lambda*eye(m+u, m+u) + K_matrix)*M_y;
end
%----------------------------------------------------------------
% Functions for computing local estimates. Most used is local_krg
function Yu = local_krg(Xl, Yl, Xu, K, r)
% Estimate each unlabeled target by ridge regression on the labeled
% points falling within radius r of that unlabeled point.
u = size(Xu, 1);
Yu = zeros(u, size(Yl, 2));
for i = 1:u
if isempty(K) || isequal(K, @kernel_noop)
indices = (sqrt(sum((repmat(Xu(i, :),tl, 1) - Xl).^2, 2)) <= r);
W = krg(Xl(indices, :), Yl(indices, :), [], opts.lambda);
Yu(i, :) = Xu(i, :)*W;
else
indices = (feval(K, Xu(i, :), Xl) <= r);
W = krg(Xl(indices, :), Yl(indices, :), K, opts.lambda);
Yu(i, :) = feval(K, Xu(i, :), Xl(indices, :))*W;
end
end
end
function W = krg(Xl, Yl, K, lambda)
%KRG does (kernel) ridge regression
% NOTE(review): the lambda argument is ignored in favor of opts.lambda
% below; callers currently pass opts.lambda, so the value is identical,
% but the parameter is misleading -- confirm before relying on it.
if isempty(K) || isequal(K, @kernel_noop)
W = (opts.lambda*eye(size(Xl,2)) + Xl'*Xl) \ (Xl'*Yl);
else
K_matrix = feval(K, Xl, Xl);
W = (K_matrix + opts.lambda*eye(size(Xl,1))) \ Yl;
end
end
end
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.