plateform (stringclasses, 1 value) | repo_name (stringlengths, 13-113) | name (stringlengths, 3-74) | ext (stringclasses, 1 value) | path (stringlengths, 12-229) | size (int64, 23-843k) | source_encoding (stringclasses, 9 values) | md5 (stringlengths, 32) | text (stringlengths, 23-843k)
---|---|---|---|---|---|---|---|---
github | Liusifei/Face-Hallucination-master | F16a_ACCV12TextureGradientNotIntensity.m | .m | Face-Hallucination-master/Code/Ours2/F16a_ACCV12TextureGradientNotIntensity.m | 48,117 | utf_8 | fc71ab06a0f444e4b47f80161a3e11c3 |
%Chih-Yuan Yang
%09/19/12
%load sfall, srecall, allHRexampleimages before this function
function gradients_merge = F16a_ACCV12TextureGradientNotIntensity(img_y, zooming, Gau_sigma ,sfall,srecall,allHRexampleimages)
if zooming == 4
para.Gau_sigma = 1.6;
elseif zooming == 3
para.Gau_sigma = 1.2;
end
%change here, return gradient rather than intensity
%[img_edge reliablemap_edge] = IF1_EdgePreserving(img_y,para,zooming,Gau_sigma);
[gradients_edge weightmap_edge] = IF1a_EdgePreserving(img_y,para,zooming,Gau_sigma);
[h_lr w_lr] = size(img_y);
para.lh = h_lr;
para.lw = w_lr;
para.NumberOfHCandidate = 10;
para.SimilarityFunctionSettingNumber = 1;
%load all data set to save loading time
[scanr scanra] = SearchExternalPatches(img_y,para,sfall,srecall);
para.zooming = zooming;
para.ps = 5;
para.Gau_sigma = Gau_sigma;
hrpatch = F8_ExtractAllHrPatches(img_y, para, scanr,allHRexampleimages);
[scanr_self scanra_self] = F22_SearchForSelfSimilarPatchesL2Norm(img_y,para);
para.ehrfKernelWidth = 1.0;
para.bEnablemhrf = true;
[img_texture weightmap_texture] = F11_FilterOutImproperHrPatches(img_y,hrpatch,para,scanr_self,scanra_self,scanr,scanra);
%apply backprojection on img_texture only
iternum = 10;
% img_texture_backproject = F11_BackProjection(img_y, img_texture, Gau_sigma, iternum);
breport = true;
disp('backprojection for img_texture in ACCV12');
img_texture_backproject = IF3_BackProjection(img_y, img_texture, Gau_sigma, iternum,breport);
%extract the gradient
gradients_texture = Img2Grad(img_texture_backproject);
gradients_merge = gradients_texture .* repmat(weightmap_texture,[1,1,8]) + gradients_edge .* repmat(weightmap_edge,[1,1,8]);
%debug, generate the intensity
%img_initial = imresize(img_y,zooming);
%bReport = true;
%img_merge = GenerateIntensityFromGradient(img_y,img_initial,gradients_merge,para,zooming,Gau_sigma,bReport);
%keyboard
%nomi = img_texture_backproject.*weightmap_texture + img_edge .* weightmap_edge;
%denomi = reliablemap_edge + weightmap_texture;
%img_hr = nomi ./ denomi;
%there are some 0 value of denomi around boundary
%fill these pixels as img_edge
%nanpixels = isnan(img_hr);
%img_hr(nanpixels) = img_edge(nanpixels);
%ensure there is no nan
%if nnz(isnan(img_hr))
% error('should not be here');
%end
end
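%A minimal usage sketch (illustration only; the .mat file name and image file
%name below are assumptions, not part of this repository). The merged gradients
%returned by this function are typically converted back into an intensity image
%with GenerateIntensityFromGradient, defined later in this file:
%
%  load('ExternalPatchData.mat','sfall','srecall','allHRexampleimages');  %assumed data file
%  img_y = im2double(imread('input_lr_face.png'));                        %assumed grayscale LR input
%  zooming = 4; Gau_sigma = 1.6;
%  gradients_merge = F16a_ACCV12TextureGradientNotIntensity(img_y,zooming,Gau_sigma,sfall,srecall,allHRexampleimages);
%  para.Gau_sigma = Gau_sigma;
%  img_hr = GenerateIntensityFromGradient(img_y,imresize(img_y,zooming),gradients_merge,para,zooming,Gau_sigma,true);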
function [scanr scanra] = SearchExternalPatches(img_y,para,sfall,srecall)
%how can the search be parallelized to speed this up?
ps = 5; %patch size
[lh lw] = size(img_y);
hrpatchnumber = 10;
%featurefolder = para.featurefolder;
sh = GetShGeneral(ps);
scanr = zeros(6,hrpatchnumber,lh-ps+1,lw-ps+1); %scan results: mm, quan, ii, sr, sc, similarity
smallvalue = -1;
scanr(6,:,:,:) = smallvalue;
scanra = zeros(lh-ps+1,lw-ps+1); %scan results active
%scanrsimmax = smallvalue * ones(lh-ps+1,lw-ps+1); %del this line?
quanarray = [1 2 4 8 16 32];
B = [256 128 64 32 16 8];
imlyi = im2uint8(img_y);
for qidx=1:6
quan = quanarray(qidx);
b = B(qidx);
cur_initial = floor(size(sfall{1},2)/2); %accelerate the loop by using an initial position
for rl=1:lh-ps+1
fprintf('look for lut rl:%d quan:%d\n',rl,quan);
for cl = 1:lw-ps+1
patch = imlyi(rl:rl+ps-1,cl:cl+ps-1);
fq = patch(sh);
if qidx == 1
fquan = fq;
else
fquan = fq - mod(fq,quan) + quan/2;
end
[iila mma] = LookForLookUpTable9_External(fquan,sfall{qidx},cur_initial,para); %index in lookuptable
in = length(iila); %number of returned instances (at most para.NumberOfHCandidate)
for i=1:in
ii = srecall{qidx}(1,iila(i));
sr = srecall{qidx}(2,iila(i));
sc = srecall{qidx}(3,iila(i));
%check whether the patch is in the scanr already
bSamePatch = false;
for j=1:scanra(rl,cl)
if ii == scanr(3,j,rl,cl) && sr == scanr(4,j,rl,cl) && sc == scanr(5,j,rl,cl)
bSamePatch = true;
break
end
end
if bSamePatch == false
similarity = bmm2similarity(b,mma(i),para.SimilarityFunctionSettingNumber);
if scanra(rl,cl) < hrpatchnumber
ix = scanra(rl,cl) + 1;
%to do: update scanr by similarity
%cast to double; otherwise the integer type truncates the similarity
scanr(:,ix,rl,cl) = cat(1,mma(i),quan,double(ii),double(sr),double(sc),similarity);
scanra(rl,cl) = ix;
else
[minval ix] = min(scanr(6,:,rl,cl));
if scanr(6,ix,rl,cl) < similarity
%update
scanr(:,ix,rl,cl) = cat(1,mma(i),quan,double(ii),double(sr),double(sc),similarity);
end
end
end
end
end
end
end
end
function [iila mma] = LookForLookUpTable9_External(fq,lut,cur_initial,para)
hrpatchnumber = para.NumberOfHCandidate; %default 10
fl = length(fq); %feature length
head = 1;
tail = size(lut,2);
lutsize = size(lut,2);
if exist('cur_initial','var')
if cur_initial > lutsize
cur = lutsize;
else
cur = cur_initial;
end
else
cur = round(lutsize/2);
end
cur_rec1 = cur;
%initial comparison
fqsmaller = -1;
fqlarger = 1;
fqsame = 0;
cr = 0; %compare results
mm = 0;
mmiil = 0;
%search for the largest mm
while 1
for c=1:fl
if fq(c) < lut(c,cur)
cr = fqsmaller;
break
elseif fq(c) > lut(c,cur)
cr = fqlarger;
break; %c moves to next
else %equal
cr = fqsame;
if mm < c
mm = c;
mmiil = cur;
end
end
end
if cr == fqsmaller
next = floor((cur + head)/2);
tail = cur; %adjust the range of head and tail
elseif cr == fqlarger
next = ceil((cur + tail)/2); %the rounding here has to be ceil because fq is larger than lut(:,cur),
%otherwise all-255 patches would never be matched
head = cur; %adjust the range of head and tail
end
if mm == 25 %this can happen when the initial position already matches fq exactly, so 'next' is never defined
break
end
if cur == next || cur_rec1 == next %the next might oscillate
break;
else
cur_rec1 = cur;
cur = next;
end
%fprintf('cur %d\n',cur);
end
if mm == 0
iila = [];
mma = [];
return
end
%post-process to find the repeated partial vectors
%search for previous
idx = 1;
iila = zeros(hrpatchnumber,1);
mma = zeros(hrpatchnumber,1);
iila(idx) = mmiil;
mma(idx) = mm;
bprecontinue = true;
bproccontinue = true;
presh = 0; %backward shift
procsh = 0; %forward shift
while 1
presh = presh -1;
iilpre = mmiil + presh;
if iilpre <1
bprecontinue = false;
premm = 0;
end
procsh = procsh +1;
iilproc = mmiil + procsh;
if iilproc > lutsize
bproccontinue = false;
procmm = 0;
end
if bprecontinue
diff = lut(:,iilpre) ~= fq;
if nnz(diff) == 0
premm = 25;
else
premm = find(diff,1,'first') -1;
end
end
if bproccontinue
diff = lut(:,iilproc) ~= fq;
if nnz(diff) == 0
procmm = 25;
else
procmm = find(diff,1,'first') -1;
end
end
if premm == 0 && procmm == 0
break
end
if premm > procmm
%add pre item
idx = idx + 1;
iila(idx) = iilpre;
mma(idx) = premm;
%pause the proc
bprecontinue = true;
elseif premm < procmm
%add proc item
idx = idx + 1;
iila(idx) = iilproc;
mma(idx) = procmm;
%pause the pre
bproccontinue = true;
else %premm == procmm
%add both item
idx = idx + 1;
iila(idx) = iilpre;
mma(idx) = premm;
if idx == hrpatchnumber
break
end
idx = idx + 1;
iila(idx) = iilproc;
mma(idx) = procmm;
bproccontinue = true;
bprecontinue = true;
end
if idx == hrpatchnumber
break
end
end
if idx < hrpatchnumber
iila = iila(1:idx);
mma = mma(1:idx);
end
end
function s = bmm2similarity(b,mm,SimilarityFunctionSettingNumber)
if SimilarityFunctionSettingNumber == 1
if mm >= 9
Smm = 0.9 + 0.1*(mm-9)/16;
else
Smm = 0.5 * mm/9;
end
Sb = 0.5+0.5*(log2(b)-3)/5;
s = Sb * Smm;
elseif SimilarityFunctionSettingNumber == 2
Smm = mm/25;
Sb = (log2(b)-2)/6;
s = Sb * Smm;
end
end
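%Worked example (added for clarity): with SimilarityFunctionSettingNumber == 1,
%a full 25-pixel match at the finest quantization (b = 256, mm = 25) gives
%Smm = 0.9 + 0.1*16/16 = 1.0 and Sb = 0.5 + 0.5*(8-3)/5 = 1.0, so s = 1.0,
%while a 9-pixel match at the coarsest quantization (b = 8, mm = 9) gives
%Smm = 0.9 and Sb = 0.5, so s = 0.45.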
function hrpatch = F8_ExtractAllHrPatches(img_y, para, scanr,allHRexampleimages)
disp('extracting HR patches');
%how to search parallelly to speed up?
psh = para.ps * para.zooming;
ps = para.ps;
lh = para.lh;
lw = para.lw;
s = para.zooming;
hrpatchnumber = para.NumberOfHCandidate;
hrpatch = zeros(psh,psh,lh-ps+1,lw-ps+1,hrpatchnumber);
allimages = allHRexampleimages;
%analyze which images need to be loaded
alliiset = scanr(3,:,:,:);
alliiset_uni = unique(alliiset(:)); %almost all images are used, from 1 to 1500
if alliiset_uni(1) ~= 0
alliiset_uni_pure = alliiset_uni;
else
alliiset_uni_pure = alliiset_uni(2:end);
end
for i = 1:length(alliiset_uni_pure)
ii = alliiset_uni_pure(i);
exampleimage_hr = im2double(allimages(:,:,ii));
exampleimage_lr = U3_GenerateLRImage_BlurSubSample(exampleimage_hr,para.zooming,para.Gau_sigma);
match_4D = alliiset == ii;
match_3D = reshape(match_4D,hrpatchnumber,lh-ps+1,lw-ps+1); %remove the first dimension
[d1 d2 d3] = size(match_3D); %d2 (second dimension length) is used to decode the linear positions below
[idxset posset] = find(match_3D);
setin = length(idxset);
for j = 1:setin
idx = idxset(j);
possum = posset(j);
pos3 = floor( (possum-1)/d2) +1; %the relationship: possum = (pos3-1) * d2 + pos2, pos2 in (1,d2)
pos2 = possum - (pos3-1)*d2;
rl = pos2;
cl = pos3;
sr = scanr(4,idx,rl,cl);
sc = scanr(5,idx,rl,cl);
srh = (sr-1)*s+1;
srh1 = srh + psh -1;
sch = (sc-1)*s+1;
sch1 = sch + psh-1;
%to do: compensate the HR patch to match the LR query patch
hrp = exampleimage_hr(srh:srh1,sch:sch1); %HR patch
lrq = img_y(rl:rl+ps-1,cl:cl+ps-1); %LR query patch
lrr = exampleimage_lr(sr:sr+ps-1,sc:sc+ps-1); %LR retrieved patch
chrp = hrp + imresize(lrq - lrr,s,'bilinear'); %compensate HR patch
hrpatch(:,:,rl,cl,idx) = chrp;
bVisuallyCheck = false;
if bVisuallyCheck
if ~exist('hfig','var')
hfig = figure;
else
figure(hfig);
end
subplot(1,4,1);
imshow(hrp/255);
title('hrp');
subplot(1,4,2);
imshow(lrr/255);
title('lrr');
subplot(1,4,3);
imshow(lrq/255);
title('lrq');
subplot(1,4,4);
imshow(chrp/255);
title('chrp');
keyboard
end
end
end
end
function [img_texture Reliablemap] = F11_FilterOutImproperHrPatches(img_y,hrpatch,para,scanr_self,scanra_self,scanr,scanra)
%filter out improper hr patches using similarity among lr patches
%load the self-similar data
s = para.zooming;
lh = para.lh;
lw = para.lw;
ps = para.ps;
psh = s * para.ps;
patcharea = para.ps^2;
SSnumberUpperbound = 10;
%do I still need these variables?
cqarray = zeros(32,1)/0;
for qidx = 1:6
quan = 2^(qidx-1);
cqvalue = 0.9^(qidx-1);
cqarray(quan) = cqvalue;
end
hh = lh * s;
hw = lw * s;
hrres_nomi = zeros(hh,hw);
hrres_deno = zeros(hh,hw);
maskmatrix = false(psh,psh,patcharea);
Reliablemap = zeros(hh,hw);
pshs = psh * psh;
for i=1:patcharea
[sh_notsued masklow maskhigh] = GetShGeneral(ps,i,true,s); %ps, mm, bhigh, s
maskmatrix(:,:,i) = maskhigh;
end
mhr = zeros(5*s);
r1 = 2*s+1;
r2 = 3*s;
c1 = 2*s+1;
c2 = 3*s;
mhr(r1:r2,c1:c2) = 1; %the central part
sigma = para.ehrfKernelWidth;
kernel = Sigma2Kernel(sigma);
if para.bEnablemhrf
mhrf = imfilter(mhr,kernel,'replicate');
else
mhrf = mhr;
end
noHmap = scanra == 0;
noHmapToFill = noHmap;
NHOOD = [0 1 0;
1 1 1;
0 1 0];
se = strel('arbitrary',NHOOD);
noHmapneighbor = and( imdilate(noHmap,se) ,~noHmap);
%if noHmap is all false, nothing extra needs to be filled
imb = imresize(img_y,s); %use it as the reference if no F is available
rsa = [0 -1 0 1];
csa = [1 0 -1 0];
for rl= 1:lh-ps+1 %75
fprintf('rl:%d total:%d\n',rl,lh-ps+1);
rh = (rl-1)*s+1;
rh1 = rh+psh-1;
for cl = 1:lw-ps+1 %128
ch = (cl-1)*s+1;
ch1 = ch+psh-1;
%load candidates
hin = para.NumberOfHCandidate;
H = zeros(psh,psh,hin);
HSim = zeros(hin,1);
for j=1:hin
H(:,:,j) = hrpatch(:,:,rl,cl,j); %H
HSim(j) = scanr(6,j,rl,cl);
end
%compute the number of reference patches
sspin = min(SSnumberUpperbound,scanra_self(rl,cl));
%self similar patch instance number
F = zeros(ps,ps,sspin);
FSimPure = zeros(1,sspin);
rin = 0;
for i=1:sspin
sr = scanr_self(3,i,rl,cl);
sc = scanr_self(4,i,rl,cl);
%hr candidate number
rin = rin + para.NumberOfHCandidate;
F(:,:,i) = img_y(sr:sr+ps-1,sc:sc+ps-1);
FSimPure(i) = scanr_self(5,i,rl,cl);
end
%load all of the two step patches
R = zeros(psh,psh,rin);
mms = zeros(rin,1);
mmr = zeros(rin,1);
qs = zeros(rin,1);
qr = zeros(rin,1);
FSimBaseR = zeros(rin,1);
RSim = zeros(rin,1);
idx = 0;
if sspin > 0
for i=1:sspin %sspin is the Fin
sr = scanr_self(3,i,rl,cl);
sc = scanr_self(4,i,rl,cl);
%hr candidate number
hrcanin = para.NumberOfHCandidate;
for j=1:hrcanin
idx = idx + 1;
R(:,:,idx) = hrpatch(:,:,sr,sc,j);
mms(idx) = scanr_self(1,i,rl,cl);
qs(idx) = scanr_self(2,i,rl,cl);
mmr(idx) = scanr(1,j,sr,sc);
qr(idx) = scanr(2,j,sr,sc);
FSimBaseR(idx) = FSimPure(i);
RSim(idx) = scanr(6,j,sr,sc);
end
end
else
idx = 1;
rin = 1; %use bicubic
R(:,:,idx) = imb(rh:rh1,ch:ch1);
FSimBaseR(idx) = 1; %no self-similar patches available; treat the bicubic fallback as fully similar
end
%here is a question, how to define the similarity between H and R?
%L2norm?
hscore = zeros(hin,1);
for i=1:hin
theH = H(:,:,i);
for j=1:rin
theR = R(:,:,j);
spf = FSimBaseR(j);
%similarity between H and R
diff = theH - theR;
L2N = norm(diff(:));
shr = exp(- L2N/pshs);
hscore(i) = hscore(i) + shr*spf;
end
end
[maxscore idx] = max(hscore);
%take this as the example
Reliablemap(rh:rh1,ch:ch1) = Reliablemap(rh:rh1,ch:ch1) + HSim(idx)*mhrf;
if hin > 0 %some patches can't find H
hrres_nomi(rh:rh1,ch:ch1) = hrres_nomi(rh:rh1,ch:ch1) + H(:,:,idx).*mhrf;
hrres_deno(rh:rh1,ch:ch1) = hrres_deno(rh:rh1,ch:ch1) + mhrf;
end
%if any of its neighbor belongs to noHmap, copy additional region to hrres
%if the pixel belongs to noHmapneighbor, then expand the copy regions
if noHmapneighbor(rl,cl) == true
mhrfspecial = zeros(5*s);
mhrfspecial(r1:r2,c1:c2) = 1;
for i=1:4
rs = rsa(i);
cs = csa(i);
checkr = rl+rs;
checkc = cl+cs;
if checkr > 0 && checkr < lh-ps+1 && checkc >0 && checkc <lw-ps+1 && noHmapToFill(checkr,checkc)
%recompute the mhrf and disable the noHmapToFill
noHmapToFill(checkr,checkc) = false;
switch i
case 1
mhrfspecial(r1:r2,c1+s:c2+s) = 1;
case 2
mhrfspecial(r1-s:r2-s,c1:c2) = 1;
case 3
mhrfspecial(r1:r2,c1-s:c2-s) = 1;
case 4
mhrfspecial(r1+s:r2+s,c1:c2) = 1;
end
end
end
mhrfspecial = imfilter(mhrfspecial,kernel,'replicate');
hrres_nomi(rh:rh1,ch:ch1) = hrres_nomi(rh:rh1,ch:ch1) + H(:,:,idx).*mhrfspecial;
hrres_deno(rh:rh1,ch:ch1) = hrres_deno(rh:rh1,ch:ch1) + mhrfspecial;
end
end
end
hrres = hrres_nomi ./hrres_deno;
exception = isnan(hrres);
hrres_filtered = hrres;
hrres_filtered(exception) = 0;
img_texture = (hrres_filtered .* (1-exception) + imb .*exception);
end
function [scanr_self scanra_self] = F22_SearchForSelfSimilarPatchesL2Norm(img_y,para)
ps = para.ps;
patcharea = ps^2;
[lh lw] = size(img_y);
%Find self similar patches
Fpatchnumber = 10;
scanr_self = zeros(5,Fpatchnumber,lh-ps+1,lw-ps+1); %scan results: mm, quan, r,c, similarity
scanra_self = Fpatchnumber * ones(lh-ps+1,lw-ps+1); %scan results active
in = (lh-ps+1)*(lw-ps+1);
fs = zeros(patcharea,in);
rec = zeros(2,in);
idx = 0;
for rl=1:lh-ps+1
for cl=1:lw-ps+1
idx = idx + 1;
rec(:,idx) = [rl;cl];
fs(:,idx) = reshape(img_y(rl:rl+ps-1,cl:cl+ps-1),patcharea,1);
end
end
%search
idx = 0;
for rl=1:lh-ps+1
for cl=1:lw-ps+1
idx = idx + 1;
fprintf('idx %d in %d\n',idx,in);
qf = fs(:,idx);
diff = fs - repmat(qf,1,in);
sqr = sum(diff.^2);
[ssqr ix] = sort(sqr);
saveidx = 0;
for j=1:11
indexinsort = ix(j);
sr = rec(1,indexinsort);
sc = rec(2,indexinsort);
if sr ~= rl || sc ~= cl
saveidx = saveidx + 1;
l2norm = sqrt(ssqr(j));
similarity = exp(-l2norm/25);
scanr_self(:,saveidx,rl,cl) = cat(1,-1,-1,sr,sc,similarity);
end
end
end
end
end
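%Note (added for clarity): the similarity above is exp(-||p_query - p_found||_2 / 25),
%where the constant 25 matches the area of the 5x5 patches used here, so an
%identical patch scores 1 and the score decays with the L2 distance between patches.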
%this function may be replaced by IF1a
function [img_edge ProbOfEdge] = IF1_EdgePreserving(img_y,para,zooming,Gau_sigma)
para.LowMagSuppression = 0;
para.DistanceUpperBound = 2.0;
para.ContrastEnhenceCoef = 1.0;
I_s = IF2_SmoothnessPreservingFunction(img_y,para,zooming);
T = F15_ComputeSRSSD(I_s);
Dissimilarity = EvaluateDissimilarity8(I_s);
Grad_high_initial = Img2Grad(I_s);
%SaveFolder = para.tuningfolder;
[h w] = size(T);
StatisticsFolder = fullfile('EdgePriors');
LoadFileName = sprintf('Statistics_Sc%d_Si%0.1f.mat',zooming,Gau_sigma);
LoadData = load(fullfile(StatisticsFolder,LoadFileName));
Statistics = LoadData.Statistics;
RidgeMap = edge(I_s,'canny',[0 0.01],0.05);
%filter out small ridges and non-maximum ridges
RidgeMap_filtered = RidgeMap;
[r_set c_set] = find(RidgeMap);
SetLength = length(r_set);
for j=1:SetLength
r = r_set(j);
c = c_set(j);
CenterMagValue = T(r,c);
if CenterMagValue < para.LowMagSuppression
RidgeMap_filtered(r,c) = false;
end
end
[r_set c_set] = find(RidgeMap_filtered);
SetLength = length(r_set);
[X Y] = meshgrid(1:11,1:11);
DistPatch = sqrt((X-6).^2 + (Y-6).^2);
DistMap = inf(h,w);
UsedPixel = false(h,w);
CenterCoor = zeros(h,w,2);
%Compute DistMap and CenterCoor
[r_set c_set] = find(RidgeMap_filtered);
for j=1:SetLength
r = r_set(j);
r1 = r-5;
r2 = r+5;
c = c_set(j);
c1 = c-5;
c2 = c+5;
if r1>=1 && r2<=h && c1>=1 && c2<=w %discard boundary?
MapPatch = DistMap(r1:r2,c1:c2);
MinPatch = min(MapPatch, DistPatch);
DistMap(r1:r2,c1:c2) = MinPatch;
UsedPixel(r1:r2,c1:c2) = true;
ChangedPixels = MinPatch < MapPatch;
OriginalCenterCoorPatch = CenterCoor(r1:r2,c1:c2,:);
NewCoor = cat(3,r*ones(11), c*ones(11));
NewCenterCoorPatch = OriginalCenterCoorPatch .* repmat(1-ChangedPixels,[1,1,2]) + NewCoor .* repmat(ChangedPixels,[1,1,2]);
CenterCoor(r1:r2,c1:c2,:) = NewCenterCoorPatch;
end
end
%Convert dist to table index
TableIndexMap = zeros(h,w);
b = unique(DistPatch(:));
for i=1:length(b)
SetPixels = DistMap == b(i);
TableIndexMap(SetPixels) = i;
end
%mapping (T_p, T_r, d) to S_p
[r_set c_set] = find(UsedPixel);
SetLength = length(r_set);
UpdatedPixel = false(h,w);
S = zeros(h,w);
for i=1:SetLength
r = r_set(i);
c = c_set(i);
r_Center = CenterCoor(r,c,1);
c_Center = CenterCoor(r,c,2);
CurrentMagValue = T(r,c);
BinIdx_Current = ceil(CurrentMagValue /0.005);
%zebra images can have very strong magnitudes
if BinIdx_Current > 100
BinIdx_Current = 100;
end
TableIndex = TableIndexMap(r,c);
if TableIndex > para.DistanceUpperBound
continue
end
CenterMagValue = T(r_Center,c_Center);
%Low Mag Edge suppression
if CenterMagValue < para.LowMagSuppression
continue
end
BinIdx_Center = ceil(CenterMagValue /0.005);
if BinIdx_Center > 100
BinIdx_Center = 100;
end
%consult the table
if TableIndex == 1 %1 is the index of b(1) where dist = 0, enhance the contrast of pixel on edge
S_p = para.ContrastEnhenceCoef * Statistics(TableIndex).EstimatedMag(BinIdx_Current,BinIdx_Center);
else
S_p = Statistics(TableIndex).EstimatedMag(BinIdx_Current,BinIdx_Center);
end
if ~isnan(S_p)
UpdatedPixel(r,c) = true;
S(r,c) = S_p;
end
end
%Record the RidgeMapMagValue, for computing the ProbOfMag
%the Mag is the consulted Mag
%here is the problem: when S is very strong, the affected range of ProbMagOut exceeds 1 pixel
RidgeMapMagValue = zeros(h,w);
for i=1:SetLength
r = r_set(i);
c = c_set(i);
r_Center = CenterCoor(r,c,1);
c_Center = CenterCoor(r,c,2);
RidgeMapMagValue(r,c) = S(r_Center,c_Center);
end
S(~UpdatedPixel) = T(~UpdatedPixel);
img_in = I_s;
if min(Dissimilarity(:)) == 0
d = Dissimilarity + 1e-6; %avoid 0 case; some images may have d(:,:,1) as 0
else
d = Dissimilarity;
end
ratio = d ./ repmat(d(:,:,1),[1,1,8]);
%here is the problem, I need to amplify the gradient directionally
Grad_in = Img2Grad(img_in);
Product = Grad_in .* ratio;
Sqr = Product.^2;
Sum = sum(Sqr,3);
Sqrt = sqrt(Sum); %the Sqrt might be 0, because Grad_in may be pure 0;
r1 = S ./Sqrt;
r1(isnan(r1)) = 0;
Grad_exp = Grad_high_initial .*( ratio .*(repmat(r1,[1,1,8])));
%consolidate inconsistent gradients
NewGrad_exp = zeros(h,w,8);
for k=1:4
switch k
case 1
ShiftOp = [0 -1];
case 2
ShiftOp = [1 -1];
case 3
ShiftOp = [1 0];
case 4
ShiftOp = [1 1];
end
k2 =k+4;
Grad1 = Grad_exp(:,:,k);
Grad2 = Grad_exp(:,:,k2);
Grad2Shift = circshift(Grad2,ShiftOp);
Grad1Abs = abs(Grad1);
Grad2AbsShift = abs(Grad2Shift);
Grad1Larger = Grad1Abs > Grad2AbsShift;
Grad2Larger = Grad2AbsShift > Grad1Abs;
NewGrad1 = Grad1 .* Grad1Larger + (-Grad2Shift) .* Grad2Larger;
NewGrad2Shift = Grad2Shift .* Grad2Larger + (-Grad1) .* Grad1Larger;
NewGrad2 = circshift(NewGrad2Shift,-ShiftOp);
NewGrad_exp(:,:,k) = NewGrad1;
NewGrad_exp(:,:,k2) = NewGrad2;
end
%current problem is the over-enhanced gradient (NewMagExp too large)
para.bReport = true;
img_edge = GenerateIntensityFromGradient(img_y,img_in,NewGrad_exp,para,zooming,Gau_sigma);
%compute the Map of edge weight
lambda_m = 2;
m0 = 0;
ProbMagOut = lambda_m * RidgeMapMagValue + m0;
lambda_d = 0.25;
d0 = 0.25;
ProbDistMap = exp(- (lambda_d * DistMap + d0) ); %this coefficient should be decided by the zooming
Product = ProbMagOut .* ProbDistMap;
ProbOfEdge = min(Product,1); %the two terms are not sufficient; direction is not taken into consideration
end
function [gradients_edge weightmap_edge] = IF1a_EdgePreserving(img_y,para,zooming,Gau_sigma)
para.LowMagSuppression = 0;
para.DistanceUpperBound = 2.0;
para.ContrastEnhenceCoef = 1.0;
I_s = IF2_SmoothnessPreservingFunction(img_y,para,zooming);
T = F15_ComputeSRSSD(I_s);
Dissimilarity = EvaluateDissimilarity8(I_s);
Grad_high_initial = Img2Grad(I_s);
%SaveFolder = para.tuningfolder;
[h w] = size(T);
StatisticsFolder = fullfile('EdgePriors');
LoadFileName = sprintf('Statistics_Sc%d_Si%0.1f.mat',zooming,Gau_sigma);
LoadData = load(fullfile(StatisticsFolder,LoadFileName));
Statistics = LoadData.Statistics;
RidgeMap = edge(I_s,'canny',[0 0.01],0.05);
%filter out small ridges and non-maximum ridges
RidgeMap_filtered = RidgeMap;
[r_set c_set] = find(RidgeMap);
SetLength = length(r_set);
for j=1:SetLength
r = r_set(j);
c = c_set(j);
CenterMagValue = T(r,c);
if CenterMagValue < para.LowMagSuppression
RidgeMap_filtered(r,c) = false;
end
end
[r_set c_set] = find(RidgeMap_filtered);
SetLength = length(r_set);
[X Y] = meshgrid(1:11,1:11);
DistPatch = sqrt((X-6).^2 + (Y-6).^2);
DistMap = inf(h,w);
UsedPixel = false(h,w);
CenterCoor = zeros(h,w,2);
%Compute DistMap and CenterCoor
[r_set c_set] = find(RidgeMap_filtered);
for j=1:SetLength
r = r_set(j);
r1 = r-5;
r2 = r+5;
c = c_set(j);
c1 = c-5;
c2 = c+5;
if r1>=1 && r2<=h && c1>=1 && c2<=w %discard boundary?
MapPatch = DistMap(r1:r2,c1:c2);
MinPatch = min(MapPatch, DistPatch);
DistMap(r1:r2,c1:c2) = MinPatch;
UsedPixel(r1:r2,c1:c2) = true;
ChangedPixels = MinPatch < MapPatch;
OriginalCenterCoorPatch = CenterCoor(r1:r2,c1:c2,:);
NewCoor = cat(3,r*ones(11), c*ones(11));
NewCenterCoorPatch = OriginalCenterCoorPatch .* repmat(1-ChangedPixels,[1,1,2]) + NewCoor .* repmat(ChangedPixels,[1,1,2]);
CenterCoor(r1:r2,c1:c2,:) = NewCenterCoorPatch;
end
end
%Convert dist to table index
TableIndexMap = zeros(h,w);
b = unique(DistPatch(:));
for i=1:length(b)
SetPixels = DistMap == b(i);
TableIndexMap(SetPixels) = i;
end
%mapping (T_p, T_r, d) to S_p
[r_set c_set] = find(UsedPixel);
SetLength = length(r_set);
UpdatedPixel = false(h,w);
S = zeros(h,w);
for i=1:SetLength
r = r_set(i);
c = c_set(i);
r_Center = CenterCoor(r,c,1);
c_Center = CenterCoor(r,c,2);
CurrentMagValue = T(r,c);
BinIdx_Current = ceil(CurrentMagValue /0.005);
%zebra images can have very strong magnitudes
if BinIdx_Current > 100
BinIdx_Current = 100;
end
TableIndex = TableIndexMap(r,c);
if TableIndex > para.DistanceUpperBound
continue
end
CenterMagValue = T(r_Center,c_Center);
%Low Mag Edge suppression
if CenterMagValue < para.LowMagSuppression
continue
end
BinIdx_Center = ceil(CenterMagValue /0.005);
if BinIdx_Center > 100
BinIdx_Center = 100;
end
%consult the table
if TableIndex == 1 %1 is the index of b(1) where dist = 0, enhance the contrast of pixel on edge
S_p = para.ContrastEnhenceCoef * Statistics(TableIndex).EstimatedMag(BinIdx_Current,BinIdx_Center);
else
S_p = Statistics(TableIndex).EstimatedMag(BinIdx_Current,BinIdx_Center);
end
if ~isnan(S_p)
UpdatedPixel(r,c) = true;
S(r,c) = S_p;
end
end
%Record the RidgeMapMagValue, for computing the ProbOfMag
%the Mag is the consulted Mag
%here is the problem: when S is very strong, the affected range of ProbMagOut exceeds 1 pixel
RidgeMapMagValue = zeros(h,w);
for i=1:SetLength
r = r_set(i);
c = c_set(i);
r_Center = CenterCoor(r,c,1);
c_Center = CenterCoor(r,c,2);
RidgeMapMagValue(r,c) = S(r_Center,c_Center);
end
S(~UpdatedPixel) = T(~UpdatedPixel);
img_in = I_s;
if min(Dissimilarity(:)) == 0
d = Dissimilarity + 1e-6; %avoid 0 case; some images may have d(:,:,1) as 0
else
d = Dissimilarity;
end
ratio = d ./ repmat(d(:,:,1),[1,1,8]);
%here is the problem, I need to amplify the gradient directionally
Grad_in = Img2Grad(img_in);
Product = Grad_in .* ratio;
Sqr = Product.^2;
Sum = sum(Sqr,3);
Sqrt = sqrt(Sum); %the Sqrt might be 0, because Grad_in may be pure 0;
r1 = S ./Sqrt;
r1(isnan(r1)) = 0;
Grad_exp = Grad_high_initial .*( ratio .*(repmat(r1,[1,1,8])));
%consolidate inconsistent gradients
NewGrad_exp = zeros(h,w,8);
for k=1:4
switch k
case 1
ShiftOp = [0 -1];
case 2
ShiftOp = [1 -1];
case 3
ShiftOp = [1 0];
case 4
ShiftOp = [1 1];
end
k2 =k+4;
Grad1 = Grad_exp(:,:,k);
Grad2 = Grad_exp(:,:,k2);
Grad2Shift = circshift(Grad2,ShiftOp);
Grad1Abs = abs(Grad1);
Grad2AbsShift = abs(Grad2Shift);
Grad1Larger = Grad1Abs > Grad2AbsShift;
Grad2Larger = Grad2AbsShift > Grad1Abs;
NewGrad1 = Grad1 .* Grad1Larger + (-Grad2Shift) .* Grad2Larger;
NewGrad2Shift = Grad2Shift .* Grad2Larger + (-Grad1) .* Grad1Larger;
NewGrad2 = circshift(NewGrad2Shift,-ShiftOp);
NewGrad_exp(:,:,k) = NewGrad1;
NewGrad_exp(:,:,k2) = NewGrad2;
end
%current problem is the over-enhanced gradient (NewMagExp too large)
gradients_edge = NewGrad_exp;
% para.bReport = true;
% img_edge = GenerateIntensityFromGradient(img_y,img_in,NewGrad_exp,para,zooming,Gau_sigma);
%compute the Map of edge weight
lambda_m = 2;
m0 = 0;
ProbMagOut = lambda_m * RidgeMapMagValue + m0;
lambda_d = 0.25;
d0 = 0.25;
ProbDistMap = exp(- (lambda_d * DistMap + d0) ); %this coefficient should be decided by the zooming
Product = ProbMagOut .* ProbDistMap;
weightmap_edge = min(Product,1); %the two terms are not sufficient; direction is not taken into consideration
end
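%In equation form (added for clarity), the edge weight computed above is
%  weightmap_edge = min( (lambda_m*RidgeMapMagValue + m0) .* exp(-(lambda_d*DistMap + d0)), 1 )
%with lambda_m = 2, m0 = 0, lambda_d = 0.25, d0 = 0.25, i.e. the weight grows with the
%consulted edge magnitude and decays with the distance to the nearest ridge pixel.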
function img_out = IF2_SmoothnessPreservingFunction(img_y,para,zooming)
img_bb = imresize(img_y,zooming);
Kernel = Sigma2Kernel(para.Gau_sigma);
%compute the similarity from low
Coef = 10;
PatchSize = 3;
Sqrt_low = SimilarityEvaluation(img_y,PatchSize);
Similarity_low = exp(-Sqrt_low*Coef);
[h_high w_high] = size(img_bb);
ExpectedSimilarity = zeros(h_high,w_high,16);
%upsample the similarity
for dir=1:16
ExpectedSimilarity(:,:,dir) = imresize(Similarity_low(:,:,dir),zooming,'bilinear');
end
%refine the Grad_high by Similarity_high
LoopNumber = 10;
img = img_bb;
for loop = 1:LoopNumber
%refine gradient by ExpectedSimilarity
ValueSum = zeros(h_high,w_high);
WeightSum = sum(ExpectedSimilarity,3); %if the weight sum is low, it is unsuitable to generate the grad by interpolation
for dir = 1:16
[MoveOp N] = GetMoveKernel16(dir);
if N == 1
MovedData = imfilter(img,MoveOp{1},'replicate');
else %N ==2
MovedData1 = imfilter(img,MoveOp{1},'replicate');
MovedData2 = imfilter(img,MoveOp{2},'replicate');
MovedData = (MovedData1 + MovedData2)/2;
end
Product = MovedData .* ExpectedSimilarity(:,:,dir);
ValueSum = ValueSum + Product;
end
I = ValueSum ./ WeightSum;
%intensity compensate
Diff = imresize(imfilter(I,Kernel,'replicate'),1/zooming, 'nearest') - img_y;
UpSampled = imresize(Diff,zooming,'bilinear');
Grad0 = imfilter(UpSampled,Kernel,'replicate');
Term_LowHigh_in = ComputeFunctionValue_lowhigh(I,img_y,para.Gau_sigma);
I_in = I; %make a copy, restore the value if all beta fails
bDecrease = false;
tau = 0.2;
for line_search_loop=1:10
%line search for the beta, fixed 1/32 is not a good choice
I = I_in - tau * Grad0;
Term_LowHigh_out = ComputeFunctionValue_lowhigh(I,img_y,para.Gau_sigma);
if Term_LowHigh_out < Term_LowHigh_in
bDecrease = true;
break;
else
tau = tau * 0.5;
end
end
if bDecrease == true
I_best = I;
else
break;
end
% fprintf('loop=%d, LowHihg_in=%0.1f, LowHigh_out=%0.1f,\n',loop,Term_LowHigh_in,Term_LowHigh_out);
% imwrite(I,fullfile(SaveFolder, [num2str(loop) '_GenIntenFromGrad.png']));
img = I_best;
end
img_out = img;
end
function SqrtData = SimilarityEvaluation(Img_in,PatchSize)
HalfPatchSize = (PatchSize-1)/2;
[h w] = size(Img_in);
SqrtData = zeros(h,w,16);
f3x3 = ones(3);
for i = 1:16
[DiffOp N] = RetGradientKernel16(i);
if N == 1
Diff = imfilter(Img_in,DiffOp{1},'symmetric');
else
Diff1 = imfilter(Img_in,DiffOp{1},'symmetric');
Diff2 = imfilter(Img_in,DiffOp{2},'symmetric');
Diff = (Diff1+Diff2)/2;
end
Sqr = Diff.^2;
Sum = imfilter(Sqr,f3x3,'replicate');
Mean = Sum/9;
SqrtData(:,:,i) = sqrt(Mean);
end
end
function [DiffOp N] = RetGradientKernel16(dir)
DiffOp = cell(2,1);
f{1} = [0 0 0;
0 -1 1;
0 0 0];
f{2} = [0 0 1;
0 -1 0;
0 0 0];
f{3} = [0 1 0;
0 -1 0;
0 0 0];
f{4} = [1 0 0;
0 -1 0;
0 0 0];
f{5} = [0 0 0;
1 -1 0;
0 0 0];
f{6} = [0 0 0;
0 -1 0;
1 0 0];
f{7} = [0 0 0;
0 -1 0;
0 1 0];
f{8} = [0 0 0;
0 -1 0;
0 0 1];
switch dir
case 1
N = 1;
DiffOp{1} = f{1};
DiffOp{2} = [];
case 2
N = 2;
DiffOp{1} = f{1};
DiffOp{2} = f{2};
case 3
N = 1;
DiffOp{1} = f{2};
DiffOp{2} = [];
case 4
N = 2;
DiffOp{1} = f{2};
DiffOp{2} = f{3};
case 5
N = 1;
DiffOp{1} = f{3};
DiffOp{2} = [];
case 6
N = 2;
DiffOp{1} = f{3};
DiffOp{2} = f{4};
case 7
N = 1;
DiffOp{1} = f{4};
DiffOp{2} = [];
case 8
N = 2;
DiffOp{1} = f{4};
DiffOp{2} = f{5};
case 9
N = 1;
DiffOp{1} = f{5};
DiffOp{2} = [];
case 10
N = 2;
DiffOp{1} = f{5};
DiffOp{2} = f{6};
case 11
DiffOp{1} = f{6};
DiffOp{2} = [];
N = 1;
case 12
N = 2;
DiffOp{1} = f{6};
DiffOp{2} = f{7};
case 13
N = 1;
DiffOp{1} = f{7};
DiffOp{2} = [];
case 14
N = 2;
DiffOp{1} = f{7};
DiffOp{2} = f{8};
case 15
DiffOp{1} = f{8};
DiffOp{2} = [];
N = 1;
case 16
N = 2;
DiffOp{1} = f{8};
DiffOp{2} = f{1};
end
end
function [Kernel N] = GetMoveKernel16(dir)
Kernel = cell(2,1);
f{1} = [0 0 0;
0 0 1;
0 0 0];
f{2} = [0 0 1;
0 0 0;
0 0 0];
f{3} = [0 1 0;
0 0 0;
0 0 0];
f{4} = [1 0 0;
0 0 0;
0 0 0];
f{5} = [0 0 0;
1 0 0;
0 0 0];
f{6} = [0 0 0;
0 0 0;
1 0 0];
f{7} = [0 0 0;
0 0 0;
0 1 0];
f{8} = [0 0 0;
0 0 0;
0 0 1];
switch dir
case 1
N = 1;
Kernel{1} = f{1};
Kernel{2} = [];
case 2
N = 2;
Kernel{1} = f{1};
Kernel{2} = f{2};
case 3
N = 1;
Kernel{1} = f{2};
Kernel{2} = [];
case 4
N = 2;
Kernel{1} = f{2};
Kernel{2} = f{3};
case 5
N = 1;
Kernel{1} = f{3};
Kernel{2} = [];
case 6
N = 2;
Kernel{1} = f{3};
Kernel{2} = f{4};
case 7
N = 1;
Kernel{1} = f{4};
Kernel{2} = [];
case 8
N = 2;
Kernel{1} = f{4};
Kernel{2} = f{5};
case 9
N = 1;
Kernel{1} = f{5};
Kernel{2} = [];
case 10
N = 2;
Kernel{1} = f{5};
Kernel{2} = f{6};
case 11
Kernel{1} = f{6};
Kernel{2} = [];
N = 1;
case 12
N = 2;
Kernel{1} = f{6};
Kernel{2} = f{7};
case 13
N = 1;
Kernel{1} = f{7};
Kernel{2} = [];
case 14
N = 2;
Kernel{1} = f{7};
Kernel{2} = f{8};
case 15
Kernel{1} = f{8};
Kernel{2} = [];
N = 1;
case 16
N = 2;
Kernel{1} = f{8};
Kernel{2} = f{1};
end
end
function f = ComputeFunctionValue_lowhigh(img,img_low,Gau_sigma)
KernelSize = ceil(Gau_sigma) * 3 + 1;
G = fspecial('gaussian',KernelSize,Gau_sigma);
Conv = imfilter(img,G,'replicate');
SubSample = imresize(Conv,size(img_low),'antialias',false);
Diff = SubSample - img_low;
Sqr = Diff.^2;
f = sum(Sqr(:));
end
function Grad = Img2Grad(img)
[h w] = size(img);
Grad = zeros(h,w,8);
DiffOp = RetGradientKernel();
for i=1:8
Grad(:,:,i) = imfilter(img,DiffOp{i},'replicate');
end
end
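%Note (added for clarity): the eight channels of Grad follow the kernel order in
%RetGradientKernel below: 1 right, 2 upper-right, 3 up, 4 upper-left, 5 left,
%6 lower-left, 7 down, 8 lower-right, each computed as the forward difference
%I(neighbor) - I(center).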
function f = RetGradientKernel()
f = cell(8,1);
f{1} = [0 0 0;
0 -1 1;
0 0 0];
f{2} = [0 0 1;
0 -1 0;
0 0 0];
f{3} = [0 1 0;
0 -1 0;
0 0 0];
f{4} = [1 0 0;
0 -1 0;
0 0 0];
f{5} = [0 0 0;
1 -1 0;
0 0 0];
f{6} = [0 0 0;
0 -1 0;
1 0 0];
f{7} = [0 0 0;
0 -1 0;
0 1 0];
f{8} = [0 0 0;
0 -1 0;
0 0 1];
end
function Dissimilarity = EvaluateDissimilarity8(Img_in,PatchSize)
if ~exist('PatchSize','var')
PatchSize = 3;
end
[h w] = size(Img_in);
Dissimilarity = zeros(h,w,8);
f3x3 = ones(PatchSize)/(PatchSize^2);
for i = 1:8
DiffOp = RetGradientKernel8(i);
Diff = imfilter(Img_in,DiffOp,'symmetric');
Sqr = Diff.^2;
Sum = imfilter(Sqr,f3x3,'replicate');
Dissimilarity(:,:,i) = sqrt(Sum);
end
end
function DiffOp = RetGradientKernel8(dir)
f{1} = [0 0 0;
0 -1 1;
0 0 0];
f{2} = [0 0 1;
0 -1 0;
0 0 0];
f{3} = [0 1 0;
0 -1 0;
0 0 0];
f{4} = [1 0 0;
0 -1 0;
0 0 0];
f{5} = [0 0 0;
1 -1 0;
0 0 0];
f{6} = [0 0 0;
0 -1 0;
1 0 0];
f{7} = [0 0 0;
0 -1 0;
0 1 0];
f{8} = [0 0 0;
0 -1 0;
0 0 1];
DiffOp = f{dir};
end
function img_out = GenerateIntensityFromGradient(img_y,img_initial,Grad_exp,para,zooming,Gau_sigma,bReport)
if ~isfield(para,'LoopNumber')
para.LoopNumber = 30;
end
if ~isfield(para,'beta0')
beta0 = 1;
else
beta0 = para.beta0;
end
if ~isfield(para,'beta1')
beta1 = 1;
else
beta1 = para.beta1;
end
Kernel = Sigma2Kernel(Gau_sigma);
%compute gradient
I = img_initial;
I_best = I;
for loop = 1:para.LoopNumber
%refine image by patch similarity
%refine image by low-high intensity
Diff = imresize(imfilter(I,Kernel,'replicate'),1/zooming, 'nearest') - img_y;
UpSampled = imresize(Diff,zooming,'bilinear');
Grad0 = imfilter(UpSampled,Kernel,'replicate');
%refine image by expected gradient
%Gradient decent
%I = ModifyByGradient(I,Grad_exp);
OptDir = Grad_exp - Img2Grad(I);
Grad1 = sum(OptDir,3);
Grad_all = beta0 * Grad0 + beta1 * Grad1;
I_in = I; %make a copy, restore the value if all beta fails
bDecrease = false;
tau = 0.2;
Term_Grad_in = ComputeFunctionValue_Grad(I,Grad_exp);
Term_LowHigh_in = ComputeFunctionValue_lowhigh(I,img_y,para.Gau_sigma);
Term_all_in = Term_LowHigh_in * beta0 + Term_Grad_in * beta1;
for line_search_loop=1:10
%line search for the beta, fixed 1/32 is not a good choice
I = I_in - tau * Grad_all;
Term_Grad_out = ComputeFunctionValue_Grad(I,Grad_exp);
Term_LowHigh_out = ComputeFunctionValue_lowhigh(I,img_y,para.Gau_sigma);
Term_all_out = Term_LowHigh_out * beta0 + Term_Grad_out * beta1;
if Term_all_out < Term_all_in
bDecrease = true;
break;
else
tau = tau * 0.5;
end
end
if bDecrease == true
I_best = I;
else
break;
end
if bReport
fprintf(['loop=%d, all_in=%0.1f, all_out=%0.1f, LowHigh_in=%0.1f, LowHigh_out=%0.1f, ' ...
'Grad_in=%0.1f, Grad_out=%0.1f\n'],loop,Term_all_in,Term_all_out,Term_LowHigh_in,Term_LowHigh_out, ...
Term_Grad_in,Term_Grad_out);
end
end
img_out = I_best;
end
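%For reference (added, paraphrasing the code above): each loop takes one step of
%approximate gradient descent on
%  E(I) = beta0 * sum( (downsample(G*I) - img_y).^2 ) + beta1 * || Img2Grad(I) - Grad_exp ||_F
%where G is the Gaussian kernel. Grad0 approximates the gradient of the first term
%(upsample-and-blur of the LR residual), Grad1 = sum(Grad_exp - Img2Grad(I),3) is a
%heuristic descent direction for the second, and the step size tau is halved until
%E decreases or ten halvings fail, at which point the loop stops.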
function f = ComputeFunctionValue_Grad(img, Grad_exp)
Grad = Img2Grad(img);
Diff = Grad - Grad_exp;
Sqrt = Diff .^2;
f = sqrt(sum(Sqrt(:)));
end
function lrimg = U3_GenerateLRImage_BlurSubSample(hrimg,s,sigma)
[h w d] = size(hrimg);
htrim = h-mod(h,s);
wtrim = w-mod(w,s);
imtrim = hrimg(1:htrim,1:wtrim,1:d);
%detect image type
kernel = Sigma2Kernel(sigma);
if d == 1
blurimg = imfilter(imtrim,kernel,'replicate');
elseif d == 3
blurimg = zeros(htrim,wtrim,d);
for i=1:3
blurimg(:,:,i) = imfilter(imtrim(:,:,i),kernel,'replicate');
end
end
lrimg = imresize(blurimg,1/s,'bilinear','antialias',false);
end
function img_bp = F11_BackProjection(img_lr, img_hr, Gau_sigma, iternum)
[h_hr] = size(img_hr,1);
[h_lr] = size(img_lr,1);
zooming = h_hr/h_lr;
for i=1:iternum
img_lr_gen = U3_GenerateLRImage_BlurSubSample(img_hr,zooming,Gau_sigma);
diff_lr = img_lr - img_lr_gen;
diff_hr = imresize(diff_lr,zooming,'bilinear');
img_hr = img_hr + diff_hr;
end
img_bp = img_hr;
end
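%In equation form (added for clarity), each iteration applies the classic
%back-projection update
%  I_h <- I_h + upsample( I_l - downsample(blur(I_h)) )
%where blur-and-downsample is U3_GenerateLRImage_BlurSubSample and the upsampling
%here is plain bilinear interpolation.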
function img_bp = IF3_BackProjection(img_lr, img_hr, Gau_sigma, iternum,bReport)
[h_hr] = size(img_hr,1);
[h_lr] = size(img_lr,1);
zooming = h_hr/h_lr;
for i=1:iternum
img_lr_gen = F19_GenerateLRImage_BlurSubSample(img_hr,zooming,Gau_sigma);
diff_lr = img_lr - img_lr_gen;
term_diff_lr_SSD = sum(sum(diff_lr.^2));
%here is the problem. How to guide this step? Assume the relative intensity is unchanged?
diff_hr = imresize(diff_lr,zooming,'bilinear');
img_hr = img_hr + diff_hr;
img_lr_new = F19_GenerateLRImage_BlurSubSample(img_hr,zooming,Gau_sigma);
diff_lr_new = img_lr - img_lr_new;
term_diff_lr_SSD_afteronebackprojection = sum(sum(diff_lr_new.^2));
if bReport
fprintf('backproject iteration=%d, term_before=%0.1f, term_after=%0.1f\n', ...
i,term_diff_lr_SSD,term_diff_lr_SSD_afteronebackprojection);
end
end
img_bp = img_hr;
end
|
github | Liusifei/Face-Hallucination-master | U9_DrawMultiPieLandmarkVisualCheck.m | .m | Face-Hallucination-master/Code/Ours2/U9_DrawMultiPieLandmarkVisualCheck.m | 504 | utf_8 | 3f1349d55d7d68600170460c93193816 |
%Chih-Yuan Yang
%10/01/12
function hfig = U9_DrawMultiPieLandmarkVisualCheck(image,landmark,bshowtext)
%show image
hfig = figure;
imshow(image);
axis off image
hold on
%show landmark
plot(landmark(:,1),landmark(:,2),'w.','MarkerSize',30); %why does the color change after hfig returns?
%show text
if bshowtext
setsize = size(landmark,1);
for i=1:setsize;
text(landmark(i,1),landmark(i,2), sprintf('%d',i));
end
end
end
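%Example (illustrative; the file name and coordinates are assumptions):
%  img = imread('multipie_face.png');
%  landmark = [120 85; 180 84; 150 130];   %N-by-2 array of [x y] landmark positions
%  hfig = U9_DrawMultiPieLandmarkVisualCheck(img, landmark, true);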
|
github | Liusifei/Face-Hallucination-master | F11e_BackProjection_GaussianKernel.m | .m | Face-Hallucination-master/Code/Ours2/F11e_BackProjection_GaussianKernel.m | 1,422 | utf_8 | 91c09fc297bd57f1030101b30351e20b |
%Chih-Yuan Yang
%07/20/14
%F11c: controlled by iternum
%F11d: controled by TolF
%F11e: I replace the IF5 internal function by F26 since they are identical.
% I update the code to support the scaling factor of 3. I replace the F19a
% by F19c since F19c is simpler and does not require F20.
function img_bp = F11e_BackProjection_GaussianKernel(img_lr, img_hr, Gau_sigma, iternum,bReport,TolF)
[h_hr] = size(img_hr,1);
[h_lr] = size(img_lr,1);
zooming = h_hr/h_lr;
for i=1:iternum
img_lr_gen = F19c_GenerateLRImage_GaussianKernel(img_hr,zooming,Gau_sigma);
diff_lr = img_lr - img_lr_gen;
RMSE_diff_lr = sqrt(mean2(diff_lr.^2));
diff_hr = F26_UpsampleAndBlur(diff_lr,zooming, Gau_sigma);
%diff_hr = imresize(diff_lr,zooming,'bilinear');
img_hr = img_hr + diff_hr;
img_lr_new = F19c_GenerateLRImage_GaussianKernel(img_hr,zooming,Gau_sigma);
diff_lr_new = img_lr - img_lr_new;
RMSE_diff_lr_afteronebackprojection = sqrt(mean2(diff_lr_new.^2));
if bReport
fprintf('backproject iteration=%d, RMSE_before=%0.6f, RMSE_after=%0.6f\n', ...
i,RMSE_diff_lr,RMSE_diff_lr_afteronebackprojection);
end
if RMSE_diff_lr_afteronebackprojection < TolF
disp('RMSE_diff_lr_afteronebackprojection < TolF');
break;
end
end
img_bp = img_hr;
end
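%Usage sketch (illustration only; the inputs are assumptions): starting from a
%bicubic upsampling and refining it for at most 100 iterations, stopping early
%when the LR reconstruction RMSE drops below 1e-4:
%  img_hr0 = imresize(img_lr, 4, 'bicubic');
%  img_bp = F11e_BackProjection_GaussianKernel(img_lr, img_hr0, 1.6, 100, true, 1e-4);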
|
github | Liusifei/Face-Hallucination-master | F15_ComputeSRSSD.m | .m | Face-Hallucination-master/Code/Ours2/F15_ComputeSRSSD.m | 236 | utf_8 | 3532526636455f6cc3acccf05b85199d |
%03/17/12
function SRSSD = F15_ComputeSRSSD(GradOrImg)
if size(GradOrImg,3) == 8
Grad = GradOrImg;
else
Grad = F14_Img2Grad(GradOrImg);
end
Sqr = Grad .^2;
Sum = sum(Sqr,3);
SRSSD = sqrt(Sum);
end
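%In equation form (added for clarity): SRSSD(p) = sqrt( sum_{k=1..8} g_k(p)^2 ),
%the square root of the sum of squared directional differences, evaluated either
%on a precomputed 8-channel gradient or on an image via F14_Img2Grad.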
|
github | Liusifei/Face-Hallucination-master | U5_ReadFileNameList.m | .m | Face-Hallucination-master/Code/Ours2/U5_ReadFileNameList.m | 249 | utf_8 | 6cc6e817fc7e4d5c331e663041ff41fc |
%Chih-Yuan Yang
%03/07/13
%U5: read a list from a file and return it as a cell array
function filenamelist = U5_ReadFileNameList( fn_list )
fid = fopen(fn_list,'r');
C = textscan(fid,'%d %s\n');
fclose(fid);
filenamelist = C{2};
end
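%The list file is expected to contain one "<index> <filename>" pair per line,
%matching the '%d %s' format above, e.g. (file names are illustrative only):
%  1 face_001.png
%  2 face_002.png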
|
github | Liusifei/Face-Hallucination-master | IF4s_BuildHRimagefromHRPatches.m | .m | Face-Hallucination-master/Code/Ours2/IF4s_BuildHRimagefromHRPatches.m | 4,401 | utf_8 | 07e053559d7bc259285d722b784bfab0 |
% Build color image
function img_texture = IF4s_BuildHRimagefromHRPatches(hrpatch,zooming)
%reconstruct the high-resolution image
patchsize_hr = size(hrpatch,1);
patchsize_lr = patchsize_hr/zooming;
h_lr = size(hrpatch,4) + patchsize_lr - 1;
w_lr = size(hrpatch,5) + patchsize_lr - 1;
h_expected = h_lr * zooming;
w_expected = w_lr * zooming;
img_texture = zeros(h_expected,w_expected,3);
%most cases
rpixelshift = 2; %this should be modified according to patchsize_lr
cpixelshift = 2;
for rl = 2:h_lr - patchsize_lr
rh = (rl-1+rpixelshift)*zooming+1;
rh1 = rh+zooming-1;
for cl = 2:w_lr - patchsize_lr
ch = (cl-1+cpixelshift)*zooming+1;
ch1 = ch+zooming-1;
usedhrpatch = hrpatch(:,:,:,rl,cl);
img_texture(rh:rh1,ch:ch1,:) = usedhrpatch(9:12,9:12,:);
end
end
%left
cl = 1;
ch = 1;
ch1 = ch+3*zooming-1;
for rl=2:h_lr-patchsize_lr
rh = (rl-1+rpixelshift)*zooming+1;
rh1 = rh+zooming-1;
usedhrpatch = hrpatch(:,:,:,rl,cl);
chsource = 1;
ch1source = chsource+3*zooming-1;
rhsource = 9;
rh1source = rhsource+zooming-1;
img_texture(rh:rh1,ch:ch1,:) = usedhrpatch(rhsource:rh1source,chsource:ch1source,:);
end
%right
cl = w_lr - patchsize_lr+1;
ch = w_expected - 3*zooming+1;
ch1 = w_expected;
for rl=2:h_lr-patchsize_lr
rh = (rl-1+rpixelshift)*zooming+1;
rh1 = rh+zooming-1;
usedhrpatch = hrpatch(:,:,:,rl,cl);
chsource = 9;
ch1source = chsource+3*zooming-1;
rhsource = 9;
rh1source = rhsource+zooming-1;
img_texture(rh:rh1,ch:ch1,:) = usedhrpatch(rhsource:rh1source,chsource:ch1source,:);
end
%top
rl = 1;
rh = 1;
rh1 = rh+3*zooming-1;
for cl=2:w_lr-patchsize_lr
ch = (cl-1+cpixelshift)*zooming+1;
ch1 = ch+zooming-1;
usedhrpatch = hrpatch(:,:,:,rl,cl);
chsource = 9;
ch1source = chsource+zooming-1;
rhsource = 1;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1,:) = usedhrpatch(rhsource:rh1source,chsource:ch1source,:);
end
%bottom
rl = h_lr-patchsize_lr+1;
rh = h_expected - 3*zooming+1;
rh1 = h_expected;
for cl=2:w_lr-patchsize_lr
ch = (cl-1+cpixelshift)*zooming+1;
ch1 = ch+zooming-1;
usedhrpatch = hrpatch(:,:,:,rl,cl);
chsource = 9;
ch1source = chsource+zooming-1;
rhsource = 9;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1,:) = usedhrpatch(rhsource:rh1source,chsource:ch1source,:);
end
%left-top corner
rl=1;
cl=1;
rh = 1;
rh1 = rh+3*zooming-1;
ch = 1;
ch1 = ch+3*zooming-1;
usedhrpatch = hrpatch(:,:,:,rl,cl);
chsource = 1;
ch1source = chsource+3*zooming-1;
rhsource = 1;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1,:) = usedhrpatch(rhsource:rh1source,chsource:ch1source,:);
%right-top corner
rl=1;
cl=w_lr-patchsize_lr+1;
rh = (rl-1)*zooming+1;
rh1 = rh+3*zooming-1;
ch = (cl-1+cpixelshift)*zooming+1;
ch1 = ch+3*zooming-1;
usedhrpatch = hrpatch(:,:,:,rl,cl);
chsource = 9;
ch1source = chsource+3*zooming-1;
rhsource = 1;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1,:) = usedhrpatch(rhsource:rh1source,chsource:ch1source,:);
%left-bottom corner
rl=h_lr-patchsize_lr+1;
cl=1;
rh = (rl-1+rpixelshift)*zooming+1;
rh1 = rh+3*zooming-1;
ch = (cl-1)*zooming+1;
ch1 = ch+3*zooming-1;
usedhrpatch = hrpatch(:,:,:,rl,cl);
chsource = 1;
ch1source = chsource+3*zooming-1;
rhsource = 9;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1,:) = usedhrpatch(rhsource:rh1source,chsource:ch1source,:);
%right-bottom corner
rl=h_lr-patchsize_lr+1;
cl=w_lr-patchsize_lr+1;
rh = (rl-1+rpixelshift)*zooming+1;
rh1 = rh+3*zooming-1;
ch = (cl-1+cpixelshift)*zooming+1;
ch1 = ch+3*zooming-1;
usedhrpatch = hrpatch(:,:,:,rl,cl);
chsource = 9;
ch1source = chsource+3*zooming-1;
rhsource = 9;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1,:) = usedhrpatch(rhsource:rh1source,chsource:ch1source,:);
end
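%Note (added for clarity): the hard-coded source indices above (9:12 and the
%3*zooming-wide borders) appear to assume the configuration used elsewhere in
%this code, i.e. 5x5 LR patches with zooming = 4, so each HR patch is 20x20x3
%and its central LR pixel maps to HR rows/columns 9:12; rpixelshift and
%cpixelshift = 2 encode the same 2-pixel offset to that central LR pixel.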
|
github | Liusifei/Face-Hallucination-master | F11a_AdaptiveBackProjection.m | .m | Face-Hallucination-master/Code/Ours2/F11a_AdaptiveBackProjection.m | 7,250 | utf_8 | 404edc2e654e7c70e60151b263ca5528 |
%09/28/12
%Chih-Yuan Yang
%The adaptive kernelmap has to be passed from the caller, or generated by img_lr
function img_bp = F11a_AdaptiveBackProjection(img_lr, img_hr, Gau_sigma, iternum,bReport,kernelmap)
[h_hr] = size(img_hr,1);
[h_lr] = size(img_lr,1);
zooming = h_hr/h_lr;
if nargin < 6
%generate the kernelmap
%compute the similarity from low
coef = 10;
sqrt_low = IF1_SimilarityEvaluation(img_lr);
similarity_low = exp(-sqrt_low*coef);
%model the directional Gaussian kernel
[h w] = size(similarity_low);
options = optimset('Display','iter','TolFun',0.001,'TolX',0.1);
initial_sigmax = 1;
initial_sigmay = 1;
initial_theta = 0;
initial_variable = [initial_sigmax, initial_sigmay, initial_theta];
kernel = zeros(12); %the kernel size depends on the zooming; the loop below fills a 12x12 kernel centered at (6.5,6.5)
kernelmap = zeros(12,12,h,w);
for rl=1:h
for cl=1:w
%solve the optimization problem for each position
zvalue16points = similarity_low(rl,cl,:);
[x fval]= fminsearch(@(x) IF3_OptProblem(x,zvalue16points), initial_variable, options);
sigma_x = x(1);
sigma_y = x(2);
theta = x(3);
a = cos(theta)^2/2/sigma_x^2 + sin(theta)^2/2/sigma_y^2;
b = -sin(2*theta)/4/sigma_x^2 + sin(2*theta)/4/sigma_y^2 ;
c = sin(theta)^2/2/sigma_x^2 + cos(theta)^2/2/sigma_y^2;
%create the kernel map by parameter a b c
for s = 1:12
yi = s-6.5;
for t = 1:12
xi = t-6.5;
kernel(s,t) = exp(-(a*xi^2 + 2*b*xi*yi + c*yi^2));
end
end
sumvalue = sum(kernel(:));
kernel_normal = kernel / sumvalue;
kernelmap(:,:,rl,cl) = kernel_normal;
end
end
end
for i=1:iternum
img_lr_gen = F19_GenerateLRImage_BlurSubSample(img_hr,zooming,Gau_sigma);
diff_lr = img_lr - img_lr_gen;
term_diff_lr_SSD = sum(sum(diff_lr.^2));
%here, change the kernel pixel by pixel
diff_hr = Upsample(diff_lr,zooming, kernelmap);
% diff_hr = imresize(diff_lr,zooming,'bilinear');
img_hr = img_hr + diff_hr;
img_lr_new = F19_GenerateLRImage_BlurSubSample(img_hr,zooming,Gau_sigma);
diff_lr_new = img_lr - img_lr_new;
term_diff_lr_SSD_afteronebackprojection = sum(sum(diff_lr_new.^2));
if bReport
fprintf('backproject iteration=%d, term_before=%0.1f, term_after=%0.1f\n', ...
i,term_diff_lr_SSD,term_diff_lr_SSD_afteronebackprojection);
end
end
img_bp = img_hr;
end
%model kernelmap as Gaussian?
function diff_hr = Upsample(diff_lr,zooming, kernelmap)
%NOTE: unfinished stub; diff_hr is never assigned, so this branch cannot run as written
[h w] = size(diff_lr);
for r=1:h
for c=1:w
end
end
end
function SqrtData = IF1_SimilarityEvaluation(Img_in)
[h w] = size(Img_in);
SqrtData = zeros(h,w,16);
f3x3 = ones(3);
for i = 1:16
[DiffOp N] = IF2_RetGradientKernel16(i); %this may be better if there are 32 samples
if N == 1
Diff = imfilter(Img_in,DiffOp{1},'symmetric');
else
Diff1 = imfilter(Img_in,DiffOp{1},'symmetric');
Diff2 = imfilter(Img_in,DiffOp{2},'symmetric');
Diff = (Diff1+Diff2)/2;
end
Sqr = Diff.^2;
Sum = imfilter(Sqr,f3x3,'replicate');
Mean = Sum/9;
SqrtData(:,:,i) = sqrt(Mean);
end
end
function [DiffOp N] = IF2_RetGradientKernel16(dir)
DiffOp = cell(2,1);
f{1} = [0 0 0;
0 -1 1;
0 0 0];
f{2} = [0 0 1;
0 -1 0;
0 0 0];
f{3} = [0 1 0;
0 -1 0;
0 0 0];
f{4} = [1 0 0;
0 -1 0;
0 0 0];
f{5} = [0 0 0;
1 -1 0;
0 0 0];
f{6} = [0 0 0;
0 -1 0;
1 0 0];
f{7} = [0 0 0;
0 -1 0;
0 1 0];
f{8} = [0 0 0;
0 -1 0;
0 0 1];
switch dir
case 1
N = 1;
DiffOp{1} = f{1};
DiffOp{2} = [];
case 2
N = 2;
DiffOp{1} = f{1};
DiffOp{2} = f{2};
case 3
N = 1;
DiffOp{1} = f{2};
DiffOp{2} = [];
case 4
N = 2;
DiffOp{1} = f{2};
DiffOp{2} = f{3};
case 5
N = 1;
DiffOp{1} = f{3};
DiffOp{2} = [];
case 6
N = 2;
DiffOp{1} = f{3};
DiffOp{2} = f{4};
case 7
N = 1;
DiffOp{1} = f{4};
DiffOp{2} = [];
case 8
N = 2;
DiffOp{1} = f{4};
DiffOp{2} = f{5};
case 9
N = 1;
DiffOp{1} = f{5};
DiffOp{2} = [];
case 10
N = 2;
DiffOp{1} = f{5};
DiffOp{2} = f{6};
case 11
DiffOp{1} = f{6};
DiffOp{2} = [];
N = 1;
case 12
N = 2;
DiffOp{1} = f{6};
DiffOp{2} = f{7};
case 13
N = 1;
DiffOp{1} = f{7};
DiffOp{2} = [];
case 14
N = 2;
DiffOp{1} = f{7};
DiffOp{2} = f{8};
case 15
DiffOp{1} = f{8};
DiffOp{2} = [];
N = 1;
case 16
N = 2;
DiffOp{1} = f{8};
DiffOp{2} = f{1};
end
end
function value = IF3_OptProblem(x,zvalue16points)
sigma_x = x(1);
sigma_y = x(2);
theta = x(3);
a = cos(theta)^2/2/sigma_x^2 + sin(theta)^2/2/sigma_y^2;
b = -sin(2*theta)/4/sigma_x^2 + sin(2*theta)/4/sigma_y^2 ;
c = sin(theta)^2/2/sigma_x^2 + cos(theta)^2/2/sigma_y^2;
value = 0;
for i=1:16
[xi yi] = IF4_GetXiYi(i);
diff = zvalue16points(i) - exp(- (a*xi^2 + 2*b*xi*yi + c*yi^2));
value = value + diff^2;
end
end
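%For reference (added): a, b, c above are the standard coefficients of a rotated
%anisotropic Gaussian,
%  k(x,y) = exp( -(a*x^2 + 2*b*x*y + c*y^2) )
%with
%  a = cos(theta)^2/(2*sigma_x^2) + sin(theta)^2/(2*sigma_y^2)
%  b = -sin(2*theta)/(4*sigma_x^2) + sin(2*theta)/(4*sigma_y^2)
%  c = sin(theta)^2/(2*sigma_x^2) + cos(theta)^2/(2*sigma_y^2)
%and the objective is the squared error between this Gaussian, sampled at the
%16 directions of IF4_GetXiYi, and the measured similarity values.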
function [xi yi] = IF4_GetXiYi(i)
switch i
case 1
xi = 1;
yi = 0;
case 2
xi = 1;
yi = -0.5;
case 3
xi = 1;
yi = -1;
case 4
xi = 0.5;
yi = -1;
case 5
xi = 0;
yi = -1;
case 6
xi = -0.5;
yi = -1;
case 7
xi = -1;
yi = -1;
case 8
xi = -1;
yi = -0.5;
case 9
xi = -1;
yi = 0;
case 10
xi = -1;
yi = 0.5;
case 11
xi = -1;
yi = 1;
case 12
xi = -0.5;
yi = 1;
case 13
xi = 0;
yi = 1;
case 14
xi = 0.5;
yi = 1;
case 15
xi = 1;
yi = 1;
case 16
xi = 1;
yi = 0.5;
end
end
|
github | Liusifei/Face-Hallucination-master | F14c_Img2Grad_fast_suppressboundary.m | .m | Face-Hallucination-master/Code/Ours2/F14c_Img2Grad_fast_suppressboundary.m | 2,487 | utf_8 | dfc73eaa1e6c8da2acc4a7384ee4802a |
%Chih-Yuan Yang
%03/05/13
%add class control
%F14b: improve the speed; the boundary is inaccurate
%F14c: resolve the boundary problem
function grad = F14c_Img2Grad_fast_suppressboundary(img)
[h, w] = size(img);
grad = zeros(h,w,8);
rsup = cell(2,1);
csup = cell(2,1);
for i=1:8
switch i
case 1 %right
rs = 0;
cs = 1;
rsup{1} = 'all';
csup{1} = w;
supnumber = 1;
case 2 %top right
rs = -1;
cs = 1;
rsup{1} = 'all';
csup{1} = w;
rsup{2} = 1;
csup{2} = 'all';
supnumber = 2;
case 3 %top
rs = -1;
cs = 0;
rsup{1} = 1;
csup{1} = 'all';
supnumber = 1;
case 4 %top left
rs = -1;
cs = -1;
rsup{1} = 1;
csup{1} = 'all';
rsup{2} = 'all';
csup{2} = 1;
supnumber = 2;
case 5 %left
rs = 0;
cs = -1;
rsup{1} = 'all';
csup{1} = 1;
supnumber = 1;
case 6 %left bottom
rs = 1;
cs = -1;
rsup{1} = 'all';
csup{1} = 1;
rsup{2} = h;
csup{2} = 'all';
supnumber = 2;
case 7 %bottom
rs = 1;
cs = 0;
rsup{1} = h;
csup{1} = 'all';
supnumber = 1;
case 8 %bottom right
rs = 1;
cs = 1;
rsup{1} = h;
csup{1} = 'all';
rsup{2} = 'all';
csup{2} = w;
supnumber = 2;
end
grad(:,:,i) = circshift(img,[-rs,-cs]) - img ; %correct
%suppress the boundary
for supidx = 1:supnumber
if ischar(rsup{supidx}) && strcmp(rsup{supidx},'all')
c = csup{supidx};
grad(:,c,i) = 0;
end
if ischar(csup{supidx}) && strcmp(csup{supidx},'all')
r = rsup{supidx};
grad(r,:,i) = 0;
end
end
end
end
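%Note (added for clarity): circshift(img,[-rs,-cs]) - img places I(r+rs,c+cs) - I(r,c)
%at every pixel, so each channel reproduces the corresponding imfilter-based forward
%difference of the slower Img2Grad/F14_Img2Grad variants; the wrapped-around rows and
%columns introduced by circshift are then zeroed by the suppression loop, which is
%what resolves the boundary problem mentioned in the header comment.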
|
github | Liusifei/Face-Hallucination-master | F16d_ACCV12TextureGradientNotIntensity.m | .m | Face-Hallucination-master/Code/Ours2/F16d_ACCV12TextureGradientNotIntensity.m | 21,023 | utf_8 | d5f8b5f2fa7dd109fd9f1bad6a31d0ce |
%Chih-Yuan Yang
%10/04/12
%16d: only run patch selection because the edge part has been separated
function [gradients_texture img_texture img_texture_backproject] = F16d_ACCV12TextureGradientNotIntensity(img_y, zooming, Gau_sigma ,sfall,srecall,allHRexampleimages,allLRexampleimages)
if zooming == 4
para.Gau_sigma = 1.6;
elseif zooming == 3
para.Gau_sigma = 1.2;
end
[h_lr w_lr] = size(img_y);
para.lh = h_lr;
para.lw = w_lr;
para.NumberOfHCandidate = 10;
para.SimilarityFunctionSettingNumber = 1;
%load all data set to save loading time
[scanr scanra] = SearchExternalPatches(img_y,para,sfall,srecall);
para.zooming = zooming;
para.ps = 5;
para.Gau_sigma = Gau_sigma;
hrpatch = F8_ExtractAllHrPatches(img_y, para, scanr,allHRexampleimages,allLRexampleimages);
[scanr_self scanra_self] = F22_SearchForSelfSimilarPatchesL2Norm(img_y,para);
para.ehrfKernelWidth = 1.0;
para.bEnablemhrf = true;
[img_texture weightmap_texture] = F11_FilterOutImproperHrPatches(img_y,hrpatch,para,scanr_self,scanra_self,scanr,scanra);
%apply backprojection on img_texture only
iternum = 100;
breport = true;
disp('backprojection for img_texture in ACCV12');
img_texture_backproject = F11c_BackProjection_GaussianKernel(img_y, img_texture, Gau_sigma, iternum,breport);
%extract the gradient
gradients_texture = Img2Grad(img_texture_backproject);
end
function [scanr scanra] = SearchExternalPatches(img_y,para,sfall,srecall)
%how can the search be parallelized to speed this up?
ps = 5; %patch size
[lh lw] = size(img_y);
hrpatchnumber = 10;
%featurefolder = para.featurefolder;
sh = GetShGeneral(ps);
scanr = zeros(6,hrpatchnumber,lh-ps+1,lw-ps+1); %scan results: mm, quan, ii, sr, sc, similarity
smallvalue = -1;
scanr(6,:,:,:) = smallvalue;
scanra = zeros(lh-ps+1,lw-ps+1); %scan results active
%scanrsimmax = smallvalue * ones(lh-ps+1,lw-ps+1); %del this line?
quanarray = [1 2 4 8 16 32];
B = [256 128 64 32 16 8];
imlyi = im2uint8(img_y);
for qidx=1:6
quan = quanarray(qidx);
b = B(qidx);
cur_initial = floor(size(sfall{1},2)/2); %accelerate the loop by using an initial position
for rl=1:lh-ps+1
fprintf('look for lut rl:%d quan:%d\n',rl,quan);
for cl = 1:lw-ps+1
patch = imlyi(rl:rl+ps-1,cl:cl+ps-1);
fq = patch(sh);
if qidx == 1
fquan = fq;
else
fquan = fq - mod(fq,quan) + quan/2;
end
[iila mma] = LookForLookUpTable9_External(fquan,sfall{qidx},cur_initial,para); %index in lookuptable
in = length(iila); %number of returned instances (at most para.NumberOfHCandidate)
for i=1:in
ii = srecall{qidx}(1,iila(i));
sr = srecall{qidx}(2,iila(i));
sc = srecall{qidx}(3,iila(i));
%check whether the patch is in the scanr already
bSamePatch = false;
for j=1:scanra(rl,cl)
if ii == scanr(3,j,rl,cl) && sr == scanr(4,j,rl,cl) && sc == scanr(5,j,rl,cl)
bSamePatch = true;
break
end
end
if bSamePatch == false
similarity = bmm2similarity(b,mma(i),para.SimilarityFunctionSettingNumber);
if scanra(rl,cl) < hrpatchnumber
ix = scanra(rl,cl) + 1;
%to do: update scanr by similarity
%cast to double; otherwise the integer type truncates the similarity
scanr(:,ix,rl,cl) = cat(1,mma(i),quan,double(ii),double(sr),double(sc),similarity);
scanra(rl,cl) = ix;
else
[minval ix] = min(scanr(6,:,rl,cl));
if scanr(6,ix,rl,cl) < similarity
%update
scanr(:,ix,rl,cl) = cat(1,mma(i),quan,double(ii),double(sr),double(sc),similarity);
end
end
end
end
end
end
end
end
function [iila mma] = LookForLookUpTable9_External(fq,lut,cur_initial,para)
hrpatchnumber = para.NumberOfHCandidate; %default 10
fl = length(fq); %feature length
head = 1;
tail = size(lut,2);
lutsize = size(lut,2);
if exist('cur_initial','var')
if cur_initial > lutsize
cur = lutsize;
else
cur = cur_initial;
end
else
cur = round(lutsize/2);
end
cur_rec1 = cur;
%initial comparison
fqsmaller = -1;
fqlarger = 1;
fqsame = 0;
cr = 0; %compare results
mm = 0;
mmiil = 0;
%search for the largest mm
while 1
for c=1:fl
if fq(c) < lut(c,cur)
cr = fqsmaller;
break
elseif fq(c) > lut(c,cur)
cr = fqlarger;
break; %c moves to next
else %equal
cr = fqsame;
if mm < c
mm = c;
mmiil = cur;
end
end
end
if cr == fqsmaller
next = floor((cur + head)/2);
tail = cur; %adjust the range of head and tail
elseif cr == fqlarger;
next = ceil((cur + tail)/2); %the round function has to be floor, because fq is larger than cur
%otherwise the fully 255 patches will never match
head = cur; %adjust the range of head and tail
end
if mm == 25 %it happens, the initial one match the fq, therefore, there is no next defined.
break
end
if cur == next || cur_rec1 == next %the next might oscillate
break;
else
cur_rec1 = cur;
cur = next;
end
%fprintf('cur %d\n',cur);
end
if mm == 0
iila = [];
mma = [];
return
end
%post-process to find the repeated partial vectors
%search for previous
idx = 1;
iila = zeros(hrpatchnumber,1);
mma = zeros(hrpatchnumber,1);
iila(idx) = mmiil;
mma(idx) = mm;
bprecontinue = true;
bproccontinue = true;
presh = 0; %previous shift
procsh = 0; %proceeding shift
while 1
presh = presh -1;
iilpre = mmiil + presh;
if iilpre <1
bprecontinue = false;
premm = 0;
end
procsh = procsh +1;
iilproc = mmiil + procsh;
if iilproc > lutsize
bproccontinue = false;
procmm = 0;
end
if bprecontinue
diff = lut(:,iilpre) ~= fq;
if nnz(diff) == 0
premm = 25;
else
premm = find(diff,1,'first') -1;
end
end
if bproccontinue
diff = lut(:,iilproc) ~= fq;
if nnz(diff) == 0
procmm = 25;
else
procmm = find(diff,1,'first') -1;
end
end
if premm == 0 && procmm == 0
break
end
if premm > procmm
%add pre item
idx = idx + 1;
iila(idx) = iilpre;
mma(idx) = premm;
%pause the proc
bprecontinue = true;
elseif premm < procmm
%add proc item
idx = idx + 1;
iila(idx) = iilproc;
mma(idx) = procmm;
%pause the pre
bproccontinue = true;
else %premm == procmm
%add both item
idx = idx + 1;
iila(idx) = iilpre;
mma(idx) = premm;
if idx == hrpatchnumber
break
end
idx = idx + 1;
iila(idx) = iilproc;
mma(idx) = procmm;
bproccontinue = true;
bprecontinue = true;
end
if idx == hrpatchnumber
break
end
end
if idx < hrpatchnumber
iila = iila(1:idx);
mma = mma(1:idx);
end
end
function s = bmm2similarity(b,mm,SimilarityFunctionSettingNumber)
if SimilarityFunctionSettingNumber == 1
if mm >= 9
Smm = 0.9 + 0.1*(mm-9)/16;
else
Smm = 0.5 * mm/9;
end
Sb = 0.5+0.5*(log2(b)-3)/5;
s = Sb * Smm;
elseif SimilarityFunctionSettingNumber == 2
Smm = mm/25;
Sb = (log2(b)-2)/6;
s = Sb * Smm;
end
end
function hrpatch = F8_ExtractAllHrPatches(img_y, para, scanr,allHRexampleimages,allLRexampleimages)
disp('extracting HR patches');
%how can the search be parallelized to speed this up?
psh = para.ps * para.zooming;
ps = para.ps;
lh = para.lh;
lw = para.lw;
s = para.zooming;
hrpatchnumber = para.NumberOfHCandidate;
hrpatch = zeros(psh,psh,lh-ps+1,lw-ps+1,hrpatchnumber);
allimages = allHRexampleimages;
%analyze which images need to be loaded
alliiset = scanr(3,:,:,:);
alliiset_uni = unique(alliiset(:)); %almost all images are used, from 1 to 1500
if alliiset_uni(1) ~= 0
alliiset_uni_pure = alliiset_uni;
else
alliiset_uni_pure = alliiset_uni(2:end);
end
for i = 1:length(alliiset_uni_pure)
ii = alliiset_uni_pure(i);
fprintf('extracting image %d\n',ii);
exampleimage_hr = im2double(allimages(:,:,ii));
exampleimage_lr = allLRexampleimages(:,:,ii);
%exampleimage_lr = U3_GenerateLRImage_BlurSubSample(exampleimage_hr,para.zooming,para.Gau_sigma);
match_4D = alliiset == ii;
match_3D = reshape(match_4D,hrpatchnumber,lh-ps+1,lw-ps+1); %remove the first dimension
[d1 d2 d3] = size(match_3D); %d2 (second dimension length) is used to decode the linear positions below
[idxset posset] = find(match_3D);
setin = length(idxset);
for j = 1:setin
idx = idxset(j);
possum = posset(j);
pos3 = floor( (possum-1)/d2) +1; %the relationship: possum = (pos3-1) * d2 + pos2, pos2 in (1,d2)
pos2 = possum - (pos3-1)*d2;
rl = pos2;
cl = pos3;
sr = scanr(4,idx,rl,cl);
sc = scanr(5,idx,rl,cl);
srh = (sr-1)*s+1;
srh1 = srh + psh -1;
sch = (sc-1)*s+1;
sch1 = sch + psh-1;
%to do: compensate the HR patch to match the LR query patch
hrp = exampleimage_hr(srh:srh1,sch:sch1); %HR patch
lrq = img_y(rl:rl+ps-1,cl:cl+ps-1); %LR query patch
lrr = exampleimage_lr(sr:sr+ps-1,sc:sc+ps-1); %LR retrieved patch
chrp = hrp + imresize(lrq - lrr,s,'bilinear'); %compensate HR patch
hrpatch(:,:,rl,cl,idx) = chrp;
bVisuallyCheck = false;
if bVisuallyCheck
if ~exist('hfig','var')
hfig = figure;
else
figure(hfig);
end
subplot(1,4,1);
imshow(hrp/255);
title('hrp');
subplot(1,4,2);
imshow(lrr/255);
title('lrr');
subplot(1,4,3);
imshow(lrq/255);
title('lrq');
subplot(1,4,4);
imshow(chrp/255);
title('chrp');
keyboard
end
end
end
end
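%A small self-check of the linear-index arithmetic used above (illustration only;
%the numbers are hypothetical): find on the reshaped logical array returns
%possum = (pos3-1)*d2 + pos2, and the two lines in the loop invert that relation.
d2_demo = 71; pos2_demo = 13; pos3_demo = 4;
possum_demo = (pos3_demo-1)*d2_demo + pos2_demo;
pos3_dec = floor((possum_demo-1)/d2_demo) + 1;
pos2_dec = possum_demo - (pos3_dec-1)*d2_demo;
assert(pos2_dec == pos2_demo && pos3_dec == pos3_demo);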
function [img_texture Reliablemap] = F11_FilterOutImproperHrPatches(img_y,hrpatch,para,scanr_self,scanra_self,scanr,scanra)
%filter out improper hr patches using similarity among lr patches
%load the self-similar data
s = para.zooming;
lh = para.lh;
lw = para.lw;
ps = para.ps;
psh = s * para.ps;
patcharea = para.ps^2;
SSnumberUpperbound = 10;
%do I still need these variables?
cqarray = zeros(32,1)/0;
for qidx = 1:6
quan = 2^(qidx-1);
cqvalue = 0.9^(qidx-1);
cqarray(quan) = cqvalue;
end
hh = lh * s;
hw = lw * s;
hrres_nomi = zeros(hh,hw);
hrres_deno = zeros(hh,hw);
maskmatrix = false(psh,psh,patcharea);
Reliablemap = zeros(hh,hw);
pshs = psh * psh;
for i=1:patcharea
[sh_notused masklow maskhigh] = GetShGeneral(ps,i,true,s); %ps, mm, bhigh, s
maskmatrix(:,:,i) = maskhigh;
end
mhr = zeros(5*s);
r1 = 2*s+1;
r2 = 3*s;
c1 = 2*s+1;
c2 = 3*s;
mhr(r1:r2,c1:c2) = 1; %the central part
sigma = para.ehrfKernelWidth;
kernel = F20_Sigma2Kernel(sigma);
if para.bEnablemhrf
mhrf = imfilter(mhr,kernel,'replicate');
else
mhrf = mhr;
end
noHmap = scanra == 0;
noHmapToFill = noHmap;
NHOOD = [0 1 0;
1 1 1;
0 1 0];
se = strel('arbitrary',NHOOD);
noHmapneighbor = and( imdilate(noHmap,se) ,~noHmap);
%if noHmap is all zero, nothing special needs to be done
imb = imresize(img_y,s); %use it as the reference if no F is available
rsa = [0 -1 0 1];
csa = [1 0 -1 0];
for rl= 1:lh-ps+1 %75
fprintf('rl:%d total:%d\n',rl,lh-ps+1);
rh = (rl-1)*s+1;
rh1 = rh+psh-1;
for cl = 1:lw-ps+1 %128
ch = (cl-1)*s+1;
ch1 = ch+psh-1;
%load candidates
hin = para.NumberOfHCandidate;
H = zeros(psh,psh,hin);
HSim = zeros(hin,1);
for j=1:hin
H(:,:,j) = hrpatch(:,:,rl,cl,j); %H
HSim(j) = scanr(6,j,rl,cl);
end
%compute the number of reference patches
sspin = min(SSnumberUpperbound,scanra_self(rl,cl));
%self similar patch instance number
F = zeros(ps,ps,sspin);
FSimPure = zeros(1,sspin);
rin = 0;
for i=1:sspin
sr = scanr_self(3,i,rl,cl);
sc = scanr_self(4,i,rl,cl);
%hr candidate number
rin = rin + para.NumberOfHCandidate;
F(:,:,i) = img_y(sr:sr+ps-1,sc:sc+ps-1);
FSimPure(i) = scanr_self(5,i,rl,cl);
end
%load all of the two step patches
R = zeros(psh,psh,rin);
mms = zeros(rin,1);
mmr = zeros(rin,1);
qs = zeros(rin,1);
qr = zeros(rin,1);
FSimBaseR = zeros(rin,1);
RSim = zeros(rin,1);
idx = 0;
if sspin > 0
for i=1:sspin %sspin is the Fin
sr = scanr_self(3,i,rl,cl);
sc = scanr_self(4,i,rl,cl);
%hr candidate number
hrcanin = para.NumberOfHCandidate;
for j=1:hrcanin
idx = idx + 1;
R(:,:,idx) = hrpatch(:,:,sr,sc,j);
mms(idx) = scanr_self(1,i,rl,cl);
qs(idx) = scanr_self(2,i,rl,cl);
mmr(idx) = scanr(1,j,sr,sc);
qr(idx) = scanr(2,j,sr,sc);
FSimBaseR(idx) = FSimPure(i);
RSim(idx) = scanr(6,j,sr,sc);
end
end
else
idx = 1;
rin = 1; %use bicubic
R(:,:,idx) = imb(rh:rh1,ch:ch1);
FSimBaseR(idx) = 1;
end
%here is a question, how to define the similarity between H and R?
%L2norm?
hscore = zeros(hin,1);
for i=1:hin
theH = H(:,:,i);
for j=1:rin
theR = R(:,:,j);
spf = FSimBaseR(j);
%similarity between H and R
diff = theH - theR;
L2N = norm(diff(:));
shr = exp(- L2N/pshs);
hscore(i) = hscore(i) + shr*spf;
end
end
[maxscore idx] = max(hscore);
%take this as the example
Reliablemap(rh:rh1,ch:ch1) = Reliablemap(rh:rh1,ch:ch1) + HSim(idx)*mhrf;
if hin > 0 %some patches can't find H
hrres_nomi(rh:rh1,ch:ch1) = hrres_nomi(rh:rh1,ch:ch1) + H(:,:,idx).*mhrf;
hrres_deno(rh:rh1,ch:ch1) = hrres_deno(rh:rh1,ch:ch1) + mhrf;
end
%if any of its neighbor belongs to noHmap, copy additional region to hrres
%if the pixel belongs to noHmapneighbor, then expand the copy regions
if noHmapneighbor(rl,cl) == true
mhrfspecial = zeros(5*s);
mhrfspecial(r1:r2,c1:c2) = 1;
for i=1:4
rs = rsa(i);
cs = csa(i);
checkr = rl+rs;
checkc = cl+cs;
if checkr > 0 && checkr < lh-ps+1 && checkc >0 && checkc <lw-ps+1 && noHmapToFill(checkr,checkc)
%recompute the mhrf and disable the noHmapToFill
noHmapToFill(checkr,checkc) = false;
switch i
case 1
mhrfspecial(r1:r2,c1+s:c2+s) = 1;
case 2
mhrfspecial(r1-s:r2-s,c1:c2) = 1;
case 3
mhrfspecial(r1:r2,c1-s:c2-s) = 1;
case 4
mhrfspecial(r1+s:r2+s,c1:c2) = 1;
end
end
end
mhrfspecial = imfilter(mhrfspecial,kernel,'replicate');
hrres_nomi(rh:rh1,ch:ch1) = hrres_nomi(rh:rh1,ch:ch1) + H(:,:,idx).*mhrfspecial;
hrres_deno(rh:rh1,ch:ch1) = hrres_deno(rh:rh1,ch:ch1) + mhrfspecial;
end
end
end
hrres = hrres_nomi ./hrres_deno;
exception = isnan(hrres);
hrres_filtered = hrres;
hrres_filtered(exception) = 0;
img_texture = (hrres_filtered .* (1-exception) + imb .*exception);
end
function [scanr_self scanra_self] = F22_SearchForSelfSimilarPatchesL2Norm(img_y,para)
ps = para.ps;
patcharea = ps^2;
[lh lw] = size(img_y);
%Find self similar patches
Fpatchnumber = 10;
scanr_self = zeros(5,Fpatchnumber,lh-ps+1,lw-ps+1); %scan results: mm, quan, r,c, similarity
scanra_self = Fpatchnumber * ones(lh-ps+1,lw-ps+1); %scan results active
in = (lh-ps+1)*(lw-ps+1);
fs = zeros(patcharea,in);
rec = zeros(2,in);
idx = 0;
for rl=1:lh-ps+1
for cl=1:lw-ps+1
idx = idx + 1;
rec(:,idx) = [rl;cl];
fs(:,idx) = reshape(img_y(rl:rl+ps-1,cl:cl+ps-1),patcharea,1);
end
end
%search
idx = 0;
for rl=1:lh-ps+1
for cl=1:lw-ps+1
idx = idx + 1;
fprintf('idx %d in %d\n',idx,in);
qf = fs(:,idx);
diff = fs - repmat(qf,1,in);
sqr = sum(diff.^2);
[ssqr ix] = sort(sqr);
saveidx = 0;
for j=1:11
indexinsort = ix(j);
sr = rec(1,indexinsort);
sc = rec(2,indexinsort);
if sr ~= rl || sc ~= cl
saveidx = saveidx + 1;
l2norm = sqrt(ssqr(j));
similarity = exp(-l2norm/25);
scanr_self(:,saveidx,rl,cl) = cat(1,-1,-1,sr,sc,similarity);
end
end
end
end
end
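%A minimal usage sketch of the self-similarity search (illustration only; the
%image and crop size are arbitrary, cameraman.tif ships with the Image Processing
%Toolbox): only para.ps is read, and each returned similarity is exp(-L2norm/25).
para_demo.ps = 5;
img_demo = im2double(imread('cameraman.tif'));
img_demo = img_demo(1:40,1:40);    %small crop, since the search is exhaustive over all patch pairs
[scanr_self_demo, scanra_self_demo] = F22_SearchForSelfSimilarPatchesL2Norm(img_demo, para_demo);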
%this function may be replaced by IF1a
function Grad = Img2Grad(img)
[h w] = size(img);
Grad = zeros(h,w,8);
DiffOp = RetGradientKernel();
for i=1:8
Grad(:,:,i) = imfilter(img,DiffOp{i},'replicate');
end
end
function f = RetGradientKernel()
f = cell(8,1);
f{1} = [0 0 0;
0 -1 1;
0 0 0];
f{2} = [0 0 1;
0 -1 0;
0 0 0];
f{3} = [0 1 0;
0 -1 0;
0 0 0];
f{4} = [1 0 0;
0 -1 0;
0 0 0];
f{5} = [0 0 0;
1 -1 0;
0 0 0];
f{6} = [0 0 0;
0 -1 0;
1 0 0];
f{7} = [0 0 0;
0 -1 0;
0 1 0];
f{8} = [0 0 0;
0 -1 0;
0 0 1];
end
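%A quick check of the gradient convention above (illustration only; it assumes
%Img2Grad, or the equivalent packaged F14_Img2Grad, is on the path): imfilter
%correlates with the kernels, so channel 1 is the forward difference toward the
%right neighbor, and the eight channels sweep the eight neighbors counter-clockwise.
A_demo = magic(4)/16;
G_demo = Img2Grad(A_demo);
assert(abs(G_demo(2,2,1) - (A_demo(2,3)-A_demo(2,2))) < 1e-12);   %channel 1: right neighbor minus center
assert(abs(G_demo(2,2,3) - (A_demo(1,2)-A_demo(2,2))) < 1e-12);   %channel 3: upper neighbor minus center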
|
github
|
Liusifei/Face-Hallucination-master
|
U13_ComputeTransformMatrix.m
|
.m
|
Face-Hallucination-master/Code/Ours2/U13_ComputeTransformMatrix.m
| 1,261 |
utf_8
|
e029340afaef26a58f7273c0495d4509
|
%08/31/12
%Chih-Yuan Yang
%align by two points; the two eyes no longer need to be horizontal
function transformmatrix = U13_ComputeTransformMatrix(input_points, base_points)
x1 = input_points(1,1);
y1 = input_points(1,2);
x2 = input_points(2,1);
y2 = input_points(2,2);
xb1 = base_points(1,1);
yb1 = base_points(1,2);
xb2 = base_points(2,1);
yb2 = base_points(2,2);
%compute the angle of input_points from base_points
theta_input = atan((y2-y1)/(x2-x1)); %angle of the segment between the two input points
theta_base = atan((yb2-yb1)/(xb2-xb1));
theta_change = theta_base - theta_input;
%compute the scaling factor
db = sqrt((xb2-xb1)^2 + (yb2-yb1)^2);
d = sqrt((x2-x1)^2 + (y2-y1)^2);
lambda = db/d;
%1: shift to origin
%2: rotate
%3: scaling
%4: shift to base points
m1 = [1 0 -x1;
0 1 -y1;
0 0 1];
m2 = [cos(theta_change) -sin(theta_change) 0;
sin(theta_change) cos(theta_change) 0;
0 0 1];
m3 = [lambda 0 0;
0 lambda 0;
0 0 1];
m4 = [1 0 xb1;
0 1 yb1;
0 0 1];
transformmatrix = m4*m3*m2*m1;
end
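%A worked example of the two-point alignment above (illustration only): two input
%points one unit apart are mapped onto two base points 2*sqrt(2) apart, so the
%transform rotates by 45 degrees, scales by 2*sqrt(2), and both input points land
%exactly on the base points in homogeneous coordinates.
input_points_demo = [0 0; 1 0];
base_points_demo  = [10 10; 12 12];
T_demo = U13_ComputeTransformMatrix(input_points_demo, base_points_demo);
p1 = T_demo*[0;0;1];   %-> [10;10;1]
p2 = T_demo*[1;0;1];   %-> [12;12;1]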
|
github
|
Liusifei/Face-Hallucination-master
|
F19b_GenerateLRImage_jpeg.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F19b_GenerateLRImage_jpeg.m
| 2,021 |
utf_8
|
0deb75d0b2c032cd97fde6ff5c26c7a5
|
%Chih-Yuan Yang
%09/28/12
%Change the method of subsampling
%Sifei Liu
%02/22/13
%Generating compressed LR image
% hrimg: estimated HR image;
% s: zooming factor
% sigma: gaussian scale
% quality: jpeg compress quality
function lrimg = F19b_GenerateLRImage_jpeg(hrimg,s,sigma,quality)
if isa(hrimg,'uint8')
hrimg = im2double(hrimg);
end
[h w d] = size(hrimg);
htrim = h-mod(h,s);
wtrim = w-mod(w,s);
imtrim = hrimg(1:htrim,1:wtrim,1:d);
h_lr = htrim/s;
w_lr = wtrim/s;
%detect image type
if s == 3 %or any odd number
kernel = F20_Sigma2Kernel(sigma);
if d == 1
blurimg = imfilter(imtrim,kernel,'replicate');
elseif d == 3
blurimg = zeros(htrim,wtrim,d);
for i=1:3
blurimg(:,:,i) = imfilter(imtrim(:,:,i),kernel,'replicate');
end
end
lrimg = imresize(blurimg,1/s,'nearest');
elseif s == 4
kernelsize = ceil(sigma*3)*2+2;
kernel = fspecial('gaussian',kernelsize,sigma);
if d == 1
blurimg = imfilter(imtrim,kernel,'replicate');
elseif d == 3
blurimg = zeros(htrim,wtrim,d);
for i=1:3
blurimg(:,:,i) = imfilter(imtrim(:,:,i),kernel,'replicate');
end
end
lrimg = zeros(h_lr,w_lr,d);
for didx = 1:d
for rl=1:h_lr
r_hr_sample = (rl-1)*s+2; %the shift is the key issue, because filtering with a kernel
%whose width is an even number is equivalent to a 0.5 pixel shift in the
%original image
for cl = 1:w_lr
c_hr_sample = (cl-1)*s+2;
lrimg(rl,cl,didx) = blurimg(r_hr_sample,c_hr_sample,didx);
end
end
end
end
% JPEG compression
lrimg = im2jpeg(lrimg,quality);
lrimg = lrimg.image;
end
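%A minimal usage sketch (illustration only): the file name is hypothetical, the
%quality value 75 is arbitrary, and im2jpeg is assumed to be the repository's
%compress-and-decode helper, since the code above reads the decoded result back
%from its .image field. Zooming 4 with sigma 1.6 follows the convention used
%elsewhere in this code base.
img_hr_demo = im2double(imread('hr_face_example.png'));   %hypothetical grayscale HR face image
img_lr_jpeg = F19b_GenerateLRImage_jpeg(img_hr_demo, 4, 1.6, 75);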
|
github
|
Liusifei/Face-Hallucination-master
|
F6a_RetriveAreaGradientsByAlign_TwoPoints.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F6a_RetriveAreaGradientsByAlign_TwoPoints.m
| 2,320 |
utf_8
|
f8707eebc9c2bbbdc4384293e2351860
|
%Chih-Yuan Yang
%09/25/12
%change rawexampleimage to uint8 to save memory
function gradientcandidate = F6a_RetriveAreaGradientsByAlign_TwoPoints(testimage_lr, rawexampleimage, inputpoints, basepoints, region_lr, zooming, Gau_sigma)
%the rawexampleimage should be uint8
if ~isa(rawexampleimage,'uint8')
error('wrong class');
end
region_hr = F23_ConvertLRRegionToHRRegion(region_lr, zooming);
exampleimagenumber = size(rawexampleimage,3);
%find the transform matrix by solving an optimization problem
alignedexampleimage_hr = zeros(480,640,exampleimagenumber,'uint8'); %set as uint8 to reduce memory demand
alignedexampleimage_lr = zeros(120,160,exampleimagenumber);
parfor i=1:exampleimagenumber
alignedexampleimage_hr(:,:,i) = F18a_AlignExampleImageByTwoPoints(rawexampleimage(:,:,i),inputpoints(:,:,i),basepoints);
%F19 automatically convert uint8 input to double
alignedexampleimage_lr(:,:,i) = F19_GenerateLRImage_BlurSubSample(alignedexampleimage_hr(:,:,i),zooming,Gau_sigma);
end
%crop the region
area_test = im2double(testimage_lr(region_lr.top_idx:region_lr.bottom_idx,region_lr.left_idx:region_lr.right_idx));
%extract feature from the eyerange, the features are the gradient of LR eye region
feature_test = F24_ExtractFeatureFromArea(area_test); %the unit is double
%search for the thousand example images to find the most similar eyerange
normvalue = zeros(exampleimagenumber,1);
parfor j=1:exampleimagenumber
examplearea_lr = alignedexampleimage_lr(region_lr.top_idx:region_lr.bottom_idx,region_lr.left_idx:region_lr.right_idx,j);
feature_example_lr = F24_ExtractFeatureFromArea(examplearea_lr); %the unit is double
normvalue(j) = norm(feature_test - feature_example_lr);
end
%find the small norm
[sortnorm ix] = sort(normvalue);
%some of them are very similar
%mostsimilarindex = ix(1:20);
gradientcandidate = zeros(region_hr.height,region_hr.width,8,1); %the 3rd dim is dx and dy
%parfor j=1:20
j=1;
examplehrregion = alignedexampleimage_hr(region_hr.top_idx:region_hr.bottom_idx,region_hr.left_idx:region_hr.right_idx,ix(j));
gradientcandidate(:,:,:,j) = F14_Img2Grad(im2double(examplehrregion));
%end
end
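%A minimal usage sketch (illustration only): testimage_lr, rawexampleimage,
%inputpoints and basepoints are assumed to exist in the workspace with the
%conventions of the calling code (uint8 HR example stack, two landmark points per
%image); the region indices below are hypothetical LR eye-region bounds.
region_lr_demo.top_idx = 21;  region_lr_demo.bottom_idx = 45;
region_lr_demo.left_idx = 36; region_lr_demo.right_idx = 90;
gradientcandidate_demo = F6a_RetriveAreaGradientsByAlign_TwoPoints(testimage_lr, ...
    rawexampleimage, inputpoints, basepoints, region_lr_demo, 4, 1.6);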
|
github
|
Liusifei/Face-Hallucination-master
|
F45_SimilarityEvaluation.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F45_SimilarityEvaluation.m
| 4,835 |
utf_8
|
ca3c2e4016d4fe1901981f92c7ea77f0
|
%Chih-Yuan Yang
%11/09/12
%Separate from F27a
function SqrtData = F45_SimilarityEvaluation(Img_in)
[h w] = size(Img_in);
SqrtData = zeros(h,w,16);
f3x3 = ones(3);
for i = 1:16
[DiffOp N] = IF2_RetGradientKernel16(i);
if N == 1
Diff = imfilter(Img_in,DiffOp{1},'symmetric');
else
Diff1 = imfilter(Img_in,DiffOp{1},'symmetric');
Diff2 = imfilter(Img_in,DiffOp{2},'symmetric');
Diff = (Diff1+Diff2)/2;
end
Sqr = Diff.^2;
Sum = imfilter(Sqr,f3x3,'replicate');
Mean = Sum/9;
SqrtData(:,:,i) = sqrt(Mean);
end
end
function [DiffOp N] = IF2_RetGradientKernel16(dir)
DiffOp = cell(2,1);
f{1} = [0 0 0;
0 -1 1;
0 0 0];
f{2} = [0 0 1;
0 -1 0;
0 0 0];
f{3} = [0 1 0;
0 -1 0;
0 0 0];
f{4} = [1 0 0;
0 -1 0;
0 0 0];
f{5} = [0 0 0;
1 -1 0;
0 0 0];
f{6} = [0 0 0;
0 -1 0;
1 0 0];
f{7} = [0 0 0;
0 -1 0;
0 1 0];
f{8} = [0 0 0;
0 -1 0;
0 0 1];
switch dir
case 1
N = 1;
DiffOp{1} = f{1};
DiffOp{2} = [];
case 2
N = 2;
DiffOp{1} = f{1};
DiffOp{2} = f{2};
case 3
N = 1;
DiffOp{1} = f{2};
DiffOp{2} = [];
case 4
N = 2;
DiffOp{1} = f{2};
DiffOp{2} = f{3};
case 5
N = 1;
DiffOp{1} = f{3};
DiffOp{2} = [];
case 6
N = 2;
DiffOp{1} = f{3};
DiffOp{2} = f{4};
case 7
N = 1;
DiffOp{1} = f{4};
DiffOp{2} = [];
case 8
N = 2;
DiffOp{1} = f{4};
DiffOp{2} = f{5};
case 9
N = 1;
DiffOp{1} = f{5};
DiffOp{2} = [];
case 10
N = 2;
DiffOp{1} = f{5};
DiffOp{2} = f{6};
case 11
DiffOp{1} = f{6};
DiffOp{2} = [];
N = 1;
case 12
N = 2;
DiffOp{1} = f{6};
DiffOp{2} = f{7};
case 13
N = 1;
DiffOp{1} = f{7};
DiffOp{2} = [];
case 14
N = 2;
DiffOp{1} = f{7};
DiffOp{2} = f{8};
case 15
DiffOp{1} = f{8};
DiffOp{2} = [];
N = 1;
case 16
N = 2;
DiffOp{1} = f{8};
DiffOp{2} = f{1};
end
end
function [Kernel N] = IF3_GetMoveKernel16(dir)
Kernel = cell(2,1);
f{1} = [0 0 0;
0 0 1;
0 0 0];
f{2} = [0 0 1;
0 0 0;
0 0 0];
f{3} = [0 1 0;
0 0 0;
0 0 0];
f{4} = [1 0 0;
0 0 0;
0 0 0];
f{5} = [0 0 0;
1 0 0;
0 0 0];
f{6} = [0 0 0;
0 0 0;
1 0 0];
f{7} = [0 0 0;
0 0 0;
0 1 0];
f{8} = [0 0 0;
0 0 0;
0 0 1];
switch dir
case 1
N = 1;
Kernel{1} = f{1};
Kernel{2} = [];
case 2
N = 2;
Kernel{1} = f{1};
Kernel{2} = f{2};
case 3
N = 1;
Kernel{1} = f{2};
Kernel{2} = [];
case 4
N = 2;
Kernel{1} = f{2};
Kernel{2} = f{3};
case 5
N = 1;
Kernel{1} = f{3};
Kernel{2} = [];
case 6
N = 2;
Kernel{1} = f{3};
Kernel{2} = f{4};
case 7
N = 1;
Kernel{1} = f{4};
Kernel{2} = [];
case 8
N = 2;
Kernel{1} = f{4};
Kernel{2} = f{5};
case 9
N = 1;
Kernel{1} = f{5};
Kernel{2} = [];
case 10
N = 2;
Kernel{1} = f{5};
Kernel{2} = f{6};
case 11
Kernel{1} = f{6};
Kernel{2} = [];
N = 1;
case 12
N = 2;
Kernel{1} = f{6};
Kernel{2} = f{7};
case 13
N = 1;
Kernel{1} = f{7};
Kernel{2} = [];
case 14
N = 2;
Kernel{1} = f{7};
Kernel{2} = f{8};
case 15
Kernel{1} = f{8};
Kernel{2} = [];
N = 1;
case 16
N = 2;
Kernel{1} = f{8};
Kernel{2} = f{1};
end
end
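%A minimal usage sketch (illustration only; the image is arbitrary): F45 returns
%the root of the 3x3-mean squared directional differences, and callers such as
%F27a below turn it into similarities with an exponential fall-off (Coef = 10 there).
img_demo = im2double(imread('cameraman.tif'));
SqrtData_demo = F45_SimilarityEvaluation(img_demo);
Similarity_demo = exp(-10 * SqrtData_demo);    %16 directional similarity maps in (0,1]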
|
github
|
Liusifei/Face-Hallucination-master
|
F27a_SmoothnessPreserving.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F27a_SmoothnessPreserving.m
| 8,143 |
utf_8
|
59cf63fde62f924872cf245e7725f34e
|
%Chih-Yuan Yang
%10/27/12
%Separate this function from F21, because this function is required in the training phase
%F27a: save similarity_lr and similarity_hr to draw figures required for the paper
function img_out = F27a_SmoothnessPreserving(img_y,zooming,Gau_sigma)
img_bb = imresize(img_y,zooming);
%compute the similarity from low
Coef = 10;
PatchSize = 3;
Sqrt_low = IF1_SimilarityEvaluation(img_y); %I may need more directions, 16 may be too small
Similarity_low = exp(-Sqrt_low*Coef);
[h_high w_high] = size(img_bb);
ExpectedSimilarity = zeros(h_high,w_high,16);
%upsampling the similarity
for dir=1:16
ExpectedSimilarity(:,:,dir) = imresize(Similarity_low(:,:,dir),zooming,'bilinear');
end
folder_project = fileparts(fileparts(pwd));
folder_save = fullfile(folder_project,'PaperWriting','CVPR13','manuscript','figs','Illustration','SmoothnessPreservingUpsampling');
for i=1:16
fn_save = sprintf('Similarity_lr_%d.png',i);
hfig = figure;
imagesc(Similarity_low(:,:,i));
caxis([0,1]);
axis off image
saveas(hfig,fullfile(folder_save,fn_save));
close
fn_save = sprintf('Similarity_hr_%d.png',i);
hfig = figure;
imagesc(ExpectedSimilarity(:,:,i));
caxis([0,1]);
axis off image
saveas(hfig,fullfile(folder_save,fn_save));
close
end
%refind the Grad_high by Similarity_high
LoopNumber = 10;
img = img_bb;
for loop = 1:LoopNumber
%refine gradient by ExpectedSimilarity
ValueSum = zeros(h_high,w_high);
WeightSum = sum(ExpectedSimilarity,3); %if the weight sum is low, it is unsuitable to generate the grad by interpolation
for dir = 1:16
[MoveOp N] = IF3_GetMoveKernel16(dir);
if N == 1
MovedData = imfilter(img,MoveOp{1},'replicate');
else %N ==2
MovedData1 = imfilter(img,MoveOp{1},'replicate');
MovedData2 = imfilter(img,MoveOp{2},'replicate');
MovedData = (MovedData1 + MovedData2)/2;
end
Product = MovedData .* ExpectedSimilarity(:,:,dir);
ValueSum = ValueSum + Product;
end
I = ValueSum ./ WeightSum;
%intensity compensate
diff_lr = F19c_GenerateLRImage_GaussianKernel(I,zooming,Gau_sigma) - img_y;
diff_hr = F26_UpsampleAndBlur(diff_lr,zooming,Gau_sigma);
Grad0 = diff_hr;
Term_LowHigh_in = F28_ComputeSquareSumLowHighDiff(I,img_y,Gau_sigma);
I_in = I; %make a copy, restore the value if all beta fails
bDecrease = false;
%should I use the strict constraint?
tau = 0.2;
for line_search_loop=1:10
%line search for the beta, fixed 1/32 is not a good choice
I = I_in - tau * Grad0;
Term_LowHigh_out = F28_ComputeSquareSumLowHighDiff(I,img_y,Gau_sigma);
if Term_LowHigh_out < Term_LowHigh_in
bDecrease = true;
break;
else
tau = tau * 0.5;
end
end
if bDecrease == true
I_best = I;
else
break;
end
img = I_best;
end
img_out = img;
end
function SqrtData = IF1_SimilarityEvaluation(Img_in,PatchSize)
[h w] = size(Img_in);
SqrtData = zeros(h,w,16);
f3x3 = ones(3);
for i = 1:16
[DiffOp N] = IF2_RetGradientKernel16(i);
if N == 1
Diff = imfilter(Img_in,DiffOp{1},'symmetric');
else
Diff1 = imfilter(Img_in,DiffOp{1},'symmetric');
Diff2 = imfilter(Img_in,DiffOp{2},'symmetric');
Diff = (Diff1+Diff2)/2;
end
Sqr = Diff.^2;
Sum = imfilter(Sqr,f3x3,'replicate');
Mean = Sum/9;
SqrtData(:,:,i) = sqrt(Mean);
end
end
function [DiffOp N] = IF2_RetGradientKernel16(dir)
DiffOp = cell(2,1);
f{1} = [0 0 0;
0 -1 1;
0 0 0];
f{2} = [0 0 1;
0 -1 0;
0 0 0];
f{3} = [0 1 0;
0 -1 0;
0 0 0];
f{4} = [1 0 0;
0 -1 0;
0 0 0];
f{5} = [0 0 0;
1 -1 0;
0 0 0];
f{6} = [0 0 0;
0 -1 0;
1 0 0];
f{7} = [0 0 0;
0 -1 0;
0 1 0];
f{8} = [0 0 0;
0 -1 0;
0 0 1];
switch dir
case 1
N = 1;
DiffOp{1} = f{1};
DiffOp{2} = [];
case 2
N = 2;
DiffOp{1} = f{1};
DiffOp{2} = f{2};
case 3
N = 1;
DiffOp{1} = f{2};
DiffOp{2} = [];
case 4
N = 2;
DiffOp{1} = f{2};
DiffOp{2} = f{3};
case 5
N = 1;
DiffOp{1} = f{3};
DiffOp{2} = [];
case 6
N = 2;
DiffOp{1} = f{3};
DiffOp{2} = f{4};
case 7
N = 1;
DiffOp{1} = f{4};
DiffOp{2} = [];
case 8
N = 2;
DiffOp{1} = f{4};
DiffOp{2} = f{5};
case 9
N = 1;
DiffOp{1} = f{5};
DiffOp{2} = [];
case 10
N = 2;
DiffOp{1} = f{5};
DiffOp{2} = f{6};
case 11
DiffOp{1} = f{6};
DiffOp{2} = [];
N = 1;
case 12
N = 2;
DiffOp{1} = f{6};
DiffOp{2} = f{7};
case 13
N = 1;
DiffOp{1} = f{7};
DiffOp{2} = [];
case 14
N = 2;
DiffOp{1} = f{7};
DiffOp{2} = f{8};
case 15
DiffOp{1} = f{8};
DiffOp{2} = [];
N = 1;
case 16
N = 2;
DiffOp{1} = f{8};
DiffOp{2} = f{1};
end
end
function [Kernel N] = IF3_GetMoveKernel16(dir)
Kernel = cell(2,1);
f{1} = [0 0 0;
0 0 1;
0 0 0];
f{2} = [0 0 1;
0 0 0;
0 0 0];
f{3} = [0 1 0;
0 0 0;
0 0 0];
f{4} = [1 0 0;
0 0 0;
0 0 0];
f{5} = [0 0 0;
1 0 0;
0 0 0];
f{6} = [0 0 0;
0 0 0;
1 0 0];
f{7} = [0 0 0;
0 0 0;
0 1 0];
f{8} = [0 0 0;
0 0 0;
0 0 1];
switch dir
case 1
N = 1;
Kernel{1} = f{1};
Kernel{2} = [];
case 2
N = 2;
Kernel{1} = f{1};
Kernel{2} = f{2};
case 3
N = 1;
Kernel{1} = f{2};
Kernel{2} = [];
case 4
N = 2;
Kernel{1} = f{2};
Kernel{2} = f{3};
case 5
N = 1;
Kernel{1} = f{3};
Kernel{2} = [];
case 6
N = 2;
Kernel{1} = f{3};
Kernel{2} = f{4};
case 7
N = 1;
Kernel{1} = f{4};
Kernel{2} = [];
case 8
N = 2;
Kernel{1} = f{4};
Kernel{2} = f{5};
case 9
N = 1;
Kernel{1} = f{5};
Kernel{2} = [];
case 10
N = 2;
Kernel{1} = f{5};
Kernel{2} = f{6};
case 11
Kernel{1} = f{6};
Kernel{2} = [];
N = 1;
case 12
N = 2;
Kernel{1} = f{6};
Kernel{2} = f{7};
case 13
N = 1;
Kernel{1} = f{7};
Kernel{2} = [];
case 14
N = 2;
Kernel{1} = f{7};
Kernel{2} = f{8};
case 15
Kernel{1} = f{8};
Kernel{2} = [];
N = 1;
case 16
N = 2;
Kernel{1} = f{8};
Kernel{2} = f{1};
end
end
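%A minimal usage sketch (illustration only; the file name is hypothetical):
%zooming 4 with Gau_sigma 1.6 (or 3 with 1.2) follows the repository convention.
%Note that the figure-saving block above writes into a hard-coded PaperWriting
%folder, which must exist for the call to finish.
img_y_demo = im2double(imread('lr_face_example.png'));
img_smooth_demo = F27a_SmoothnessPreserving(img_y_demo, 4, 1.6);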
|
github
|
Liusifei/Face-Hallucination-master
|
U27_CreateSymphonyFile.m
|
.m
|
Face-Hallucination-master/Code/Ours2/U27_CreateSymphonyFile.m
| 259 |
utf_8
|
66156ab38fd172eb5aa7e15ac5306e17
|
%Chih-Yuan Yang
%09/16/12
%To run Glasner's algorithm in parallel
function U27_CreateSymphonyFile(fn_create,iiend,filenamelist)
fid = fopen(fn_create,'w+');
for i=1:iiend
fprintf(fid,'%05d %s 0\n',i,filenamelist{i});
end
fclose(fid);
end
|
github
|
Liusifei/Face-Hallucination-master
|
U27b_CreateSemaphoreFile_IndexOnly_NoFileName.m
|
.m
|
Face-Hallucination-master/Code/Ours2/U27b_CreateSemaphoreFile_IndexOnly_NoFileName.m
| 214 |
utf_8
|
d5ad9fa32410a0809a0c6e6c89a3a2a6
|
%Chih-Yuan Yang
%3/22/13
%
function U27b_CreateSemaphoreFile_IndexOnly_NoFileName(fn_create,iiend)
fid = fopen(fn_create,'w+');
for i=1:iiend
fprintf(fid,'%05d 0\n',i);
end
fclose(fid);
end
|
github
|
Liusifei/Face-Hallucination-master
|
F11c_BackProjection_GaussianKernel.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F11c_BackProjection_GaussianKernel.m
| 2,214 |
utf_8
|
55512b6425f55cb53110c116ec2c6d72
|
%Chih-Yuan Yang
%09/28/12
%Change from square kernel to Gaussian Kernel
function img_bp = F11c_BackProjection_GaussianKernel(img_lr, img_hr, Gau_sigma, iternum,bReport)
[h_hr] = size(img_hr,1);
[h_lr] = size(img_lr,1);
zooming = h_hr/h_lr;
for i=1:iternum
img_lr_gen = F19a_GenerateLRImage_GaussianKernel(img_hr,zooming,Gau_sigma);
diff_lr = img_lr - img_lr_gen;
term_diff_lr_SSD = sum(sum(diff_lr.^2));
diff_hr = IF5_Upsample(diff_lr,zooming, Gau_sigma);
%diff_hr = imresize(diff_lr,zooming,'bilinear');
img_hr = img_hr + diff_hr;
img_lr_new = F19a_GenerateLRImage_GaussianKernel(img_hr,zooming,Gau_sigma);
diff_lr_new = img_lr - img_lr_new;
term_diff_lr_SSD_afteronebackprojection = sum(sum(diff_lr_new.^2));
if bReport
fprintf('backproject iteration=%d, term_before=%0.3f, term_after=%0.3f\n', ...
i,term_diff_lr_SSD,term_diff_lr_SSD_afteronebackprojection);
end
end
img_bp = img_hr;
end
function diff_hr = IF5_Upsample(diff_lr,zooming, Gau_sigma)
[h w] = size(diff_lr);
h_hr = h*zooming;
w_hr = w*zooming;
upsampled = zeros(h_hr,w_hr);
if zooming == 3
for rl = 1:h
rh = (rl-1) * zooming + 2;
for cl = 1:w
ch = (cl-1) * zooming + 2;
upsampled(rh,ch) = diff_lr(rl,cl);
end
end
kernel = Sigma2Kernel(Gau_sigma);
diff_hr = imfilter(upsampled,kernel,'replicate');
elseif zooming == 4
%compute the kernel by ourself, assuming the range is
%control the kernel and the position of the diff
kernelsize = ceil(Gau_sigma * 3)*2+2; %+2 this is the even number
kernel = fspecial('gaussian',kernelsize,Gau_sigma);
%subsample diff_lr to (3,3), because of the result of imfilter
for rl = 1:h
rh = (rl-1) * zooming + 3;
for cl = 1:w
ch = (cl-1) * zooming + 3;
upsampled(rh,ch) = diff_lr(rl,cl);
end
end
diff_hr = imfilter(upsampled, kernel,'replicate');
else
error('not processed');
end
end
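%A minimal usage sketch (illustration only; the file name is hypothetical): the
%initial HR estimate is commonly a bicubic upsampling of the LR input, and ten
%iterations with sigma 1.6 follow the 4x convention used elsewhere in this repository.
img_lr_demo = im2double(imread('lr_face_example.png'));
img_hr_init_demo = imresize(img_lr_demo, 4);
img_bp_demo = F11c_BackProjection_GaussianKernel(img_lr_demo, img_hr_init_demo, 1.6, 10, true);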
|
github
|
Liusifei/Face-Hallucination-master
|
F37i_GetTexturePatchMatch_SingleFor.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F37i_GetTexturePatchMatch_SingleFor.m
| 10,640 |
utf_8
|
cc8acd1dc21c233f921a826293d77cbd
|
%Chih-Yuan Yang
%2/3/15
%Use patchmatch to retrieve a texture background
%F37h: This file is updated from F37f to support the scaling factor of 3.
%F37i: Now I run the code on Linux machines where parfor is unstable, so the parfor is changed to a plain for loop
function [gradients_texture, img_texture, img_texture_backprojection] = F37i_GetTexturePatchMatch_SingleFor(img_y, ...
hrexampleimages, lrexampleimages, landmarks_test, rawexamplelandmarks)
%parameter
numberofHcandidate = 10;
%start
[h_lr, w_lr, exampleimagenumber] = size(lrexampleimages);
[h_hr, w_hr, ~] = size(hrexampleimages);
scalingfactor = floor(h_hr/h_lr);
if scalingfactor == 4
Gau_sigma = 1.6;
elseif scalingfactor == 3
Gau_sigma = 1.2;
end
alignedexampleimage_hr = zeros(h_hr,w_hr,exampleimagenumber,'uint8'); %set as uint8 to reduce memory demand
alignedexampleimage_lr = zeros(h_lr,w_lr,exampleimagenumber);
disp('align images');
set = 28:48; %eyes and nose
basepoints = landmarks_test(set,:);
inputpoints = rawexamplelandmarks(set,:,:);
for k=1:exampleimagenumber
alignedexampleimage_hr(:,:,k) = F18_AlignExampleImageByLandmarkSet(hrexampleimages(:,:,k),inputpoints(:,:,k),basepoints);
%F19 automatically convert uint8 input to double
alignedexampleimage_lr(:,:,k) = F19c_GenerateLRImage_GaussianKernel(alignedexampleimage_hr(:,:,k),scalingfactor,Gau_sigma);
end
cores = 2; % Use more cores for more speed
if cores==1
algo = 'cpu';
else
algo = 'cputiled';
end
patchsize_lr = 5;
nn_iters = 5;
A = repmat(img_y,[1 1 3]);
testnumber = exampleimagenumber;
xyandl2norm = zeros(h_lr,w_lr,3,testnumber,'int32');
disp('patchmatching');
for i=1:testnumber;
%run patchmatch
B = repmat(alignedexampleimage_lr(:,:,i),[1 1 3]);
xyandl2norm(:,:,:,i) = nnmex(A, B, algo, patchsize_lr, nn_iters, [], [], [], [], cores); %the returned array is int32
end
l2norm_double = double(xyandl2norm(:,:,3,:));
[sortedl2norm, ix] = sort(l2norm_double,4);
hrpatchextractdata = zeros(h_lr-patchsize_lr+1,w_lr-patchsize_lr+1,numberofHcandidate,3); %ii,r_lr_src,c_lr_src
%here
hrpatchsimilarity = zeros(h_lr-patchsize_lr+1,w_lr-patchsize_lr+1,numberofHcandidate);
parameter_l2normtosimilarity = 625;
for rl = 1:h_lr-patchsize_lr+1
for cl = 1:w_lr-patchsize_lr+1
for k=1:numberofHcandidate
knnidx = ix(rl,cl,1,k);
x = xyandl2norm(rl,cl,1,knnidx); %start from 0
y = xyandl2norm(rl,cl,2,knnidx);
clsource = x+1;
rlsource = y+1;
hrpatchextractdata(rl,cl,k,:) = reshape([knnidx rlsource clsource],[1 1 1 3]);
hrpatchsimilarity(rl,cl,k) = exp(-sortedl2norm(rl,cl,1,knnidx)/parameter_l2normtosimilarity);
end
end
end
hrpatch = F39_ExtractAllHrPatches(patchsize_lr,scalingfactor, hrpatchextractdata,alignedexampleimage_hr);
hrpatch = F40_CompensateHRpatches(hrpatch, img_y, scalingfactor, hrpatchextractdata,alignedexampleimage_lr);
%mostsimilarinputpatchrecord = IF2_SearchForSelfSimilarPatchesL2Norm(img_y,patchsize_lr);
%hrpatch_filtered = IF3_SimilarityFilter(hrpatch,hrpatchsimilarity,mostsimilarinputpatchrecord);
%img_texture = IF4_BuildHRimagefromHRPatches(hrpatch_filtered,scalingfactor);
img_texture = IF4_BuildHRimagefromHRPatches(hrpatch,scalingfactor);
iternum = 1000;
Tolf = 0.0001;
breport = false;
disp('backprojection for img_texture');
img_texture_backprojection = F11e_BackProjection_GaussianKernel(img_y, img_texture, Gau_sigma, iternum,breport,Tolf);
%extract the graident
gradients_texture = F14_Img2Grad(img_texture_backprojection);
end
function img_texture = IF4_BuildHRimagefromHRPatches(hrpatch,scalingfactor)
%reconstruct the high-resolution image
patchsize_hr = size(hrpatch,1);
patchsize_lr = patchsize_hr/scalingfactor;
h_lr = size(hrpatch,3) + patchsize_lr - 1;
w_lr = size(hrpatch,4) + patchsize_lr - 1;
h_expected = h_lr * scalingfactor;
w_expected = w_lr * scalingfactor;
img_texture = zeros(h_expected,w_expected);
if scalingfactor == 4
transfer_region_r = 9:12; %This is the region of the central 4x4 pixels of a 20x20 patch
transfer_region_c = 9:12;
elseif scalingfactor == 3
transfer_region_r = 7:9; %This is the region of the central 3x3 pixels of a 15x15 patch
transfer_region_c = 7:9;
end
% Central region.
rpixelshift = 2; %this should be modified according to patchsize_lr
cpixelshift = 2;
for rl = 2:h_lr - patchsize_lr
rh_begin_idx = (rl-1+rpixelshift)*scalingfactor+1;
rh_end_idx = rh_begin_idx+scalingfactor-1;
for cl = 2:w_lr - patchsize_lr
ch_begin_idx = (cl-1+cpixelshift)*scalingfactor+1;
ch_end_idx = ch_begin_idx+scalingfactor-1;
usedhrpatch = hrpatch(:,:,rl,cl);
img_texture(rh_begin_idx:rh_end_idx,ch_begin_idx:ch_end_idx) = usedhrpatch(transfer_region_r,transfer_region_c);
end
end
%left
if scalingfactor == 4
transfer_region_r = 9:12; %This is a region of the left 4x12 pixels of a 20x20 patch
transfer_region_c = 1:12;
elseif scalingfactor == 3
transfer_region_r = 7:9; %This is a region of the left 3x9 pixels of a 15x15 patch
transfer_region_c = 1:9;
end
cl = 1;
ch = 1;
ch1 = ch+3*scalingfactor-1;
for rl=2:h_lr-patchsize_lr
rh = (rl-1+rpixelshift)*scalingfactor+1;
rh1 = rh+scalingfactor-1;
usedhrpatch = hrpatch(:,:,rl,cl);
img_texture(rh:rh1,ch:ch1) = usedhrpatch(transfer_region_r,transfer_region_c);
end
%right
if scalingfactor == 4
transfer_region_r = 9:12; %This is a region of the right 4x12 pixels of a 20x20 patch
transfer_region_c = 9:20;
elseif scalingfactor == 3
transfer_region_r = 7:9; %This is a region of the right 3x9 pixels of a 15x15 patch
transfer_region_c = 7:15;
end
cl = w_lr - patchsize_lr+1;
ch = w_expected - 3*scalingfactor+1;
ch1 = w_expected;
for rl=2:h_lr-patchsize_lr
rh = (rl-1+rpixelshift)*scalingfactor+1;
rh1 = rh+scalingfactor-1;
usedhrpatch = hrpatch(:,:,rl,cl);
img_texture(rh:rh1,ch:ch1) = usedhrpatch(transfer_region_r,transfer_region_c);
end
%top
if scalingfactor == 4
transfer_region_r = 1:12; %This is a region of the top 12x4 pixels of a 20x20 patch
transfer_region_c = 9:12;
elseif scalingfactor == 3
transfer_region_r = 1:9; %This is a region of the top 9x3 pixels of a 15x15 patch
transfer_region_c = 7:9;
end
rl = 1;
rh = 1;
rh1 = rh+3*scalingfactor-1;
for cl=2:w_lr-patchsize_lr
ch = (cl-1+cpixelshift)*scalingfactor+1;
ch1 = ch+scalingfactor-1;
usedhrpatch = hrpatch(:,:,rl,cl);
img_texture(rh:rh1,ch:ch1) = usedhrpatch(transfer_region_r,transfer_region_c);
end
%bottom
if scalingfactor == 4
transfer_region_r = 9:20; %This is a region of the bottom 12x4 pixels of a 20x20 patch
transfer_region_c = 9:12;
elseif scalingfactor == 3
transfer_region_r = 7:15; %This is a region of the bottom 9x3 pixels of a 15x15 patch
transfer_region_c = 7:9;
end
rl = h_lr-patchsize_lr+1;
rh = h_expected - 3*scalingfactor+1;
rh1 = h_expected;
for cl=2:w_lr-patchsize_lr
ch = (cl-1+cpixelshift)*scalingfactor+1;
ch1 = ch+scalingfactor-1;
usedhrpatch = hrpatch(:,:,rl,cl);
img_texture(rh:rh1,ch:ch1) = usedhrpatch(transfer_region_r,transfer_region_c);
end
%left-top corner
if scalingfactor == 4
transfer_region_r = 1:12; %This is a region of the left-top 12x12 pixels of a 20x20 patch
transfer_region_c = 1:12;
elseif scalingfactor == 3
transfer_region_r = 1:9; %This is a region of the left-top 9x9 pixels of a 15x15 patch
transfer_region_c = 1:9;
end
rl=1;
cl=1;
rh = 1;
rh1 = rh+3*scalingfactor-1;
ch = 1;
ch1 = ch+3*scalingfactor-1;
usedhrpatch = hrpatch(:,:,rl,cl);
img_texture(rh:rh1,ch:ch1) = usedhrpatch(transfer_region_r,transfer_region_c);
%right-top corner
if scalingfactor == 4
transfer_region_r = 1:12; %This is a region of the right-top 12x12 pixels of a 20x20 patch
transfer_region_c = 9:20;
elseif scalingfactor == 3
transfer_region_r = 1:9; %This is a region of the right-top 9x9 pixels of a 15x15 patch
transfer_region_c = 7:15;
end
rl=1;
cl=w_lr-patchsize_lr+1;
rh = (rl-1)*scalingfactor+1;
rh1 = rh+3*scalingfactor-1;
ch = (cl-1+cpixelshift)*scalingfactor+1;
ch1 = ch+3*scalingfactor-1;
usedhrpatch = hrpatch(:,:,rl,cl);
img_texture(rh:rh1,ch:ch1) = usedhrpatch(transfer_region_r,transfer_region_c);
%left-bottom corner
if scalingfactor == 4
transfer_region_r = 9:20; %This is a region of the left-bottom 12x12 pixels of a 20x20 patch
transfer_region_c = 1:12;
elseif scalingfactor == 3
transfer_region_r = 7:15; %This is a region of the left-bottom 9x9 pixels of a 15x15 patch
transfer_region_c = 1:9;
end
rl=h_lr-patchsize_lr+1;
cl=1;
rh = (rl-1+rpixelshift)*scalingfactor+1;
rh1 = rh+3*scalingfactor-1;
ch = (cl-1)*scalingfactor+1;
ch1 = ch+3*scalingfactor-1;
usedhrpatch = hrpatch(:,:,rl,cl);
img_texture(rh:rh1,ch:ch1) = usedhrpatch(transfer_region_r,transfer_region_c);
%right-bottom corner
if scalingfactor == 4
transfer_region_r = 9:20; %This is a region of the right-bottom 12x12 pixels of a 20x20 patch
transfer_region_c = 9:20;
elseif scalingfactor == 3
transfer_region_r = 7:15; %This is a region of the right-bottom 9x9 pixels of a 15x15 patch
transfer_region_c = 7:15;
end
rl=h_lr-patchsize_lr+1;
cl=w_lr-patchsize_lr+1;
rh = (rl-1+rpixelshift)*scalingfactor+1;
rh1 = rh+3*scalingfactor-1;
ch = (cl-1+cpixelshift)*scalingfactor+1;
ch1 = ch+3*scalingfactor-1;
usedhrpatch = hrpatch(:,:,rl,cl);
img_texture(rh:rh1,ch:ch1) = usedhrpatch(transfer_region_r,transfer_region_c);
end
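%A minimal usage sketch (illustration only): all five inputs are assumed to exist
%in the workspace with the repository's conventions: a grayscale LR test image,
%the HR example stack with its LR counterpart, and landmark arrays of (x,y) rows
%of which only rows 28:48 (eyes and nose) are used; nnmex comes from the bundled
%PatchMatch code.
[grad_tex_demo, img_tex_demo, img_tex_bp_demo] = F37i_GetTexturePatchMatch_SingleFor( ...
    img_y, hrexampleimages, lrexampleimages, landmarks_test, rawexamplelandmarks);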
|
github
|
Liusifei/Face-Hallucination-master
|
F13_TestNewBackProjection.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F13_TestNewBackProjection.m
| 593 |
utf_8
|
283841ca532ea0ca079b5b3adf26f1db
|
%09/13/12
%Chih-Yuan Yang
function img_out = F13_TestNewBackProjection(img_lr, img_hr, Gau_sigma, iternum)
img_initial = img_hr;
img_out = img_initial; %iternum may be 0
for i=1:iternum
%img_lr_gen = U3_GenerateLRImage_BlurSubSample(img_hr,zooming,Gau_sigma);
gradients_old = F14_Img2Grad(img_hr);
LoopNumber = 30;
beta0 = 2^(i-1);
beta1 = 1;
bReport = true;
img_out = F4_GenerateIntensityFromGradient(img_lr,img_initial,gradients_old,Gau_sigma,LoopNumber,beta0,beta1,bReport);
img_initial = img_out;
end
end
|
github
|
Liusifei/Face-Hallucination-master
|
F40_ExtractAllHrPatches.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F40_ExtractAllHrPatches.m
| 2,490 |
utf_8
|
4703805b0d3fd12ba361d750a76e2f14
|
%Chih-Yuan Yang
%10/07/12
%Separate the internal function into an external one
function hrpatch = F40_ExtractAllHrPatches(patchsize_lr,zooming,hrpatchextractdata,allHRexampleimages)
%question: if the hrpatch does not need compensation, the input parameters img_y and allLRexampleimages can be removed
%in:
%hrpatchextractdata: (h_lr-patchsize_lr+1) x (w_lr-patchsize_lr+1) x numberofHcandidate * 3
%the last 3 dim: ii, r_lr_src, c_lr_src
disp('extracting HR patches');
patchsize_hr = patchsize_lr * zooming;
[h_lr_active, w_lr_active, numberofHcandidate, ~] = size(hrpatchextractdata);
hrpatch = zeros(patchsize_hr,patchsize_hr,3,h_lr_active,w_lr_active,numberofHcandidate);
%analyze which images need to be loaded
alliiset = hrpatchextractdata(:,:,:,1);
alliiset_uni = unique(alliiset(:));
for i = 1:length(alliiset_uni)
ii = alliiset_uni(i);
fprintf('extracting image in the function F40 %d\n',ii);
exampleimage_hr = im2double(allHRexampleimages(:,:,:,ii));
match_4D = alliiset == ii;
match_3D = reshape(match_4D,h_lr_active,w_lr_active,numberofHcandidate); %remove the last dimension
[rlset clandkset] = find(match_3D);
setsize = length(rlset);
for j = 1:setsize
rl = rlset(j);
clandklinearindex = clandkset(j);
%the relationship: clandklinearindex = (k-1) * w_lr_active + cl
k = floor( (clandklinearindex-1)/w_lr_active) +1; %with cl in (1, w_lr_active)
cl = clandklinearindex - (k-1)*w_lr_active;
sr = hrpatchextractdata(rl,cl,k,2);
sc = hrpatchextractdata(rl,cl,k,3);
srh = (sr-1)*zooming+1;
srh1 = srh + patchsize_hr -1;
sch = (sc-1)*zooming+1;
sch1 = sch + patchsize_hr-1;
%compensate the HR patch to match the LR query patch
hrp = exampleimage_hr(srh:srh1,sch:sch1,:); %HR patch
%lrq = img_y(rl:rl+patchsize_lr-1,cl:cl+patchsize_lr-1); %LR query patch
%lrr = exampleimage_lr(sr:sr+patchsize_lr-1,sc:sc+patchsize_lr-1); %LR retrieved patch
%the imresize make the process very slow
%chrp = hrp + imresize(lrq - lrr,zooming,'bilinear'); %compensate HR patch
%hrpatch(:,:,rl,cl,k) = chrp;
hrpatch(:,:,:,rl,cl,k) = hrp;
end
end
end
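%A small worked example of the LR-to-HR coordinate mapping above (illustration
%only; the numbers are hypothetical): an LR source row sr maps to HR rows
%(sr-1)*zooming+1 through (sr-1)*zooming+patchsize_hr.
sr_demo = 10; zooming_demo = 4; patchsize_hr_demo = 5*zooming_demo;
srh_demo  = (sr_demo-1)*zooming_demo + 1;          %37
srh1_demo = srh_demo + patchsize_hr_demo - 1;      %56, so the patch covers HR rows 37:56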
|
github
|
Liusifei/Face-Hallucination-master
|
U21_DrawLandmarks.m
|
.m
|
Face-Hallucination-master/Code/Ours2/U21_DrawLandmarks.m
| 821 |
utf_8
|
6046e1df7e14273c2505bea2c01539b3
|
%09/10/12
%
function U21_DrawLandmarks(im, boxes, posemap,bshownumbers,bdrawpose)
% showboxes(im, boxes)
% Draw boxes on top of image.
imshow(im);
hold on;
axis image;
axis off;
for b = boxes,
partsize = b.xy(1,3)-b.xy(1,1)+1;
tx = (min(b.xy(:,1)) + max(b.xy(:,3)))/2;
ty = min(b.xy(:,2)) - partsize/2;
if bdrawpose
text(tx,ty, num2str(posemap(b.c)),'fontsize',18,'color','c');
end
for i = size(b.xy,1):-1:1;
x1 = b.xy(i,1);
y1 = b.xy(i,2);
x2 = b.xy(i,3);
y2 = b.xy(i,4);
%line([x1 x1 x2 x2 x1]', [y1 y2 y2 y1 y1]', 'color', 'b', 'linewidth', 1);
plot((x1+x2)/2,(y1+y2)/2,'r.','markersize',9);
if bshownumbers
text((x1+x2)/2,(y1+y2)/2, num2str(i), 'fontsize',9,'color','k');
end
end
end
drawnow;
|
github
|
Liusifei/Face-Hallucination-master
|
T1_shiftblock.m
|
.m
|
Face-Hallucination-master/Code/Ours2/T1_shiftblock.m
| 3,847 |
utf_8
|
cfb7142784ab5b0c4f9bd552452392ec
|
% SHIFTBLOCK output the shift block structures
% jpeg: input jpeg data structure
% sftblk: output shifted block structure, including:
% vdb: coefficients of vertical shifted block;
% hdb: coefficients of horizontal shifted block;
% Vvars: spatial domain of step component;
% Vcons: spatial domain of signal containing no step function;
% Vtv: total variation in vertical direction;
% Htv: total variation in horizontal direction;
% Only the y channel is processed in this version
% Sifei Liu, 21/03/2013
function sftblk = T1_shiftblock(jpeg)
% get y channel
if isstruct(jpeg)
image = rgb2ycbcr(im2double(jpeg.image));
else
image = repmat(jpeg,[1,1,3]);
end
[h,w,~] = size(image);
r = mod(size(image,1),8);
c = mod(size(image,2),8);
if r~=0
image = [image;repmat(image(end,:,:),[8-r,1,1])];
end
if c~=0
image = [image,repmat(image(:,end,:),[1,8-c,1])];
end
% vertical db
% y_v = image(:,5:end-4,1) * 255;
% y_v = y_v - 128;
y_v = image(:,5:end-4,1);
sftblk.Vdb = bdct(y_v);
% vertical mask
vmask = zeros(8);
vmask(1,2:2:end) = 1; %vmask(2:2:end,1) = 1;
% vmask(1,1)=1;
% vcmask = ones(8) - vmask;
vmask(1,2:2:end) = [ -0.9061 0.3182 -0.2126 0.1802];
% image layers
[m,n] = size(sftblk.Vdb);
% % ====== new version ======
vars_beta = sftblk.Vdb .* repmat(vmask,size(sftblk.Vdb)/8);
vars_beta = vars_beta * kron(eye(size(sftblk.Vdb,2)/8),ones(8,1));
vars_beta = vars_beta(1:8:end,:);
vmask(1,2:2:end) = 1;
dct_vars = (sftblk.Vdb .* repmat(vmask,size(sftblk.Vdb)/8)).*...
kron(vars_beta,ones(8));
sftblk.Vvars = ibdct(dct_vars);
dct_cons = sftblk.Vdb - dct_vars;
sftblk.Vcons = ibdct(dct_cons);
% % =========================
sftblk.Vcons = [repmat(sftblk.Vcons(:,1),1,4),sftblk.Vcons,repmat(sftblk.Vcons(:,end),1,4)];
sftblk.Vvars = [repmat(sftblk.Vvars(:,1),1,4),sftblk.Vvars,repmat(sftblk.Vvars(:,end),1,4)];
sftblk.Vcons = sftblk.Vcons(1:h,1:w);
sftblk.Vvars = sftblk.Vvars(1:h,1:w);
% sftblk.Vvars = ibdct(sftblk.Vdb .* repmat(vmask,size(sftblk.Vdb)/8));
% sftblk.Vvars = (sftblk.Vvars + 48) / 255;
% sftblk.Vcons = ibdct(sftblk.Vdb .* repmat(vcmask,size(sftblk.Vdb)/8));
% sftblk.Vcons = (sftblk.Vcons + 80) / 255;
% % sum along y axies for each block
% m_ax = kron(eye(m/8),ones(1,8));
% m_ay = kron(eye(n/8),[ones(3,1);100;ones(4,1)]);
% y_ax = m_ax * sftblk.Vvars;
% sftblk.Vtv = abs(y_ax - [y_ax(:,2:end),y_ax(:,end)]) * m_ay;
%
% % horizontal db
y_h = image(5:end-4,:,1);
% y_h = image(5:end-4,:,1) * 255;
% y_h = y_h - 125.5;
sftblk.Hdb = bdct(y_h);
% % horizontal mask
hmask = zeros(8);
hmask(2:2:end,1) = [ -0.9061 0.3182 -0.2126 0.1802]';
% % ====== new version ======
vars_beta = sftblk.Hdb .* repmat(hmask,size(sftblk.Hdb)/8);
vars_beta = kron(eye(size(sftblk.Hdb,1)/8),ones(1,8)) * vars_beta;
vars_beta = vars_beta(:,1:8:end);
hmask(2:2:end,1) = 1;
dct_vars = (sftblk.Hdb .* repmat(hmask,size(sftblk.Hdb)/8)).*...
kron(vars_beta,ones(8));
sftblk.Hvars = ibdct(dct_vars);
dct_cons = sftblk.Hdb - dct_vars;
sftblk.Hcons = ibdct(dct_cons);
% % =========================
sftblk.Hcons = [repmat(sftblk.Hcons(1,:),4,1);sftblk.Hcons;repmat(sftblk.Hcons(end,:),4,1)];
sftblk.Hvars = [repmat(sftblk.Hvars(1,:),4,1);sftblk.Hvars;repmat(sftblk.Hvars(end,:),4,1)];
sftblk.Hcons = sftblk.Hcons(1:h,1:w);
sftblk.Hvars = sftblk.Hvars(1:h,1:w);
% hcmask = ones(8) - hmask;
% % image layers
% sftblk.Hvars = ibdct(sftblk.Hdb .* repmat(hmask,size(sftblk.Hdb)/8));
% sftblk.Hvars = (sftblk.Hvars + 125.5) / 255;
% sftblk.Hcons = ibdct(sftblk.Hdb .* repmat(hcmask,size(sftblk.Hdb)/8));
% sftblk.Hcons = (sftblk.Hcons + 125.5) / 255;
% % sum along x axies for each block
% [m,n] = size(sftblk.Hvars);
% m_ax = kron(eye(m/8),[ones(1,3),5,ones(1,4)]);
% m_ay = kron(eye(n/8),ones(8,1));
% y_ay = sftblk.Hvars * m_ay;
% sftblk.Htv = m_ax * abs(y_ay - [y_ay(2:end,:);y_ay(end,:)]);
|
github
|
Liusifei/Face-Hallucination-master
|
F6d_RetriveImage_DrawFlowChart.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F6d_RetriveImage_DrawFlowChart.m
| 2,597 |
utf_8
|
9eafbd0e18b8ac33ab67131aee3ca1f8
|
%Chih-Yuan Yang
%07/20/14 I updated the function F19a to F19c.
%F6d: return the aligned images to draw the flowchart
function [retrievedhrimage, retrievedlrimage, retrievedidx, alignedexampleimage_hr, alignedexampleimage_lr] = ...
F6d_RetriveImage_DrawFlowChart(testimage_lr, ...
rawexampleimage, inputpoints, basepoints, mask_lr, zooming, Gau_sigma, glasslist, bglassavoid)
%the rawexampleimage should be double
if ~isa(rawexampleimage,'uint8')
error('wrong class');
end
[h_hr, w_hr, exampleimagenumber] = size(rawexampleimage);
[h_lr, w_lr] = size(testimage_lr);
%find the transform matrix by solving an optimization problem
alignedexampleimage_hr = zeros(h_hr,w_hr,exampleimagenumber,'uint8'); %set as uint8 to reduce memory demand
alignedexampleimage_lr = zeros(h_lr,w_lr,exampleimagenumber);
parfor i=1:exampleimagenumber
alignedexampleimage_hr(:,:,i) = F18b_AlignExampleImageByLandmarkSet(rawexampleimage(:,:,i),inputpoints(:,:,i),basepoints);
%F19 automatically convert uint8 input to double
alignedexampleimage_lr(:,:,i) = F19c_GenerateLRImage_GaussianKernel(alignedexampleimage_hr(:,:,i),zooming,Gau_sigma);
end
[r_set, c_set] = find(mask_lr);
top = min(r_set);
bottom = max(r_set);
left = min(c_set);
right = max(c_set);
area_test = im2double(testimage_lr(top:bottom,left:right));
area_mask = mask_lr(top:bottom,left:right);
area_test_aftermask = area_test .* area_mask;
%extract feature from the eyerange, the features are the gradient of LR eye region
feature_test = F24_ExtractFeatureFromArea(area_test_aftermask); %the unit is double
%search for the thousand example images to find the most similar eyerange
normvalue = zeros(exampleimagenumber,1);
parfor j=1:exampleimagenumber
examplearea_lr = alignedexampleimage_lr(top:bottom,left:right,j);
examplearea_lr_aftermask = examplearea_lr .* area_mask;
feature_example_lr = F24_ExtractFeatureFromArea(examplearea_lr_aftermask); %the unit is double
normvalue(j) = norm(feature_test - feature_example_lr);
end
%find the small norm
[sortnorm ix] = sort(normvalue);
%some of them are very similar
%only return the 1nn
if bglassavoid
for k=1:exampleimagenumber
if glasslist(ix(k)) == false
break
end
end
else
k =1;
end
retrievedhrimage = alignedexampleimage_hr(:,:,ix(k));
retrievedlrimage = alignedexampleimage_lr(:,:,ix(k));
retrievedidx = ix(k);
end
|
github
|
Liusifei/Face-Hallucination-master
|
T1_Img2Grad_Blockcompensate.m
|
.m
|
Face-Hallucination-master/Code/Ours2/T1_Img2Grad_Blockcompensate.m
| 2,086 |
utf_8
|
c5d81b3a52933b697b727de169c40b13
|
%This is Sifei's code to remove the blocky artifacts.
%The idea is to average the gradients of pixels along every 8x8 block boundary.
%What is the difference between Grad_o and Grad_v?
function Grad_o = T1_Img2Grad_Blockcompensate(img)
bz = 8;
sftblk = T1_shiftblock(img);
% normalization
% img = (img-min(min(img)))/(max(max(img))-min(min(img)));
% sftblk.Vcons = (sftblk.Vcons-min(min(sftblk.Vcons)))/(max(max(sftblk.Vcons))-min(min(sftblk.Vcons)));
% get gradient images
Grad_o = T1_Img2Grad(img);
Grad_v = T1_Img2Grad(sftblk.Vcons);
Grad_h = T1_Img2Grad(sftblk.Hcons);
dir_num = size(Grad_o,3);
% % ========== for debug ===========
% for k = 1:dir_num
% subplot(2,dir_num,k);imshow(Grad_o(:,:,k),[]);
% end
% % ========== for debug ===========
% replace vertical blocking
for m = 1:dir_num
% v8
% if ~isempty(find(m == [1 2 8],1))
for n = bz:bz:size(img,2)
% Grad_o(:,n,m) = (Grad_o(:,n-1,m)+Grad_o(:,n+1,m))/2;
% Grad_o(:,n,m) = Grad_v(:,n,m);
Grad_o(:,n,m) = (Grad_o(:,n-1,m)+Grad_o(:,n+1,m)+Grad_v(:,n,m))/3;
end
% end
% if ~isempty(find(m == [6 7 8],1))
% h8
for n = bz:bz:size(img,1)-bz/2
Grad_o(n,:,m) = (Grad_h(n,:,m) + Grad_o(n-1,:,m) + Grad_o(n+1,:,m))/3;
end
% end
% v9
% if ~isempty(find(m == [4 5 6],1))
for n = bz+1:bz:size(img,2)
% Grad_o(:,n,m) = (Grad_o(:,n-1,m)+Grad_o(:,n+1,m))/2;
% Grad_o(:,n,m) = Grad_v(:,n,m);
Grad_o(:,n,m) = (Grad_o(:,n-1,m)+Grad_o(:,n+1,m)+Grad_v(:,n,m))/3;
end
% end
% h9
% if ~isempty(find(m == [2 3 4],1))
for n = bz+1:bz:size(img,2)
% Grad_o(:,n,m) = (Grad_o(:,n-1,m)+Grad_o(:,n+1,m))/2;
% Grad_o(:,n,m) = Grad_v(:,n,m);
Grad_o(n,:,m) = (Grad_h(n,:,m) + Grad_o(n-1,:,m) + Grad_o(n+1,:,m))/3;
end
% end
end
% % ========== for debug ===========
% for k = 1:dir_num
% subplot(2,dir_num,dir_num+k);imshow(Grad_o(:,:,k),[]);
% end
% % ========== for debug ===========
|
github
|
Liusifei/Face-Hallucination-master
|
F31_AlignImages.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F31_AlignImages.m
| 624 |
utf_8
|
77d84815d8097ed2d39598c753d5f9d9
|
%Chih-Yuan Yang
%09/29/12
%as title
function alignedimages = F31_AlignImages(rawexampleimage, inputpoints, basepoints)
%the rawexampleimage should be uint8
if ~isa(rawexampleimage,'uint8')
error('wrong class');
end
exampleimagenumber = size(rawexampleimage,3);
%find the transform matrix by solving an optimization problem
alignedimages = zeros(480,640,exampleimagenumber,'uint8'); %set as uint8 to reduce memory demand
parfor i=1:exampleimagenumber
alignedimages(:,:,i) = F18_AlignExampleImageByLandmarkSet(rawexampleimage(:,:,i),inputpoints(:,:,i),basepoints);
end
end
|
github
|
Liusifei/Face-Hallucination-master
|
F10_CSHUpampling.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F10_CSHUpampling.m
| 3,639 |
utf_8
|
4c6ce81c759131078a8b3cf93717fdc0
|
%Chih-Yuan Yang
%09/11/12
%To solve the hair and background problem
function img_hr = F10_CSHUpampling(img_y, exampleimage_hr, zooming, Gau_sigma)
addpath(genpath(fullfile('Lib','CSH_code_v2')));
%set randseed
seed = RandStream('mcg16807','Seed',0);
RandStream.setGlobalStream(seed)
[h_hr w_hr imagenumber] = size(exampleimage_hr);
%generate LR images
parfor i=1:imagenumber
exampleimage_lr(:,:,i) = U3_GenerateLRImage_BlurSubSample(im2double(exampleimage_hr(:,:,i)),zooming,Gau_sigma);
end
width = 4;
iteration = 5;
nnk = 20;
[lh lw d] = size(im8);
recin = 20;
normcurr = zeros(lh,lw,nnk);
eh = h_lr-ps+1;
ew = w_lr-ps+1; %effective w
scanr = zeros(eh,ew,recin,4); %scan results, norm, ii, sr, sc
bigvalue = 255*width*width;
scanr(:,:,:,1) = bigvalue;
end
im8 = imread( para.SourceFile);
im8y = rgb2gray(im8);
%find the CSH nn
width = ps; %should I use 4 or 8?
iteration = 5;
nnk = 20;
[lh lw d] = size(im8);
recin = 20;
normcurr = zeros(lh,lw,nnk);
A = im8y;
ps = width;
eh = lh-ps+1;
ew = lw-ps+1; %effective w
scanr = zeros(eh,ew,recin,4); %scan results, norm, ii, sr, sc
bigvalue = 255*width*width;
scanr(:,:,:,1) = bigvalue;
iistart = para.iistart;
iiend = para.iiend;
for ii=iistart:iiend
%if ii == 2
% keyboard
%end
fn = sprintf('%05d.png',ii);
fprintf('csh fn: %s\n',fn);
ime = imread(fullfile(DatasetFolder,fn)); %the channel number is 1
B = ime;
idxhead = (ii-1)*nnk+1;
idxend = idxhead + nnk-1;
retres = CSH_nn(A,B,width,iteration,nnk); %x,y <==> c,r $retrived results
%dimension: w,h,2,nnk
for l = 1:nnk
colMap = retres(:,:,1,l);
rowMap = retres(:,:,2,l);
br_boundary_to_ignore = width -1;
%GetAnnError_GrayLevel_C1 is a funciton in CHS lab. It can compute very fast
normcurr(:,:,l) = GetAnnError_GrayLevel_C1(A,B,uint16(rowMap),uint16(colMap),uint16(0),uint16(br_boundary_to_ignore), uint16(width));
end
%update scanr
normcurrmin = min(normcurr,[],3);
checkmap = normcurrmin(1:eh,1:ew) < scanr(:,:,recin,1); %the last one has the largest norm
[rset cset] = find(checkmap);
setin = length(rset);
for j=1:setin
rl = rset(j);
cl = cset(j);
[normcurrsort ixcurr] = sort(normcurr(rl,cl,:));
for i=1:nnk
%update the smaller norm
compidx = recin-i+1;
if normcurrsort(i) < scanr(rl,cl,compidx,1)
%update
oriidx = ixcurr(i);
scanr(rl,cl,compidx,1) = normcurrsort(i);
scanr(rl,cl,compidx,2) = ii;
scanr(rl,cl,compidx,3) = retres(rl,cl,2,oriidx); %rowmap
scanr(rl,cl,compidx,4) = retres(rl,cl,1,oriidx); %colmap
else
break
end
end
%sort again the updated data
[normnewsort ixnew] = sort(scanr(rl,cl,:,1));
tempdata = scanr(rl,cl,:,:);
for i=1:recin
if ixnew(i) ~= i
scanr(rl,cl,i,:) = tempdata(1,1,ixnew(i),:);
end
end
end
end
sn = sprintf('%s_csh_scanr_%d_%d.mat',para.SaveName,iistart,iiend);
save(fullfile(para.tuningfolder,sn),'scanr');
|
github
|
Liusifei/Face-Hallucination-master
|
F4c_GenerateIntensityFromGradient.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F4c_GenerateIntensityFromGradient.m
| 4,531 |
utf_8
|
8f1bc8f4bba3afae27185acf447e8f30
|
%Chih-Yuan Yang
%09/29/12
%F4b: change first gradient from F19 to F19a, which uses a precise Gaussian kernel
%F4c: F4b is too slow, change the parameters
%Gradually increase the coefficient of the high-low term to approximate the constrained optimization problem
function img_out = F4c_GenerateIntensityFromGradient(img_y,img_initial,Grad_exp,Gau_sigma,bReport,loopnumber,totalupdatenumber,linesearchstepnumber)
if nargin <= 6
linesearchstepnumber = 4;
loopnumber = 4;
totalupdatenumber = 4;
end
beta0_initial = 1; %beginning
beta1_initial = 1;
zooming = size(img_initial,1)/size(img_y,1);
if zooming ~= floor(zooming)
error('zooming should be an integer');
end
I = img_initial;
term_intensity_check = zeros(linesearchstepnumber,1);
term_gradient_check = zeros(linesearchstepnumber,1);
[h w] = size(img_initial);
for updatenumber = 0:totalupdatenumber
beta0 = beta0_initial;
beta1 = beta1_initial * 0.5^updatenumber;
for loop = 1:loopnumber
%refine image by low-high intensity
img_lr_gen = F19a_GenerateLRImage_GaussianKernel(I,zooming,Gau_sigma);
diff_lr = img_lr_gen - img_y;
diff_hr = IF5_Upsample(diff_lr,zooming, Gau_sigma);
Grad0 = diff_hr;
%refine image by expected gradeint
%Gradient decent
OptDir = Grad_exp - F14_Img2Grad(I);
Grad1 = sum(OptDir,3);
Grad_all = beta0 * Grad0 + beta1 * Grad1;
I_in = I; %make a copy, restore the value if all beta fails
tau_initial = 1;
term_gradient_in = ComputeFunctionValue_Grad(I,Grad_exp);
term_intensity_in = F28_ComputeSquareSumLowHighDiff(I,img_y,Gau_sigma);
term_all_in = term_intensity_in * beta0 + term_gradient_in * beta1;
for line_search_step=1:linesearchstepnumber
tau = tau_initial * 0.5^(line_search_step-1);
I_check = I_in - tau * Grad_all;
term_gradient_check(line_search_step) = ComputeFunctionValue_Grad(I_check,Grad_exp);
term_intensity_check(line_search_step) = F28_ComputeSquareSumLowHighDiff(I_check,img_y,Gau_sigma);
end
term_all_check = term_intensity_check * beta0 + term_gradient_check * beta1;
[sortvalue ix] = sort(term_all_check);
if sortvalue(1) < term_all_in
%update
search_step_best = ix(1);
tau_best = tau_initial * 0.5^(search_step_best-1);
I_best = I_in - tau_best * Grad_all;
I = I_best; %assign the image for next loop
else
break;
end
if bReport
fprintf(['updatenumber=%d, loop=%d, all_in=%0.3f, all_out=%0.3f, Intensity_in=%0.3f, Intensity_out=%0.3f, ' ...
'Grad_in=%0.3f, Grad_out=%0.3f\n'],updatenumber, loop,term_all_in,sortvalue(1),term_intensity_in,...
term_intensity_check(ix(1)), term_gradient_in,term_gradient_check(ix(1)));
end
end
end
img_out = I_best;
end
function diff_hr = IF5_Upsample(diff_lr,zooming, Gau_sigma)
[h w] = size(diff_lr);
h_hr = h*zooming;
w_hr = w*zooming;
upsampled = zeros(h_hr,w_hr);
if zooming == 3
for rl = 1:h
rh = (rl-1) * zooming + 2;
for cl = 1:w
ch = (cl-1) * zooming + 2;
upsampled(rh,ch) = diff_lr(rl,cl);
end
end
kernel = Sigma2Kernel(Gau_sigma);
diff_hr = imfilter(upsampled,kernel,'replicate');
elseif zooming == 4
%compute the kernel by ourself, assuming the range is
%control the kernel and the position of the diff
kernelsize = ceil(Gau_sigma * 3)*2+2; %+2 this is the even number
kernel = fspecial('gaussian',kernelsize,Gau_sigma);
%subsample diff_lr to (3,3), because of the result of imfilter
for rl = 1:h
rh = (rl-1) * zooming + 3;
for cl = 1:w
ch = (cl-1) * zooming + 3;
upsampled(rh,ch) = diff_lr(rl,cl);
end
end
diff_hr = imfilter(upsampled, kernel,'replicate');
else
error('not processed');
end
end
function f = ComputeFunctionValue_Grad(img, Grad_exp)
Grad = F14_Img2Grad(img);
Diff = Grad - Grad_exp;
Sqrt = Diff .^2;
f = sqrt(sum(Sqrt(:)));
end
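%A minimal usage sketch (illustration only; the file name is hypothetical and the
%target gradient field is a trivial one built from the bicubic estimate itself):
%Grad_exp is an HxWx8 gradient field, and omitting the last three arguments keeps
%the function's default loop counts.
img_y_demo = im2double(imread('lr_face_example.png'));
img_init_demo = imresize(img_y_demo, 4);
Grad_exp_demo = F14_Img2Grad(img_init_demo);
img_out_demo = F4c_GenerateIntensityFromGradient(img_y_demo, img_init_demo, Grad_exp_demo, 1.6, true);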
|
github
|
Liusifei/Face-Hallucination-master
|
F16_ACCV12UpamplingBackProjectionOnTextureOnly.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F16_ACCV12UpamplingBackProjectionOnTextureOnly.m
| 43,630 |
utf_8
|
35bce9ddf165ff8accb3378a116dace3
|
%Chih-Yuan Yang
%09/18/12
%load sfall, srecall, allHRexampleimages before this function
function img_hr = F16_ACCV12UpamplingBackProjectionOnTextureOnly(img_y, zooming, Gau_sigma ,sfall,srecall,allHRexampleimages)
if zooming == 4
para.Gau_sigma = 1.6;
elseif zooming == 3
para.Gau_sigma = 1.2;
end
[img_edge reliablemap_edge] = F1_EdgePreserving(img_y,para,zooming,Gau_sigma);
[h_lr w_lr] = size(img_y);
para.lh = h_lr;
para.lw = w_lr;
para.NumberOfHCandidate = 10;
para.SimilarityFunctionSettingNumber = 1;
%load all data set to save loading time
[scanr scanra] = SearchExternalPatches(img_y,para,sfall,srecall);
para.zooming = zooming;
para.ps = 5;
para.Gau_sigma = Gau_sigma;
hrpatch = F8_ExtractAllHrPatches(img_y, para, scanr,allHRexampleimages);
[scanr_self scanra_self] = F22_SearchForSelfSimilarPatchesL2Norm(img_y,para);
para.ehrfKernelWidth = 1.0;
para.bEnablemhrf = true;
[img_texture reliablemap_texture] = F11_FilterOutImproperHrPatches(img_y,hrpatch,para,scanr_self,scanra_self,scanr,scanra);
%apply backprojection on img_texture only
iternum = 10;
img_texture_backproject = F11_BackProjection(img_y, img_texture, Gau_sigma, iternum);
nomi = img_texture_backproject.*reliablemap_texture + img_edge .* reliablemap_edge;
denomi = reliablemap_edge + reliablemap_texture;
img_hr = nomi ./ denomi;
%there are some 0 value of denomi around boundary
%fill these pixels as img_edge
nanpixels = isnan(img_hr);
img_hr(nanpixels) = img_edge(nanpixels);
%ensure there is no nan
if nnz(isnan(img_hr))
error('should not be here');
end
end
function [scanr scanra] = SearchExternalPatches(img_y,para,sfall,srecall)
%how to search in parallel to speed up?
ps = 5; %patch size
[lh lw] = size(img_y);
hrpatchnumber = 10;
%featurefolder = para.featurefolder;
sh = GetShGeneral(ps);
scanr = zeros(6,hrpatchnumber,lh-ps+1,lw-ps+1); %scan results, mm, quan, ii, sr, sc, similairty
smallvalue = -1;
scanr(6,:,:,:) = smallvalue;
scanra = zeros(lh-ps+1,lw-ps+1); %scan results active
%scanrsimmax = smallvalue * ones(lh-ps+1,lw-ps+1); %del this line?
quanarray = [1 2 4 8 16 32];
B = [256 128 64 32 16 8];
imlyi = im2uint8(img_y);
for qidx=1:6
quan = quanarray(qidx);
b = B(qidx);
cur_initial = floor(size(sfall{1},2)/2); %accelerate the loop by using an initial position
for rl=1:lh-ps+1
fprintf('look for lut rl:%d quan:%d\n',rl,quan);
for cl = 1:lw-ps+1
patch = imlyi(rl:rl+ps-1,cl:cl+ps-1);
fq = patch(sh);
if qidx == 1
fquan = fq;
else
fquan = fq - mod(fq,quan) + quan/2;
end
[iila mma] = LookForLookUpTable9_External(fquan,sfall{qidx},cur_initial,para); %index in lookuptable
in = length(iila); %always return 20 instance
for i=1:in
ii = srecall{qidx}(1,iila(i));
sr = srecall{qidx}(2,iila(i));
sc = srecall{qidx}(3,iila(i));
%check whether the patch is in the scanr already
bSamePatch = false;
for j=1:scanra(rl,cl)
if ii == scanr(3,j,rl,cl) && sr == scanr(4,j,rl,cl) && sc == scanr(5,j,rl,cl)
bSamePatch = true;
break
end
end
if bSamePatch == false
similarity = bmm2similarity(b,mma(i),para.SimilarityFunctionSettingNumber);
if scanra(rl,cl) < hrpatchnumber
ix = scanra(rl,cl) + 1;
%to do: update scanr by similarity
%need to double it, otherwise, the int6 will kill similarity
scanr(:,ix,rl,cl) = cat(1,mma(i),quan,double(ii),double(sr),double(sc),similarity);
scanra(rl,cl) = ix;
else
[minval ix] = min(scanr(6,:,rl,cl));
if scanr(6,ix,rl,cl) < similarity
%update
scanr(:,ix,rl,cl) = cat(1,mma(i),quan,double(ii),double(sr),double(sc),similarity);
end
end
end
end
end
end
end
end
function [iila mma] = LookForLookUpTable9_External(fq,lut,cur_initial,para)
hrpatchnumber = para.NumberOfHCandidate; %default 10
fl = length(fq); %feature length
head = 1;
tail = size(lut,2);
lutsize = size(lut,2);
if exist('cur_initial','var')
if cur_initial > lutsize
cur = lutsize;
else
cur = cur_initial;
end
else
cur = round(lutsize/2);
end
cur_rec1 = cur;
%initial comparison
fqsmaller = -1;
fqlarger = 1;
fqsame = 0;
cr = 0; %compare results
mm = 0;
mmiil = 0;
%search for the largest mm
while 1
for c=1:fl
if fq(c) < lut(c,cur)
cr = fqsmaller;
break
elseif fq(c) > lut(c,cur)
cr = fqlarger;
break; %c moves to next
else %equal
cr = fqsame;
if mm < c
mm = c;
mmiil = cur;
end
end
end
if cr == fqsmaller
next = floor((cur + head)/2);
tail = cur; %adjust the range of head and tail
elseif cr == fqlarger;
next = ceil((cur + tail)/2); %the round function has to be floor, because fq is larger than cur
%otherwise the fully 255 patches will never match
head = cur; %adjust the range of head and tail
end
if mm == 25 %it happens, the initial one match the fq, therefore, there is no next defined.
break
end
if cur == next || cur_rec1 == next %the next might oscilate
break;
else
cur_rec1 = cur;
cur = next;
end
%fprintf('cur %d\n',cur);
end
if mm == 0
iila = [];
mma = [];
return
end
%post-process to find the repeated partial vectors
%search for previous
idx = 1;
iila = zeros(hrpatchnumber,1);
mma = zeros(hrpatchnumber,1);
iila(idx) = mmiil;
mma(idx) = mm;
bprecontinue = true;
bproccontinue = true;
presh = 0; %previous shift
procsh = 0; %proceeding shift
while 1
presh = presh -1;
iilpre = mmiil + presh;
if iilpre <1
bprecontinue = false;
premm = 0;
end
procsh = procsh +1;
iilproc = mmiil + procsh;
if iilproc > lutsize
bproccontinue = false;
procmm = 0;
end
if bprecontinue
diff = lut(:,iilpre) ~= fq;
if nnz(diff) == 0
premm = 25;
else
premm = find(diff,1,'first') -1;
end
end
if bproccontinue
diff = lut(:,iilproc) ~= fq;
if nnz(diff) == 0
procmm = 25;
else
procmm = find(diff,1,'first') -1;
end
end
if premm == 0 && procmm == 0
break
end
if premm > procmm
%add pre item
idx = idx + 1;
iila(idx) = iilpre;
mma(idx) = premm;
%pause the proc
bprecontinue = true;
elseif premm < procmm
%add proc item
idx = idx + 1;
iila(idx) = iilproc;
mma(idx) = procmm;
%pause the pre
bproccontinue = true;
else %premm == procmm
%add both item
idx = idx + 1;
iila(idx) = iilpre;
mma(idx) = premm;
if idx == hrpatchnumber
break
end
idx = idx + 1;
iila(idx) = iilproc;
mma(idx) = procmm;
bproccontinue = true;
bprecontinue = true;
end
if idx == hrpatchnumber
break
end
end
if idx < hrpatchnumber
iila = iila(1:idx);
mma = mma(1:idx);
end
end
function s = bmm2similarity(b,mm,SimilarityFunctionSettingNumber)
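%b:  number of quantization bins of the matched table (256, 128, ..., 8)
%mm: number of leading feature elements matched in the lookup table (up to 25 for a 5x5 patch)
%s:  similarity in (0,1]; longer matches and finer quantization give higher similarity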
if SimilarityFunctionSettingNumber == 1
if mm >= 9
Smm = 0.9 + 0.1*(mm-9)/16;
else
Smm = 0.5 * mm/9;
end
Sb = 0.5+0.5*(log2(b)-3)/5;
s = Sb * Smm;
elseif SimilarityFunctionSettingNumber == 2
Smm = mm/25;
Sb = (log2(b)-2)/6;
s = Sb * Smm;
end
end
function hrpatch = F8_ExtractAllHrPatches(img_y, para, scanr,allHRexampleimages)
disp('extracting HR patches');
%how to search parallelly to speed up?
psh = para.ps * para.zooming;
ps = para.ps;
lh = para.lh;
lw = para.lw;
s = para.zooming;
hrpatchnumber = para.NumberOfHCandidate;
hrpatch = zeros(psh,psh,lh-ps+1,lw-ps+1,hrpatchnumber);
allimages = allHRexampleimages;
%analyze which images need to be loaded
alliiset = scanr(3,:,:,:);
alliiset_uni = unique(alliiset(:)); %almost all images are used, from 1 to 1500
if alliiset_uni(1) ~= 0
alliiset_uni_pure = alliiset_uni;
else
alliiset_uni_pure = alliiset_uni(2:end);
end
for i = 1:length(alliiset_uni_pure)
ii = alliiset_uni_pure(i);
exampleimage_hr = im2double(allimages(:,:,ii));
exampleimage_lr = U3_GenerateLRImage_BlurSubSample(exampleimage_hr,para.zooming,para.Gau_sigma);
match_4D = alliiset == ii;
match_3D = reshape(match_4D,hrpatchnumber,lh-ps+1,lw-ps+1); %remove the first dimension
[d1 d2 d3] = size(match_3D); %d2, the second dimension length, is needed below
[idxset posset] = find(match_3D);
setin = length(idxset);
for j = 1:setin
idx = idxset(j);
possum = posset(j);
pos3 = floor( (possum-1)/d2) +1; %the relationship: possum = (pos3-1) * d2 + pos2, pos2 in (1,d2)
pos2 = possum - (pos3-1)*d2;
rl = pos2;
cl = pos3;
sr = scanr(4,idx,rl,cl);
sc = scanr(5,idx,rl,cl);
srh = (sr-1)*s+1;
srh1 = srh + psh -1;
sch = (sc-1)*s+1;
sch1 = sch + psh-1;
%to do: compensate the HR patch to match the LR query patch
hrp = exampleimage_hr(srh:srh1,sch:sch1); %HR patch
lrq = img_y(rl:rl+ps-1,cl:cl+ps-1); %LR query patch
lrr = exampleimage_lr(sr:sr+ps-1,sc:sc+ps-1); %LR retrieved patch
chrp = hrp + imresize(lrq - lrr,s,'bilinear'); %compensate HR patch
hrpatch(:,:,rl,cl,idx) = chrp;
bVisuallyCheck = false;
if bVisuallyCheck
if ~exist('hfig','var')
hfig = figure;
else
figure(hfig);
end
subplot(1,4,1);
imshow(hrp/255);
title('hrp');
subplot(1,4,2);
imshow(lrr/255);
title('lrr');
subplot(1,4,3);
imshow(lrq/255);
title('lrq');
subplot(1,4,4);
imshow(chrp/255);
title('chrp');
keyboard
end
end
end
end
function [img_texture Reliablemap] = F11_FilterOutImproperHrPatches(img_y,hrpatch,para,scanr_self,scanra_self,scanr,scanra)
%filter out improper hr patches using similarity among lr patches
%load the self-similar data
s = para.zooming;
lh = para.lh;
lw = para.lw;
ps = para.ps;
psh = s * para.ps;
patcharea = para.ps^2;
SSnumberUpperbound = 10;
%do I still need these variables?
cqarray = zeros(32,1)/0;
for qidx = 1:6
quan = 2^(qidx-1);
cqvalue = 0.9^(qidx-1);
cqarray(quan) = cqvalue;
end
hh = lh * s;
hw = lw * s;
hrres_nomi = zeros(hh,hw);
hrres_deno = zeros(hh,hw);
maskmatrix = false(psh,psh,patcharea);
Reliablemap = zeros(hh,hw);
pshs = psh * psh;
for i=1:patcharea
[sh_notused masklow maskhigh] = GetShGeneral(ps,i,true,s); %ps, mm, bhigh, s
maskmatrix(:,:,i) = maskhigh;
end
mhr = zeros(5*s);
r1 = 2*s+1;
r2 = 3*s;
c1 = 2*s+1;
c2 = 3*s;
mhr(r1:r2,c1:c2) = 1; %the central part
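%mhr is a weight mask over the 5s x 5s HR patch that keeps only the s x s block corresponding
%to the central LR pixel; blurring it into mhrf below makes overlapping patch contributions
%blend smoothly when they are accumulated into hrres_nomi and hrres_deno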
sigma = para.ehrfKernelWidth;
kernel = Sigma2Kernel(sigma);
if para.bEnablemhrf
mhrf = imfilter(mhr,kernel,'replicate');
else
mhrf = mhr;
end
noHmap = scanra == 0;
noHmapToFill = noHmap;
NHOOD = [0 1 0;
1 1 1;
0 1 0];
se = strel('arbitrary',NHOOD);
noHmapneighbor = and( imdilate(noHmap,se) ,~noHmap);
%if the noHmapsever is 0, it is fine
imb = imresize(img_y,s); %use it as the reference if no F is available
rsa = [0 -1 0 1];
csa = [1 0 -1 0];
for rl= 1:lh-ps+1 %75
fprintf('rl:%d total:%d\n',rl,lh-ps+1);
rh = (rl-1)*s+1;
rh1 = rh+psh-1;
for cl = 1:lw-ps+1 %128
ch = (cl-1)*s+1;
ch1 = ch+psh-1;
%load candidates
hin = para.NumberOfHCandidate;
H = zeros(psh,psh,hin);
HSim = zeros(hin,1);
for j=1:hin
H(:,:,j) = hrpatch(:,:,rl,cl,j); %H
HSim(j) = scanr(6,j,rl,cl);
end
%compute the number of reference patches
sspin = min(SSnumberUpperbound,scanra_self(rl,cl));
%self similar patch instance number
F = zeros(ps,ps,sspin);
FSimPure = zeros(1,sspin);
rin = 0;
for i=1:sspin
sr = scanr_self(3,i,rl,cl);
sc = scanr_self(4,i,rl,cl);
%hr candidate number
rin = rin + para.NumberOfHCandidate;
F(:,:,i) = img_y(sr:sr+ps-1,sc:sc+ps-1);
FSimPure(i) = scanr_self(5,i,rl,cl);
end
%load all of the two step patches
R = zeros(psh,psh,rin);
mms = zeros(rin,1);
mmr = zeros(rin,1);
qs = zeros(rin,1);
qr = zeros(rin,1);
FSimBaseR = zeros(rin,1);
RSim = zeros(rin,1);
idx = 0;
if sspin > 0
for i=1:sspin %sspin is the Fin
sr = scanr_self(3,i,rl,cl);
sc = scanr_self(4,i,rl,cl);
%hr candidate number
hrcanin = para.NumberOfHCandidate;
for j=1:hrcanin
idx = idx + 1;
R(:,:,idx) = hrpatch(:,:,sr,sc,j);
mms(idx) = scanr_self(1,i,rl,cl);
qs(idx) = scanr_self(2,i,rl,cl);
mmr(idx) = scanr(1,j,sr,sc);
qr(idx) = scanr(2,j,sr,sc);
FSimBaseR(idx) = FSimPure(i);
RSim(idx) = scanr(6,j,sr,sc);
end
end
else
idx = 1;
rin = 1; %use bicubic
R(:,:,idx) = imb(rh:rh1,ch:ch1);
FSimBaseR(idx) = 1; %no self-similar patch is available; give the bicubic fallback full weight
end
%here is a question, how to define the similarity between H and R?
%L2norm?
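%each HR candidate H(:,:,i) is scored below by summing, over all two-step patches R,
%exp(-||H - R||_2 / pshs) weighted by the similarity FSimBaseR of the self-similar LR
%patch that produced R; the highest-scoring candidate is accumulated into the output with the mhrf weights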
hscore = zeros(hin,1);
for i=1:hin
theH = H(:,:,i);
for j=1:rin
theR = R(:,:,j);
spf = FSimBaseR(j);
%similarity between H and R
diff = theH - theR;
L2N = norm(diff(:));
shr = exp(- L2N/pshs);
hscore(i) = hscore(i) + shr*spf;
end
end
[maxscore idx] = max(hscore);
%take this as the example
Reliablemap(rh:rh1,ch:ch1) = Reliablemap(rh:rh1,ch:ch1) + HSim(idx)*mhrf;
if hin > 0 %some patches can't find H
hrres_nomi(rh:rh1,ch:ch1) = hrres_nomi(rh:rh1,ch:ch1) + H(:,:,idx).*mhrf;
hrres_deno(rh:rh1,ch:ch1) = hrres_deno(rh:rh1,ch:ch1) + mhrf;
end
%if any of its neighbor belongs to noHmap, copy additional region to hrres
%if the pixel belongs to noHmapneighbor, then expand the copy regions
if noHmapneighbor(rl,cl) == true
mhrfspecial = zeros(5*s);
mhrfspecial(r1:r2,c1:c2) = 1;
for i=1:4
rs = rsa(i);
cs = csa(i);
checkr = rl+rs;
checkc = cl+cs;
if checkr > 0 && checkr < lh-ps+1 && checkc >0 && checkc <lw-ps+1 && noHmapToFill(checkr,checkc)
%recompute the mhrf and disable the noHmapToFill
noHmapToFill(checkr,checkc) = false;
switch i
case 1
mhrfspecial(r1:r2,c1+s:c2+s) = 1;
case 2
mhrfspecial(r1-s:r2-s,c1:c2) = 1;
case 3
mhrfspecial(r1:r2,c1-s:c2-s) = 1;
case 4
mhrfspecial(r1+s:r2+s,c1:c2) = 1;
end
end
end
mhrfspecial = imfilter(mhrfspecial,kernel,'replicate');
hrres_nomi(rh:rh1,ch:ch1) = hrres_nomi(rh:rh1,ch:ch1) + H(:,:,idx).*mhrfspecial;
hrres_deno(rh:rh1,ch:ch1) = hrres_deno(rh:rh1,ch:ch1) + mhrfspecial;
end
end
end
hrres = hrres_nomi ./hrres_deno;
exception = isnan(hrres);
hrres_filtered = hrres;
hrres_filtered(exception) = 0;
img_texture = (hrres_filtered .* (1-exception) + imb .*exception);
end
function [scanr_self scanra_self] = F22_SearchForSelfSimilarPatchesL2Norm(img_y,para)
ps = para.ps;
patcharea = ps^2;
[lh lw] = size(img_y);
%Find self similar patches
Fpatchnumber = 10;
scanr_self = zeros(5,Fpatchnumber,lh-ps+1,lw-ps+1); %scan results: mm, quan, r,c, similarity
scanra_self = Fpatchnumber * ones(lh-ps+1,lw-ps+1); %scan results active
in = (lh-ps+1)*(lw-ps+1);
fs = zeros(patcharea,in);
rec = zeros(2,in);
idx = 0;
for rl=1:lh-ps+1
for cl=1:lw-ps+1
idx = idx + 1;
rec(:,idx) = [rl;cl];
fs(:,idx) = reshape(img_y(rl:rl+ps-1,cl:cl+ps-1),patcharea,1);
end
end
%search
idx = 0;
for rl=1:lh-ps+1
for cl=1:lw-ps+1
idx = idx + 1;
fprintf('idx %d in %d\n',idx,in);
qf = fs(:,idx);
diff = fs - repmat(qf,1,in);
sqr = sum(diff.^2);
[ssqr ix] = sort(sqr);
saveidx = 0;
for j=1:11
indexinsort = ix(j);
sr = rec(1,indexinsort);
sc = rec(2,indexinsort);
if sr ~= rl || sc ~= cl
saveidx = saveidx + 1;
if saveidx <= Fpatchnumber %ties can push the query patch itself beyond the first 11; guard against writing an 11th candidate
l2norm = sqrt(ssqr(j));
similarity = exp(-l2norm/25);
scanr_self(:,saveidx,rl,cl) = cat(1,-1,-1,sr,sc,similarity);
end
end
end
end
end
end
function [img_edge ProbOfEdge] = F1_EdgePreserving(img_y,para,zooming,Gau_sigma)
para.LowMagSuppression = 0;
para.DistanceUpperBound = 2.0;
para.ContrastEnhenceCoef = 1.0;
I_s = SmoothnessPreservingFunction(img_y,para,zooming);
T = F15_ComputeSRSSD(I_s);
Dissimilarity = EvaluateDissimilarity8(I_s);
Grad_high_initial = Img2Grad(I_s);
%SaveFolder = para.tuningfolder;
[h w] = size(T);
StatisticsFolder = fullfile('EdgePriors');
LoadFileName = sprintf('Statistics_Sc%d_Si%0.1f.mat',zooming,Gau_sigma);
LoadData = load(fullfile(StatisticsFolder,LoadFileName));
Statistics = LoadData.Statistics;
RidgeMap = edge(I_s,'canny',[0 0.01],0.05);
%filter out small ridge and non-maximun ridges
RidgeMap_filtered = RidgeMap;
[r_set c_set] = find(RidgeMap);
SetLength = length(r_set);
for j=1:SetLength
r = r_set(j);
c = c_set(j);
CenterMagValue = T(r,c);
if CenterMagValue < para.LowMagSuppression
RidgeMap_filtered(r,c) = false;
end
end
[r_set c_set] = find(RidgeMap_filtered);
SetLength = length(r_set);
[X Y] = meshgrid(1:11,1:11);
DistPatch = sqrt((X-6).^2 + (Y-6).^2);
DistMap = inf(h,w);
UsedPixel = false(h,w);
CenterCoor = zeros(h,w,2);
%Compute DistMap and CenterCoor
[r_set c_set] = find(RidgeMap_filtered);
for j=1:SetLength
r = r_set(j);
r1 = r-5;
r2 = r+5;
c = c_set(j);
c1 = c-5;
c2 = c+5;
if r1>=1 && r2<=h && c1>=1 && c2<=w %discard boundary patches?
MapPatch = DistMap(r1:r2,c1:c2);
MinPatch = min(MapPatch, DistPatch);
DistMap(r1:r2,c1:c2) = MinPatch;
UsedPixel(r1:r2,c1:c2) = true;
ChangedPixels = MinPatch < MapPatch;
OriginalCenterCoorPatch = CenterCoor(r1:r2,c1:c2,:);
NewCoor = cat(3,r*ones(11), c*ones(11));
NewCenterCoorPatch = OriginalCenterCoorPatch .* repmat(1-ChangedPixels,[1,1,2]) + NewCoor .* repmat(ChangedPixels,[1,1,2]);
CenterCoor(r1:r2,c1:c2,:) = NewCenterCoorPatch;
end
end
%Convert dist to table index
TableIndexMap = zeros(h,w);
b = unique(DistPatch(:));
for i=1:length(b)
SetPixels = DistMap == b(i);
TableIndexMap(SetPixels) = i;
end
%mapping (T_p, T_r, d) to S_p
[r_set c_set] = find(UsedPixel);
SetLength = length(r_set);
UpdatedPixel = false(h,w);
S = zeros(h,w);
for i=1:SetLength
r = r_set(i);
c = c_set(i);
r_Center = CenterCoor(r,c,1);
c_Center = CenterCoor(r,c,2);
CurrentMagValue = T(r,c);
BinIdx_Current = ceil(CurrentMagValue /0.005);
%Zebra have super strong Mag
if BinIdx_Current > 100
BinIdx_Current = 100;
end
TableIndex = TableIndexMap(r,c);
if TableIndex > para.DistanceUpperBound
continue
end
CenterMagValue = T(r_Center,c_Center);
%Low Mag Edge suppression
if CenterMagValue < para.LowMagSuppression
continue
end
BinIdx_Center = ceil(CenterMagValue /0.005);
if BinIdx_Center > 100
BinIdx_Center = 100;
end
%consult the table
if TableIndex == 1 %1 is the index of b(1) where dist = 0, enhance the contrast of pixel on edge
S_p = para.ContrastEnhenceCoef * Statistics(TableIndex).EstimatedMag(BinIdx_Current,BinIdx_Center);
else
S_p = Statistics(TableIndex).EstimatedMag(BinIdx_Current,BinIdx_Center);
end
if isnan(S_p)
else
UpdatedPixel(r,c) = true;
S(r,c) = S_p;
end
end
%Record the ridge magnitude map, for computing ProbMagOut
%the magnitude is the one consulted from the statistics
%known issue: when S is very strong, the affected range of ProbMagOut exceeds 1 pixel
RidgeMapMagValue = zeros(h,w);
for i=1:SetLength
r = r_set(i);
c = c_set(i);
r_Center = CenterCoor(r,c,1);
c_Center = CenterCoor(r,c,2);
RidgeMapMagValue(r,c) = S(r_Center,c_Center);
end
S(~UpdatedPixel) = T(~UpdatedPixel);
img_in = I_s;
if min(Dissimilarity(:)) == 0
d = Dissimilarity + 1e-6; %avoid 0 case; some images may have d(:,:,1) as 0
else
d = Dissimilarity;
end
ratio = d ./ repmat(d(:,:,1),[1,1,8]);
%here is the problem, I need to amplify the gradient directionally
Grad_in = Img2Grad(img_in);
Product = Grad_in .* ratio;
Sqr = Product.^2;
Sum = sum(Sqr,3);
Sqrt = sqrt(Sum); %the Sqrt might be 0, because Grad_in may be pure 0;
r1 = S ./Sqrt;
r1(isnan(r1)) = 0;
Grad_exp = Grad_high_initial .*( ratio .*(repmat(r1,[1,1,8])));
%consolidate inconsistent gradients
NewGrad_exp = zeros(h,w,8);
for k=1:4
switch k
case 1
ShiftOp = [0 -1];
case 2
ShiftOp = [1 -1];
case 3
ShiftOp = [1 0];
case 4
ShiftOp = [1 1];
end
k2 =k+4;
Grad1 = Grad_exp(:,:,k);
Grad2 = Grad_exp(:,:,k2);
Grad2Shift = circshift(Grad2,ShiftOp);
Grad1Abs = abs(Grad1);
Grad2AbsShift = abs(Grad2Shift);
Grad1Larger = Grad1Abs > Grad2AbsShift;
Grad2Larger = Grad2AbsShift > Grad1Abs;
NewGrad1 = Grad1 .* Grad1Larger + (-Grad2Shift) .* Grad2Larger;
NewGrad2Shift = Grad2Shift .* Grad2Larger + (-Grad1) .* Grad1Larger;
NewGrad2 = circshift(NewGrad2Shift,-ShiftOp);
NewGrad_exp(:,:,k) = NewGrad1;
NewGrad_exp(:,:,k2) = NewGrad2;
end
%current problem is the over-enhanced gradient (NewMagExp too large)
para.bReport = true;
img_edge = GenerateIntensityFromGradient(img_y,img_in,NewGrad_exp,para,zooming,Gau_sigma);
%compute the Map of edge weight
lambda_m = 2;
m0 = 0;
ProbMagOut = lambda_m * RidgeMapMagValue + m0;
lambda_d = 0.25;
d0 = 0.25;
ProbDistMap = exp(- (lambda_d * DistMap + d0) ); %this coefficient should be decided by zooming
Product = ProbMagOut .* ProbDistMap;
ProbOfEdge = min(Product,1); %the two terms are not sufficient; direction is not taken into consideration
para.bDumpInformation = false;
if para.bDumpInformation
SaveFolder = para.tuningfolder; %the dumping code below expects SaveFolder to be defined
scc(ProbOfEdge);
title('Edge Weight Map');
hfig = gcf;
fn = sprintf('%s_%s_%d_%d_EdgeWeightMap.png',para.SaveName,para.Legend,para.setting,para.tuning);
saveas(hfig,fullfile(para.tuningfolder,fn));
close(hfig)
scc(ProbMagOut,[0 1]);
hFig = gcf;
title('$b_1 s_r + b_0$, sharpness term','interpreter','latex');
axis off
saveas(hFig,fullfile(SaveFolder,[para.SaveName '_Weight_SharpnessTerm.png']));
saveas(hFig,fullfile(SaveFolder,[para.SaveName '_Weight_SharpnessTerm.fig']));
close(hFig);
scc(ProbDistMap,[0 1]);
hFig = gcf;
title('$e^{a_1 d+a_0}$, distance term','interpreter','latex');
axis off
saveas(hFig,fullfile(SaveFolder,[para.SaveName '_Weight_DistanceTerm.png']));
saveas(hFig,fullfile(SaveFolder,[para.SaveName '_Weight_DistanceTerm.fig']));
close(hFig);
scc(ProbOfEdge,[0 1]);
hFig = gcf;
title(''); %remove title, make it blank
axis off
saveas(hFig,fullfile(SaveFolder,[para.SaveName '_W_e.png']));
saveas(hFig,fullfile(SaveFolder,[para.SaveName '_W_e.fig']));
close(hFig);
scc(RidgeMap,'g');
hFig = gcf;
saveas(hFig,fullfile(SaveFolder,[para.SaveName '_R_WithFrame.png']));
saveas(hFig,fullfile(SaveFolder,[para.SaveName '_R_WithFrame.fig']));
close(hFig);
RidgeMap_filtered_inverted = 1-RidgeMap_filtered;
scc(RidgeMap_filtered_inverted,'g');
colorbar off
hFig = gcf;
title('$R$','interpreter','latex');
saveas(hFig,fullfile(SaveFolder,[para.SaveName 'RidgeMap_WithFrame.png']));
saveas(hFig,fullfile(SaveFolder,[para.SaveName 'RidgeMap_WithFrame.fig']));
close(hFig);
imwrite(1-RidgeMap_filtered,fullfile(para.tuningfolder,[para.SaveName '_R.png']));
MaxS = max(S(:));
scc(T,[0 MaxS]);
hFig = gcf;
%title('$M^*$, Magnitude of gradient of $I^*$','interpreter','latex');
title('');
axis off
saveas(hFig,fullfile(SaveFolder,[para.SaveName '_T.png']));
saveas(hFig,fullfile(SaveFolder,[para.SaveName '_T.fig']));
close(hFig);
scc(S,[0 MaxS]);
%title('$M''$, Predicted magnitude of gradient','interpreter','latex');
title('');
hFig = gcf;
axis off
saveas(hFig,fullfile(SaveFolder,[para.SaveName '_S.png']));
saveas(hFig,fullfile(SaveFolder,[para.SaveName '_S.fig']));
close(hFig);
imwrite(I_s,fullfile(SaveFolder, [para.SaveName '_I_s.png']));
MagOut = F15_ComputeSRSSD(img_edge);
scc(MagOut,[0 0.9]);
hFig = gcf;
saveas(hFig,fullfile(SaveFolder,[para.SaveName '_I_e_WithFrame.png']));
saveas(hFig,fullfile(SaveFolder,[para.SaveName '_I_e_WithFrame.fig']));
close(hFig);
imwrite(img_edge,fullfile(SaveFolder, [para.SaveName '_I_e.png']));
% scc(img_edge, 'g',[0 1]);
% hFig = gcf;
% saveas(hFig,fullfile(SaveFolder,'img_edge.fig'));
% imwrite(img_edge,fullfile(SaveFolder,'img_edge.png'));
% close(hFig);
end
end
function img_out = SmoothnessPreservingFunction(img_y,para,zooming)
img_bb = imresize(img_y,zooming);
Kernel = Sigma2Kernel(para.Gau_sigma);
%compute the similarity from low
Coef = 10;
PatchSize = 3;
Sqrt_low = SimilarityEvaluation(img_y,PatchSize);
Similarity_low = exp(-Sqrt_low*Coef);
[h_high w_high] = size(img_bb);
ExpectedSimilarity = zeros(h_high,w_high,16);
%upsample the similarity maps
for dir=1:16
ExpectedSimilarity(:,:,dir) = imresize(Similarity_low(:,:,dir),zooming,'bilinear');
end
%refine the image iteratively using ExpectedSimilarity
LoopNumber = 10;
img = img_bb;
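%each iteration below first replaces every pixel by a weighted average of its 16 directional
%neighbors (the weights are the upsampled LR patch similarities), then enforces the
%low-resolution constraint with one back-projection gradient step whose step size tau is
%found by a backtracking line search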
for loop = 1:LoopNumber
%refine gradient by ExpectedSimilarity
ValueSum = zeros(h_high,w_high);
WeightSum = sum(ExpectedSimilarity,3); %if the weight sum is low, it is unsuitable to generate the grad by interpolation
for dir = 1:16
[MoveOp N] = GetMoveKernel16(dir);
if N == 1
MovedData = imfilter(img,MoveOp{1},'replicate');
else %N ==2
MovedData1 = imfilter(img,MoveOp{1},'replicate');
MovedData2 = imfilter(img,MoveOp{2},'replicate');
MovedData = (MovedData1 + MovedData2)/2;
end
Product = MovedData .* ExpectedSimilarity(:,:,dir);
ValueSum = ValueSum + Product;
end
I = ValueSum ./ WeightSum;
%intensity compensate
Diff = imresize(imfilter(I,Kernel,'replicate'),1/zooming, 'nearest') - img_y;
UpSampled = imresize(Diff,zooming,'bilinear');
Grad0 = imfilter(UpSampled,Kernel,'replicate');
Term_LowHigh_in = ComputeFunctionValue_lowhigh(I,img_y,para.Gau_sigma);
I_in = I; %make a copy, restore the value if all beta fails
bDecrease = false;
tau = 0.2;
for line_search_loop=1:10
%line search for the beta, fixed 1/32 is not a good choice
I = I_in - tau * Grad0;
Term_LowHigh_out = ComputeFunctionValue_lowhigh(I,img_y,para.Gau_sigma);
if Term_LowHigh_out < Term_LowHigh_in
bDecrease = true;
break;
else
tau = tau * 0.5;
end
end
if bDecrease == true
I_best = I;
else
break;
end
% fprintf('loop=%d, LowHigh_in=%0.1f, LowHigh_out=%0.1f,\n',loop,Term_LowHigh_in,Term_LowHigh_out);
% imwrite(I,fullfile(SaveFolder, [num2str(loop) '_GenIntenFromGrad.png']));
img = I_best;
end
img_out = img;
end
function SqrtData = SimilarityEvaluation(Img_in,PatchSize)
HalfPatchSize = (PatchSize-1)/2;
[h w] = size(Img_in);
SqrtData = zeros(h,w,16);
f3x3 = ones(3);
for i = 1:16
[DiffOp N] = RetGradientKernel16(i);
if N == 1
Diff = imfilter(Img_in,DiffOp{1},'symmetric');
else
Diff1 = imfilter(Img_in,DiffOp{1},'symmetric');
Diff2 = imfilter(Img_in,DiffOp{2},'symmetric');
Diff = (Diff1+Diff2)/2;
end
Sqr = Diff.^2;
Sum = imfilter(Sqr,f3x3,'replicate');
Mean = Sum/9;
SqrtData(:,:,i) = sqrt(Mean);
end
end
function [DiffOp N] = RetGradientKernel16(dir)
DiffOp = cell(2,1);
f{1} = [0 0 0;
0 -1 1;
0 0 0];
f{2} = [0 0 1;
0 -1 0;
0 0 0];
f{3} = [0 1 0;
0 -1 0;
0 0 0];
f{4} = [1 0 0;
0 -1 0;
0 0 0];
f{5} = [0 0 0;
1 -1 0;
0 0 0];
f{6} = [0 0 0;
0 -1 0;
1 0 0];
f{7} = [0 0 0;
0 -1 0;
0 1 0];
f{8} = [0 0 0;
0 -1 0;
0 0 1];
switch dir
case 1
N = 1;
DiffOp{1} = f{1};
DiffOp{2} = [];
case 2
N = 2;
DiffOp{1} = f{1};
DiffOp{2} = f{2};
case 3
N = 1;
DiffOp{1} = f{2};
DiffOp{2} = [];
case 4
N = 2;
DiffOp{1} = f{2};
DiffOp{2} = f{3};
case 5
N = 1;
DiffOp{1} = f{3};
DiffOp{2} = [];
case 6
N = 2;
DiffOp{1} = f{3};
DiffOp{2} = f{4};
case 7
N = 1;
DiffOp{1} = f{4};
DiffOp{2} = [];
case 8
N = 2;
DiffOp{1} = f{4};
DiffOp{2} = f{5};
case 9
N = 1;
DiffOp{1} = f{5};
DiffOp{2} = [];
case 10
N = 2;
DiffOp{1} = f{5};
DiffOp{2} = f{6};
case 11
DiffOp{1} = f{6};
DiffOp{2} = [];
N = 1;
case 12
N = 2;
DiffOp{1} = f{6};
DiffOp{2} = f{7};
case 13
N = 1;
DiffOp{1} = f{7};
DiffOp{2} = [];
case 14
N = 2;
DiffOp{1} = f{7};
DiffOp{2} = f{8};
case 15
DiffOp{1} = f{8};
DiffOp{2} = [];
N = 1;
case 16
N = 2;
DiffOp{1} = f{8};
DiffOp{2} = f{1};
end
end
function [Kernel N] = GetMoveKernel16(dir)
Kernel = cell(2,1);
f{1} = [0 0 0;
0 0 1;
0 0 0];
f{2} = [0 0 1;
0 0 0;
0 0 0];
f{3} = [0 1 0;
0 0 0;
0 0 0];
f{4} = [1 0 0;
0 0 0;
0 0 0];
f{5} = [0 0 0;
1 0 0;
0 0 0];
f{6} = [0 0 0;
0 0 0;
1 0 0];
f{7} = [0 0 0;
0 0 0;
0 1 0];
f{8} = [0 0 0;
0 0 0;
0 0 1];
switch dir
case 1
N = 1;
Kernel{1} = f{1};
Kernel{2} = [];
case 2
N = 2;
Kernel{1} = f{1};
Kernel{2} = f{2};
case 3
N = 1;
Kernel{1} = f{2};
Kernel{2} = [];
case 4
N = 2;
Kernel{1} = f{2};
Kernel{2} = f{3};
case 5
N = 1;
Kernel{1} = f{3};
Kernel{2} = [];
case 6
N = 2;
Kernel{1} = f{3};
Kernel{2} = f{4};
case 7
N = 1;
Kernel{1} = f{4};
Kernel{2} = [];
case 8
N = 2;
Kernel{1} = f{4};
Kernel{2} = f{5};
case 9
N = 1;
Kernel{1} = f{5};
Kernel{2} = [];
case 10
N = 2;
Kernel{1} = f{5};
Kernel{2} = f{6};
case 11
Kernel{1} = f{6};
Kernel{2} = [];
N = 1;
case 12
N = 2;
Kernel{1} = f{6};
Kernel{2} = f{7};
case 13
N = 1;
Kernel{1} = f{7};
Kernel{2} = [];
case 14
N = 2;
Kernel{1} = f{7};
Kernel{2} = f{8};
case 15
Kernel{1} = f{8};
Kernel{2} = [];
N = 1;
case 16
N = 2;
Kernel{1} = f{8};
Kernel{2} = f{1};
end
end
function f = ComputeFunctionValue_lowhigh(img,img_low,Gau_sigma)
KernelSize = ceil(Gau_sigma) * 3 + 1;
G = fspecial('gaussian',KernelSize,Gau_sigma);
Conv = imfilter(img,G,'replicate');
SubSample = imresize(Conv,size(img_low),'antialias',false);
Diff = SubSample - img_low;
Sqr = Diff.^2;
f = sum(Sqr(:));
end
function Grad = Img2Grad(img)
[h w] = size(img);
Grad = zeros(h,w,8);
DiffOp = RetGradientKernel();
for i=1:8
Grad(:,:,i) = imfilter(img,DiffOp{i},'replicate');
end
end
function f = RetGradientKernel()
f = cell(8,1);
f{1} = [0 0 0;
0 -1 1;
0 0 0];
f{2} = [0 0 1;
0 -1 0;
0 0 0];
f{3} = [0 1 0;
0 -1 0;
0 0 0];
f{4} = [1 0 0;
0 -1 0;
0 0 0];
f{5} = [0 0 0;
1 -1 0;
0 0 0];
f{6} = [0 0 0;
0 -1 0;
1 0 0];
f{7} = [0 0 0;
0 -1 0;
0 1 0];
f{8} = [0 0 0;
0 -1 0;
0 0 1];
end
function Dissimilarity = EvaluateDissimilarity8(Img_in,PatchSize)
if ~exist('PatchSize','var')
PatchSize = 3;
end
[h w] = size(Img_in);
Dissimilarity = zeros(h,w,8);
f3x3 = ones(PatchSize)/(PatchSize^2);
for i = 1:8
DiffOp = RetGradientKernel8(i);
Diff = imfilter(Img_in,DiffOp,'symmetric');
Sqr = Diff.^2;
Sum = imfilter(Sqr,f3x3,'replicate');
Dissimilarity(:,:,i) = sqrt(Sum);
end
end
function DiffOp = RetGradientKernel8(dir)
f{1} = [0 0 0;
0 -1 1;
0 0 0];
f{2} = [0 0 1;
0 -1 0;
0 0 0];
f{3} = [0 1 0;
0 -1 0;
0 0 0];
f{4} = [1 0 0;
0 -1 0;
0 0 0];
f{5} = [0 0 0;
1 -1 0;
0 0 0];
f{6} = [0 0 0;
0 -1 0;
1 0 0];
f{7} = [0 0 0;
0 -1 0;
0 1 0];
f{8} = [0 0 0;
0 -1 0;
0 0 1];
DiffOp = f{dir};
end
function img_out = GenerateIntensityFromGradient(img_y,img_initial,Grad_exp,para,zooming,Gau_sigma)
if ~isfield(para,'bReport')
para.bReport = false;
end
if ~isfield(para,'LoopNumber')
para.LoopNumber = 30;
end
if ~isfield(para,'beta0')
beta0 = 1;
else
beta0 = para.beta0;
end
if ~isfield(para,'beta1')
beta1 = 1;
else
beta1 = para.beta1;
end
% TempFolder = para.tuningfolder;
% zooming = para.zooming;
%create dir
% if isfield(para,'tuning')
% SaveFolder = para.tuningfolder;
% else
% SaveFolder = fullfile(TempFolder,'OptimizationProgress');
% end
% if ~exist(SaveFolder,'dir')
% mkdir( SaveFolder );
% end
Kernel = Sigma2Kernel(Gau_sigma);
%compute gradient
I = img_initial;
I_best = I;
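%the loop below decreases a weighted sum of the low-resolution reconstruction error (weight beta0)
%and the mismatch between the image gradients and Grad_exp (weight beta1); the step size tau is
%chosen by a backtracking line search and the iteration stops once no step reduces the combined energy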
for loop = 1:para.LoopNumber
%refine image by patch similarity
%refine image by low-high intensity
Diff = imresize(imfilter(I,Kernel,'replicate'),1/zooming, 'nearest') - img_y;
UpSampled = imresize(Diff,zooming,'bilinear');
Grad0 = imfilter(UpSampled,Kernel,'replicate');
%refine image by expected gradient
%Gradient decent
%I = ModifyByGradient(I,Grad_exp);
OptDir = Grad_exp - Img2Grad(I);
Grad1 = sum(OptDir,3);
Grad_all = beta0 * Grad0 + beta1 * Grad1;
I_in = I; %make a copy, restore the value if all beta fails
bDecrease = false;
tau = 0.2;
Term_Grad_in = ComputeFunctionValue_Grad(I,Grad_exp);
Term_LowHigh_in = ComputeFunctionValue_lowhigh(I,img_y,para.Gau_sigma);
Term_all_in = Term_LowHigh_in * beta0 + Term_Grad_in * beta1;
for line_search_loop=1:10
%line search for the beta, fixed 1/32 is not a good choice
I = I_in - tau * Grad_all;
Term_Grad_out = ComputeFunctionValue_Grad(I,Grad_exp);
Term_LowHigh_out = ComputeFunctionValue_lowhigh(I,img_y,para.Gau_sigma);
Term_all_out = Term_LowHigh_out * beta0 + Term_Grad_out * beta1;
if Term_all_out < Term_all_in
bDecrease = true;
break;
else
tau = tau * 0.5;
end
end
if bDecrease == true
I_best = I;
else
break;
end
if para.bReport
fprintf(['loop=%d, all_in=%0.1f, all_out=%0.1f, LowHigh_in=%0.1f, LowHigh_out=%0.1f, ' ...
'Grad_in=%0.1f, Grad_out=%0.1f\n'],loop,Term_all_in,Term_all_out,Term_LowHigh_in,Term_LowHigh_out, ...
Term_Grad_in,Term_Grad_out);
end
% imwrite(I,fullfile(SaveFolder, [num2str(loop) '_GenIntenFromGrad.png']));
end
img_out = I_best;
end
function f = ComputeFunctionValue_Grad(img, Grad_exp)
Grad = Img2Grad(img);
Diff = Grad - Grad_exp;
Sqrt = Diff .^2;
f = sqrt(sum(Sqrt(:)));
end
function lrimg = U3_GenerateLRImage_BlurSubSample(hrimg,s,sigma)
[h w d] = size(hrimg);
htrim = h-mod(h,s);
wtrim = w-mod(w,s);
imtrim = hrimg(1:htrim,1:wtrim,1:d);
%detect image type
kernel = Sigma2Kernel(sigma);
if d == 1
blurimg = imfilter(imtrim,kernel,'replicate');
elseif d == 3
blurimg = zeros(htrim,wtrim,d);
for i=1:3
blurimg(:,:,i) = imfilter(imtrim(:,:,i),kernel,'replicate');
end
end
lrimg = imresize(blurimg,1/s,'bilinear','antialias',false);
end
function img_bp = F11_BackProjection(img_lr, img_hr, Gau_sigma, iternum)
[h_hr] = size(img_hr,1);
[h_lr] = size(img_lr,1);
zooming = h_hr/h_lr;
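%standard iterative back-projection: the low-resolution residual between img_lr and the
%blurred-and-subsampled current estimate is upsampled bilinearly and added back to the
%high-resolution estimate in every iteration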
for i=1:iternum
img_lr_gen = U3_GenerateLRImage_BlurSubSample(img_hr,zooming,Gau_sigma);
diff_lr = img_lr - img_lr_gen;
diff_hr = imresize(diff_lr,zooming,'bilinear');
img_hr = img_hr + diff_hr;
end
img_bp = img_hr;
end
|
github
|
Liusifei/Face-Hallucination-master
|
F37f_GetTexturePatchMatch_Aligned.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F37f_GetTexturePatchMatch_Aligned.m
| 13,388 |
utf_8
|
9f5b9bae31553af0503055933b6977aa
|
%Chih-Yuan Yang
%10/07/12
%Use patchmatch to retrieve a texture background
function [gradients_texture, img_texture, img_texture_backprojection] = F37f_GetTexturePatchMatch_Aligned(img_y, ...
hrexampleimages, lrexampleimages, landmarks_test, rawexamplelandmarks)
%parameter
numberofHcandidate = 10;
%start
[h_lr, w_lr, exampleimagenumber] = size(lrexampleimages);
[h_hr, w_hr, ~] = size(hrexampleimages);
zooming = floor(h_hr/h_lr);
if zooming == 4
Gau_sigma = 1.6;
elseif zooming == 3
Gau_sigma = 1.2;
end
alignedexampleimage_hr = zeros(h_hr,w_hr,exampleimagenumber,'uint8'); %set as uint8 to reduce memory demand
alignedexampleimage_lr = zeros(h_lr,w_lr,exampleimagenumber);
disp('align images');
set = 28:48; %eyes and nose
basepoints = landmarks_test(set,:);
inputpoints = rawexamplelandmarks(set,:,:);
parfor k=1:exampleimagenumber
alignedexampleimage_hr(:,:,k) = F18_AlignExampleImageByLandmarkSet(hrexampleimages(:,:,k),inputpoints(:,:,k),basepoints);
%F19 automatically convert uint8 input to double
alignedexampleimage_lr(:,:,k) = F19a_GenerateLRImage_GaussianKernel(alignedexampleimage_hr(:,:,k),zooming,Gau_sigma);
end
cores = 2; % Use more cores for more speed
if cores==1
algo = 'cpu';
else
algo = 'cputiled';
end
patchsize_lr = 5;
nn_iters = 5;
A = repmat(img_y,[1 1 3]);
testnumber = exampleimagenumber;
xyandl2norm = zeros(h_lr,w_lr,3,testnumber,'int32');
disp('patchmatching');
parfor i=1:testnumber;
%run patchmatch
B = repmat(alignedexampleimage_lr(:,:,i),[1 1 3]);
xyandl2norm(:,:,:,i) = nnmex(A, B, algo, patchsize_lr, nn_iters, [], [], [], [], cores); %the return is in int32
end
l2norm_double = double(xyandl2norm(:,:,3,:));
[sortedl2norm, ix] = sort(l2norm_double,4);
hrpatchextractdata = zeros(h_lr-patchsize_lr+1,w_lr-patchsize_lr+1,numberofHcandidate,3); %ii,r_lr_src,c_lr_src
%here
hrpatchsimilarity = zeros(h_lr-patchsize_lr+1,w_lr-patchsize_lr+1,numberofHcandidate);
parameter_l2normtosimilarity = 625;
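%for every LR patch location, the example images are ranked by their PatchMatch L2 distance;
%the numberofHcandidate best ones provide the HR candidates, each recorded as
%(example index, source row, source col) with similarity exp(-L2norm/625)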
for rl = 1:h_lr-patchsize_lr+1
for cl = 1:w_lr-patchsize_lr+1
for k=1:numberofHcandidate
knnidx = ix(rl,cl,1,k);
x = xyandl2norm(rl,cl,1,knnidx); %start from 0
y = xyandl2norm(rl,cl,2,knnidx);
clsource = x+1;
rlsource = y+1;
hrpatchextractdata(rl,cl,k,:) = reshape([knnidx rlsource clsource],[1 1 1 3]);
hrpatchsimilarity(rl,cl,k) = exp(-sortedl2norm(rl,cl,1,k)/parameter_l2normtosimilarity); %the k-th smallest distance; indexing the sorted array by knnidx would read the wrong entry
end
end
end
hrpatch = F39_ExtractAllHrPatches(patchsize_lr,zooming, hrpatchextractdata,alignedexampleimage_hr);
hrpatch = F40_CompensateHRpatches(hrpatch, img_y, zooming, hrpatchextractdata,alignedexampleimage_lr);
%mostsimilarinputpatchrecord = IF2_SearchForSelfSimilarPatchesL2Norm(img_y,patchsize_lr);
%hrpatch_filtered = IF3_SimilarityFilter(hrpatch,hrpatchsimilarity,mostsimilarinputpatchrecord);
%img_texture = IF4_BuildHRimagefromHRPatches(hrpatch_filtered,zooming);
img_texture = IF4_BuildHRimagefromHRPatches(hrpatch,zooming);
iternum = 1000;
Tolf = 0.0001;
breport = false;
disp('backprojection for img_texture');
img_texture_backprojection = F11d_BackProjection_GaussianKernel(img_y, img_texture, Gau_sigma, iternum,breport,Tolf);
%extract the gradient
gradients_texture = F14_Img2Grad(img_texture_backprojection);
end
function scanresult = IF2_SearchForSelfSimilarPatchesL2Norm(img_y,patchsize_lr)
%out:
%scanresult: 3 x numberofFcandidate x (h_lr-patchsize+1) x (w_lr-patchsize+1)
patcharea = patchsize_lr^2;
[lh lw] = size(img_y);
%Find self similar patches
numberofFcandidate = 10;
scanresult = zeros(3,numberofFcandidate,lh-patchsize_lr+1,lw-patchsize_lr+1); %scan results: r,c, similarity
totalpatchnumber = (lh-patchsize_lr+1)*(lw-patchsize_lr+1);
featurematrix = zeros(patcharea,totalpatchnumber);
rec = zeros(2,totalpatchnumber);
idx = 0;
for rl=1:lh-patchsize_lr+1
rl1 = rl+patchsize_lr-1;
for cl=1:lw-patchsize_lr+1
cl1 = cl+patchsize_lr-1;
idx = idx + 1;
rec(:,idx) = [rl;cl];
featurematrix(:,idx) = reshape(img_y(rl:rl1,cl:cl1),patcharea,1);
end
end
%search
idx = 0;
for rl=1:lh-patchsize_lr+1
for cl=1:lw-patchsize_lr+1
idx = idx + 1;
fprintf('idx %d totalpatchnumber %d\n',idx,totalpatchnumber);
queryfeature = featurematrix(:,idx);
diff = featurematrix - repmat(queryfeature,1,totalpatchnumber);
sqr = sum(diff.^2);
[ssqr ix] = sort(sqr);
saveidx = 0;
for j=1:numberofFcandidate+1 %add one to prevent find itself
indexinsort = ix(j);
sr = rec(1,indexinsort);
sc = rec(2,indexinsort);
%explanation: it is possible that 11 or more lr patches have the same appearance, so the query
%patch itself may be sorted beyond index 11; checking sr and sc alone is then insufficient,
%and the saveidx bound below prevents writing more than numberofFcandidate entries
if sr ~= rl || sc ~= cl
saveidx = saveidx + 1;
if saveidx <= numberofFcandidate
l2norm = sqrt(ssqr(j));
similarity = exp(-l2norm/25);
scanresult(1:3,saveidx,rl,cl) = [sr;sc;similarity];
end
end
end
end
end
end
function hrpatch_filtered = IF3_SimilarityFilter(hrpatch,hrpatchsimilarity,mostsimilarinputpatches)
%in
%hrpatch: patchsize_hr x patchsize_hr x (h_lr-patchsize_lr+1) x (w_lr-patchsize_lr+1) x numberofHcandidate
%hrpatchsimilarity: (h_lr-patchsize_lr+1) x (w_lr-patchsize_lr+1) x numberofHcandidate
%mostsimilarinputpatches: 3 x numberofFcandidate x (h_lr-patchsize_lr+1) x (w_lr-patchsize_lr+1)
%out
%hrpatch_filtered: patchsize_hr x patchsize_hr x (h_lr-patchsize_lr+1) x (w_lr-patchsize_lr+1)
zooming = 4;
patchsize_hr = size(hrpatch,1);
patchsize_lr = patchsize_hr /zooming;
h_lr = size(hrpatch,3) + patchsize_lr -1;
w_lr = size(hrpatch,4) + patchsize_lr -1;
numberofHcandidate = size(hrpatch,5);
numberofFcandidate = size(mostsimilarinputpatches,2);
%allocate for out
hrpatch_filtered = zeros(patchsize_hr,patchsize_hr,h_lr-patchsize_lr+1,w_lr-patchsize_lr+1);
for rl= 1:h_lr-patchsize_lr+1
fprintf('rl:%d total:%d\n',rl,h_lr-patchsize_lr+1);
for cl = 1:w_lr-patchsize_lr+1
%load candidates
H = zeros(patchsize_hr,patchsize_hr,numberofHcandidate);
similarityHtolrpatch = zeros(numberofHcandidate,1);
for j=1:numberofHcandidate
H(:,:,j) = hrpatch(:,:,rl,cl,j); %H
similarityHtolrpatch(j) = hrpatchsimilarity(rl,cl,j);
end
%self similar patch instance number
similarityFtolrpatch = reshape( mostsimilarinputpatches(3,:,rl,cl) , [numberofFcandidate , 1]);
%load all of the two step patches
R = zeros(patchsize_hr,patchsize_hr,numberofFcandidate,numberofHcandidate);
RSimbasedonF = zeros(numberofFcandidate,numberofHcandidate);
for i=1:numberofFcandidate
sr = mostsimilarinputpatches(1,i,rl,cl);
sc = mostsimilarinputpatches(2,i,rl,cl);
%hr candidate number
for j=1:numberofHcandidate
R(:,:,i,j) = hrpatch(:,:,sr,sc,j);
RSimbasedonF(i,j) = hrpatchsimilarity(sr,sc,j);
end
end
%here is a question, how to define the similarity between H and R?
%L2norm?
hscore = zeros(numberofHcandidate,1);
for i=1:numberofHcandidate
theH = H(:,:,i);
for j=1:numberofFcandidate
for k=1:numberofHcandidate
theR = R(:,:,j,k);
similarityRbasedonF = RSimbasedonF(j,k);
%similarity between H and R
diff = theH - theR;
L2N = norm(diff(:));
similarityRtoH = exp(- L2N/25); %the 25 is a parameter that needs to be tuned in the future
hscore(i) = hscore(i) + similarityHtolrpatch(i) * similarityRbasedonF * similarityRtoH * similarityFtolrpatch(j);
end
end
end
[~, idx] = max(hscore);
hrpatch_filtered(:,:,rl,cl) = hrpatch(:,:,rl,cl,idx(1));
end
end
end
function img_texture = IF4_BuildHRimagefromHRPatches(hrpatch,zooming)
%reconstruct the high-resolution image
patchsize_hr = size(hrpatch,1);
patchsize_lr = patchsize_hr/zooming;
h_lr = size(hrpatch,3) + patchsize_lr - 1;
w_lr = size(hrpatch,4) + patchsize_lr - 1;
h_expected = h_lr * zooming;
w_expected = w_lr * zooming;
img_texture = zeros(h_expected,w_expected);
%most cases
rpixelshift = 2; %this should be modified according to patchsize_lr
cpixelshift = 2;
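%for interior LR pixels the zooming x zooming output block is copied from the center of the
%HR patch whose 5x5 LR patch is centered on that pixel; with patchsize_lr = 5 and zooming = 4
%the HR patch is 20x20 and its central block occupies rows/columns 9:12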
for rl = 2:h_lr - patchsize_lr
rh = (rl-1+rpixelshift)*zooming+1;
rh1 = rh+zooming-1;
for cl = 2:w_lr - patchsize_lr
ch = (cl-1+cpixelshift)*zooming+1;
ch1 = ch+zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
img_texture(rh:rh1,ch:ch1) = usedhrpatch(9:12,9:12);
end
end
%left
cl = 1;
ch = 1;
ch1 = ch+3*zooming-1;
for rl=2:h_lr-patchsize_lr
rh = (rl-1+rpixelshift)*zooming+1;
rh1 = rh+zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 1;
ch1source = chsource+3*zooming-1;
rhsource = 9;
rh1source = rhsource+zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
end
%right
cl = w_lr - patchsize_lr+1;
ch = w_expected - 3*zooming+1;
ch1 = w_expected;
for rl=2:h_lr-patchsize_lr
rh = (rl-1+rpixelshift)*zooming+1;
rh1 = rh+zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 9;
ch1source = chsource+3*zooming-1;
rhsource = 9;
rh1source = rhsource+zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
end
%top
rl = 1;
rh = 1;
rh1 = rh+3*zooming-1;
for cl=2:w_lr-patchsize_lr
ch = (cl-1+cpixelshift)*zooming+1;
ch1 = ch+zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 9;
ch1source = chsource+zooming-1;
rhsource = 1;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
end
%bottom
rl = h_lr-patchsize_lr+1;
rh = h_expected - 3*zooming+1;
rh1 = h_expected;
for cl=2:w_lr-patchsize_lr
ch = (cl-1+cpixelshift)*zooming+1;
ch1 = ch+zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 9;
ch1source = chsource+zooming-1;
rhsource = 9;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
end
%left-top corner
rl=1;
cl=1;
rh = 1;
rh1 = rh+3*zooming-1;
ch = 1;
ch1 = ch+3*zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 1;
ch1source = chsource+3*zooming-1;
rhsource = 1;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
%right-top corner
rl=1;
cl=w_lr-patchsize_lr+1;
rh = (rl-1)*zooming+1;
rh1 = rh+3*zooming-1;
ch = (cl-1+cpixelshift)*zooming+1;
ch1 = ch+3*zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 9;
ch1source = chsource+3*zooming-1;
rhsource = 1;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
%left-bottom corner
rl=h_lr-patchsize_lr+1;
cl=1;
rh = (rl-1+rpixelshift)*zooming+1;
rh1 = rh+3*zooming-1;
ch = (cl-1)*zooming+1;
ch1 = ch+3*zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 1;
ch1source = chsource+3*zooming-1;
rhsource = 9;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
%right-bottom corner
rl=h_lr-patchsize_lr+1;
cl=w_lr-patchsize_lr+1;
rh = (rl-1+rpixelshift)*zooming+1;
rh1 = rh+3*zooming-1;
ch = (cl-1+cpixelshift)*zooming+1;
ch1 = ch+3*zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 9;
ch1source = chsource+3*zooming-1;
rhsource = 9;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
end
|
github
|
Liusifei/Face-Hallucination-master
|
F40_CompensateHRpatches.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F40_CompensateHRpatches.m
| 1,341 |
utf_8
|
ece1f9e17377677e192ae02b610fb56a
|
%Chih-Yuan Yang
%10/07/12
function hrpatch_compensate = F40_CompensateHRpatches(hrpatch, img_y, zooming, hrpatchextractdata,lrexampleimages)
%in:
%hrpatchextractdata: (h_lr-patchsize_lr+1) x (w_lr-patchsize_lr+1) x numberofHcandidate x 3 %ii,r_lr_src,c_lr_src
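%each HR candidate is compensated so that its downsampled version better matches the query:
%the difference between the query LR patch and the retrieved LR patch is upsampled
%bilinearly and added to the HR patch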
hrpatch_compensate = zeros(size(hrpatch));
patchsize_hr = size(hrpatch,1);
patchsize_lr = patchsize_hr / zooming;
[h_lr_active w_lr_active numberofHcandidate ~] = size(hrpatchextractdata);
for rl = 1:h_lr_active
rl1 = rl + patchsize_lr -1;
for cl = 1:w_lr_active
cl1 = cl + patchsize_lr -1;
patch_lr = img_y(rl:rl1,cl:cl1);
for k=1:numberofHcandidate
patch_hr = hrpatch(:,:,rl,cl,k);
ii = hrpatchextractdata(rl,cl,k,1);
sr = hrpatchextractdata(rl,cl,k,2);
sc = hrpatchextractdata(rl,cl,k,3);
sr1 = sr+patchsize_lr-1;
sc1 = sc+patchsize_lr-1;
patch_lr_found = lrexampleimages(sr:sr1,sc:sc1,ii);
diff_lr = patch_lr - patch_lr_found;
diff_hr = imresize(diff_lr,zooming,'bilinear');
patch_hr_compensated = patch_hr + diff_hr;
hrpatch_compensate(:,:,rl,cl,k) = patch_hr_compensated;
end
end
end
end
|
github
|
Liusifei/Face-Hallucination-master
|
F18a_AlignExampleImageByTwoPoints.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F18a_AlignExampleImageByTwoPoints.m
| 459 |
utf_8
|
5bdf68475e0ffcc19064b4878111e604
|
%Chih-Yuan Yang
%09/15/12
%Change from F18 to F18a, two points only, no optimization
function alignedexampleimage = F18a_AlignExampleImageByTwoPoints(exampleimage,inputpoints,basepoints)
[h w d] = size(exampleimage);
tform = cp2tform(inputpoints, basepoints,'nonreflective similarity');
%generated the transform images
XData = [1 w];
YData = [1 h];
alignedexampleimage = imtransform(exampleimage,tform,'XData',XData,'YData',YData);
end
|
github
|
Liusifei/Face-Hallucination-master
|
U21a_DrawLandmarks_Points_ReturnHandle.m
|
.m
|
Face-Hallucination-master/Code/Ours2/U21a_DrawLandmarks_Points_ReturnHandle.m
| 933 |
utf_8
|
c9419254c3dd43155fec87373fe5e03a
|
%Chih-Yuan Yang
%09/15/12
%Sometimes the format of landmarks is coordinates rather than boxes
%03/19/14 update the function; the bdrawpose option does not work
function hfig = U21a_DrawLandmarks_Points_ReturnHandle(im, points, posemap,bshownumbers,bdrawpose,bvisible)
if bvisible
hfig = figure;
else
hfig = figure('Visible','off');
end
imshow(im);
hold on;
axis image;
axis off;
setnumber = size(points,1);
if bdrawpose
tx = (min(points(:,1)) + max(points(:,1)))/2; %tx is the horizontal center of the landmarks
ty = min(points(:,2)) - tx;
text(tx,ty, num2str(posemap(b.c)),'fontsize',18,'color','c');
end
for i=1:setnumber
x = points(i,1);
y = points(i,2);
plot(x,y,'r.','markersize',9);
if bshownumbers
text(x,y, num2str(i), 'fontsize',9,'color','k');
end
end
drawnow;
end
|
github
|
Liusifei/Face-Hallucination-master
|
F18b_AlignExampleImageByLandmarkSet.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F18b_AlignExampleImageByLandmarkSet.m
| 2,230 |
utf_8
|
b636615d164ee40bc5b4be7afc6a6730
|
%Chih-Yuan Yang
%06/12/13
%F18: only return the aligned image
%F18a: AlignExampleImagesByTwoPoints, this must be an old function.
%F18b: from F18, return the aligned landmarks
function [alignedexampleimage, landmarks_aligned] = F18b_AlignExampleImageByLandmarkSet(exampleimage,inputpoints,basepoints)
%use shift and scaling only as the initial variables
initial_shift = mean(basepoints,1) - mean(inputpoints,1); %mean over landmarks, one shift per coordinate
initial_deltax = initial_shift(1);
initial_deltay = initial_shift(2);
initial_lambda = 1;
initial_theta = 0;
initial_variable = [initial_theta, initial_lambda, initial_deltax, initial_deltay];
%solve the optimization problem
options = optimset('Display','off','TolX',1e-4);
[x, fval]= fminsearch(@(x) OptProblem(x,inputpoints,basepoints), initial_variable, options);
theta = x(1);
lambda = x(2);
deltax = x(3);
deltay = x(4);
transformmatrix = [lambda*cos(theta) -lambda*sin(theta) deltax;
lambda*sin(theta) lambda*cos(theta) deltay];
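%transformmatrix is the 2x3 similarity transform [lambda*R(theta) | t] that maps a homogeneous
%landmark coordinate [x; y; 1] to its aligned position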
landmarks_input_rowvector = inputpoints;
landmarks_input_colvector = landmarks_input_rowvector';
num_landmarks = size(landmarks_input_colvector,2);
landmarks_input_plus1 = cat(1,landmarks_input_colvector,ones(1,num_landmarks));
landmarks_aligned = transformmatrix * landmarks_input_plus1;
%take the two points farthest apart to generate the input point pair
inputpoint1 = inputpoints(1,:)';
setsize = size(inputpoints,1);
dxdy = inputpoints - repmat(inputpoints(1,:),[setsize,1]);
distsqr = sum(dxdy.^2,2);
[sortresults, ix] = sort(distsqr,'descend');
farestpointidx = ix(1);
inputpoint2 = inputpoints(farestpointidx,:)';
inputpoints_2points = cat(1, inputpoint1',inputpoint2');
basepoint1 = transformmatrix * cat(1,inputpoint1,1);
basepoint2 = transformmatrix * cat(1,inputpoint2,1);
basepoints_2points = cat(1, basepoint1', basepoint2');
[h, w, d] = size(exampleimage);
tform = cp2tform(inputpoints_2points, basepoints_2points,'nonreflective similarity');
%generated the transform images
XData = [1 w];
YData = [1 h];
alignedexampleimage = imtransform(exampleimage,tform,'XData',XData,'YData',YData);
end
|
github
|
Liusifei/Face-Hallucination-master
|
T1_Facesmoother.m
|
.m
|
Face-Hallucination-master/Code/Ours2/T1_Facesmoother.m
| 2,384 |
utf_8
|
9169e2fd4a51a23c10447e4562d7a510
|
function img = T1_Facesmoother(img)
% for super-resolution results smmother
dimg = T2_MultiDoG(img);
edge_bw = edge(rgb2gray(img),'canny',[0.01,0.12]);
img = T1_EdgeSmoothing(img, dimg, edge_bw);
end
% T1_EdgeSmoothing produces the edges by preserving the edge data in
% ref_im, and generates new pixels of other regions from dst_im.
% In face denoising it helps preserve the edge area.
% In face beautification, it helps manipulate the mask's boundaries.
% Input:
% ref_im: original image or image layer;
% dst_im: processed image or image layer
% edge_bw: binary image of edge map;
% Output:
% rut_im: combined result image
% Sifei Liu, 05/30/2013
function rut_im = T1_EdgeSmoothing(ref_im, dst_im, edge_bw)
%% parameters configuration
w_im = double(edge_bw);
% thr = 10;
[r,c,n] = size(ref_im);
thr = floor(min(r,c)/40);
w_im(1:thr,:) = 1; w_im(end-thr:end,:) = 1;
w_im(:,1:thr) = 1; w_im(:,end-thr:end) = 1;
%% find the nearest edge point
[er,ec] = find(edge_bw == 1);
e_len = length(er);
mat = T1_RangeMat(thr);
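%note: T1_RangeMat is defined elsewhere in this repo; from its use below it must return a
%(2*thr+1) x (2*thr+1) window, presumably a distance-based weight profile; taking the per-pixel
%max spreads each edge pixel's weight over its neighborhood so that ref_im dominates near edges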
for m = 1:e_len
if and(and(er(m)>thr+1,er(m)<r-thr-1),and(ec(m)>thr+1,ec(m)<c-thr-1))
w_im(er(m)-thr:er(m)+thr,ec(m)-thr:ec(m)+thr) = max(w_im(er(m)-thr:er(m)+thr,ec(m)-thr:...
ec(m)+thr),mat);
end
end
%% update image according to weighting map
rut_im = uint8(repmat(w_im,[1,1,n]) .* double(ref_im) + (1-repmat(w_im,[1,1,n])) .* double(dst_im));
end
%% T2_MultiDoG.m
function RI = T2_MultiDoG(I)
layer = 6;
l = floor(min(size(I,1),size(I,2))/10);
if l >= 50
hl = fspecial('gaussian',[3,3],sqrt(2));
l1=2;l2=3;l3=5;
else
hl = fspecial('gaussian',[2,2],sqrt(2));
l1=1;l2=5;l3 = l2;
end
hh = fspecial('gaussian',[l,l],sqrt(l/2));
if length(size(I)) == 3
[L,a,b] = RGB2Lab(I);
maxL = max(max(L));minL = min(min(L));
L = Normalize(L,maxL,minL);
else
maxL = max(max(I));minL = min(min(I));
L = Normalize(I,maxL,minL);
end
DI = cell(1,layer+1);
DI{1,1} = L;
if length(size(I)) == 3 %the a and b channels exist only for color input
da = imfilter(a,hh);
db = imfilter(b,hh);
end
for m = 1:layer
DI{1,m+1} = imfilter(DI{1,m},hl);
end
Dh = imfilter(L,hh);
% RL = DI{1,1}-(DI{1,l1}-DI{1,l2})-(DI{1,l3}-Dh);
RL = DI{1,1}-(DI{1,3}-Dh);
RI = ReNormalize(RL, maxL,minL);
if length(size(I)) == 3
RI = Lab2RGB(RI,da,db);
end
close all
% imshow(RI);
end
function I = Normalize(I,MAX,MIN)
I = (I - MIN)/(MAX-MIN);
end
function I = ReNormalize(I, MAX, MIN)
I = I * (MAX-MIN) + MIN;
end
|
github
|
Liusifei/Face-Hallucination-master
|
U23_PrepareResultFolder.m
|
.m
|
Face-Hallucination-master/Code/Ours2/U23_PrepareResultFolder.m
| 1,104 |
utf_8
|
85619b24913ae5e317fd6fc41deeea7b
|
%Chih-Yuan Yang
%09/15/12
%simplify main function
function para = U23_PrepareResultFolder(resultfolder,para)
settingfolder = fullfile(resultfolder,sprintf('%s%d',para.SaveName,para.setting));
tuningfolder = fullfile(settingfolder, sprintf('Tuning%d',para.tuning));
para.resultfolder = resultfolder;
para.settingfolder = settingfolder;
para.tuningfolder = tuningfolder;
U22_makeifnotexist(tuningfolder);
if ~isempty(para.settingnote)
fid = fopen(fullfile(settingfolder, 'SettingNote.txt'),'w');
fprintf(fid,'%s',para.settingnote);
fclose(fid);
end
if ~isempty(para.tuningnote)
fid = fopen(fullfile(para.tuningfolder ,'TunningNote.txt'),'w');
fprintf(fid,'%s',para.tuningnote);
fclose(fid);
end
%copy parameter setting
if ispc
cmd = ['copy ' para.MainFileName '.m ' fullfile(para.tuningfolder, [para.MainFileName '_backup.m '])];
elseif isunix
cmd = ['cp ' para.MainFileName '.m ' fullfile(para.tuningfolder, [para.MainFileName '_backup.m '])];
end
dos(cmd);
end
|
github
|
Liusifei/Face-Hallucination-master
|
F37e_GetTexturePatchMatch_PatchCompensate_SimilarityFilter.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F37e_GetTexturePatchMatch_PatchCompensate_SimilarityFilter.m
| 12,898 |
utf_8
|
7e0e3d9bfb628f725e1490725478cf54
|
%Chih-Yuan Yang
%10/07/12
%Use patchmatch to retrieve a texture background
function [gradients_texture img_texture img_texture_backprojection] = ...
F37e_GetTexturePatchMatch_PatchCompensate_SimilarityFilter(img_y, ...
hrexampleimages, lrexampleimages)
%parameter
numberofHcandidate = 10;
%start
[h_lr, w_lr, exampleimagenumber] = size(lrexampleimages);
[h_hr, w_hr, ~] = size(hrexampleimages);
zooming = h_hr/h_lr;
if zooming == 4
Gau_sigma = 1.6;
elseif zooming == 3
Gau_sigma = 1.2;
end
cores = 2; % Use more cores for more speed
if cores==1
algo = 'cpu';
else
algo = 'cputiled';
end
patchsize_lr = 5;
nn_iters = 5;
%A =F38_ExtractFeatureFromAnImage(img_y);
A = repmat(img_y,[1 1 3]);
testnumber = exampleimagenumber;
xyandl2norm = zeros(h_lr,w_lr,3,testnumber,'int32');
%question: how to control the random seed in PatchMatch? It is set in nn.cpp, but it looks fixed because srand2() is never called.
%However, since numberofHcandidate differs between Test34 (10) and Test35 (1), the retrieved patches are different
disp('patchmatching');
parfor i=1:testnumber;
%run patchmatch
B = repmat(lrexampleimages(:,:,i),[1 1 3]);
xyandl2norm(:,:,:,i) = nnmex(A, B, algo, patchsize_lr, nn_iters, [], [], [], [], cores); %the return is in int32
end
l2norm_double = double(xyandl2norm(:,:,3,:));
[sortedl2norm, ix] = sort(l2norm_double,4);
hrpatchextractdata = zeros(h_lr-patchsize_lr+1,w_lr-patchsize_lr+1,numberofHcandidate,3); %ii,r_lr_src,c_lr_src
%here
hrpatchsimilarity = zeros(h_lr-patchsize_lr+1,w_lr-patchsize_lr+1,numberofHcandidate);
parameter_l2normtosimilarity = 625;
for rl = 1:h_lr-patchsize_lr+1
for cl = 1:w_lr-patchsize_lr+1
for k=1:numberofHcandidate
knnidx = ix(rl,cl,1,k);
x = xyandl2norm(rl,cl,1,knnidx); %start from 0
y = xyandl2norm(rl,cl,2,knnidx);
clsource = x+1;
rlsource = y+1;
hrpatchextractdata(rl,cl,k,:) = reshape([knnidx rlsource clsource],[1 1 1 3]);
hrpatchsimilarity(rl,cl,k) = exp(-sortedl2norm(rl,cl,1,k)/parameter_l2normtosimilarity); %the k-th smallest distance; indexing the sorted array by knnidx would read the wrong entry
end
end
end
hrpatch = F39_ExtractAllHrPatches(patchsize_lr,zooming, hrpatchextractdata,hrexampleimages);
hrpatch = F40_CompensateHRpatches(hrpatch, img_y, zooming, hrpatchextractdata,lrexampleimages);
mostsimilarinputpatchrecord = IF2_SearchForSelfSimilarPatchesL2Norm(img_y,patchsize_lr);
hrpatch_filtered = IF3_SimilarityFilter(hrpatch,hrpatchsimilarity,mostsimilarinputpatchrecord);
img_texture = IF4_BuildHRimagefromHRPatches(hrpatch_filtered,zooming);
iternum = 1000;
Tolf = 0.0001;
breport = false;
disp('backprojection for img_texture');
img_texture_backprojection = F11d_BackProjection_GaussianKernel(img_y, img_texture, Gau_sigma, iternum,breport,Tolf);
%extract the gradient
gradients_texture = F14_Img2Grad(img_texture_backprojection);
end
function scanresult = IF2_SearchForSelfSimilarPatchesL2Norm(img_y,patchsize_lr)
%out:
%scanresult: 3 x numberofFcandidate x (h_lr-patchsize+1) x (w_lr-patchsize+1)
patcharea = patchsize_lr^2;
[lh lw] = size(img_y);
%Find self similar patches
numberofFcandidate = 10;
scanresult = zeros(3,numberofFcandidate,lh-patchsize_lr+1,lw-patchsize_lr+1); %scan results: r,c, similarity
totalpatchnumber = (lh-patchsize_lr+1)*(lw-patchsize_lr+1);
featurematrix = zeros(patcharea,totalpatchnumber);
rec = zeros(2,totalpatchnumber);
idx = 0;
for rl=1:lh-patchsize_lr+1
rl1 = rl+patchsize_lr-1;
for cl=1:lw-patchsize_lr+1
cl1 = cl+patchsize_lr-1;
idx = idx + 1;
rec(:,idx) = [rl;cl];
featurematrix(:,idx) = reshape(img_y(rl:rl1,cl:cl1),patcharea,1);
end
end
%search
idx = 0;
for rl=1:lh-patchsize_lr+1
for cl=1:lw-patchsize_lr+1
idx = idx + 1;
fprintf('idx %d totalpatchnumber %d\n',idx,totalpatchnumber);
queryfeature = featurematrix(:,idx);
diff = featurematrix - repmat(queryfeature,1,totalpatchnumber);
sqr = sum(diff.^2);
[ssqr ix] = sort(sqr);
saveidx = 0;
for j=1:numberofFcandidate+1 %add one to prevent find itself
indexinsort = ix(j);
sr = rec(1,indexinsort);
sc = rec(2,indexinsort);
%explanation: it is possible that 11 or more lr patches have the same appearance, so the query
%patch itself may be sorted beyond index 11; checking sr and sc alone is then insufficient,
%and the saveidx bound below prevents writing more than numberofFcandidate entries
if sr ~= rl || sc ~= cl
saveidx = saveidx + 1;
if saveidx <= numberofFcandidate
l2norm = sqrt(ssqr(j));
similarity = exp(-l2norm/25);
scanresult(1:3,saveidx,rl,cl) = [sr;sc;similarity];
end
end
end
end
end
end
function hrpatch_filtered = IF3_SimilarityFilter(hrpatch,hrpatchsimilarity,mostsimilarinputpatches)
%in
%hrpatch: patchsize_hr x patchsize_hr x (h_lr-patchsize_lr+1) x (w_lr-patchsize_lr+1) x numberofHcandidate
%hrpatchsimilarity: (h_lr-patchsize_lr+1) x (w_lr-patchsize_lr+1) x numberofHcandidate
%mostsimilarinputpatches: 3 x numberofFcandidate x (h_lr-patchsize_lr+1) x (w_lr-patchsize_lr+1)
%out
%hrpatch_filtered: patchsize_hr x patchsize_hr x (h_lr-patchsize_lr+1) x (w_lr-patchsize_lr+1)
zooming = 4;
patchsize_hr = size(hrpatch,1);
patchsize_lr = patchsize_hr /zooming;
h_lr = size(hrpatch,3) + patchsize_lr -1;
w_lr = size(hrpatch,4) + patchsize_lr -1;
numberofHcandidate = size(hrpatch,5);
numberofFcandidate = size(mostsimilarinputpatches,2);
%allocate for out
hrpatch_filtered = zeros(patchsize_hr,patchsize_hr,h_lr-patchsize_lr+1,w_lr-patchsize_lr+1);
for rl= 1:h_lr-patchsize_lr+1
fprintf('rl:%d total:%d\n',rl,h_lr-patchsize_lr+1);
for cl = 1:w_lr-patchsize_lr+1
%load candidates
H = zeros(patchsize_hr,patchsize_hr,numberofHcandidate);
similarityHtolrpatch = zeros(numberofHcandidate,1);
for j=1:numberofHcandidate
H(:,:,j) = hrpatch(:,:,rl,cl,j); %H
similarityHtolrpatch(j) = hrpatchsimilarity(rl,cl,j);
end
%self similar patch instance number
similarityFtolrpatch = reshape( mostsimilarinputpatches(3,:,rl,cl) , [numberofFcandidate , 1]);
%load all of the two step patches
R = zeros(patchsize_hr,patchsize_hr,numberofFcandidate,numberofHcandidate);
RSimbasedonF = zeros(numberofFcandidate,numberofHcandidate);
for i=1:numberofFcandidate
sr = mostsimilarinputpatches(1,i,rl,cl);
sc = mostsimilarinputpatches(2,i,rl,cl);
%hr candidate number
for j=1:numberofHcandidate
R(:,:,i,j) = hrpatch(:,:,sr,sc,j);
RSimbasedonF(i,j) = hrpatchsimilarity(sr,sc,j);
end
end
%here is a question, how to define the similarity between H and R?
%L2norm?
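%each HR candidate H is scored below by accumulating, over all self-similar LR patches F and
%their HR candidates R, the product of four similarities: H to the query LR patch, R given F,
%R to H via exp(-||H-R||/25), and F to the query LR patch; the top-scoring candidate is kept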
hscore = zeros(numberofHcandidate,1);
for i=1:numberofHcandidate
theH = H(:,:,i);
for j=1:numberofFcandidate
for k=1:numberofHcandidate
theR = R(:,:,j,k);
similarityRbasedonF = RSimbasedonF(j,k);
%similarity between H and R
diff = theH - theR;
L2N = norm(diff(:));
similarityRtoH = exp(- L2N/25); %the 25 is a parameter that needs to be tuned in the future
hscore(i) = hscore(i) + similarityHtolrpatch(i) * similarityRbasedonF * similarityRtoH * similarityFtolrpatch(j);
end
end
end
[~, idx] = max(hscore);
hrpatch_filtered(:,:,rl,cl) = hrpatch(:,:,rl,cl,idx(1));
end
end
end
function img_texture = IF4_BuildHRimagefromHRPatches(hrpatch,zooming)
%reconstruct the high-resolution image
patchsize_hr = size(hrpatch,1);
patchsize_lr = patchsize_hr/zooming;
h_lr = size(hrpatch,3) + patchsize_lr - 1;
w_lr = size(hrpatch,4) + patchsize_lr - 1;
h_expected = h_lr * zooming;
w_expected = w_lr * zooming;
img_texture = zeros(h_expected,w_expected);
%most cases
rpixelshift = 2; %this should be modified according to patchsize_lr
cpixelshift = 2;
for rl = 2:h_lr - patchsize_lr
rh = (rl-1+rpixelshift)*zooming+1;
rh1 = rh+zooming-1;
for cl = 2:w_lr - patchsize_lr
ch = (cl-1+cpixelshift)*zooming+1;
ch1 = ch+zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
img_texture(rh:rh1,ch:ch1) = usedhrpatch(9:12,9:12);
end
end
%left
cl = 1;
ch = 1;
ch1 = ch+3*zooming-1;
for rl=2:h_lr-patchsize_lr
rh = (rl-1+rpixelshift)*zooming+1;
rh1 = rh+zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 1;
ch1source = chsource+3*zooming-1;
rhsource = 9;
rh1source = rhsource+zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
end
%right
cl = w_lr - patchsize_lr+1;
ch = w_expected - 3*zooming+1;
ch1 = w_expected;
for rl=2:h_lr-patchsize_lr
rh = (rl-1+rpixelshift)*zooming+1;
rh1 = rh+zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 9;
ch1source = chsource+3*zooming-1;
rhsource = 9;
rh1source = rhsource+zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
end
%top
rl = 1;
rh = 1;
rh1 = rh+3*zooming-1;
for cl=2:w_lr-patchsize_lr
ch = (cl-1+cpixelshift)*zooming+1;
ch1 = ch+zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 9;
ch1source = chsource+zooming-1;
rhsource = 1;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
end
%bottom
rl = h_lr-patchsize_lr+1;
rh = h_expected - 3*zooming+1;
rh1 = h_expected;
for cl=2:w_lr-patchsize_lr
ch = (cl-1+cpixelshift)*zooming+1;
ch1 = ch+zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 9;
ch1source = chsource+zooming-1;
rhsource = 9;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
end
%left-top corner
rl=1;
cl=1;
rh = 1;
rh1 = rh+3*zooming-1;
ch = 1;
ch1 = ch+3*zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 1;
ch1source = chsource+3*zooming-1;
rhsource = 1;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
%right-top corner
rl=1;
cl=w_lr-patchsize_lr+1;
rh = (rl-1)*zooming+1;
rh1 = rh+3*zooming-1;
ch = (cl-1+cpixelshift)*zooming+1;
ch1 = ch+3*zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 9;
ch1source = chsource+3*zooming-1;
rhsource = 1;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
%left-bottom corner
rl=h_lr-patchsize_lr+1;
cl=1;
rh = (rl-1+rpixelshift)*zooming+1;
rh1 = rh+3*zooming-1;
ch = (cl-1)*zooming+1;
ch1 = ch+3*zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 1;
ch1source = chsource+3*zooming-1;
rhsource = 9;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
%right-bottom corner
rl=h_lr-patchsize_lr+1;
cl=w_lr-patchsize_lr+1;
rh = (rl-1+rpixelshift)*zooming+1;
rh1 = rh+3*zooming-1;
ch = (cl-1+cpixelshift)*zooming+1;
ch1 = ch+3*zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 9;
ch1source = chsource+3*zooming-1;
rhsource = 9;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
end
|
github
|
Liusifei/Face-Hallucination-master
|
U24_sc.m
|
.m
|
Face-Hallucination-master/Code/Ours2/U24_sc.m
| 1,764 |
utf_8
|
fed7b2eda56837174f633232855bd7f8
|
%Chih-Yuan Yang
%10/11/12
%Usage
%U24_sc(img,[0 1]);
function hfig = U24_sc(varargin)
bSetColormapGray = false;
for j=1:nargin
if isa(varargin{j},'char')
ControlString = varargin{j};
StringLength = length(ControlString);
for i=1:StringLength
switch ControlString(i)
case 'c'
close all
case 'g'
bSetColormapGray = true;
otherwise
end
end
end
[h w] = size(varargin{j});
if h == 1 && w == 2 && ~isa(varargin{j},'char')
caxis_min_high = varargin{j};
end
end
for i=1:nargin
if ~isa(varargin{i},'char')
[h w] = size(varargin{i});
if ~(h==1 && w==2)
layer = size(varargin{i},3);
for j=1:layer
hfig = figure;
imagesc(varargin{i}(:,:,j));
colorbar
axis image
axis on
if layer == 1
string = inputname(i);
else
string = sprintf('%s %d',inputname(i), j);
end
TitleString = regexprep(string,'\_','\\_');
title(TitleString);
set(hfig,'Name',string,'NumberTitle','off');
if bSetColormapGray
colormap gray
end
if exist('caxis_min_high','var')
caxis(caxis_min_high);
end
end
end
end
end
end
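%Usage sketch (hypothetical data; the matrices and the [0 1] color-axis range are
%assumptions, not taken from this repository):
% A = rand(64,64); B = rand(64,64,3);
% U24_sc('cg', A, B, [0 1]); %'c' closes open figures, 'g' uses a gray colormap,
%                            %and the 1x2 vector fixes the color axis of every displayed layer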
|
github
|
Liusifei/Face-Hallucination-master
|
F6f_RetriveImage_DrawFlowChart.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F6f_RetriveImage_DrawFlowChart.m
| 2,688 |
utf_8
|
4062d227dc2025015b7a09ca66eddc0d
|
%Chih-Yuan Yang
%2/2/15
%F6d: return the aligned images to draw the flowchart. I update the function F19a to F19c.
%F6f: The parallel command parfor is unstable on Linux, so I change it back to a normal for loop.
function [retrievedhrimage, retrievedlrimage, retrievedidx, alignedexampleimage_hr, alignedexampleimage_lr] = ...
F6f_RetriveImage_DrawFlowChart(testimage_lr, ...
rawexampleimage, inputpoints, basepoints, mask_lr, zooming, Gau_sigma, glasslist, bglassavoid)
%the rawexampleimage should be double
if ~isa(rawexampleimage,'uint8')
error('wrong class');
end
[h_hr, w_hr, exampleimagenumber] = size(rawexampleimage);
[h_lr, w_lr] = size(testimage_lr);
%find the transform matrix by solving an optimization problem
alignedexampleimage_hr = zeros(h_hr,w_hr,exampleimagenumber,'uint8'); %set as uint8 to reduce memory demand
alignedexampleimage_lr = zeros(h_lr,w_lr,exampleimagenumber);
for i=1:exampleimagenumber
alignedexampleimage_hr(:,:,i) = F18b_AlignExampleImageByLandmarkSet(rawexampleimage(:,:,i),inputpoints(:,:,i),basepoints);
%F19 automatically converts uint8 input to double
alignedexampleimage_lr(:,:,i) = F19c_GenerateLRImage_GaussianKernel(alignedexampleimage_hr(:,:,i),zooming,Gau_sigma);
end
[r_set, c_set] = find(mask_lr);
top = min(r_set);
bottom = max(r_set);
left = min(c_set);
right = max(c_set);
area_test = im2double(testimage_lr(top:bottom,left:right));
area_mask = mask_lr(top:bottom,left:right);
area_test_aftermask = area_test .* area_mask;
%extract feature from the eyerange, the features are the gradient of LR eye region
feature_test = F24_ExtractFeatureFromArea(area_test_aftermask); %the unit is double
%search for the thousand example images to find the most similar eyerange
normvalue = zeros(exampleimagenumber,1);
for j=1:exampleimagenumber
examplearea_lr = alignedexampleimage_lr(top:bottom,left:right,j);
examplearea_lr_aftermask = examplearea_lr .* area_mask;
feature_example_lr = F24_ExtractFeatureFromArea(examplearea_lr_aftermask); %the unit is double
normvalue(j) = norm(feature_test - feature_example_lr);
end
%find the small norm
[sortnorm ix] = sort(normvalue);
%some of them are very similar
%only return the 1nn
if bglassavoid
for k=1:exampleimagenumber
if glasslist(ix(k)) == false
break
end
end
else
k =1;
end
retrievedhrimage = alignedexampleimage_hr(:,:,ix(k));
retrievedlrimage = alignedexampleimage_lr(:,:,ix(k));
retrievedidx = ix(k);
end
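%Usage sketch (variable names are assumptions; shapes follow the checks inside this
%function): testimage_lr is the LR test face, rawexampleimage is an h_hr x w_hr x n
%uint8 stack, inputpoints/basepoints are the landmark sets used for alignment, and
%mask_lr marks the facial component region in the LR image.
% [hr_ret, lr_ret, idx, aligned_hr, aligned_lr] = F6f_RetriveImage_DrawFlowChart(...
%     testimage_lr, rawexampleimage, inputpoints, basepoints, mask_lr, 4, 1.6, ...
%     glasslist, true);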
|
github
|
Liusifei/Face-Hallucination-master
|
U26_DrawMask.m
|
.m
|
Face-Hallucination-master/Code/Ours2/U26_DrawMask.m
| 971 |
utf_8
|
b784d543172316f3d30cd8a78746ed36
|
%Chih-Yuan Yang
%10/10/12
function U26_DrawMask(img, componentrecord, bsave, fn_save)
if nargin < 3
bsave = false;
end
codefolder = fileparts(pwd);
addpath(fullfile(codefolder,'Lib','YIQConverter'));
[h w] = size(img);
img = repmat(img,[1 1 3]); %prepare for color
img_yiq = RGB2YIQ(img);
img_y = img_yiq(:,:,1);
img_iq = img_yiq(:,:,2:3);
color{1} = [1 0 0]; %red
color{2} = [1 1 0]; %yellow
color{3} = [0 1 0]; %green
color{4} = [0 0 1]; %blue
for k=1:4
rgb_color = reshape(color{k},[1 1 3]);
yiq_color = RGB2YIQ(rgb_color);
iq_color = yiq_color(2:3);
mask = componentrecord(k).mask_lr;
img_iq = img_iq + repmat(iq_color, [h w 1]) .* repmat(mask, [1 1 2]);
end
img_yiq = cat(3,img_y,img_iq);
img_rgb = YIQ2RGB(img_yiq);
if bsave
imwrite(img_rgb,fn_save);
else
figure
imshow(img_rgb);
end
end
|
github
|
Liusifei/Face-Hallucination-master
|
U25a_ReadSemaphoreFile.m
|
.m
|
Face-Hallucination-master/Code/Ours2/U25a_ReadSemaphoreFile.m
| 321 |
utf_8
|
b1430f2eb5cfb5ee4f131ef486b3272f
|
%Chih-Yuan Yang
%08/28/13
%To parallel run Glasner's algorithm
%Just read, do not change the semaphore file
function [arr_filename, arr_label] = U25a_ReadSemaphoreFile(fn_symphony)
fid = fopen(fn_symphony,'r+');
C = textscan(fid,'%05d %s %d\n');
arr_filename = C{2};
arr_label = C{3};
fclose(fid);
end
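%Format sketch of the semaphore file this reader expects (hypothetical content; each
%line is "index filename label", matching the textscan pattern '%05d %s %d' and the
%format written by U27f later in this folder):
% 00001 image0001.png 0
% 00002 image0002.png 1
% [arr_filename, arr_label] = U25a_ReadSemaphoreFile('semaphore.txt');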
|
github
|
Liusifei/Face-Hallucination-master
|
F29_AddPixelIdxFromCoor.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F29_AddPixelIdxFromCoor.m
| 360 |
utf_8
|
df5423a57f2287ff5eee471e2b6ce3ed
|
%Chih-Yuan Yang
%10/01/12
%Correct an error: the mapping from coordinate to index is round rather than floor or ceil
function region = F29_AddPixelIdxFromCoor(region)
region.left_idx = round(region.left_coor);
region.top_idx = round(region.top_coor);
region.right_idx = round(region.right_coor);
region.bottom_idx = round(region.bottom_coor);
end
|
github
|
Liusifei/Face-Hallucination-master
|
F27b_SmoothnessPreserving.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F27b_SmoothnessPreserving.m
| 7,564 |
utf_8
|
9a939481df544fadc2c13f24d293c052
|
%Chih-Yuan Yang
%1/3/15
%Separate this function from F21, because this function is required in the training phase
%F27a: save similarity_lr and similarity_hr to draw figures required for the paper
%F27b: Now I run the code on a Linux machine through SSH. There is no GUI, so I
%disable all image rendering.
function img_out = F27b_SmoothnessPreserving(img_y,zooming,Gau_sigma)
img_bb = imresize(img_y,zooming);
%compute the similarity from low
Coef = 10;
Sqrt_low = IF1_SimilarityEvaluation(img_y); %I may need more directions, 16 may be too small
Similarity_low = exp(-Sqrt_low*Coef);
[h_high, w_high] = size(img_bb);
ExpectedSimilarity = zeros(h_high,w_high,16);
%upsample the similarity
for dir=1:16
ExpectedSimilarity(:,:,dir) = imresize(Similarity_low(:,:,dir),zooming,'bilinear');
end
%refine the Grad_high by Similarity_high
LoopNumber = 10;
img = img_bb;
for loop = 1:LoopNumber
%refine gradient by ExpectedSimilarity
ValueSum = zeros(h_high,w_high);
WeightSum = sum(ExpectedSimilarity,3); %if the weight sum is low, it is unsuitable to generate the grad by interpolation
for dir = 1:16
[MoveOp, N] = IF3_GetMoveKernel16(dir);
if N == 1
MovedData = imfilter(img,MoveOp{1},'replicate');
else %N ==2
MovedData1 = imfilter(img,MoveOp{1},'replicate');
MovedData2 = imfilter(img,MoveOp{2},'replicate');
MovedData = (MovedData1 + MovedData2)/2;
end
Product = MovedData .* ExpectedSimilarity(:,:,dir);
ValueSum = ValueSum + Product;
end
I = ValueSum ./ WeightSum;
%intensity compensate
diff_lr = F19c_GenerateLRImage_GaussianKernel(I,zooming,Gau_sigma) - img_y;
diff_hr = F26_UpsampleAndBlur(diff_lr,zooming,Gau_sigma);
Grad0 = diff_hr;
Term_LowHigh_in = F28_ComputeSquareSumLowHighDiff(I,img_y,Gau_sigma);
I_in = I; %make a copy; restore the value if all beta values fail
bDecrease = false;
%should I use the strict constraint?
tau = 0.2;
for line_search_loop=1:10
%line search for the beta, fixed 1/32 is not a good choice
I = I_in - tau * Grad0;
Term_LowHigh_out = F28_ComputeSquareSumLowHighDiff(I,img_y,Gau_sigma);
if Term_LowHigh_out < Term_LowHigh_in
bDecrease = true;
break;
else
tau = tau * 0.5;
end
end
if bDecrease == true
I_best = I;
else
break;
end
img = I_best;
end
img_out = img;
end
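%Usage sketch (a minimal call, assuming img_y is a double LR face image in [0,1];
%the scaling factor 4 and Gaussian sigma 1.6 follow the convention used elsewhere
%in this code base):
% img_smooth = F27b_SmoothnessPreserving(img_y, 4, 1.6);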
function SqrtData = IF1_SimilarityEvaluation(Img_in)
[h, w] = size(Img_in);
SqrtData = zeros(h,w,16);
f3x3 = ones(3);
for i = 1:16
[DiffOp, N] = IF2_RetGradientKernel16(i);
if N == 1
Diff = imfilter(Img_in,DiffOp{1},'symmetric');
else
Diff1 = imfilter(Img_in,DiffOp{1},'symmetric');
Diff2 = imfilter(Img_in,DiffOp{2},'symmetric');
Diff = (Diff1+Diff2)/2;
end
Sqr = Diff.^2;
Sum = imfilter(Sqr,f3x3,'replicate');
Mean = Sum/9;
SqrtData(:,:,i) = sqrt(Mean);
end
end
function [DiffOp, N] = IF2_RetGradientKernel16(dir)
DiffOp = cell(2,1);
f{1} = [0 0 0;
0 -1 1;
0 0 0];
f{2} = [0 0 1;
0 -1 0;
0 0 0];
f{3} = [0 1 0;
0 -1 0;
0 0 0];
f{4} = [1 0 0;
0 -1 0;
0 0 0];
f{5} = [0 0 0;
1 -1 0;
0 0 0];
f{6} = [0 0 0;
0 -1 0;
1 0 0];
f{7} = [0 0 0;
0 -1 0;
0 1 0];
f{8} = [0 0 0;
0 -1 0;
0 0 1];
switch dir
case 1
N = 1;
DiffOp{1} = f{1};
DiffOp{2} = [];
case 2
N = 2;
DiffOp{1} = f{1};
DiffOp{2} = f{2};
case 3
N = 1;
DiffOp{1} = f{2};
DiffOp{2} = [];
case 4
N = 2;
DiffOp{1} = f{2};
DiffOp{2} = f{3};
case 5
N = 1;
DiffOp{1} = f{3};
DiffOp{2} = [];
case 6
N = 2;
DiffOp{1} = f{3};
DiffOp{2} = f{4};
case 7
N = 1;
DiffOp{1} = f{4};
DiffOp{2} = [];
case 8
N = 2;
DiffOp{1} = f{4};
DiffOp{2} = f{5};
case 9
N = 1;
DiffOp{1} = f{5};
DiffOp{2} = [];
case 10
N = 2;
DiffOp{1} = f{5};
DiffOp{2} = f{6};
case 11
DiffOp{1} = f{6};
DiffOp{2} = [];
N = 1;
case 12
N = 2;
DiffOp{1} = f{6};
DiffOp{2} = f{7};
case 13
N = 1;
DiffOp{1} = f{7};
DiffOp{2} = [];
case 14
N = 2;
DiffOp{1} = f{7};
DiffOp{2} = f{8};
case 15
DiffOp{1} = f{8};
DiffOp{2} = [];
N = 1;
case 16
N = 2;
DiffOp{1} = f{8};
DiffOp{2} = f{1};
end
end
function [Kernel, N] = IF3_GetMoveKernel16(dir)
Kernel = cell(2,1);
f{1} = [0 0 0;
0 0 1;
0 0 0];
f{2} = [0 0 1;
0 0 0;
0 0 0];
f{3} = [0 1 0;
0 0 0;
0 0 0];
f{4} = [1 0 0;
0 0 0;
0 0 0];
f{5} = [0 0 0;
1 0 0;
0 0 0];
f{6} = [0 0 0;
0 0 0;
1 0 0];
f{7} = [0 0 0;
0 0 0;
0 1 0];
f{8} = [0 0 0;
0 0 0;
0 0 1];
switch dir
case 1
N = 1;
Kernel{1} = f{1};
Kernel{2} = [];
case 2
N = 2;
Kernel{1} = f{1};
Kernel{2} = f{2};
case 3
N = 1;
Kernel{1} = f{2};
Kernel{2} = [];
case 4
N = 2;
Kernel{1} = f{2};
Kernel{2} = f{3};
case 5
N = 1;
Kernel{1} = f{3};
Kernel{2} = [];
case 6
N = 2;
Kernel{1} = f{3};
Kernel{2} = f{4};
case 7
N = 1;
Kernel{1} = f{4};
Kernel{2} = [];
case 8
N = 2;
Kernel{1} = f{4};
Kernel{2} = f{5};
case 9
N = 1;
Kernel{1} = f{5};
Kernel{2} = [];
case 10
N = 2;
Kernel{1} = f{5};
Kernel{2} = f{6};
case 11
Kernel{1} = f{6};
Kernel{2} = [];
N = 1;
case 12
N = 2;
Kernel{1} = f{6};
Kernel{2} = f{7};
case 13
N = 1;
Kernel{1} = f{7};
Kernel{2} = [];
case 14
N = 2;
Kernel{1} = f{7};
Kernel{2} = f{8};
case 15
Kernel{1} = f{8};
Kernel{2} = [];
N = 1;
case 16
N = 2;
Kernel{1} = f{8};
Kernel{2} = f{1};
end
end
|
github
|
Liusifei/Face-Hallucination-master
|
F7_ComputePSNR_SSIM_DIIVINE.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F7_ComputePSNR_SSIM_DIIVINE.m
| 1,163 |
utf_8
|
5eee7e53e792342dbae52c1943352c21
|
%09/12/12
%Chih-Yuan Yang, EECS, UC Merced
%Compute PSNR, SSIM, DIIVINE
%If you encounter a MATLAB error: Undefined function 'buildSFpyr' for input arguments of type 'double'.
%You need to install libraries which are dependencies of DIIVINE
%Steerable Pyramid Toolbox, Download from: http://www.cns.nyu.edu/~eero/steerpyr/
% action ==>compile mex in MEX subfolder, copy the pointOp.mexw64 to matlabPyrTools folder
% ==>addpath('matlabPyrTools')
%LibSVM package for MATLAB, Download from: http://www.csie.ntu.edu.tw/~cjlin/libsvm/
function [PSNR, SSIM, DIIVINE] = F7_ComputePSNR_SSIM_DIIVINE(img_gt, img_input, bComputeDIIVINE)
%check the type
if isa(img_gt,'double')
img_gt = im2uint8(img_gt);
img_input = im2uint8(img_input);
end
if size(img_gt,3) > 1
img_gt = rgb2gray(img_gt);
end
if size(img_input,3) > 1
img_input = rgb2gray(img_input);
end
PSNR = measerr(img_gt, img_input);
SSIM = ssim( img_gt, img_input);
DIIVINE = 0;
if ~exist('bComputeDIIVINE','var')
bComputeDIIVINE = false;
end
if bComputeDIIVINE
DIIVINE = divine(img_input);
end
end
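%Usage sketch (the file names are assumptions; pass false to skip the DIIVINE
%measure when its dependencies are not installed):
% img_gt = imread('groundtruth.png');
% img_sr = imread('result.png');
% [PSNR, SSIM] = F7_ComputePSNR_SSIM_DIIVINE(img_gt, img_sr, false);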
|
github
|
Liusifei/Face-Hallucination-master
|
U23a_PrepareResultFolder.m
|
.m
|
Face-Hallucination-master/Code/Ours2/U23a_PrepareResultFolder.m
| 1,137 |
utf_8
|
3560d9fcfec329fa70657e0caeabb913
|
%Chih-Yuan Yang
%09/25/12
%from U23 to U23a, change the name of setting folder
function para = U23a_PrepareResultFolder(resultfolder,para)
settingfolder = fullfile(resultfolder,sprintf('%s%d',para.settingname, para.setting));
tuningfolder = fullfile(settingfolder, sprintf('Tuning%d',para.tuning));
para.resultfolder = resultfolder;
para.settingfolder = settingfolder;
para.tuningfolder = tuningfolder;
U22_makeifnotexist(tuningfolder);
if ~isempty(para.settingnote)
fid = fopen(fullfile(settingfolder, 'SettingNote.txt'),'w');
fprintf(fid,'%s',para.settingnote);
fclose(fid);
end
if ~isempty(para.tuningnote)
fid = fopen(fullfile(para.tuningfolder ,'TuningNote.txt'),'w');
fprintf(fid,'%s',para.tuningnote);
fclose(fid);
end
%copy parameter setting
if ispc
cmd = ['copy ' para.mainfilename '.m ' fullfile(para.tuningfolder, [para.mainfilename '_backup.m '])];
elseif isunix
cmd = ['cp ' para.mainfilename '.m ' fullfile(para.tuningfolder, [para.mainfilename '_backup.m '])];
end
dos(cmd);
end
|
github
|
Liusifei/Face-Hallucination-master
|
F37b_GetTexturePatchMatch_PatchCompensate.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F37b_GetTexturePatchMatch_PatchCompensate.m
| 12,865 |
utf_8
|
849b03c185bdc637acc0f2492918d37b
|
%Chih-Yuan Yang
%10/07/12
%Use patchmatch to retrieve a texture background
function [gradients_texture img_texture img_texture_backprojection] = F37b_GetTexturePatchMatch_PatchCompensate(img_y, ...
hrexampleimages, lrexampleimages)
%parameter
numberofHcandidate = 1;
%start
[h_lr, w_lr, exampleimagenumber] = size(lrexampleimages);
[h_hr, w_hr, ~] = size(hrexampleimages);
zooming = h_hr/h_lr;
if zooming == 4
Gau_sigma = 1.6;
elseif zooming == 3
Gau_sigma = 1.2;
end
cores = 2; % Use more cores for more speed
if cores==1
algo = 'cpu';
else
algo = 'cputiled';
end
patchsize_lr = 5;
nn_iters = 5;
%A =F38_ExtractFeatureFromAnImage(img_y);
A = repmat(img_y,[1 1 3]);
testnumber = exampleimagenumber;
xyandl2norm = zeros(h_lr,w_lr,3,testnumber,'int32');
%question: how to control the random seed in PatchMatch? in the nn.cpp, but it looks fixed because srand2() is never called
%However, since the numberofHcandidate values differ between Test34 (10) and Test35 (1), the retrieved patches are different
disp('patchmatching');
parfor i=1:testnumber
%run patchmatch
B = repmat(lrexampleimages(:,:,i),[1 1 3]);
xyandl2norm(:,:,:,i) = nnmex(A, B, algo, patchsize_lr, nn_iters, [], [], [], [], cores); %the return is in int32
end
l2norm_double = double(xyandl2norm(:,:,3,:));
[sortedl2norm, ix] = sort(l2norm_double,4);
hrpatchextractdata = zeros(h_lr-patchsize_lr+1,w_lr-patchsize_lr+1,numberofHcandidate,3); %ii,r_lr_src,c_lr_src
%here
hrpatchsimilarity = zeros(h_lr-patchsize_lr+1,w_lr-patchsize_lr+1,numberofHcandidate);
parameter_l2normtosimilarity = 625;
for rl = 1:h_lr-patchsize_lr+1
for cl = 1:w_lr-patchsize_lr+1
for k=1:numberofHcandidate
knnidx = ix(rl,cl,1,k);
x = xyandl2norm(rl,cl,1,knnidx); %start from 0
y = xyandl2norm(rl,cl,2,knnidx);
clsource = x+1;
rlsource = y+1;
hrpatchextractdata(rl,cl,k,:) = reshape([knnidx rlsource clsource],[1 1 1 3]);
hrpatchsimilarity(rl,cl,k) = exp(-sortedl2norm(rl,cl,1,knnidx)/parameter_l2normtosimilarity);
end
end
end
hrpatch = F39_ExtractAllHrPatches(patchsize_lr,zooming, hrpatchextractdata,hrexampleimages);
hrpatch = F40_CompensateHRpatches(hrpatch, img_y, zooming, hrpatchextractdata,lrexampleimages);
% mostsimilarinputpatchrecord = IF2_SearchForSelfSimilarPatchesL2Norm(img_y,patchsize_lr);
% hrpatch_filtered = IF3_SimilarityFilter(hrpatch,hrpatchsimilarity,mostsimilarinputpatchrecord);
img_texture = IF4_BuildHRimagefromHRPatches(hrpatch,zooming);
iternum = 1000;
Tolf = 0.0001;
breport = false;
disp('backprojection for img_texture');
img_texture_backprojection = F11d_BackProjection_GaussianKernel(img_y, img_texture, Gau_sigma, iternum,breport,Tolf);
%extract the gradient
gradients_texture = F14_Img2Grad(img_texture_backprojection);
end
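%Usage sketch (variable names are assumptions): img_y is the double LR input, while
%hrexampleimages and lrexampleimages are aligned example stacks whose height ratio
%determines the scaling factor (4 -> sigma 1.6, 3 -> sigma 1.2, as set above).
% [grad_tex, img_tex, img_tex_bp] = F37b_GetTexturePatchMatch_PatchCompensate(...
%     img_y, hrexampleimages, lrexampleimages);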
function scanresult = IF2_SearchForSelfSimilarPatchesL2Norm(img_y,patchsize_lr)
%out:
%scanresult: 3 x numberofFcandidate x (h_lr-patchsize+1) x (w_lr-patchsize+1)
patcharea = patchsize_lr^2;
[lh lw] = size(img_y);
%Find self similar patches
numberofFcandidate = 10;
scanresult = zeros(3,numberofFcandidate,lh-patchsize_lr+1,lw-patchsize_lr+1); %scan results: r,c, similarity
totalpatchnumber = (lh-patchsize_lr+1)*(lw-patchsize_lr+1);
featurematrix = zeros(patcharea,totalpatchnumber);
rec = zeros(2,totalpatchnumber);
idx = 0;
for rl=1:lh-patchsize_lr+1
rl1 = rl+patchsize_lr-1;
for cl=1:lw-patchsize_lr+1
cl1 = cl+patchsize_lr-1;
idx = idx + 1;
rec(:,idx) = [rl;cl];
featurematrix(:,idx) = reshape(img_y(rl:rl1,cl:cl1),patcharea,1);
end
end
%search
idx = 0;
for rl=1:lh-patchsize_lr+1
for cl=1:lw-patchsize_lr+1
idx = idx + 1;
fprintf('idx %d totalpatchnumber %d\n',idx,totalpatchnumber);
queryfeature = featurematrix(:,idx);
diff = featurematrix - repmat(queryfeature,1,totalpatchnumber);
sqr = sum(diff.^2);
[ssqr ix] = sort(sqr);
saveidx = 0;
for j=1:numberofFcandidate+1 %add one to prevent find itself
indexinsort = ix(j);
sr = rec(1,indexinsort);
sc = rec(2,indexinsort);
%explanation: it is possible that there are 11 lr patches with the same appearance
%and the input one is sorted at an index larger than 11, so checking sr and sc alone is insufficient
%to prevent the problem
if sr ~= rl || sc ~= cl
saveidx = saveidx + 1;
if saveidx <= numberofFcandidate
l2norm = sqrt(ssqr(j));
similarity = exp(-l2norm/25);
scanresult(1:3,saveidx,rl,cl) = [sr;sc;similarity];
end
end
end
end
end
end
function hrpatch_filtered = IF3_SimilarityFilter(hrpatch,hrpatchsimilarity,mostsimilarinputpatches)
%in
%hrpatch: patchsize_hr x patchsize_hr x (h_lr-patchsize_lr+1) x (w_lr-patchsize_lr+1) x numberofHcandidate
%hrpatchsimilarity: (h_lr-patchsize_lr+1) x (w_lr-patchsize_lr+1) x numberofHcandidate
%mostsimilarinputpatches: 3 x numberofFcandidate x (h_lr-patchsize_lr+1) x (w_lr-patchsize_lr+1)
%out
%hrpatch_filtered: patchsize_hr x patchsize_hr x (h_lr-patchsize_lr+1) x (w_lr-patchsize_lr+1)
zooming = 4;
patchsize_hr = size(hrpatch,1);
patchsize_lr = patchsize_hr /zooming;
h_lr = size(hrpatch,3) + patchsize_lr -1;
w_lr = size(hrpatch,4) + patchsize_lr -1;
numberofHcandidate = size(hrpatch,5);
numberofFcandidate = size(mostsimilarinputpatches,2);
%allocate for out
hrpatch_filtered = zeros(patchsize_hr,patchsize_hr,h_lr-patchsize_lr+1,w_lr-patchsize_lr+1);
for rl= 1:h_lr-patchsize_lr+1
fprintf('rl:%d total:%d\n',rl,h_lr-patchsize_lr+1);
for cl = 1:w_lr-patchsize_lr+1
%load candidates
H = zeros(patchsize_hr,patchsize_hr,numberofHcandidate);
similarityHtolrpatch = zeros(numberofHcandidate,1);
for j=1:numberofHcandidate
H(:,:,j) = hrpatch(:,:,rl,cl,j); %H
similarityHtolrpatch(j) = hrpatchsimilarity(rl,cl,j);
end
%self similar patch instance number
similarityFtolrpatch = reshape( mostsimilarinputpatches(3,:,rl,cl) , [numberofFcandidate , 1]);
%load all of the two step patches
R = zeros(patchsize_hr,patchsize_hr,numberofFcandidate,numberofHcandidate);
RSimbasedonF = zeros(numberofFcandidate,numberofHcandidate);
for i=1:numberofFcandidate
sr = mostsimilarinputpatches(1,i,rl,cl);
sc = mostsimilarinputpatches(2,i,rl,cl);
%hr candidate number
for j=1:numberofHcandidate
R(:,:,i,j) = hrpatch(:,:,sr,sc,j);
RSimbasedonF(i,j) = hrpatchsimilarity(sr,sc,j);
end
end
%here is a question, how to define the similarity between H and R?
%L2norm?
hscore = zeros(numberofHcandidate,1);
for i=1:numberofHcandidate
theH = H(:,:,i);
for j=1:numberofFcandidate
for k=1:numberofHcandidate
theR = R(:,:,j,k);
similarityRbasedonF = RSimbasedonF(j,k);
%similarity between H and R
diff = theH - theR;
L2N = norm(diff(:));
similarityRtoH = exp(- L2N/25); %the 25 is a parameter that needs to be tuned in the future
hscore(i) = hscore(i) + similarityHtolrpatch(i) * similarityRbasedonF * similarityRtoH * similarityFtolrpatch(j);
end
end
end
[~, idx] = max(hscore);
hrpatch_filtered(:,:,rl,cl) = hrpatch(:,:,rl,cl,idx(1));
end
end
end
function img_texture = IF4_BuildHRimagefromHRPatches(hrpatch,zooming)
%reconstruct the high-resolution image
patchsize_hr = size(hrpatch,1);
patchsize_lr = patchsize_hr/zooming;
h_lr = size(hrpatch,3) + patchsize_lr - 1;
w_lr = size(hrpatch,4) + patchsize_lr - 1;
h_expected = h_lr * zooming;
w_expected = w_lr * zooming;
img_texture = zeros(h_expected,w_expected);
%most cases
rpixelshift = 2; %this should be modified according to patchsize_lr
cpixelshift = 2;
for rl = 2:h_lr - patchsize_lr
rh = (rl-1+rpixelshift)*zooming+1;
rh1 = rh+zooming-1;
for cl = 2:w_lr - patchsize_lr
ch = (cl-1+cpixelshift)*zooming+1;
ch1 = ch+zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
img_texture(rh:rh1,ch:ch1) = usedhrpatch(9:12,9:12); %central zooming x zooming block; the indices 9:12 assume patchsize_lr = 5 and zooming = 4
end
end
%left
cl = 1;
ch = 1;
ch1 = ch+3*zooming-1;
for rl=2:h_lr-patchsize_lr
rh = (rl-1+rpixelshift)*zooming+1;
rh1 = rh+zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 1;
ch1source = chsource+3*zooming-1;
rhsource = 9;
rh1source = rhsource+zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
end
%right
cl = w_lr - patchsize_lr+1;
ch = w_expected - 3*zooming+1;
ch1 = w_expected;
for rl=2:h_lr-patchsize_lr
rh = (rl-1+rpixelshift)*zooming+1;
rh1 = rh+zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 9;
ch1source = chsource+3*zooming-1;
rhsource = 9;
rh1source = rhsource+zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
end
%top
rl = 1;
rh = 1;
rh1 = rh+3*zooming-1;
for cl=2:w_lr-patchsize_lr
ch = (cl-1+cpixelshift)*zooming+1;
ch1 = ch+zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 9;
ch1source = chsource+zooming-1;
rhsource = 1;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
end
%bottom
rl = h_lr-patchsize_lr+1;
rh = h_expected - 3*zooming+1;
rh1 = h_expected;
for cl=2:w_lr-patchsize_lr
ch = (cl-1+cpixelshift)*zooming+1;
ch1 = ch+zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 9;
ch1source = chsource+zooming-1;
rhsource = 9;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
end
%left-top corner
rl=1;
cl=1;
rh = 1;
rh1 = rh+3*zooming-1;
ch = 1;
ch1 = ch+3*zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 1;
ch1source = chsource+3*zooming-1;
rhsource = 1;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
%right-top corner
rl=1;
cl=w_lr-patchsize_lr+1;
rh = (rl-1)*zooming+1;
rh1 = rh+3*zooming-1;
ch = (cl-1+cpixelshift)*zooming+1;
ch1 = ch+3*zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 9;
ch1source = chsource+3*zooming-1;
rhsource = 1;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
%left-bottom corner
rl=h_lr-patchsize_lr+1;
cl=1;
rh = (rl-1+rpixelshift)*zooming+1;
rh1 = rh+3*zooming-1;
ch = (cl-1)*zooming+1;
ch1 = ch+3*zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 1;
ch1source = chsource+3*zooming-1;
rhsource = 9;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
%right-bottom corner
rl=h_lr-patchsize_lr+1;
cl=w_lr-patchsize_lr+1;
rh = (rl-1+rpixelshift)*zooming+1;
rh1 = rh+3*zooming-1;
ch = (cl-1+cpixelshift)*zooming+1;
ch1 = ch+3*zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 9;
ch1source = chsource+3*zooming-1;
rhsource = 9;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
end
|
github
|
Liusifei/Face-Hallucination-master
|
U12_AlignExampleImageByTestEyes.m
|
.m
|
Face-Hallucination-master/Code/Ours2/U12_AlignExampleImageByTestEyes.m
| 797 |
utf_8
|
0c2c35f5e7ac99a7309df8637e349a4c
|
%Chih-Yuan Yang
%08/31/12
function [alignedexampleimage alignedlandmarks] = U12_AlignExampleImageByTestEyes(exampleimage,landmarks,eyecenter,testeyecenter)
[h w exampleimagenumber] = size(exampleimage);
alignedexampleimage = zeros(h, w, exampleimagenumber,'uint8');
alignedlandmarks = zeros(68,2,exampleimagenumber);
parfor i=1:exampleimagenumber
%show the message if not using matlabpool
%if mod(i,100) == 0
% fprintf('aligning images and landmarks i=%d\n',i);
%end
[alignedimage_this alignedlandmark_this] = U10_ConvertImageAndLandmarksByTwoPoints(exampleimage(:,:,i),landmarks(:,:,i),eyecenter(:,:,i),testeyecenter);
alignedexampleimage(:,:,i) = alignedimage_this;
alignedlandmarks(:,:,i) = alignedlandmark_this;
end
end
|
github
|
Liusifei/Face-Hallucination-master
|
F21c_EdgePreserving_GaussianKernel.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F21c_EdgePreserving_GaussianKernel.m
| 8,595 |
utf_8
|
31c45f6b14eb21121a14bd50f78e5d4b
|
%Chih-Yuan Yang
%09/29/12
%F21b: Based on F21a, but change the square kernel to Gaussian, to see whether the square pattern disappears
%F21c: remove the para argument
function [gradient_expected gradient_actual weightmap_edge img_edge] = F21c_EdgePreserving_GaussianKernel(img_y,zooming,Gau_sigma)
LowMagSuppression = 0; %the three parameters should be adjusted later
DistanceUpperBound = 2.0;
ContrastEnhenceCoef = 1.0;
I_s = F27_SmoothnessPreserving(img_y,zooming,Gau_sigma);
T = F15_ComputeSRSSD(I_s);
Dissimilarity = EvaluateDissimilarity8(I_s);
Grad_high_initial = Img2Grad(I_s);
[h w] = size(T);
StatisticsFolder = fullfile('EdgePriors');
LoadFileName = sprintf('Statistics_Sc%d_Si%0.1f.mat',zooming,Gau_sigma);
LoadData = load(fullfile(StatisticsFolder,LoadFileName));
Statistics = LoadData.Statistics;
RidgeMap = edge(I_s,'canny',[0 0.01],0.05);
%filter out small ridge and non-maximun ridges
RidgeMap_filtered = RidgeMap;
[r_set c_set] = find(RidgeMap);
SetLength = length(r_set);
for j=1:SetLength
r = r_set(j);
c = c_set(j);
CenterMagValue = T(r,c);
if CenterMagValue < LowMagSuppression
RidgeMap_filtered(r,c) = false;
end
end
[r_set c_set] = find(RidgeMap_filtered);
SetLength = length(r_set);
[X Y] = meshgrid(1:11,1:11);
DistPatch = sqrt((X-6).^2 + (Y-6).^2);
DistMap = inf(h,w);
UsedPixel = false(h,w);
CenterCoor = zeros(h,w,2);
%Compute DistMap and CenterCoor
[r_set c_set] = find(RidgeMap_filtered);
for j=1:SetLength
r = r_set(j);
r1 = r-5;
r2 = r+5;
c = c_set(j);
c1 = c-5;
c2 = c+5;
if r1>=1 && r2<=h && c1>=1 && c2<=w %discard boundary?
MapPatch = DistMap(r1:r2,c1:c2);
MinPatch = min(MapPatch, DistPatch);
DistMap(r1:r2,c1:c2) = MinPatch;
UsedPixel(r1:r2,c1:c2) = true;
ChangedPixels = MinPatch < MapPatch;
OriginalCenterCoorPatch = CenterCoor(r1:r2,c1:c2,:);
NewCoor = cat(3,r*ones(11), c*ones(11));
NewCenterCoorPatch = OriginalCenterCoorPatch .* repmat(1-ChangedPixels,[1,1,2]) + NewCoor .* repmat(ChangedPixels,[1,1,2]);
CenterCoor(r1:r2,c1:c2,:) = NewCenterCoorPatch;
end
end
%Convert dist to table index
TableIndexMap = zeros(h,w);
b = unique(DistPatch(:));
for i=1:length(b)
SetPixels = DistMap == b(i);
TableIndexMap(SetPixels) = i;
end
%mapping (T_p, T_r, d) to S_p
[r_set c_set] = find(UsedPixel);
SetLength = length(r_set);
UpdatedPixel = false(h,w);
S = zeros(h,w);
for i=1:SetLength
r = r_set(i);
c = c_set(i);
r_Center = CenterCoor(r,c,1);
c_Center = CenterCoor(r,c,2);
CurrentMagValue = T(r,c);
BinIdx_Current = ceil(CurrentMagValue /0.005);
%Zebra images have super strong Mag
if BinIdx_Current > 100
BinIdx_Current = 100;
end
TableIndex = TableIndexMap(r,c);
if TableIndex > DistanceUpperBound
continue
end
CenterMagValue = T(r_Center,c_Center);
%Low Mag Edge suppression
if CenterMagValue < LowMagSuppression
continue
end
BinIdx_Center = ceil(CenterMagValue /0.005);
if BinIdx_Center > 100
BinIdx_Center = 100;
end
%consult the table
if TableIndex == 1 %1 is the index of b(1) where dist = 0, enhance the contrast of pixel on edge
S_p = ContrastEnhenceCoef * Statistics(TableIndex).EstimatedMag(BinIdx_Current,BinIdx_Center);
else
S_p = Statistics(TableIndex).EstimatedMag(BinIdx_Current,BinIdx_Center);
end
if isnan(S_p)
else
UpdatedPixel(r,c) = true;
S(r,c) = S_p;
end
end
%Record the RidgeMapMagValue for computing the ProbOfMag
%the Mag is the consulted Mag
%here is the problem: when S is very strong, the affected range of ProbMagOut exceeds 1 pixel
RidgeMapMagValue = zeros(h,w);
for i=1:SetLength
r = r_set(i);
c = c_set(i);
r_Center = CenterCoor(r,c,1);
c_Center = CenterCoor(r,c,2);
RidgeMapMagValue(r,c) = S(r_Center,c_Center);
end
S(~UpdatedPixel) = T(~UpdatedPixel);
img_in = I_s;
if min(Dissimilarity(:)) == 0
d = Dissimilarity + 1e-6; %avoid 0 case; some images may have d(:,:,1) as 0
else
d = Dissimilarity;
end
ratio = d ./ repmat(d(:,:,1),[1,1,8]);
%here is the problem, I need to amplify the gradient directionally
Grad_in = Img2Grad(img_in);
Product = Grad_in .* ratio;
Sqr = Product.^2;
Sum = sum(Sqr,3);
Sqrt = sqrt(Sum); %the Sqrt might be 0, because Grad_in may be pure 0;
r1 = S ./Sqrt;
r1(isnan(r1)) = 0;
Grad_exp = Grad_high_initial .*( ratio .*(repmat(r1,[1,1,8])));
%consolidate inconsistent gradients
NewGrad_exp = zeros(h,w,8);
for k=1:4
switch k
case 1
ShiftOp = [0 -1];
case 2
ShiftOp = [1 -1];
case 3
ShiftOp = [1 0];
case 4
ShiftOp = [1 1];
end
k2 =k+4;
Grad1 = Grad_exp(:,:,k);
Grad2 = Grad_exp(:,:,k2);
Grad2Shift = circshift(Grad2,ShiftOp);
Grad1Abs = abs(Grad1);
Grad2AbsShift = abs(Grad2Shift);
Grad1Larger = Grad1Abs > Grad2AbsShift;
Grad2Larger = Grad2AbsShift > Grad1Abs;
NewGrad1 = Grad1 .* Grad1Larger + (-Grad2Shift) .* Grad2Larger;
NewGrad2Shift = Grad2Shift .* Grad2Larger + (-Grad1) .* Grad1Larger;
NewGrad2 = circshift(NewGrad2Shift,-ShiftOp);
NewGrad_exp(:,:,k) = NewGrad1;
NewGrad_exp(:,:,k2) = NewGrad2;
end
%current problem is the over-enhanced gradient (NewMagExp too large)
gradient_expected = NewGrad_exp;
bReport = true;
img_edge = F4b_GenerateIntensityFromGradient(img_y,img_in,NewGrad_exp,Gau_sigma,bReport);
gradient_actual = Img2Grad(img_edge);
%compute the Map of edge weight
lambda_m = 2;
m0 = 0;
ProbMagOut = lambda_m * RidgeMapMagValue + m0;
lambda_d = 0.25;
d0 = 0.25;
ProbDistMap = exp(- (lambda_d * DistMap + d0) ); %this coefficient should be decided by the zooming factor
Product = ProbMagOut .* ProbDistMap;
weightmap_edge = min(Product,1); %the two terms are not sufficient; direction is not taken into consideration
end
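%Usage sketch (assumes the precomputed statistics file, e.g.
%EdgePriors/Statistics_Sc4_Si1.6.mat, exists for the chosen scaling factor and sigma):
% [grad_exp, grad_act, w_edge, img_edge] = F21c_EdgePreserving_GaussianKernel(img_y, 4, 1.6);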
function Grad = Img2Grad(img)
[h w] = size(img);
Grad = zeros(h,w,8);
DiffOp = RetGradientKernel();
for i=1:8
Grad(:,:,i) = imfilter(img,DiffOp{i},'replicate');
end
end
function f = RetGradientKernel()
f = cell(8,1);
f{1} = [0 0 0;
0 -1 1;
0 0 0];
f{2} = [0 0 1;
0 -1 0;
0 0 0];
f{3} = [0 1 0;
0 -1 0;
0 0 0];
f{4} = [1 0 0;
0 -1 0;
0 0 0];
f{5} = [0 0 0;
1 -1 0;
0 0 0];
f{6} = [0 0 0;
0 -1 0;
1 0 0];
f{7} = [0 0 0;
0 -1 0;
0 1 0];
f{8} = [0 0 0;
0 -1 0;
0 0 1];
end
function Dissimilarity = EvaluateDissimilarity8(Img_in,PatchSize)
if ~exist('PatchSize','var')
PatchSize = 3;
end
[h w] = size(Img_in);
Dissimilarity = zeros(h,w,8);
f3x3 = ones(PatchSize)/(PatchSize^2);
for i = 1:8
DiffOp = RetGradientKernel8(i);
Diff = imfilter(Img_in,DiffOp,'symmetric');
Sqr = Diff.^2;
Sum = imfilter(Sqr,f3x3,'replicate');
Dissimilarity(:,:,i) = sqrt(Sum);
end
end
function DiffOp = RetGradientKernel8(dir)
f{1} = [0 0 0;
0 -1 1;
0 0 0];
f{2} = [0 0 1;
0 -1 0;
0 0 0];
f{3} = [0 1 0;
0 -1 0;
0 0 0];
f{4} = [1 0 0;
0 -1 0;
0 0 0];
f{5} = [0 0 0;
1 -1 0;
0 0 0];
f{6} = [0 0 0;
0 -1 0;
1 0 0];
f{7} = [0 0 0;
0 -1 0;
0 1 0];
f{8} = [0 0 0;
0 -1 0;
0 0 1];
DiffOp = f{dir};
end
function f = ComputeFunctionValue_Grad(img, Grad_exp)
Grad = Img2Grad(img);
Diff = Grad - Grad_exp;
Sqrt = Diff .^2;
f = sqrt(sum(Sqrt(:)));
end
|
github
|
Liusifei/Face-Hallucination-master
|
F2_ReturnLandmarks.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F2_ReturnLandmarks.m
| 1,429 |
utf_8
|
7bfe348598b5785b1ac920493bfaef49
|
%09/29/12
%Chih-Yuan Yang
%This file has to be executed on Linux
function [bs posemap]= F2_ReturnLandmarks(im, modelname)
% load and visualize model
% Pre-trained model with 146 parts. Works best for faces larger than 80*80
%load face_p146_small.mat
% % Pre-trained model with 99 parts. Works best for faces larger than 150*150
% load face_p99.mat
% % Pre-trained model with 1050 parts. Give best performance on localization, but very slow
% load multipie_independent.mat
switch modelname
case 'p146'
load face_p146_small.mat
case 'p99'
load face_p99.mat
case 'mi'
load multipie_independent.mat
otherwise
error('no such a model');
end
% 5 levels for each octave
model.interval = 5;
% set up the threshold
model.thresh = min(-0.65, model.thresh);
% define the mapping from view-specific mixture id to viewpoint
if length(model.components)==13
posemap = 90:-15:-90;
elseif length(model.components)==18
posemap = [90:-15:15 0 0 0 0 0 0 -15:-15:-90];
else
error('Can not recognize this model');
end
bs = detect(im, model, model.thresh); %this function can only be executed on Linux
bs = clipboxes(im, bs); %this function removes the boxes exceeding the boundary
bs = nms_face(bs,0.3); %non-maximum suppression
end
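%Usage sketch (assumes the landmark detection library that provides
%face_p146_small.mat, detect, clipboxes, and nms_face is on the MATLAB path, and
%that the code runs on Linux as noted above; the image file name is an assumption):
% im = imread('testface.jpg');
% [bs, posemap] = F2_ReturnLandmarks(im, 'p146');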
|
github
|
Liusifei/Face-Hallucination-master
|
F32a_ComputeMask_Mouth.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F32a_ComputeMask_Mouth.m
| 2,375 |
utf_8
|
272accc010ebb93eccf1bac4f6d2eb9f
|
%Chih-Yuan Yang
%07/20/14
%Blur the mask_hr to prevent a discontinuous boundary
%F32a: I add new inputs to support the scaling factor of 3. I also replace F19a to F19c.
function [mask_lr, mask_hr] = F32a_ComputeMask_Mouth(landmarks_hr, scalingfactor, Gau_sigma)
%points 49:68 are the mouth
mask_hr = zeros(480,640);
setrange = 49:68;
checkpair = [49 50;
50 51;
51 52;
52 53;
53 54;
54 55;
55 56;
56 57;
57 58;
58 59;
59 60;
60 49];
for k=1:size(checkpair,1);
i = checkpair(k,1);
j = checkpair(k,2);
%mark the pixel between i and j
coor1 = landmarks_hr(i,:);
coor2 = landmarks_hr(j,:);
x1 = coor1(1);
c1 = round(x1);
y1 = coor1(2);
r1 = round(y1);
x2 = coor2(1);
c2 = round(x2);
y2 = coor2(2);
r2 = round(y2);
a = y2-y1;
b = x1-x2;
c = (x2-x1)*y1 - (y2-y1)*x1;
sqra2b2 = sqrt(a^2+b^2);
rmin = min(r1,r2);
rmax = max(r1,r2);
cmin = min(c1,c2);
cmax = max(c1,c2);
for rl=rmin:rmax
for cl=cmin:cmax
y_test = rl;
x_test = cl;
distance = abs(a*x_test+b*y_test +c)/sqra2b2;
if distance <= sqrt(2)/2
mask_hr(rl,cl) = 1;
end
end
end
end
%fill the interior
left_coor = min(landmarks_hr(setrange,1));
right_coor = max(landmarks_hr(setrange,1));
left_idx = round(left_coor);
right_idx = round(right_coor);
for cl = left_idx:right_idx
rmin = find(mask_hr(:,cl),1,'first');
rmax = find(mask_hr(:,cl),1,'last');
if rmin ~= rmax
mask_hr(rmin+1:rmax-1,cl) = 1;
end
end
%blur the boundary
mask_hr = imfilter(mask_hr,fspecial('gaussian',11,1.6));
%do not dilate, otherwise the beard will be copied and generate wrong patterns
%dilate the get the surrounding region
% radius = 6;
% approximateN = 0;
% se = strel('disk',radius,approximateN);
% mask_hr = imdilate(mask_hr,se);
mask_lr = F19c_GenerateLRImage_GaussianKernel(mask_hr,scalingfactor,Gau_sigma);
end
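%Usage sketch (landmarks_hr is assumed to be an n x 2 array of (x,y) landmark
%coordinates in the 480x640 high-resolution image, with rows 49:68 being the mouth):
% [mask_lr, mask_hr] = F32a_ComputeMask_Mouth(landmarks_hr, 4, 1.6);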
|
github
|
Liusifei/Face-Hallucination-master
|
F37_GetTexturePatchMatch.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F37_GetTexturePatchMatch.m
| 7,673 |
utf_8
|
14c2ead333bda6b48e4e390a7728a074
|
%Chih-Yuan Yang
%10/05/12
%Use patchmatch to retrieve a texture background
function [gradients_texture img_texture img_texture_backprojection] = F37_GetTexturePatchMatch(img_y, ...
hrexampleimages, lrexampleimages)
[h_lr, w_lr, exampleimagenumber] = size(lrexampleimages);
[h_hr, w_hr, ~] = size(hrexampleimages);
zooming = h_hr/h_lr;
if zooming == 4
Gau_sigma = 1.6;
elseif zooming == 3
Gau_sigma = 1.2;
end
cores = 4; % Use more cores for more speed
if cores==1
algo = 'cpu';
else
algo = 'cputiled';
end
patchsize = 5;
nn_iters = 5;
%A =F38_ExtractFeatureFromAnImage(img_y);
A = repmat(img_y,[1 1 3]);
testnumber = exampleimagenumber;
xyandl2norm = zeros(h_lr,w_lr,3,testnumber,'int32');
disp('patchmatching');
parfor i=1:testnumber
%run patchmatch
%fprintf('Patch match running image %d\n',i);
%B = F38_ExtractFeatureFromAnImage( lrexampleimages(:,:,i));
B = repmat(lrexampleimages(:,:,i),[1 1 3]);
xyandl2norm(:,:,:,i) = nnmex(A, B, algo, patchsize, nn_iters, [], [], [], [], cores); %the return in int32
end
l2norm = xyandl2norm(:,:,3,:);
[~, ix] = sort(l2norm,4);
%reconstruct the high-resolution image
h_expected = h_lr * zooming;
w_expected = w_lr * zooming;
img_texture = zeros(h_expected,w_expected,'uint8');
%most cases
rpixelshift = 2;
cpixelshift = 2;
for rl = 2:h_lr - patchsize
rh = (rl-1+rpixelshift)*zooming+1;
rh1 = rh+zooming-1;
for cl = 2:w_lr - patchsize
ch = (cl-1+cpixelshift)*zooming+1;
ch1 = ch+zooming-1;
onennidx = ix(rl,cl,1,1);
x = xyandl2norm(rl,cl,1,onennidx); %start from 0
y = xyandl2norm(rl,cl,2,onennidx);
clsource = x+1;
rlsource = y+1;
chsource = (clsource-1+cpixelshift)*zooming+1;
ch1source = chsource+zooming-1;
rhsource = (rlsource-1+rpixelshift)*zooming+1;
rh1source = rhsource+zooming-1;
img_texture(rh:rh1,ch:ch1) = hrexampleimages(rhsource:rh1source,chsource:ch1source,onennidx);
end
end
%left
cl = 1;
ch = 1;
ch1 = ch+3*zooming-1;
for rl=2:h_lr-patchsize
rh = (rl-1+rpixelshift)*zooming+1;
rh1 = rh+zooming-1;
onennidx = ix(rl,cl,1,1);
x = xyandl2norm(rl,cl,1,onennidx); %start from 0
y = xyandl2norm(rl,cl,2,onennidx);
clsource = x+1;
rlsource = y+1;
chsource = (clsource-1)*zooming+1;
ch1source = chsource+3*zooming-1;
rhsource = (rlsource-1+rpixelshift)*zooming+1;
rh1source = rhsource+zooming-1;
img_texture(rh:rh1,ch:ch1) = hrexampleimages(rhsource:rh1source,chsource:ch1source,onennidx);
end
%right
cl = w_lr - patchsize+1;
ch = w_expected - 3*zooming+1;
ch1 = w_expected;
for rl=2:h_lr-patchsize
rh = (rl-1+rpixelshift)*zooming+1;
rh1 = rh+zooming-1;
onennidx = ix(rl,cl,1,1);
x = xyandl2norm(rl,cl,1,onennidx); %start from 0
y = xyandl2norm(rl,cl,2,onennidx);
clsource = x+1;
rlsource = y+1;
chsource = (clsource-1+cpixelshift)*zooming+1;
ch1source = chsource+3*zooming-1;
rhsource = (rlsource-1+rpixelshift)*zooming+1;
rh1source = rhsource+zooming-1;
img_texture(rh:rh1,ch:ch1) = hrexampleimages(rhsource:rh1source,chsource:ch1source,onennidx);
end
%top
rl = 1;
rh = 1;
rh1 = rh+3*zooming-1;
for cl=2:w_lr-patchsize
ch = (cl-1+cpixelshift)*zooming+1;
ch1 = ch+zooming-1;
onennidx = ix(rl,cl,1,1);
x = xyandl2norm(rl,cl,1,onennidx); %start from 0
y = xyandl2norm(rl,cl,2,onennidx);
clsource = x+1;
rlsource = y+1;
chsource = (clsource-1+cpixelshift)*zooming+1;
ch1source = chsource+zooming-1;
rhsource = (rlsource-1)*zooming+1;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1) = hrexampleimages(rhsource:rh1source,chsource:ch1source,onennidx);
end
%bottom
rl = h_lr-patchsize+1;
rh = h_expected - 3*zooming+1;
rh1 = h_expected;
for cl=2:w_lr-patchsize
ch = (cl-1+cpixelshift)*zooming+1;
ch1 = ch+zooming-1;
onennidx = ix(rl,cl,1,1);
x = xyandl2norm(rl,cl,1,onennidx); %start from 0
y = xyandl2norm(rl,cl,2,onennidx);
clsource = x+1;
rlsource = y+1;
chsource = (clsource-1+cpixelshift)*zooming+1;
ch1source = chsource+zooming-1;
rhsource = (rlsource-1+rpixelshift)*zooming+1;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1) = hrexampleimages(rhsource:rh1source,chsource:ch1source,onennidx);
end
%left-top corner
rl=1;
cl=1;
rh = 1;
rh1 = rh+3*zooming-1;
ch = 1;
ch1 = ch+3*zooming-1;
onennidx = ix(rl,cl,1,1);
x = xyandl2norm(rl,cl,1,onennidx); %start from 0
y = xyandl2norm(rl,cl,2,onennidx);
clsource = x+1;
rlsource = y+1;
chsource = (clsource-1)*zooming+1;
ch1source = chsource+3*zooming-1;
rhsource = (rlsource-1)*zooming+1;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1) = hrexampleimages(rhsource:rh1source,chsource:ch1source,onennidx);
%right-top corner
rl=1;
cl=w_lr-patchsize+1;
rh = (rl-1)*zooming+1;
rh1 = rh+3*zooming-1;
ch = (cl-1+cpixelshift)*zooming+1;
ch1 = ch+3*zooming-1;
onennidx = ix(rl,cl,1,1);
x = xyandl2norm(rl,cl,1,onennidx); %start from 0
y = xyandl2norm(rl,cl,2,onennidx);
clsource = x+1;
rlsource = y+1;
chsource = (clsource-1+cpixelshift)*zooming+1;
ch1source = chsource+3*zooming-1;
rhsource = (rlsource-1)*zooming+1;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1) = hrexampleimages(rhsource:rh1source,chsource:ch1source,onennidx);
%left-bottom corner
rl=h_lr-patchsize+1;
cl=1;
rh = (rl-1+rpixelshift)*zooming+1;
rh1 = rh+3*zooming-1;
ch = (cl-1)*zooming+1;
ch1 = ch+3*zooming-1;
onennidx = ix(rl,cl,1,1);
x = xyandl2norm(rl,cl,1,onennidx); %start from 0
y = xyandl2norm(rl,cl,2,onennidx);
clsource = x+1;
rlsource = y+1;
chsource = (clsource-1)*zooming+1;
ch1source = chsource+3*zooming-1;
rhsource = (rlsource-1+rpixelshift)*zooming+1;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1) = hrexampleimages(rhsource:rh1source,chsource:ch1source,onennidx);
%left-bottom corner
rl=h_lr-patchsize+1;
cl=w_lr-patchsize+1;
rh = (rl-1+rpixelshift)*zooming+1;
rh1 = rh+3*zooming-1;
ch = (cl-1+cpixelshift)*zooming+1;
ch1 = ch+3*zooming-1;
onennidx = ix(rl,cl,1,1);
x = xyandl2norm(rl,cl,1,onennidx); %start from 0
y = xyandl2norm(rl,cl,2,onennidx);
clsource = x+1;
rlsource = y+1;
chsource = (clsource-1+cpixelshift)*zooming+1;
ch1source = chsource+3*zooming-1;
rhsource = (rlsource-1+rpixelshift)*zooming+1;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1) = hrexampleimages(rhsource:rh1source,chsource:ch1source,onennidx);
img_texture = im2double(img_texture);
iternum = 1000;
Tolf = 0.0001;
breport = false;
disp('backprojection for img_texture');
img_texture_backprojection = F11d_BackProjection_GaussianKernel(img_y, img_texture, Gau_sigma, iternum,breport,Tolf);
%extract the gradient
gradients_texture = F14_Img2Grad(img_texture_backprojection);
end
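%Usage sketch (same interface as F37b earlier in this folder; this version copies HR
%pixels directly from the example images and skips the per-patch compensation step):
% [grad_tex, img_tex, img_tex_bp] = F37_GetTexturePatchMatch(img_y, hrexampleimages, lrexampleimages);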
|
github
|
Liusifei/Face-Hallucination-master
|
F21d_EdgePreserving_GaussianKernel.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F21d_EdgePreserving_GaussianKernel.m
| 8,988 |
utf_8
|
36518bf567ca2194183e56d5dd021413
|
%Chih-Yuan Yang
%10/02/12
%F21b: Based on F21a, but change the square kernel to Gaussian, to see whether the square pattern disappears
%F21c: remove the para argument
%F21d: try to use large beta0 and small beta1 to see whether it can save the computational time
function [gradient_expected gradient_actual weightmap_edge img_edge] = F21d_EdgePreserving_GaussianKernel(img_y,zooming,Gau_sigma)
LowMagSuppression = 0; %the three parameters should be adjusted later
DistanceUpperBound = 2.0;
ContrastEnhenceCoef = 1.0;
I_s = F27_SmoothnessPreserving(img_y,zooming,Gau_sigma);
T = F15_ComputeSRSSD(I_s);
Dissimilarity = EvaluateDissimilarity8(I_s);
Grad_high_initial = Img2Grad(I_s);
[h w] = size(T);
StatisticsFolder = fullfile('EdgePriors');
LoadFileName = sprintf('Statistics_Sc%d_Si%0.1f.mat',zooming,Gau_sigma);
LoadData = load(fullfile(StatisticsFolder,LoadFileName));
Statistics = LoadData.Statistics;
RidgeMap = edge(I_s,'canny',[0 0.01],0.05);
%filter out small ridges and non-maximum ridges
RidgeMap_filtered = RidgeMap;
[r_set c_set] = find(RidgeMap);
SetLength = length(r_set);
for j=1:SetLength
r = r_set(j);
c = c_set(j);
CenterMagValue = T(r,c);
if CenterMagValue < LowMagSuppression
RidgeMap_filtered(r,c) = false;
end
end
[r_set c_set] = find(RidgeMap_filtered);
SetLength = length(r_set);
[X Y] = meshgrid(1:11,1:11);
DistPatch = sqrt((X-6).^2 + (Y-6).^2);
DistMap = inf(h,w);
UsedPixel = false(h,w);
CenterCoor = zeros(h,w,2);
%Compute DistMap and CenterCoor
[r_set c_set] = find(RidgeMap_filtered);
for j=1:SetLength
r = r_set(j);
r1 = r-5;
r2 = r+5;
c = c_set(j);
c1 = c-5;
c2 = c+5;
if r1>=1 && r2<=h && c1>=1 && c2<=w %discard boundary?
MapPatch = DistMap(r1:r2,c1:c2);
MinPatch = min(MapPatch, DistPatch);
DistMap(r1:r2,c1:c2) = MinPatch;
UsedPixel(r1:r2,c1:c2) = true;
ChangedPixels = MinPatch < MapPatch;
OriginalCenterCoorPatch = CenterCoor(r1:r2,c1:c2,:);
NewCoor = cat(3,r*ones(11), c*ones(11));
NewCenterCoorPatch = OriginalCenterCoorPatch .* repmat(1-ChangedPixels,[1,1,2]) + NewCoor .* repmat(ChangedPixels,[1,1,2]);
CenterCoor(r1:r2,c1:c2,:) = NewCenterCoorPatch;
end
end
%Convert dist to table index
TableIndexMap = zeros(h,w);
b = unique(DistPatch(:));
for i=1:length(b)
SetPixels = DistMap == b(i);
TableIndexMap(SetPixels) = i;
end
%mapping (T_p, T_r, d) to S_p
[r_set c_set] = find(UsedPixel);
SetLength = length(r_set);
UpdatedPixel = false(h,w);
S = zeros(h,w);
for i=1:SetLength
r = r_set(i);
c = c_set(i);
r_Center = CenterCoor(r,c,1);
c_Center = CenterCoor(r,c,2);
CurrentMagValue = T(r,c);
BinIdx_Current = ceil(CurrentMagValue /0.005);
%Zebra images have super strong Mag
if BinIdx_Current > 100
BinIdx_Current = 100;
end
TableIndex = TableIndexMap(r,c);
if TableIndex > DistanceUpperBound
continue
end
CenterMagValue = T(r_Center,c_Center);
%Low Mag Edge suppression
if CenterMagValue < LowMagSuppression
continue
end
BinIdx_Center = ceil(CenterMagValue /0.005);
if BinIdx_Center > 100
BinIdx_Center = 100;
end
%consult the table
if TableIndex == 1 %1 is the index of b(1) where dist = 0, enhance the contrast of pixel on edge
S_p = ContrastEnhenceCoef * Statistics(TableIndex).EstimatedMag(BinIdx_Current,BinIdx_Center);
else
S_p = Statistics(TableIndex).EstimatedMag(BinIdx_Current,BinIdx_Center);
end
if isnan(S_p)
else
UpdatedPixel(r,c) = true;
S(r,c) = S_p;
end
end
%Record the RidgeMapMagValue for computing the ProbOfMag
%the Mag is the consulted Mag
%here is the problem: when S is very strong, the affected range of ProbMagOut exceeds 1 pixel
RidgeMapMagValue = zeros(h,w);
for i=1:SetLength
r = r_set(i);
c = c_set(i);
r_Center = CenterCoor(r,c,1);
c_Center = CenterCoor(r,c,2);
RidgeMapMagValue(r,c) = S(r_Center,c_Center);
end
S(~UpdatedPixel) = T(~UpdatedPixel);
img_in = I_s;
if min(Dissimilarity(:)) == 0
d = Dissimilarity + 1e-6; %avoid 0 case; some images may have d(:,:,1) as 0
else
d = Dissimilarity;
end
ratio = d ./ repmat(d(:,:,1),[1,1,8]);
%here is the problem, I need to amplify the gradient directionally
Grad_in = Img2Grad(img_in);
Product = Grad_in .* ratio;
Sqr = Product.^2;
Sum = sum(Sqr,3);
Sqrt = sqrt(Sum); %the Sqrt might be 0, because Grad_in may be pure 0;
r1 = S ./Sqrt;
r1(isnan(r1)) = 0;
Grad_exp = Grad_high_initial .*( ratio .*(repmat(r1,[1,1,8])));
%consolidate inconsistent gradients
NewGrad_exp = zeros(h,w,8);
for k=1:4
switch k
case 1
ShiftOp = [0 -1];
case 2
ShiftOp = [1 -1];
case 3
ShiftOp = [1 0];
case 4
ShiftOp = [1 1];
end
k2 =k+4;
Grad1 = Grad_exp(:,:,k);
Grad2 = Grad_exp(:,:,k2);
Grad2Shift = circshift(Grad2,ShiftOp);
Grad1Abs = abs(Grad1);
Grad2AbsShift = abs(Grad2Shift);
Grad1Larger = Grad1Abs > Grad2AbsShift;
Grad2Larger = Grad2AbsShift > Grad1Abs;
NewGrad1 = Grad1 .* Grad1Larger + (-Grad2Shift) .* Grad2Larger;
NewGrad2Shift = Grad2Shift .* Grad2Larger + (-Grad1) .* Grad1Larger;
NewGrad2 = circshift(NewGrad2Shift,-ShiftOp);
NewGrad_exp(:,:,k) = NewGrad1;
NewGrad_exp(:,:,k2) = NewGrad2;
end
%current problem is the over-enhanced gradient (NewMagExp too large)
gradient_expected = NewGrad_exp;
bReport = true;
updatenumber = 0;
loopnumber = 1000;
linesearchstepnumber = 10;
beta0 = 1;
beta1 = 0.5^8;
tolf = 0.001;
img_edge = F4d_GenerateIntensityFromGradient(img_y,img_in,NewGrad_exp,Gau_sigma,bReport,...
loopnumber,updatenumber,linesearchstepnumber,beta0,beta1,tolf);
%img_edge = F4b_GenerateIntensityFromGradient(img_y,img_in,NewGrad_exp,Gau_sigma,bReport);
gradient_actual = Img2Grad(img_edge);
%compute the Map of edge weight
lambda_m = 2;
m0 = 0;
ProbMagOut = lambda_m * RidgeMapMagValue + m0;
lambda_d = 0.25;
d0 = 0.25;
ProbDistMap = exp(- (lambda_d * DistMap + d0) ); %this coefficient should be decided by the zooming factor
Product = ProbMagOut .* ProbDistMap;
weightmap_edge = min(Product,1); %the two terms are not sufficient; direction is not taken into consideration
end
function Grad = Img2Grad(img)
[h w] = size(img);
Grad = zeros(h,w,8);
DiffOp = RetGradientKernel();
for i=1:8
Grad(:,:,i) = imfilter(img,DiffOp{i},'replicate');
end
end
function f = RetGradientKernel()
f = cell(8,1);
f{1} = [0 0 0;
0 -1 1;
0 0 0];
f{2} = [0 0 1;
0 -1 0;
0 0 0];
f{3} = [0 1 0;
0 -1 0;
0 0 0];
f{4} = [1 0 0;
0 -1 0;
0 0 0];
f{5} = [0 0 0;
1 -1 0;
0 0 0];
f{6} = [0 0 0;
0 -1 0;
1 0 0];
f{7} = [0 0 0;
0 -1 0;
0 1 0];
f{8} = [0 0 0;
0 -1 0;
0 0 1];
end
function Dissimilarity = EvaluateDissimilarity8(Img_in,PatchSize)
if ~exist('PatchSize','var')
PatchSize = 3;
end
[h w] = size(Img_in);
Dissimilarity = zeros(h,w,8);
f3x3 = ones(PatchSize)/(PatchSize^2);
for i = 1:8
DiffOp = RetGradientKernel8(i);
Diff = imfilter(Img_in,DiffOp,'symmetric');
Sqr = Diff.^2;
Sum = imfilter(Sqr,f3x3,'replicate');
Dissimilarity(:,:,i) = sqrt(Sum);
end
end
function DiffOp = RetGradientKernel8(dir)
f{1} = [0 0 0;
0 -1 1;
0 0 0];
f{2} = [0 0 1;
0 -1 0;
0 0 0];
f{3} = [0 1 0;
0 -1 0;
0 0 0];
f{4} = [1 0 0;
0 -1 0;
0 0 0];
f{5} = [0 0 0;
1 -1 0;
0 0 0];
f{6} = [0 0 0;
0 -1 0;
1 0 0];
f{7} = [0 0 0;
0 -1 0;
0 1 0];
f{8} = [0 0 0;
0 -1 0;
0 0 1];
DiffOp = f{dir};
end
function f = ComputeFunctionValue_Grad(img, Grad_exp)
Grad = Img2Grad(img);
Diff = Grad - Grad_exp;
Sqrt = Diff .^2;
f = sqrt(sum(Sqrt(:)));
end
|
github
|
Liusifei/Face-Hallucination-master
|
U5d_ReadFileNameList_Index_Comment.m
|
.m
|
Face-Hallucination-master/Code/Ours2/U5d_ReadFileNameList_Index_Comment.m
| 2,170 |
utf_8
|
363e2331409333cc628abb5f9e597ed0
|
%Chih-Yuan Yang
%09/01/13
%U5: read list from a file and return it as a cell array
%U5c: add more comment controls; the comment sign % may occur at the end of a line
%U5d: suppose the list contains an index
function filenamelist = U5d_ReadFileNameList_Index_Comment( fn_list )
fid = fopen(fn_list,'r');
%skip the heading comment lines starting with %
tline = fgetl(fid);
idx_fn = 0;
filenamelist = cell(1);
str_tab = sprintf('\t');
while ischar(tline)
%if it is empty line or starts with %, do nothing
if isempty(tline) || tline(1) == '%'
%do nothing
else
%if a % is found, neglect the characters after the first space or %
k_space = strfind(tline,' ');
k_percentsign = strfind(tline,'%');
k_tab = strfind(tline,str_tab); %how to find a tab?
%initialize the str_fn as empty
str_fn = [];
if isempty(k_percentsign) && isempty(k_tab)
idx_fn = idx_fn + 1;
A = sscanf(tline,'%d %s');
num_char_fn = length(A) - 1; %A(1) is the %d, type double
for idx_char = 1:num_char_fn
str_fn(idx_char) = sprintf('%c',A(1+idx_char));
end
filenamelist{idx_fn,1} = str_fn;
else
k = length(tline);
if ~isempty(k_space)
k = min(k,k_space(2));
end
if ~isempty(k_tab)
k = min(k,k_tab(1));
end
if ~isempty(k_percentsign)
k = min(k,k_percentsign(1));
end
idx_fn = idx_fn + 1;
A = sscanf(tline(1:k-1),'%d %s');
num_char_index = length(sprintf('%d',A(1)));
num_char_fn = length(A) - num_char_index;
for idx_char = 1:num_char_fn
str_fn(idx_char) = sprintf('%c',A(num_char_index+idx_char));
end
filenamelist{idx_fn,1} = str_fn;
end
end
tline = fgetl(fid);
end
fclose(fid);
end
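%Format sketch of a list file this parser accepts (hypothetical content; each entry
%is "index filename", optionally followed by a tab or a trailing % comment):
% %header comment lines are skipped
% 1 image0001.png
% 2 image0002.png %this trailing comment is ignored
% filenamelist = U5d_ReadFileNameList_Index_Comment('filelist.txt');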
|
github
|
Liusifei/Face-Hallucination-master
|
F11_BackProjection.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F11_BackProjection.m
| 934 |
utf_8
|
4b0bbea072111144272dbf5b81271106
|
%09/19/12
%Chih-Yuan Yang
%add report
function img_bp = F11_BackProjection(img_lr, img_hr, Gau_sigma, iternum,bReport)
[h_hr] = size(img_hr,1);
[h_lr] = size(img_lr,1);
zooming = h_hr/h_lr;
for i=1:iternum
img_lr_gen = F19_GenerateLRImage_BlurSubSample(img_hr,zooming,Gau_sigma);
diff_lr = img_lr - img_lr_gen;
term_diff_lr_SSD = sum(sum(diff_lr.^2));
diff_hr = imresize(diff_lr,zooming,'bilinear');
img_hr = img_hr + diff_hr;
img_lr_new = F19_GenerateLRImage_BlurSubSample(img_hr,zooming,Gau_sigma);
diff_lr_new = img_lr - img_lr_new;
term_diff_lr_SSD_afteronebackprojection = sum(sum(diff_lr_new.^2));
if bReport
fprintf('backproject iteration=%d, term_before=%0.1f, term_after=%0.1f\n', ...
i,term_diff_lr_SSD,term_diff_lr_SSD_afteronebackprojection);
end
end
img_bp = img_hr;
end
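%Usage sketch (img_lr is a double LR image; the bicubic initialization, 10
%iterations, and the report flag are assumptions for illustration):
% img_hr0 = imresize(img_lr, 4);
% img_bp = F11_BackProjection(img_lr, img_hr0, 1.6, 10, true);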
|
github
|
Liusifei/Face-Hallucination-master
|
U27d_CreateSemaphoreFile_IndexOnly_Exclude.m
|
.m
|
Face-Hallucination-master/Code/Ours2/U27d_CreateSemaphoreFile_IndexOnly_Exclude.m
| 321 |
utf_8
|
afcd97c7aca96d193bf7d1e1e1f51dab
|
%Chih-Yuan Yang
%4/1/13
%
function U27d_CreateSemaphoreFile_IndexOnly_Exclude(fn_create,iiend,set_value0)
fid = fopen(fn_create,'w+');
for i=1:iiend
if nnz(set_value0 == i)
fprintf(fid,'%05d 0\n',i);
else
fprintf(fid,'%05d 1\n',i);
end
end
fclose(fid);
end
|
github
|
Liusifei/Face-Hallucination-master
|
F14_Img2Grad.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F14_Img2Grad.m
| 805 |
utf_8
|
782f46fa3e2d8440290ca95abe60cc43
|
%Chih-Yuan Yang
%10/02/12
%add class control
function Grad = F14_Img2Grad(img)
if ~isa(img,'double')
img = im2double(img);
warning('input type is not double.');
end
[h w] = size(img);
Grad = zeros(h,w,8);
DiffOp = IF1_RetGradientKernel();
for i=1:8
Grad(:,:,i) = imfilter(img,DiffOp{i},'replicate');
end
end
function f = IF1_RetGradientKernel()
f = cell(8,1);
f{1} = [0 -1 1];
f{2} = [0 0 1;
0 -1 0;
0 0 0];
f{3} = [ 1;
-1;
0];
f{4} = [1 0 0;
0 -1 0;
0 0 0];
f{5} = [1 -1 0];
f{6} = [0 0 0;
0 -1 0;
1 0 0];
f{7} = [ 0;
-1;
1];
f{8} = [0 0 0;
0 -1 0;
0 0 1];
end
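%Usage sketch: the output stacks the eight directional gradients, so an h x w double
%image yields an h x w x 8 array (the input file name below is an assumption):
% G = F14_Img2Grad(im2double(imread('face.png')));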
|
github
|
Liusifei/Face-Hallucination-master
|
U27f_CreateSemaphoreFile_FromFilenamelist_SetArray.m
|
.m
|
Face-Hallucination-master/Code/Ours2/U27f_CreateSemaphoreFile_FromFilenamelist_SetArray.m
| 300 |
utf_8
|
d02e9667b2345eca5608aafa983837c6
|
%Chih-Yuan Yang
%4/3/12
%To run in parallel
function U27f_CreateSemaphoreFile_FromFilenamelist_SetArray(fn_create,arr_filename, arr_value)
fid = fopen(fn_create,'w+');
for i=1:length(arr_filename)
fprintf(fid,'%05d %s %d\n',i,arr_filename{i},arr_value(i));
end
fclose(fid);
end
|
github
|
Liusifei/Face-Hallucination-master
|
T1_ImprovePatchMatch.m
|
.m
|
Face-Hallucination-master/Code/Ours2/T1_ImprovePatchMatch.m
| 1,264 |
utf_8
|
6d50fdf1fcee30d1a653f63f9c78c8f0
|
%Chih-Yuan Yang
%04/25/16
%Sifei calls TVD_dpreserve_mm three times to regularize the image along the x, y, and x axes by a TV norm.
%Do the paper and the original released code also work this way, consecutively for x, y, and then x?
function img_texture = T1_ImprovePatchMatch(img_texture,img_y)
%Grad_o is a set of refined gradient maps in LR
Grad_o = T1_Img2Grad_Blockcompensate(img_y);
y = img_texture;
[h,w] = size(y);
%z is the gradient map of +x direction
z = imresize(Grad_o(:,:,1), 4, 'bilinear');
%This is Sifei's L1L2-norm-regularized gradients
x = TVD_dpreserve_mm(y(:), z(1:end-1)', 0.036, 0.5, 50);
x = reshape(x,[h,w])';
%here z changes to the gradient map of +y direction
z = imresize(Grad_o(:,:,7), 4, 'bilinear');
x = TVD_dpreserve_mm(x(:), z(1:end-1)', 0.036, 0.8, 50);
x = reshape(x,[w,h])';
%Here z goes back to the gradient map of the +x direction to smooth the gradients along the x-axis again.
%The new parameters are smaller than those of the first x-axis smoothing. I guess that Sifei does
%not want to over-blur the images.
z = imresize(Grad_o(:,:,1), 4, 'bilinear');
x = TVD_dpreserve_mm(x(:), z(1:end-1)', 0.01, 0.5, 50);
img_texture = reshape(x,[h,w]);
end
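%Usage sketch (assumes Sifei's TVD_dpreserve_mm solver and the
%T1_Img2Grad_Blockcompensate helper are on the path; img_texture is the 4x upsampled
%texture estimate and img_y the LR input, as in the function body above):
% img_texture = T1_ImprovePatchMatch(img_texture, img_y);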
|
github
|
Liusifei/Face-Hallucination-master
|
F8_RetriveAreaGradientsByAlign_Optimization_PatchCompare.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F8_RetriveAreaGradientsByAlign_Optimization_PatchCompare.m
| 2,368 |
utf_8
|
221a92eeab3f073b39ce5d43f33b465e
|
%Chih-Yuan Yang
%09/11/12
%Solve the hair and background problem
%This idea does not work
function gradientcandidate = F8_RetriveAreaGradientsByAlign_Optimization_PatchCompare(testimage_lr, rawexampleimage, inputpoints, basepoints, region_lr, zooming, Gau_sigma)
region_hr = U18_ConvertLRRegionToHRRegion(region_lr, zooming);
exampleimagenumber = size(rawexampleimage,3);
%find the transform matrix by solving an optimization problem
parfor i=1:exampleimagenumber
alignedexampleimage_hr(:,:,i) = U20_AlignExampleImageByLandmarkSet(rawexampleimage(:,:,i),inputpoints(:,:,i),basepoints);
alignedexampleimage_lr(:,:,i) = U3_GenerateLRImage_BlurSubSample(im2double(alignedexampleimage_hr(:,:,i)),zooming,Gau_sigma);
end
%Patch to patch searching to reconstruct the background and hair; how many NN to take? try 5
patchsize_lr = 5;
%use feature as intensity
[h_hr w_hr] = size(alignedexampleimage_hr(:,:,1));
[h_lr w_lr] = size(alignedexampleimage_lr(:,:,1));
%how much overlap? try 1
overlap_lr = 1;
stepforward_lr = patchsize_lr - overlap_lr;
r_last = h_lr-patchsize_lr+1;
rlist = 1:stepforward_lr:r_last;
if rlist(end) ~= r_last
rlist = [rlist r_last];
end
c_last = w_lr-patchsize_lr+1;
clist = 1:stepforward_lr:c_last;
if clist(end) ~= c_last
clist = [clist c_last];
end
samplenumber = length(clist)*length(rlist);
for r = rlist
for c = clist
end
end
normvalue = zeros(exampleimagenumber,1);
parfor j=1:exampleimagenumber
examplearea_lr = alignedexampleimage_lr(region_lr.top_idx:region_lr.bottom_idx,region_lr.left_idx:region_lr.right_idx,j);
feature_example_lr = U16_ExtractFeatureFromArea(examplearea_lr); %the unit is double
normvalue(j) = norm(feature_test - feature_example_lr); %feature_test is never defined in this abandoned function, so this line would error
end
%find the small norm
[sortnorm ix] = sort(normvalue);
%some of them are very similar
%mostsimilarindex = ix(1:20);
gradientcandidate = zeros(region_hr.height,region_hr.width,8,20); %the 3rd dim is dx and dy
parfor j=1:20
examplehrregion = alignedexampleimage_hr(region_hr.top_idx:region_hr.bottom_idx,region_hr.left_idx:region_hr.right_idx,ix(j));
gradientcandidate(:,:,:,j) = Img2Grad(im2double(examplehrregion));
end
end
|
github
|
Liusifei/Face-Hallucination-master
|
U20_ReturnHandleDrawLandmarks.m
|
.m
|
Face-Hallucination-master/Code/Ours2/U20_ReturnHandleDrawLandmarks.m
| 1,015 |
utf_8
|
feb1dfaf1aeb76f0c572dba0f91c468d
|
%09/14/12
%Chih-Yuan Yang
%Return the handle
function hfig = U20_ReturnHandleDrawLandmarks(im, boxes, posemap,bshownumbers,bdrawpose,bVisible)
if bVisible
hfig = figure;
else
hfig = figure('Visible','off');
end
imshow(im);
hold on;
axis image;
axis off;
for b = boxes,
partsize = b.xy(1,3)-b.xy(1,1)+1;
tx = (min(b.xy(:,1)) + max(b.xy(:,3)))/2;
ty = min(b.xy(:,2)) - partsize/2;
if bdrawpose
text(tx,ty, num2str(posemap(b.c)),'fontsize',18,'color','c');
end
for i = size(b.xy,1):-1:1;
x1 = b.xy(i,1);
y1 = b.xy(i,2);
x2 = b.xy(i,3);
y2 = b.xy(i,4);
%line([x1 x1 x2 x2 x1]', [y1 y2 y2 y1 y1]', 'color', 'b', 'linewidth', 1);
plot((x1+x2)/2,(y1+y2)/2,'r.','markersize',9);
if bshownumbers
text((x1+x2)/2,(y1+y2)/2, num2str(i), 'fontsize',9,'color','k');
end
end
end
drawnow;
end
|
github
|
Liusifei/Face-Hallucination-master
|
F42_ConvertImageCoorToCartesianCoor.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F42_ConvertImageCoorToCartesianCoor.m
| 570 |
utf_8
|
dfd1f5972aeb86cbc8334d60cde570ef
|
%Chih-Yuan Yang
%10/23/12
%Called by PP2_GenerateAlignImageAndAlignedLandmarks
function pts_cartisian = F42_ConvertImageCoorToCartesianCoor(pts_image, imagesize)
h = imagesize(1);
w = imagesize(2); %not used
[pointperimage, dimperpts, imagenumber] = size(pts_image);
pts_cartisian = zeros(pointperimage, dimperpts, imagenumber);
for ii = 1:imagenumber
for ptsidx =1:pointperimage
pts_cartisian(ptsidx,1,ii) = pts_image(ptsidx,1,ii);
pts_cartisian(ptsidx,2,ii) = h - pts_image(ptsidx,2,ii);
end
end
end
|
github
|
Liusifei/Face-Hallucination-master
|
F11d_BackProjection_GaussianKernel.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F11d_BackProjection_GaussianKernel.m
| 2,514 |
utf_8
|
77f0ae684573082cd6e59065b67a6e02
|
%Chih-Yuan Yang
%07/20/14 I update the code to support the scaling factor of 3.
%F11c: controlled by iternum
%F11d: controlled by TolF
%This file should be replaced by F11e
function img_bp = F11d_BackProjection_GaussianKernel(img_lr, img_hr, Gau_sigma, iternum,bReport,TolF)
[h_hr] = size(img_hr,1);
[h_lr] = size(img_lr,1);
zooming = h_hr/h_lr;
for i=1:iternum
img_lr_gen = F19a_GenerateLRImage_GaussianKernel(img_hr,zooming,Gau_sigma);
diff_lr = img_lr - img_lr_gen;
RMSE_diff_lr = sqrt(mean2(diff_lr.^2));
diff_hr = IF5_Upsample(diff_lr,zooming, Gau_sigma);
%diff_hr = imresize(diff_lr,zooming,'bilinear');
img_hr = img_hr + diff_hr;
img_lr_new = F19a_GenerateLRImage_GaussianKernel(img_hr,zooming,Gau_sigma);
diff_lr_new = img_lr - img_lr_new;
RMSE_diff_lr_afteronebackprojection = sqrt(mean2(diff_lr_new.^2));
if bReport
fprintf('backproject iteration=%d, RMSE_before=%0.6f, RMSE_after=%0.6f\n', ...
i,RMSE_diff_lr,RMSE_diff_lr_afteronebackprojection);
end
if RMSE_diff_lr_afteronebackprojection < TolF
disp('RMSE_diff_lr_afteronebackprojection < TolF');
break;
end
end
img_bp = img_hr;
end
function diff_hr = IF5_Upsample(diff_lr,zooming, Gau_sigma)
[h w] = size(diff_lr);
h_hr = h*zooming;
w_hr = w*zooming;
upsampled = zeros(h_hr,w_hr);
if zooming == 3
for rl = 1:h
rh = (rl-1) * zooming + 2;
for cl = 1:w
ch = (cl-1) * zooming + 2;
upsampled(rh,ch) = diff_lr(rl,cl);
end
end
kernelsize = ceil(Gau_sigma * 3)*2+1;
kernel = fspecial('gaussian',kernelsize,Gau_sigma);
diff_hr = imfilter(upsampled,kernel,'replicate');
elseif zooming == 4
%compute the kernel ourselves; for zooming == 4 the kernel size is even, so we must
%control both the kernel and the sampling position of the diff
kernelsize = ceil(Gau_sigma * 3)*2+2; %+2 this is the even number
kernel = fspecial('gaussian',kernelsize,Gau_sigma);
%subsample diff_lr to (3,3), because of the result of imfilter
for rl = 1:h
rh = (rl-1) * zooming + 3;
for cl = 1:w
ch = (cl-1) * zooming + 3;
upsampled(rh,ch) = diff_lr(rl,cl);
end
end
diff_hr = imfilter(upsampled, kernel,'replicate');
else
error('not supported');
end
end
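%Example (my sketch; the file name is hypothetical, and the parameter values follow the
%convention used elsewhere in this folder: sigma 1.6 for 4x, TolF around 1e-4):
%   img_lr = im2double(imread('test_lr.png'));
%   img_hr_initial = imresize(img_lr,4,'bicubic');   %any HR estimate to be refined
%   img_bp = F11d_BackProjection_GaussianKernel(img_lr,img_hr_initial,1.6,100,true,0.0001);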
|
github
|
Liusifei/Face-Hallucination-master
|
F12_ACCV12Preprocess_LoadingData.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F12_ACCV12Preprocess_LoadingData.m
| 906 |
utf_8
|
6c1821196672d0e9d784f00f25d71390
|
%Chih-Yuan Yang
%09/19/12
%hint: for patch work, loading all data into memory can save time.
function [sfall srecall] = F12_ACCV12Preprocess_LoadingData(zooming,featurefilename,recordfilename)
if zooming == 4
featurefolder = fullfile('TexturePatchDataset','Feature','s4');
elseif zooming == 3
featurefolder = fullfile('TexturePatchDataset','Feature','s3');
end
sfall = cell(6,1);
srecall = cell(6,1);
quanarray = [1 2 4 8 16 32];
for qidx=1:6
quan = quanarray(qidx);
loadfilename = sprintf('%s%d.mat',featurefilename,quan);
loaddata = load(fullfile(featurefolder,loadfilename));
sfall{qidx} = loaddata.sf;
loadfilename = sprintf('%s%d.mat',recordfilename,quan);
loaddata = load(fullfile(featurefolder,loadfilename));
srecall{qidx} = loaddata.srec;
fprintf('.');
end
fprintf('\n');
end
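%Example (my sketch of how the loaded cell arrays are consumed; the file name prefixes follow
%the names hard-coded in F9_ACCV12Upampling in this folder, and img_y is a low-resolution
%luminance image):
%   zooming = 4;
%   [sfall, srecall] = F12_ACCV12Preprocess_LoadingData(zooming,'sf_1_1264_qf','srec_1_1264_qf');
%   img_hr = F9_ACCV12Upampling(img_y,zooming,1.6,sfall,srecall);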
|
github
|
Liusifei/Face-Hallucination-master
|
U27e_CreateSemaphoreFile_FromFilenamelist_ExcludeSet.m
|
.m
|
Face-Hallucination-master/Code/Ours2/U27e_CreateSemaphoreFile_FromFilenamelist_ExcludeSet.m
| 419 |
utf_8
|
7a0ceb3028a632f982a4637bb8fadf24
|
%Chih-Yuan Yang
%4/3/12
%To parallel run
function U27e_CreateSemaphoreFile_FromFilenamelist_ExcludeSet(fn_create,filenamelist, set_value0)
fid = fopen(fn_create,'w+');
for i=1:length(filenamelist)
if nnz(set_value0 == i)
fprintf(fid,'%05d %s 0\n',i,filenamelist{i});
else
fprintf(fid,'%05d %s 1\n',i,filenamelist{i});
end
end
fclose(fid);
end
|
github
|
Liusifei/Face-Hallucination-master
|
F43_ConvertCartesianCoorToImageCoor.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F43_ConvertCartesianCoorToImageCoor.m
| 519 |
utf_8
|
695a55806208b4ae78261a7b13223eb9
|
%Chih-Yuan Yang
%10/29/12
function pts_image = F43_ConvertCartesianCoorToImageCoor(pts_cartisian, imagesize)
h = imagesize(1);
w = imagesize(2); %not used
[pointperimage, dimperpts, imagenumber] = size(pts_cartisian);
pts_image = zeros(pointperimage, dimperpts, imagenumber);
for ii = 1:imagenumber
for ptsidx =1:pointperimage
pts_image(ptsidx,1,ii) = pts_cartisian(ptsidx,1,ii);
pts_image(ptsidx,2,ii) = h - pts_cartisian(ptsidx,2,ii);
end
end
end
|
github
|
Liusifei/Face-Hallucination-master
|
U27_CreateSemaphoreFile_TwoColumn.m
|
.m
|
Face-Hallucination-master/Code/Ours2/U27_CreateSemaphoreFile_TwoColumn.m
| 257 |
utf_8
|
1ea771b33752980e41f3531484e57e53
|
%Chih-Yuan Yang
%09/29/12
%for parallel execution
function U27_CreateSemaphoreFile_TwoColumn(fn_create,iiend,filenamelist)
fid = fopen(fn_create,'w+');
for i=1:iiend
fprintf(fid,'%05d %s 0\n',i,filenamelist{i});
end
fclose(fid);
end
|
github
|
Liusifei/Face-Hallucination-master
|
F28_ComputeSquareSumLowHighDiff.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F28_ComputeSquareSumLowHighDiff.m
| 393 |
utf_8
|
73640adb8e524325e9992ea6ea807837
|
%Chih-Yuan Yang
%07/20/14
%I replaced F19a with F19c to support the scaling factor of 3. In addition, F19c is simpler.
function f = F28_ComputeSquareSumLowHighDiff(img,img_low,Gau_sigma)
zooming = size(img,1)/size(img_low,1);
img_lr_generated = F19c_GenerateLRImage_GaussianKernel(img,zooming,Gau_sigma);
diff = img_low - img_lr_generated;
Sqr = diff.^2;
f = sum(Sqr(:));
end
|
github
|
Liusifei/Face-Hallucination-master
|
F9_ACCV12Upampling.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F9_ACCV12Upampling.m
| 42,896 |
utf_8
|
b224bc49f05d72474523f5a65d022076
|
%Chih-Yuan Yang
%09/12/12
%To solve the hair and background problem
%hint: for patch work, loading all data into memory can save time.
function img_hr = F9_ACCV12Upampling(img_y, zooming, Gau_sigma ,sfall,srecall)
if zooming == 4
para.Gau_sigma = 1.6;
featurefilename = 'sf_1_1264_qf';
recordfilename = 'srec_1_1264_qf';
featurefolder = fullfile('TexturePatchDataset','Feature','s4');
elseif zooming == 3
para.Gau_sigma = 1.2;
featurefilename = 'sf_1_1264_qf';
recordfilename = 'srec_1_1264_qf';
featurefolder = fullfile('TexturePatchDataset','Feature','s3');
end
[img_edge reliablemap_edge] = F1_EdgePreserving(img_y,para,zooming,Gau_sigma);
%esn = 1;
[h_lr w_lr] = size(img_y);
para.lh = h_lr;
para.lw = w_lr;
para.NumberOfHCandidate = 10;
para.SimilarityFunctionSettingNumber = 1;
%load all data set to save loading time
[scanr scanra] = SearchExternalPatches(img_y,para,sfall,srecall);
para.zooming = zooming;
para.ps = 5;
para.Gau_sigma = Gau_sigma;
hrpatch = F8_ExtractAllHrPatches(img_y, para, scanr);
[scanr_self scanra_self] = F22_SearchForSelfSimilarPatchesL2Norm(img_y,para);
para.ehrfKernelWidth = 1.0;
para.bEnablemhrf = true;
[img_texture reliablemap_texture] = F11_FilterOutImproperHrPatches(img_y,hrpatch,para,scanr_self,scanra_self,scanr,scanra);
nomi = img_texture.*reliablemap_texture + img_edge .* reliablemap_edge;
denomi = reliablemap_edge + reliablemap_texture;
img_hr = nomi ./ denomi;
%there are some 0 value of denomi around boundary
%fill these pixels as img_edge
nanpixels = isnan(img_hr);
img_hr(nanpixels) = img_edge(nanpixels);
%ensure there is no nan
if nnz(isnan(img_hr))
error('should not be here');
end
end
function [scanr scanra] = SearchExternalPatches(img_y,para,sfall,srecall)
%how to search parallelly to speed up?
ps = 5; %patch size
[lh lw] = size(img_y);
hrpatchnumber = 10;
%featurefolder = para.featurefolder;
sh = GetShGeneral(ps);
scanr = zeros(6,hrpatchnumber,lh-ps+1,lw-ps+1); %scan results: mm, quan, ii, sr, sc, similarity
smallvalue = -1;
scanr(6,:,:,:) = smallvalue;
scanra = zeros(lh-ps+1,lw-ps+1); %scan results active
%scanrsimmax = smallvalue * ones(lh-ps+1,lw-ps+1); %del this line?
quanarray = [1 2 4 8 16 32];
B = [256 128 64 32 16 8];
imlyi = im2uint8(img_y);
for qidx=1:6
quan = quanarray(qidx);
b = B(qidx);
cur_initial = floor(size(sfall{1},2)/2); %accelerate the loop by using an initial position
for rl=1:lh-ps+1
fprintf('look for lut rl:%d quan:%d\n',rl,quan);
for cl = 1:lw-ps+1
patch = imlyi(rl:rl+ps-1,cl:cl+ps-1);
fq = patch(sh);
if qidx == 1
fquan = fq;
else
fquan = fq - mod(fq,quan) + quan/2;
end
[iila mma] = LookForLookUpTable9_External(fquan,sfall{qidx},cur_initial,para); %index in lookuptable
in = length(iila); %number of returned instances (at most para.NumberOfHCandidate)
for i=1:in
ii = srecall{qidx}(1,iila(i));
sr = srecall{qidx}(2,iila(i));
sc = srecall{qidx}(3,iila(i));
%check whether the patch is in the scanr already
bSamePatch = false;
for j=1:scanra(rl,cl)
if ii == scanr(3,j,rl,cl) && sr == scanr(4,j,rl,cl) && sc == scanr(5,j,rl,cl)
bSamePatch = true;
break
end
end
if bSamePatch == false
similarity = bmm2similarity(b,mma(i),para.SimilarityFunctionSettingNumber);
if scanra(rl,cl) < hrpatchnumber
ix = scanra(rl,cl) + 1;
%to do: update scanr by similarity
%need to double it, otherwise, the int6 will kill similarity
scanr(:,ix,rl,cl) = cat(1,mma(i),quan,double(ii),double(sr),double(sc),similarity);
scanra(rl,cl) = ix;
else
[minval ix] = min(scanr(6,:,rl,cl));
if scanr(6,ix,rl,cl) < similarity
%update
scanr(:,ix,rl,cl) = cat(1,mma(i),quan,double(ii),double(sr),double(sc),similarity);
end
end
end
end
end
end
end
end
function [iila mma] = LookForLookUpTable9_External(fq,lut,cur_initial,para)
hrpatchnumber = para.NumberOfHCandidate; %default 10
fl = length(fq); %feature length
head = 1;
tail = size(lut,2);
lutsize = size(lut,2);
if exist('cur_initial','var')
if cur_initial > lutsize
cur = lutsize;
else
cur = cur_initial;
end
else
cur = round(lutsize/2);
end
cur_rec1 = cur;
%initial comparison
fqsmaller = -1;
fqlarger = 1;
fqsame = 0;
cr = 0; %compare results
mm = 0;
mmiil = 0;
%search for the largest mm
while 1
for c=1:fl
if fq(c) < lut(c,cur)
cr = fqsmaller;
break
elseif fq(c) > lut(c,cur)
cr = fqlarger;
break; %c moves to next
else %equal
cr = fqsame;
if mm < c
mm = c;
mmiil = cur;
end
end
end
if cr == fqsmaller
next = floor((cur + head)/2);
tail = cur; %adjust the range of head and tail
elseif cr == fqlarger;
next = ceil((cur + tail)/2); %the round function has to be floor, because fq is larger than cur
%otherwise the fully 255 patches will never match
head = cur; %adjust the range of head and tail
end
if mm == 25 %it happens that the initial one matches the fq; in that case next is not defined
break
end
if cur == next || cur_rec1 == next %the next might oscillate
break;
else
cur_rec1 = cur;
cur = next;
end
%fprintf('cur %d\n',cur);
end
if mm == 0
iila = [];
mma = [];
return
end
%post-process to find the repeated partial vectors
%search for previous
idx = 1;
iila = zeros(hrpatchnumber,1);
mma = zeros(hrpatchnumber,1);
iila(idx) = mmiil;
mma(idx) = mm;
bprecontinue = true;
bproccontinue = true;
presh = 0; %previous shift
procsh = 0; %proceeding shift
while 1
presh = presh -1;
iilpre = mmiil + presh;
if iilpre <1
bprecontinue = false;
premm = 0;
end
procsh = procsh +1;
iilproc = mmiil + procsh;
if iilproc > lutsize
bproccontinue = false;
procmm = 0;
end
if bprecontinue
diff = lut(:,iilpre) ~= fq;
if nnz(diff) == 0
premm = 25;
else
premm = find(diff,1,'first') -1;
end
end
if bproccontinue
diff = lut(:,iilproc) ~= fq;
if nnz(diff) == 0
procmm = 25;
else
procmm = find(diff,1,'first') -1;
end
end
if premm == 0 && procmm == 0
break
end
if premm > procmm
%add pre item
idx = idx + 1;
iila(idx) = iilpre;
mma(idx) = premm;
%pause the proc
bprecontinue = true;
elseif premm < procmm
%add proc item
idx = idx + 1;
iila(idx) = iilproc;
mma(idx) = procmm;
%pause the pre
bproccontinue = true;
else %premm == procmm
%add both item
idx = idx + 1;
iila(idx) = iilpre;
mma(idx) = premm;
if idx == hrpatchnumber
break
end
idx = idx + 1;
iila(idx) = iilproc;
mma(idx) = procmm;
bproccontinue = true;
bprecontinue = true;
end
if idx == hrpatchnumber
break
end
end
if idx < hrpatchnumber
iila = iila(1:idx);
mma = mma(1:idx);
end
end
function s = bmm2similarity(b,mm,SimilarityFunctionSettingNumber)
if SimilarityFunctionSettingNumber == 1
if mm >= 9
Smm = 0.9 + 0.1*(mm-9)/16;
else
Smm = 0.5 * mm/9;
end
Sb = 0.5+0.5*(log2(b)-3)/5;
s = Sb * Smm;
elseif SimilarityFunctionSettingNumber == 2
Smm = mm/25;
Sb = (log2(b)-2)/6;
s = Sb * Smm;
end
end
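%Worked examples for SimilarityFunctionSettingNumber == 1 (my addition, derived directly
%from the formula above):
%   b = 256, mm = 25:  Smm = 0.9+0.1*(25-9)/16 = 1.0,  Sb = 0.5+0.5*(log2(256)-3)/5 = 1.0,  s = 1.0
%   b =   8, mm =  9:  Smm = 0.9,                      Sb = 0.5+0.5*(log2(8)-3)/5   = 0.5,  s = 0.45
%i.e. a full 25-pixel match found in the unquantized table is fully trusted, while a 9-pixel
%match found in the coarsest table receives less than half the weight.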
function hrpatch = F8_ExtractAllHrPatches(img_y, para, scanr)
%how to search parallelly to speed up?
psh = para.ps * para.zooming;
ps = para.ps;
lh = para.lh;
lw = para.lw;
s = para.zooming;
hrpatchnumber = para.NumberOfHCandidate;
hrpatch = zeros(psh,psh,lh-ps+1,lw-ps+1,hrpatchnumber);
ihfolder = fullfile('TexturePatchDataset','HRGray');
%read all images
loaddata = load(fullfile(ihfolder,'AllImages_1_1264.mat'));
allimages = loaddata.allimages;
clear loaddata
%analyize which images need to be loaded
alliiset = scanr(3,:,:,:);
alliiset_uni = unique(alliiset(:)); %almost all images are used, from 1 to 1500
if alliiset_uni(1) ~= 0
alliiset_uni_pure = alliiset_uni;
else
alliiset_uni_pure = alliiset_uni(2:end);
end
for i = 1:length(alliiset_uni_pure)
ii = alliiset_uni_pure(i);
exampleimage_hr = im2double(allimages(:,:,ii));
exampleimage_lr = GenerateLRImage_BlurSubSample(exampleimage_hr,para.zooming,para.Gau_sigma);
match_4D = alliiset == ii;
match_3D = reshape(match_4D,hrpatchnumber,lh-ps+1,lw-ps+1); %remove the first dimension
[d1 d2 d3] = size(match_3D); %second dimension length
[idxset posset] = find(match_3D);
setin = length(idxset);
for j = 1:setin
idx = idxset(j);
possum = posset(j);
pos3 = floor( (possum-1)/d2) +1; %the relationship: possum = (pos3-1) * d2 + pos2, pos2 in (1,d2)
pos2 = possum - (pos3-1)*d2;
rl = pos2;
cl = pos3;
sr = scanr(4,idx,rl,cl);
sc = scanr(5,idx,rl,cl);
srh = (sr-1)*s+1;
srh1 = srh + psh -1;
sch = (sc-1)*s+1;
sch1 = sch + psh-1;
%to do: compensate the HR patch to match the LR query patch
hrp = exampleimage_hr(srh:srh1,sch:sch1); %HR patch
lrq = img_y(rl:rl+ps-1,cl:cl+ps-1); %LR query patch
lrr = exampleimage_lr(sr:sr+ps-1,sc:sc+ps-1); %LR retrieved patch
chrp = hrp + imresize(lrq - lrr,s,'bilinear'); %compensate HR patch
hrpatch(:,:,rl,cl,idx) = chrp;
bVisuallyCheck = false;
if bVisuallyCheck
if ~exist('hfig','var')
hfig = figure;
else
figure(hfig);
end
subplot(1,4,1);
imshow(hrp/255);
title('hrp');
subplot(1,4,2);
imshow(lrr/255);
title('lrr');
subplot(1,4,3);
imshow(lrq/255);
title('lrq');
subplot(1,4,4);
imshow(chrp/255);
title('chrp');
keyboard
end
end
end
end
function [img_texture Reliablemap] = F11_FilterOutImproperHrPatches(img_y,hrpatch,para,scanr_self,scanra_self,scanr,scanra)
%filter out improper hr patches using similarity among lr patches
%load the self-similar data
s = para.zooming;
lh = para.lh;
lw = para.lw;
ps = para.ps;
psh = s * para.ps;
patcharea = para.ps^2;
SSnumberUpperbound = 10;
%do I still need these variables?
cqarray = zeros(32,1)/0;
for qidx = 1:6
quan = 2^(qidx-1);
cqvalue = 0.9^(qidx-1);
cqarray(quan) = cqvalue;
end
hh = lh * s;
hw = lw * s;
hrres_nomi = zeros(hh,hw);
hrres_deno = zeros(hh,hw);
maskmatrix = false(psh,psh,patcharea);
Reliablemap = zeros(hh,hw);
pshs = psh * psh;
for i=1:patcharea
[sh_notsued masklow maskhigh] = GetShGeneral(ps,i,true,s); %ps, mm, bhigh, s
maskmatrix(:,:,i) = maskhigh;
end
mhr = zeros(5*s);
r1 = 2*s+1;
r2 = 3*s;
c1 = 2*s+1;
c2 = 3*s;
mhr(r1:r2,c1:c2) = 1; %the central part
sigma = para.ehrfKernelWidth;
kernel = Sigma2Kernel(sigma);
if para.bEnablemhrf
mhrf = imfilter(mhr,kernel,'replicate');
else
mhrf = mhr;
end
noHmap = scanra == 0;
noHmapToFill = noHmap;
NHOOD = [0 1 0;
1 1 1;
0 1 0];
se = strel('arbitrary',NHOOD);
noHmapneighbor = and( imdilate(noHmap,se) ,~noHmap);
%if the noHmapsever is 0, it is fine
imb = imresize(img_y,s); %use it as the reference if no F is available
rsa = [0 -1 0 1];
csa = [1 0 -1 0];
for rl= 1:lh-ps+1 %75
fprintf('rl:%d total:%d\n',rl,lh-ps+1);
rh = (rl-1)*s+1;
rh1 = rh+psh-1;
for cl = 1:lw-ps+1 %128
ch = (cl-1)*s+1;
ch1 = ch+psh-1;
%load candidates
hin = para.NumberOfHCandidate;
H = zeros(psh,psh,hin);
HSim = zeros(hin,1);
for j=1:hin
H(:,:,j) = hrpatch(:,:,rl,cl,j); %H
HSim(j) = scanr(6,j,rl,cl);
end
%compute the number of reference patches
sspin = min(SSnumberUpperbound,scanra_self(rl,cl));
%self similar patch instance number
F = zeros(ps,ps,sspin);
FSimPure = zeros(1,sspin);
rin = 0;
for i=1:sspin
sr = scanr_self(3,i,rl,cl);
sc = scanr_self(4,i,rl,cl);
%hr candidate number
rin = rin + para.NumberOfHCandidate;
F(:,:,i) = img_y(sr:sr+ps-1,sc:sc+ps-1);
FSimPure(i) = scanr_self(5,i,rl,cl);
end
%load all of the two step patches
R = zeros(psh,psh,rin);
mms = zeros(rin,1);
mmr = zeros(rin,1);
qs = zeros(rin,1);
qr = zeros(rin,1);
FSimBaseR = zeros(rin,1);
RSim = zeros(rin,1);
idx = 0;
if sspin > 0
for i=1:sspin %sspin is the Fin
sr = scanr_self(3,i,rl,cl);
sc = scanr_self(4,i,rl,cl);
%hr candidate number
hrcanin = para.NumberOfHCandidate;
for j=1:hrcanin
idx = idx + 1;
R(:,:,idx) = hrpatch(:,:,sr,sc,j);
mms(idx) = scanr_self(1,i,rl,cl);
qs(idx) = scanr_self(2,i,rl,cl);
mmr(idx) = scanr(1,j,sr,sc);
qr(idx) = scanr(2,j,sr,sc);
FSimBaseR(idx) = FSimPure(i);
RSim(idx) = scanr(6,j,sr,sc);
end
end
else
idx = 1;
rin = 1; %use bicubic
R(:,:,idx) = imb(rh:rh1,ch:ch1);
FSimBaseR(idx) = 1; %the original trailing statement "FSimPure(i);" would error here because FSimPure is empty when sspin == 0
end
%here is a question, how to define the similarity between H and R?
%L2norm?
hscore = zeros(hin,1);
for i=1:hin
theH = H(:,:,i);
for j=1:rin
theR = R(:,:,j);
spf = FSimBaseR(j);
%similarity between H and R
diff = theH - theR;
L2N = norm(diff(:));
shr = exp(- L2N/pshs);
hscore(i) = hscore(i) + shr*spf;
end
end
[maxscore idx] = max(hscore);
%take this as the example
Reliablemap(rh:rh1,ch:ch1) = Reliablemap(rh:rh1,ch:ch1) + HSim(idx)*mhrf;
if hin > 0 %some patches can't find H
hrres_nomi(rh:rh1,ch:ch1) = hrres_nomi(rh:rh1,ch:ch1) + H(:,:,idx).*mhrf;
hrres_deno(rh:rh1,ch:ch1) = hrres_deno(rh:rh1,ch:ch1) + mhrf;
end
%if any of its neighbor belongs to noHmap, copy additional region to hrres
%if the pixel belongs to noHmapneighbor, then expand the copy regions
if noHmapneighbor(rl,cl) == true
mhrfspecial = zeros(5*s);
mhrfspecial(r1:r2,c1:c2) = 1;
for i=1:4
rs = rsa(i);
cs = csa(i);
checkr = rl+rs;
checkc = cl+cs;
if checkr > 0 && checkr < lh-ps+1 && checkc >0 && checkc <lw-ps+1 && noHmapToFill(checkr,checkc)
%recompute the mhrf and disable the noHmapToFill
noHmapToFill(checkr,checkc) = false;
switch i
case 1
mhrfspecial(r1:r2,c1+s:c2+s) = 1;
case 2
mhrfspecial(r1-s:r2-s,c1:c2) = 1;
case 3
mhrfspecial(r1:r2,c1-s:c2-s) = 1;
case 4
mhrfspecial(r1+s:r2+s,c1:c2) = 1;
end
end
end
mhrfspecial = imfilter(mhrfspecial,kernel,'replicate');
hrres_nomi(rh:rh1,ch:ch1) = hrres_nomi(rh:rh1,ch:ch1) + H(:,:,idx).*mhrfspecial;
hrres_deno(rh:rh1,ch:ch1) = hrres_deno(rh:rh1,ch:ch1) + mhrfspecial;
end
end
end
hrres = hrres_nomi ./hrres_deno;
exception = isnan(hrres);
hrres_filtered = hrres;
hrres_filtered(exception) = 0;
img_texture = (hrres_filtered .* (1-exception) + imb .*exception);
end
function [scanr_self scanra_self] = F22_SearchForSelfSimilarPatchesL2Norm(img_y,para)
ps = para.ps;
patcharea = ps^2;
[lh lw] = size(img_y);
%Find self similar patches
Fpatchnumber = 10;
scanr_self = zeros(5,Fpatchnumber,lh-ps+1,lw-ps+1); %scan results: mm, quan, r,c, similarity
scanra_self = Fpatchnumber * ones(lh-ps+1,lw-ps+1); %scan results active
in = (lh-ps+1)*(lw-ps+1);
fs = zeros(patcharea,in);
rec = zeros(2,in);
idx = 0;
for rl=1:lh-ps+1
for cl=1:lw-ps+1
idx = idx + 1;
rec(:,idx) = [rl;cl];
fs(:,idx) = reshape(img_y(rl:rl+ps-1,cl:cl+ps-1),patcharea,1);
end
end
%search
idx = 0;
for rl=1:lh-ps+1
for cl=1:lw-ps+1
idx = idx + 1;
fprintf('idx %d in %d\n',idx,in);
qf = fs(:,idx);
diff = fs - repmat(qf,1,in);
sqr = sum(diff.^2);
[ssqr ix] = sort(sqr);
saveidx = 0;
for j=1:11
indexinsort = ix(j);
sr = rec(1,indexinsort);
sc = rec(2,indexinsort);
if sr ~= rl || sc ~= cl
saveidx = saveidx + 1;
l2norm = sqrt(ssqr(j));
similarity = exp(-l2norm/25);
scanr_self(:,saveidx,rl,cl) = cat(1,-1,-1,sr,sc,similarity);
end
end
end
end
end
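%Note on the similarity scale (my worked example, not from the original comments): for two
%5x5 patches whose intensities differ by 0.05 at every pixel,
%   l2norm = sqrt(25*0.05^2) = 0.25,   similarity = exp(-0.25/25) ~= 0.99,
%so only patches with much larger differences are noticeably down-weighted; the divisor 25
%matches the patch area for ps = 5.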
function [img_edge ProbOfEdge] = F1_EdgePreserving(img_y,para,zooming,Gau_sigma)
para.LowMagSuppression = 0;
para.DistanceUpperBound = 2.0;
para.ContrastEnhenceCoef = 1.0;
I_s = SmoothnessPreservingFunction(img_y,para,zooming);
T = ComputeSRSSD(I_s);
Dissimilarity = EvaluateDissimilarity8(I_s);
Grad_high_initial = Img2Grad(I_s);
%SaveFolder = para.tuningfolder;
[h w] = size(T);
StatisticsFolder = fullfile('EdgePriors');
LoadFileName = sprintf('Statistics_Sc%d_Si%0.1f.mat',zooming,Gau_sigma);
LoadData = load(fullfile(StatisticsFolder,LoadFileName));
Statistics = LoadData.Statistics;
RidgeMap = edge(I_s,'canny',[0 0.01],0.05);
%filter out small ridge and non-maximun ridges
RidgeMap_filtered = RidgeMap;
[r_set c_set] = find(RidgeMap);
SetLength = length(r_set);
for j=1:SetLength
r = r_set(j);
c = c_set(j);
CenterMagValue = T(r,c);
if CenterMagValue < para.LowMagSuppression
RidgeMap_filtered(r,c) = false;
end
end
[r_set c_set] = find(RidgeMap_filtered);
SetLength = length(r_set);
[X Y] = meshgrid(1:11,1:11);
DistPatch = sqrt((X-6).^2 + (Y-6).^2);
DistMap = inf(h,w);
UsedPixel = false(h,w);
CenterCoor = zeros(h,w,2);
%Compute DistMap and CneterCoor
[r_set c_set] = find(RidgeMap_filtered);
for j=1:SetLength
r = r_set(j);
r1 = r-5;
r2 = r+5;
c = c_set(j);
c1 = c-5;
c2 = c+5;
if r1>=1 && r2<=h && c1>=1 && c2<=w %discard boundary?
MapPatch = DistMap(r1:r2,c1:c2);
MinPatch = min(MapPatch, DistPatch);
DistMap(r1:r2,c1:c2) = MinPatch;
UsedPixel(r1:r2,c1:c2) = true;
ChangedPixels = MinPatch < MapPatch;
OriginalCenterCoorPatch = CenterCoor(r1:r2,c1:c2,:);
NewCoor = cat(3,r*ones(11), c*ones(11));
NewCenterCoorPatch = OriginalCenterCoorPatch .* repmat(1-ChangedPixels,[1,1,2]) + NewCoor .* repmat(ChangedPixels,[1,1,2]);
CenterCoor(r1:r2,c1:c2,:) = NewCenterCoorPatch;
end
end
%Convert dist to table index
TableIndexMap = zeros(h,w);
b = unique(DistPatch(:));
for i=1:length(b)
SetPixels = DistMap == b(i);
TableIndexMap(SetPixels) = i;
end
%mapping (T_p, T_r, d) to S_p
[r_set c_set] = find(UsedPixel);
SetLength = length(r_set);
UpdatedPixel = false(h,w);
S = zeros(h,w);
for i=1:SetLength
r = r_set(i);
c = c_set(i);
r_Center = CenterCoor(r,c,1);
c_Center = CenterCoor(r,c,2);
CurrentMagValue = T(r,c);
BinIdx_Current = ceil(CurrentMagValue /0.005);
%Zebra have super strong Mag
if BinIdx_Current > 100
BinIdx_Current = 100;
end
TableIndex = TableIndexMap(r,c);
if TableIndex > para.DistanceUpperBound
continue
end
CenterMagValue = T(r_Center,c_Center);
%Low Mag Edge suppresion
if CenterMagValue < para.LowMagSuppression
continue
end
BinIdx_Center = ceil(CenterMagValue /0.005);
if BinIdx_Center > 100
BinIdx_Center = 100;
end
%consult the table
if TableIndex == 1 %1 is the index of b(1) where dist = 0, enhance the contrast of pixel on edge
S_p = para.ContrastEnhenceCoef * Statistics(TableIndex).EstimatedMag(BinIdx_Current,BinIdx_Center);
else
S_p = Statistics(TableIndex).EstimatedMag(BinIdx_Current,BinIdx_Center);
end
if isnan(S_p)
else
UpdatedPixel(r,c) = true;
S(r,c) = S_p;
end
end
%Record the RidgeMapMagValue, for computing the ProbMagOut
%the Mag is the consulted Mag
%here is the problem: when S is very strong, the affected range of ProbMagOut exceeds 1 pixel
RidgeMapMagValue = zeros(h,w);
for i=1:SetLength
r = r_set(i);
c = c_set(i);
r_Center = CenterCoor(r,c,1);
c_Center = CenterCoor(r,c,2);
RidgeMapMagValue(r,c) = S(r_Center,c_Center);
end
S(~UpdatedPixel) = T(~UpdatedPixel);
img_in = I_s;
if min(Dissimilarity(:)) == 0
d = Dissimilarity + 1e-6; %avoid 0 case; some images may have d(:,:,1) as 0
else
d = Dissimilarity;
end
ratio = d ./ repmat(d(:,:,1),[1,1,8]);
%here is the problem, I need to amplify the gradient directionally
Grad_in = Img2Grad(img_in);
Product = Grad_in .* ratio;
Sqr = Product.^2;
Sum = sum(Sqr,3);
Sqrt = sqrt(Sum); %the Sqrt might be 0, because Grad_in may be pure 0;
r1 = S ./Sqrt;
r1(isnan(r1)) = 0;
Grad_exp = Grad_high_initial .*( ratio .*(repmat(r1,[1,1,8])));
%consolidate inconsistent gradients
NewGrad_exp = zeros(h,w,8);
for k=1:4
switch k
case 1
ShiftOp = [0 -1];
case 2
ShiftOp = [1 -1];
case 3
ShiftOp = [1 0];
case 4
ShiftOp = [1 1];
end
k2 =k+4;
Grad1 = Grad_exp(:,:,k);
Grad2 = Grad_exp(:,:,k2);
Grad2Shift = circshift(Grad2,ShiftOp);
Grad1Abs = abs(Grad1);
Grad2AbsShift = abs(Grad2Shift);
Grad1Larger = Grad1Abs > Grad2AbsShift;
Grad2Larger = Grad2AbsShift > Grad1Abs;
NewGrad1 = Grad1 .* Grad1Larger + (-Grad2Shift) .* Grad2Larger;
NewGrad2Shift = Grad2Shift .* Grad2Larger + (-Grad1) .* Grad1Larger;
NewGrad2 = circshift(NewGrad2Shift,-ShiftOp);
NewGrad_exp(:,:,k) = NewGrad1;
NewGrad_exp(:,:,k2) = NewGrad2;
end
%current problem is the over-enhanced gradient (NewMagExp too large)
para.bReport = true;
img_edge = GenerateIntensityFromGradient(img_y,img_in,NewGrad_exp,para,zooming,Gau_sigma);
%compute the Map of edge weight
lambda_m = 2;
m0 = 0;
ProbMagOut = lambda_m * RidgeMapMagValue + m0;
lambda_d = 0.25;
d0 = 0.25;
ProbDistMap = exp(- (lambda_d * DistMap + d0) ); %this coef should be decided by zooming
Product = ProbMagOut .* ProbDistMap;
ProbOfEdge = min(Product,1); %the two terms are not sufficient; direction is not taken into consideration
para.bDumpInformation = false;
if para.bDumpInformation
scc(ProbOfEdge);
title('Edge Weight Map');
hfig = gcf;
fn = sprintf('%s_%s_%d_%d_EdgeWeightMap.png',para.SaveName,para.Legend,para.setting,para.tuning);
saveas(hfig,fullfile(para.tuningfolder,fn));
close(hfig)
scc(ProbMagOut,[0 1]);
hFig = gcf;
title('$b_1 s_r + b_0$, sharpness term','interpreter','latex');
axis off
saveas(hFig,fullfile(SaveFolder,[para.SaveName '_Weight_SharpnessTerm.png']));
saveas(hFig,fullfile(SaveFolder,[para.SaveName '_Weight_SharpnessTerm.fig']));
close(hFig);
scc(ProbDistMap,[0 1]);
hFig = gcf;
title('$e^{a_1 d+a_0}$, distance term','interpreter','latex');
axis off
saveas(hFig,fullfile(SaveFolder,[para.SaveName '_Weight_DistanceTerm.png']));
saveas(hFig,fullfile(SaveFolder,[para.SaveName '_Weight_DistanceTerm.fig']));
close(hFig);
scc(ProbOfEdge,[0 1]);
hFig = gcf;
title(''); %remove title, make it blank
axis off
saveas(hFig,fullfile(SaveFolder,[para.SaveName '_W_e.png']));
saveas(hFig,fullfile(SaveFolder,[para.SaveName '_W_e.fig']));
close(hFig);
scc(RidgeMap,'g');
hFig = gcf;
saveas(hFig,fullfile(SaveFolder,[para.SaveName '_R_WithFrame.png']));
saveas(hFig,fullfile(SaveFolder,[para.SaveName '_R_WithFrame.fig']));
close(hFig);
RidgeMap_filtered_inverted = 1-RidgeMap_filtered;
scc(RidgeMap_filtered_inverted,'g');
colorbar off
hFig = gcf;
title('$R$','interpreter','latex');
saveas(hFig,fullfile(SaveFolder,[para.SaveName 'RidgeMap_WithFrame.png']));
saveas(hFig,fullfile(SaveFolder,[para.SaveName 'RidgeMap_WithFrame.fig']));
close(hFig);
imwrite(1-RidgeMap_filtered,fullfile(para.tuningfolder,[para.SaveName '_R.png']));
MaxS = max(S(:));
scc(T,[0 MaxS]);
hFig = gcf;
%title('$M^*$, Mangnitude of gradient of $I^*$','interpreter','latex');
title('');
axis off
saveas(hFig,fullfile(SaveFolder,[para.SaveName '_T.png']));
saveas(hFig,fullfile(SaveFolder,[para.SaveName '_T.fig']));
close(hFig);
scc(S,[0 MaxS]);
%title('$M''$, Predicted mangnitude of gradient','interpreter','latex');
title('');
hFig = gcf;
axis off
saveas(hFig,fullfile(SaveFolder,[para.SaveName '_S.png']));
saveas(hFig,fullfile(SaveFolder,[para.SaveName '_S.fig']));
close(hFig);
imwrite(I_s,fullfile(SaveFolder, [para.SaveName '_I_s.png']));
MagOut = ComputeSRSSD(img_edge);
scc(MagOut,[0 0.9]);
hFig = gcf;
saveas(hFig,fullfile(SaveFolder,[para.SaveName '_I_e_WithFrame.png']));
saveas(hFig,fullfile(SaveFolder,[para.SaveName '_I_e_WithFrame.fig']));
close(hFig);
imwrite(img_edge,fullfile(SaveFolder, [para.SaveName '_I_e.png']));
% scc(img_edge, 'g',[0 1]);
% hFig = gcf;
% saveas(hFig,fullfile(SaveFolder,'img_edge.fig'));
% imwrite(img_edge,fullfile(SaveFolder,'img_edge.png'));
% close(hFig);
end
end
function img_out = SmoothnessPreservingFunction(img_y,para,zooming)
img_bb = imresize(img_y,zooming);
Kernel = Sigma2Kernel(para.Gau_sigma);
%compute the similarity from low
Coef = 10;
PatchSize = 3;
Sqrt_low = SimilarityEvaluation(img_y,PatchSize);
Similarity_low = exp(-Sqrt_low*Coef);
[h_high w_high] = size(img_bb);
ExpectedSimilarity = zeros(h_high,w_high,16);
%upsample the similarity
for dir=1:16
ExpectedSimilarity(:,:,dir) = imresize(Similarity_low(:,:,dir),zooming,'bilinear');
end
%refind the Grad_high by Similarity_high
LoopNumber = 10;
img = img_bb;
for loop = 1:LoopNumber
%refine gradient by ExpectedSimilarity
ValueSum = zeros(h_high,w_high);
WeightSum = sum(ExpectedSimilarity,3); %if the weight sum is low, it is unsuitable to generate the grad by interpolation
for dir = 1:16
[MoveOp N] = GetMoveKernel16(dir);
if N == 1
MovedData = imfilter(img,MoveOp{1},'replicate');
else %N ==2
MovedData1 = imfilter(img,MoveOp{1},'replicate');
MovedData2 = imfilter(img,MoveOp{2},'replicate');
MovedData = (MovedData1 + MovedData2)/2;
end
Product = MovedData .* ExpectedSimilarity(:,:,dir);
ValueSum = ValueSum + Product;
end
I = ValueSum ./ WeightSum;
%intensity compensate
Diff = imresize(imfilter(I,Kernel,'replicate'),1/zooming, 'nearest') - img_y;
UpSampled = imresize(Diff,zooming,'bilinear');
Grad0 = imfilter(UpSampled,Kernel,'replicate');
Term_LowHigh_in = ComputeFunctionValue_lowhigh(I,img_y,para.Gau_sigma);
I_in = I; %make a copy, restore the value if all beta fails
bDecrease = false;
tau = 0.2;
for line_search_loop=1:10
%line search for the beta, fixed 1/32 is not a good choice
I = I_in - tau * Grad0;
Term_LowHigh_out = ComputeFunctionValue_lowhigh(I,img_y,para.Gau_sigma);
if Term_LowHigh_out < Term_LowHigh_in
bDecrease = true;
break;
else
tau = tau * 0.5;
end
end
if bDecrease == true
I_best = I;
else
break;
end
% fprintf('loop=%d, LowHihg_in=%0.1f, LowHigh_out=%0.1f,\n',loop,Term_LowHigh_in,Term_LowHigh_out);
% imwrite(I,fullfile(SaveFolder, [num2str(loop) '_GenIntenFromGrad.png']));
img = I_best;
end
img_out = img;
end
function SqrtData = SimilarityEvaluation(Img_in,PatchSize)
HalfPatchSize = (PatchSize-1)/2;
[h w] = size(Img_in);
SqrtData = zeros(h,w,16);
f3x3 = ones(3);
for i = 1:16
[DiffOp N] = RetGradientKernel16(i);
if N == 1
Diff = imfilter(Img_in,DiffOp{1},'symmetric');
else
Diff1 = imfilter(Img_in,DiffOp{1},'symmetric');
Diff2 = imfilter(Img_in,DiffOp{2},'symmetric');
Diff = (Diff1+Diff2)/2;
end
Sqr = Diff.^2;
Sum = imfilter(Sqr,f3x3,'replicate');
Mean = Sum/9;
SqrtData(:,:,i) = sqrt(Mean);
end
end
function [DiffOp N] = RetGradientKernel16(dir)
DiffOp = cell(2,1);
f{1} = [0 0 0;
0 -1 1;
0 0 0];
f{2} = [0 0 1;
0 -1 0;
0 0 0];
f{3} = [0 1 0;
0 -1 0;
0 0 0];
f{4} = [1 0 0;
0 -1 0;
0 0 0];
f{5} = [0 0 0;
1 -1 0;
0 0 0];
f{6} = [0 0 0;
0 -1 0;
1 0 0];
f{7} = [0 0 0;
0 -1 0;
0 1 0];
f{8} = [0 0 0;
0 -1 0;
0 0 1];
switch dir
case 1
N = 1;
DiffOp{1} = f{1};
DiffOp{2} = [];
case 2
N = 2;
DiffOp{1} = f{1};
DiffOp{2} = f{2};
case 3
N = 1;
DiffOp{1} = f{2};
DiffOp{2} = [];
case 4
N = 2;
DiffOp{1} = f{2};
DiffOp{2} = f{3};
case 5
N = 1;
DiffOp{1} = f{3};
DiffOp{2} = [];
case 6
N = 2;
DiffOp{1} = f{3};
DiffOp{2} = f{4};
case 7
N = 1;
DiffOp{1} = f{4};
DiffOp{2} = [];
case 8
N = 2;
DiffOp{1} = f{4};
DiffOp{2} = f{5};
case 9
N = 1;
DiffOp{1} = f{5};
DiffOp{2} = [];
case 10
N = 2;
DiffOp{1} = f{5};
DiffOp{2} = f{6};
case 11
DiffOp{1} = f{6};
DiffOp{2} = [];
N = 1;
case 12
N = 2;
DiffOp{1} = f{6};
DiffOp{2} = f{7};
case 13
N = 1;
DiffOp{1} = f{7};
DiffOp{2} = [];
case 14
N = 2;
DiffOp{1} = f{7};
DiffOp{2} = f{8};
case 15
DiffOp{1} = f{8};
DiffOp{2} = [];
N = 1;
case 16
N = 2;
DiffOp{1} = f{8};
DiffOp{2} = f{1};
end
end
function [Kernel N] = GetMoveKernel16(dir)
Kernel = cell(2,1);
f{1} = [0 0 0;
0 0 1;
0 0 0];
f{2} = [0 0 1;
0 0 0;
0 0 0];
f{3} = [0 1 0;
0 0 0;
0 0 0];
f{4} = [1 0 0;
0 0 0;
0 0 0];
f{5} = [0 0 0;
1 0 0;
0 0 0];
f{6} = [0 0 0;
0 0 0;
1 0 0];
f{7} = [0 0 0;
0 0 0;
0 1 0];
f{8} = [0 0 0;
0 0 0;
0 0 1];
switch dir
case 1
N = 1;
Kernel{1} = f{1};
Kernel{2} = [];
case 2
N = 2;
Kernel{1} = f{1};
Kernel{2} = f{2};
case 3
N = 1;
Kernel{1} = f{2};
Kernel{2} = [];
case 4
N = 2;
Kernel{1} = f{2};
Kernel{2} = f{3};
case 5
N = 1;
Kernel{1} = f{3};
Kernel{2} = [];
case 6
N = 2;
Kernel{1} = f{3};
Kernel{2} = f{4};
case 7
N = 1;
Kernel{1} = f{4};
Kernel{2} = [];
case 8
N = 2;
Kernel{1} = f{4};
Kernel{2} = f{5};
case 9
N = 1;
Kernel{1} = f{5};
Kernel{2} = [];
case 10
N = 2;
Kernel{1} = f{5};
Kernel{2} = f{6};
case 11
Kernel{1} = f{6};
Kernel{2} = [];
N = 1;
case 12
N = 2;
Kernel{1} = f{6};
Kernel{2} = f{7};
case 13
N = 1;
Kernel{1} = f{7};
Kernel{2} = [];
case 14
N = 2;
Kernel{1} = f{7};
Kernel{2} = f{8};
case 15
Kernel{1} = f{8};
Kernel{2} = [];
N = 1;
case 16
N = 2;
Kernel{1} = f{8};
Kernel{2} = f{1};
end
end
function f = ComputeFunctionValue_lowhigh(img,img_low,Gau_sigma)
KernelSize = ceil(Gau_sigma) * 3 + 1;
G = fspecial('gaussian',KernelSize,Gau_sigma);
Conv = imfilter(img,G,'replicate');
SubSample = imresize(Conv,size(img_low),'antialias',false);
Diff = SubSample - img_low;
Sqr = Diff.^2;
f = sum(Sqr(:));
end
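%In formula form (my summary of the code above), this is the reconstruction term minimized
%by the gradient-descent loops in this file:
%   f(I) = sum_p ( downsample(G * I)(p) - img_low(p) )^2
%where G is the Gaussian kernel of width Gau_sigma and downsample() resizes to the size of img_low.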
function Grad = Img2Grad(img)
[h w] = size(img);
Grad = zeros(h,w,8);
DiffOp = RetGradientKernel();
for i=1:8
Grad(:,:,i) = imfilter(img,DiffOp{i},'replicate');
end
end
function f = RetGradientKernel()
f = cell(8,1);
f{1} = [0 0 0;
0 -1 1;
0 0 0];
f{2} = [0 0 1;
0 -1 0;
0 0 0];
f{3} = [0 1 0;
0 -1 0;
0 0 0];
f{4} = [1 0 0;
0 -1 0;
0 0 0];
f{5} = [0 0 0;
1 -1 0;
0 0 0];
f{6} = [0 0 0;
0 -1 0;
1 0 0];
f{7} = [0 0 0;
0 -1 0;
0 1 0];
f{8} = [0 0 0;
0 -1 0;
0 0 1];
end
function Dissimilarity = EvaluateDissimilarity8(Img_in,PatchSize)
if ~exist('PatchSize','var');
PatchSize = 3;
end
[h w] = size(Img_in);
Dissimilarity = zeros(h,w,8);
f3x3 = ones(PatchSize)/(PatchSize^2);
for i = 1:8
DiffOp = RetGradientKernel8(i);
Diff = imfilter(Img_in,DiffOp,'symmetric');
Sqr = Diff.^2;
Sum = imfilter(Sqr,f3x3,'replicate');
Dissimilarity(:,:,i) = sqrt(Sum);
end
end
function DiffOp = RetGradientKernel8(dir)
f{1} = [0 0 0;
0 -1 1;
0 0 0];
f{2} = [0 0 1;
0 -1 0;
0 0 0];
f{3} = [0 1 0;
0 -1 0;
0 0 0];
f{4} = [1 0 0;
0 -1 0;
0 0 0];
f{5} = [0 0 0;
1 -1 0;
0 0 0];
f{6} = [0 0 0;
0 -1 0;
1 0 0];
f{7} = [0 0 0;
0 -1 0;
0 1 0];
f{8} = [0 0 0;
0 -1 0;
0 0 1];
DiffOp = f{dir};
end
function img_out = GenerateIntensityFromGradient(img_y,img_initial,Grad_exp,para,zooming,Gau_sigma)
if ~isfield(para,'bReport')
para.bReport = false;
end
if ~isfield(para,'LoopNumber')
para.LoopNumber = 30;
end
if ~isfield(para,'beta0')
beta0 = 1;
else
beta0 = para.beta0;
end
if ~isfield(para,'beta1')
beta1 = 1;
else
beta1 = para.beta1;
end
% TempFolder = para.tuningfolder;
% zooming = para.zooming;
%create dir
% if isfield(para,'tuning')
% SaveFolder = para.tuningfolder;
% else
% SaveFolder = fullfile(TempFolder,'OptimizationProgress');
% end
% if ~exist(SaveFolder,'dir')
% mkdir( SaveFolder );
% end
Kernel = Sigma2Kernel(Gau_sigma);
%compute gradient
I = img_initial;
I_best = I;
for loop = 1:para.LoopNumber
%refine image by patch similarity
%refine image by low-high intensity
Diff = imresize(imfilter(I,Kernel,'replicate'),1/zooming, 'nearest') - img_y;
UpSampled = imresize(Diff,zooming,'bilinear');
Grad0 = imfilter(UpSampled,Kernel,'replicate');
%refine image by expected gradient
%Gradient decent
%I = ModifyByGradient(I,Grad_exp);
OptDir = Grad_exp - Img2Grad(I);
Grad1 = sum(OptDir,3);
Grad_all = beta0 * Grad0 + beta1 * Grad1;
I_in = I; %make a copy, restore the value if all beta fails
bDecrease = false;
tau = 0.2;
Term_Grad_in = ComputeFunctionValue_Grad(I,Grad_exp);
Term_LowHigh_in = ComputeFunctionValue_lowhigh(I,img_y,para.Gau_sigma);
Term_all_in = Term_LowHigh_in * beta0 + Term_Grad_in * beta1;
for line_search_loop=1:10
%line search for the beta, fixed 1/32 is not a good choice
I = I_in - tau * Grad_all;
Term_Grad_out = ComputeFunctionValue_Grad(I,Grad_exp);
Term_LowHigh_out = ComputeFunctionValue_lowhigh(I,img_y,para.Gau_sigma);
Term_all_out = Term_LowHigh_out * beta0 + Term_Grad_out * beta1;
if Term_all_out < Term_all_in
bDecrease = true;
break;
else
tau = tau * 0.5;
end
end
if bDecrease == true
I_best = I;
else
break;
end
if para.bReport
fprintf(['loop=%d, all_in=%0.1f, all_out=%0.1f, LowHigh_in=%0.1f, LowHigh_out=%0.1f, ' ...
'Grad_in=%0.1f, Grad_out=%0.1f\n'],loop,Term_all_in,Term_all_out,Term_LowHigh_in,Term_LowHigh_out, ...
Term_Grad_in,Term_Grad_out);
end
% imwrite(I,fullfile(SaveFolder, [num2str(loop) '_GenIntenFromGrad.png']));
end
img_out = I_best;
end
function f = ComputeFunctionValue_Grad(img, Grad_exp)
Grad = Img2Grad(img);
Diff = Grad - Grad_exp;
Sqrt = Diff .^2;
f = sqrt(sum(Sqrt(:)));
end
|
github
|
Liusifei/Face-Hallucination-master
|
U2_ReturnTheLargestToDoNumber.m
|
.m
|
Face-Hallucination-master/Code/Ours2/U2_ReturnTheLargestToDoNumber.m
| 707 |
utf_8
|
f1e7aae84f58dba2883b935de28df52c
|
%Chih-Yuan Yang
%09/29/12
%To parallel run Glasner's algorithm
%Change name from U25 to U2
function fileidx = U2_ReturnTheLargestToDoNumber(fn_symphony,iistart)
fileidx = -1; %default value, returned if no pending file remains
fid = fopen(fn_symphony,'r+');
C = textscan(fid,'%05d %s %d\n');
iiend = length(C{1,3});
bwriteback = false;
for i=iistart:iiend
if C{1,3}(i) == 0
fileidx = i;
C{1,3}(i) = 1;
bwriteback = true;
break;
end
end
if bwriteback
fseek(fid,0,'bof'); %move to beginning
for i=1:iiend
fprintf(fid,'%05d %s %d\n',C{1,1}(i),C{1,2}{i},C{1,3}(i));
end
end
fclose(fid);
end
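%Example worker loop (my sketch of how the semaphore file is meant to be shared by several
%MATLAB processes; 'semaphore.txt' and the processing step are placeholders, and
%U27_CreateSemaphoreFile_TwoColumn from this folder creates the file once beforehand):
%   U27_CreateSemaphoreFile_TwoColumn('semaphore.txt',length(filenamelist),filenamelist);
%   fileidx = U2_ReturnTheLargestToDoNumber('semaphore.txt',1);
%   while fileidx ~= -1
%       %process filenamelist{fileidx} here
%       fileidx = U2_ReturnTheLargestToDoNumber('semaphore.txt',1);
%   end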
|
github
|
Liusifei/Face-Hallucination-master
|
F19c_GenerateLRImage_GaussianKernel.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F19c_GenerateLRImage_GaussianKernel.m
| 1,852 |
utf_8
|
867da30d81dc53d3a29ad3042d25141d
|
%Chih-Yuan Yang
%03/15/13
%Change the method of subsampling
%F19b, add the mode of scaling as 2
%F19c, add the mode of scaling as 8
function lrimg = F19c_GenerateLRImage_GaussianKernel(hrimg,s,sigma)
if isa(hrimg,'uint8')
hrimg = im2double(hrimg);
end
[h, w, d] = size(hrimg);
htrim = h-mod(h,s);
wtrim = w-mod(w,s);
imtrim = hrimg(1:htrim,1:wtrim,1:d);
h_lr = htrim/s;
w_lr = wtrim/s;
%detect image type
if mod(s,2) == 1
kernelsize = ceil(sigma * 3)*2+1; %the kernel size is odd
kernel = fspecial('gaussian',kernelsize,sigma);
if d == 1
blurimg = imfilter(imtrim,kernel,'replicate');
elseif d == 3
blurimg = zeros(htrim,wtrim,d);
for i=1:3
blurimg(:,:,i) = imfilter(imtrim(:,:,i),kernel,'replicate');
end
end
lrimg = imresize(blurimg,1/s,'nearest');
elseif mod(s,2) == 0 %s is even
sampleshift = s/2;
kernelsize = ceil(sigma*3)*2+2; %the kernel size is even
kernel = fspecial('gaussian',kernelsize,sigma); %kernel is always a symmetric matrix
blurimg = imfilter(imtrim,kernel,'replicate');
lrimg = zeros(h_lr,w_lr,d);
for didx = 1:d
for rl=1:h_lr
r_hr_sample = (rl-1)*s+sampleshift; %the shift is the key issue, because the effect of imfilter using a kernel
%of even width is equivalent to a 0.5 pixel shift in the
%original image
for cl = 1:w_lr
c_hr_sample = (cl-1)*s+sampleshift;
lrimg(rl,cl,didx) = blurimg(r_hr_sample,c_hr_sample,didx);
end
end
end
end
end
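%Example (my sketch; the sigma follows the zooming-to-sigma convention used in this project,
%1.2 for 3x and 1.6 for 4x, and the file name is hypothetical):
%   img_hr = im2double(imread('face_hr.png'));
%   img_lr = F19c_GenerateLRImage_GaussianKernel(img_hr,4,1.6);   %blur with an even-sized kernel, sample every 4 pixels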
|
github
|
Liusifei/Face-Hallucination-master
|
F37g_GetTexturePatchMatch_Aligned.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F37g_GetTexturePatchMatch_Aligned.m
| 13,512 |
utf_8
|
daf2019f43ec02e60280f1cc83622ce9
|
%Chih-Yuan Yang
%09/08/13
%Use patchmatch to retrieve a texture background
%F37g: There is a problem with the parallel toolbox, so I have to
%temporarily change the code and remove all parfor loops.
function [gradients_texture, img_texture, img_texture_backprojection] = F37g_GetTexturePatchMatch_Aligned(img_y, ...
hrexampleimages, lrexampleimages, landmarks_test, rawexamplelandmarks)
%parameter
numberofHcandidate = 10;
%start
[h_lr, w_lr, exampleimagenumber] = size(lrexampleimages);
[h_hr, w_hr, ~] = size(hrexampleimages);
zooming = h_hr/h_lr;
if zooming == 4
Gau_sigma = 1.6;
elseif zooming == 3
Gau_sigma = 1.2;
end
alignedexampleimage_hr = zeros(h_hr,w_hr,exampleimagenumber,'uint8'); %set as uint8 to reduce memory demand
alignedexampleimage_lr = zeros(h_lr,w_lr,exampleimagenumber);
disp('align images');
set = 28:48; %eyes and nose
basepoints = landmarks_test(set,:);
inputpoints = rawexamplelandmarks(set,:,:);
for k=1:exampleimagenumber
alignedexampleimage_hr(:,:,k) = F18_AlignExampleImageByLandmarkSet(hrexampleimages(:,:,k),inputpoints(:,:,k),basepoints);
%F19 automatically convert uint8 input to double
alignedexampleimage_lr(:,:,k) = F19a_GenerateLRImage_GaussianKernel(alignedexampleimage_hr(:,:,k),zooming,Gau_sigma);
end
cores = 2; % Use more cores for more speed
if cores==1
algo = 'cpu';
else
algo = 'cputiled';
end
patchsize_lr = 5;
nn_iters = 5;
A = repmat(img_y,[1 1 3]);
testnumber = exampleimagenumber;
xyandl2norm = zeros(h_lr,w_lr,3,testnumber,'int32');
disp('patchmatching');
for i=1:testnumber;
%run patchmatch
B = repmat(alignedexampleimage_lr(:,:,i),[1 1 3]);
xyandl2norm(:,:,:,i) = nnmex(A, B, algo, patchsize_lr, nn_iters, [], [], [], [], cores); %the return is in int32
end
l2norm_double = double(xyandl2norm(:,:,3,:));
[sortedl2norm, ix] = sort(l2norm_double,4);
hrpatchextractdata = zeros(h_lr-patchsize_lr+1,w_lr-patchsize_lr+1,numberofHcandidate,3); %ii,r_lr_src,c_lr_src
%here
hrpatchsimilarity = zeros(h_lr-patchsize_lr+1,w_lr-patchsize_lr+1,numberofHcandidate);
parameter_l2normtosimilarity = 625;
for rl = 1:h_lr-patchsize_lr+1
for cl = 1:w_lr-patchsize_lr+1
for k=1:numberofHcandidate
knnidx = ix(rl,cl,1,k);
x = xyandl2norm(rl,cl,1,knnidx); %start from 0
y = xyandl2norm(rl,cl,2,knnidx);
clsource = x+1;
rlsource = y+1;
hrpatchextractdata(rl,cl,k,:) = reshape([knnidx rlsource clsource],[1 1 1 3]);
hrpatchsimilarity(rl,cl,k) = exp(-sortedl2norm(rl,cl,1,knnidx)/parameter_l2normtosimilarity);
end
end
end
hrpatch = F39_ExtractAllHrPatches(patchsize_lr,zooming, hrpatchextractdata,alignedexampleimage_hr);
hrpatch = F40_CompensateHRpatches(hrpatch, img_y, zooming, hrpatchextractdata,alignedexampleimage_lr);
%mostsimilarinputpatchrecord = IF2_SearchForSelfSimilarPatchesL2Norm(img_y,patchsize_lr);
%hrpatch_filtered = IF3_SimilarityFilter(hrpatch,hrpatchsimilarity,mostsimilarinputpatchrecord);
%img_texture = IF4_BuildHRimagefromHRPatches(hrpatch_filtered,zooming);
img_texture = IF4_BuildHRimagefromHRPatches(hrpatch,zooming);
iternum = 1000;
Tolf = 0.0001;
breport = false;
disp('backprojection for img_texture');
img_texture_backprojection = F11d_BackProjection_GaussianKernel(img_y, img_texture, Gau_sigma, iternum,breport,Tolf);
%extract the graident
gradients_texture = F14_Img2Grad(img_texture_backprojection);
end
function scanresult = IF2_SearchForSelfSimilarPatchesL2Norm(img_y,patchsize_lr)
%out:
%scanresult: 3 x numberofFcandidate x (h_lr-patchsize+1) x (w_lr-patchsize+1)
patcharea = patchsize_lr^2;
[lh lw] = size(img_y);
%Find self similar patches
numberofFcandidate = 10;
scanresult = zeros(3,numberofFcandidate,lh-patchsize_lr+1,lw-patchsize_lr+1); %scan results: r,c, similarity
totalpatchnumber = (lh-patchsize_lr+1)*(lw-patchsize_lr+1);
featurematrix = zeros(patcharea,totalpatchnumber);
rec = zeros(2,totalpatchnumber);
idx = 0;
for rl=1:lh-patchsize_lr+1
rl1 = rl+patchsize_lr-1;
for cl=1:lw-patchsize_lr+1
cl1 = cl+patchsize_lr-1;
idx = idx + 1;
rec(:,idx) = [rl;cl];
featurematrix(:,idx) = reshape(img_y(rl:rl1,cl:cl1),patcharea,1);
end
end
%search
idx = 0;
for rl=1:lh-patchsize_lr+1
for cl=1:lw-patchsize_lr+1
idx = idx + 1;
fprintf('idx %d totalpatchnumber %d\n',idx,totalpatchnumber);
queryfeature = featurematrix(:,idx);
diff = featurematrix - repmat(queryfeature,1,totalpatchnumber);
sqr = sum(diff.^2);
[ssqr ix] = sort(sqr);
saveidx = 0;
for j=1:numberofFcandidate+1 %add one to prevent find itself
indexinsort = ix(j);
sr = rec(1,indexinsort);
sc = rec(2,indexinsort);
%explanation: it is possible that there are 11 lr patches with the same appearance
%and the input one is sorted at an index larger than 11, so that sr and sc are insufficient
%to prevent the problem
if sr ~= rl || sc ~= cl
saveidx = saveidx + 1;
if saveidx <= numberofFcandidate
l2norm = sqrt(ssqr(j));
similarity = exp(-l2norm/25);
scanresult(1:3,saveidx,rl,cl) = [sr;sc;similarity];
end
end
end
end
end
end
function hrpatch_filtered = IF3_SimilarityFilter(hrpatch,hrpatchsimilarity,mostsimilarinputpatches)
%totalpatchnumber
%hrpatch: patchsize_hr x patchsize_hr x (h_lr-patchsize_lr+1) x (w_lr-patchsize_lr+1) x numberofHcandidate
%hrpatchsimilarity: (h_lr-patchsize_lr+1) x (w_lr-patchsize_lr+1) x numberofHcandidate
%mostsimilarinputpatches: 3 x numberofFcandidate x (h_lr-patchsize_lr+1) x (w_lr-patchsize_lr+1)
%out
%hrpatch_filtered: patchsize_hr x patchsize_hr x (h_lr-patchsize_lr+1) x (w_lr-patchsize_lr+1)
zooming = 4;
patchsize_hr = size(hrpatch,1);
patchsize_lr = patchsize_hr /zooming;
h_lr = size(hrpatch,3) + patchsize_lr -1;
w_lr = size(hrpatch,4) + patchsize_lr -1;
numberofHcandidate = size(hrpatch,5);
numberofFcandidate = size(mostsimilarinputpatches,2);
%allocate for out
hrpatch_filtered = zeros(patchsize_hr,patchsize_hr,h_lr-patchsize_lr+1,w_lr-patchsize_lr+1);
for rl= 1:h_lr-patchsize_lr+1
fprintf('rl:%d total:%d\n',rl,h_lr-patchsize_lr+1);
for cl = 1:w_lr-patchsize_lr+1
%load candidates
H = zeros(patchsize_hr,patchsize_hr,numberofHcandidate);
similarityHtolrpatch = zeros(numberofHcandidate,1);
for j=1:numberofHcandidate
H(:,:,j) = hrpatch(:,:,rl,cl,j); %H
similarityHtolrpatch(j) = hrpatchsimilarity(rl,cl,j);
end
%self similar patch instance number
similarityFtolrpatch = reshape( mostsimilarinputpatches(3,:,rl,cl) , [numberofFcandidate , 1]);
%load all of the two step patches
R = zeros(patchsize_hr,patchsize_hr,numberofFcandidate,numberofHcandidate);
RSimbasedonF = zeros(numberofFcandidate,numberofHcandidate);
for i=1:numberofFcandidate
sr = mostsimilarinputpatches(1,i,rl,cl);
sc = mostsimilarinputpatches(2,i,rl,cl);
%hr candidate number
for j=1:numberofHcandidate
R(:,:,i,j) = hrpatch(:,:,sr,sc,j);
RSimbasedonF(i,j) = hrpatchsimilarity(sr,sc,j);
end
end
%here is a question, how to define the similarity between H and R?
%L2norm?
hscore = zeros(numberofHcandidate,1);
for i=1:numberofHcandidate
theH = H(:,:,i);
for j=1:numberofFcandidate
for k=1:numberofHcandidate
theR = R(:,:,j,k);
similarityRbasedonF = RSimbasedonF(j,k);
%similarity between H and R
diff = theH - theR;
L2N = norm(diff(:));
similarityRtoH = exp(- L2N/25); %the 25 is a parameter that needs to be tuned in the future
hscore(i) = hscore(i) + similarityHtolrpatch(i) * similarityRbasedonF * similarityRtoH * similarityFtolrpatch(j);
end
end
end
[~, idx] = max(hscore);
hrpatch_filtered(:,:,rl,cl) = hrpatch(:,:,rl,cl,idx(1));
end
end
end
function img_texture = IF4_BuildHRimagefromHRPatches(hrpatch,zooming)
%reconstruct the high-resolution image
patchsize_hr = size(hrpatch,1);
patchsize_lr = patchsize_hr/zooming;
h_lr = size(hrpatch,3) + patchsize_lr - 1;
w_lr = size(hrpatch,4) + patchsize_lr - 1;
h_expected = h_lr * zooming;
w_expected = w_lr * zooming;
img_texture = zeros(h_expected,w_expected);
%most cases
rpixelshift = 2; %this should be modified according to patchsize_lr
cpixelshift = 2;
for rl = 2:h_lr - patchsize_lr
rh = (rl-1+rpixelshift)*zooming+1;
rh1 = rh+zooming-1;
for cl = 2:w_lr - patchsize_lr
ch = (cl-1+cpixelshift)*zooming+1;
ch1 = ch+zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
img_texture(rh:rh1,ch:ch1) = usedhrpatch(9:12,9:12);
end
end
%left
cl = 1;
ch = 1;
ch1 = ch+3*zooming-1;
for rl=2:h_lr-patchsize_lr
rh = (rl-1+rpixelshift)*zooming+1;
rh1 = rh+zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 1;
ch1source = chsource+3*zooming-1;
rhsource = 9;
rh1source = rhsource+zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
end
%right
cl = w_lr - patchsize_lr+1;
ch = w_expected - 3*zooming+1;
ch1 = w_expected;
for rl=2:h_lr-patchsize_lr
rh = (rl-1+rpixelshift)*zooming+1;
rh1 = rh+zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 9;
ch1source = chsource+3*zooming-1;
rhsource = 9;
rh1source = rhsource+zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
end
%top
rl = 1;
rh = 1;
rh1 = rh+3*zooming-1;
for cl=2:w_lr-patchsize_lr
ch = (cl-1+cpixelshift)*zooming+1;
ch1 = ch+zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 9;
ch1source = chsource+zooming-1;
rhsource = 1;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
end
%bottom
rl = h_lr-patchsize_lr+1;
rh = h_expected - 3*zooming+1;
rh1 = h_expected;
for cl=2:w_lr-patchsize_lr
ch = (cl-1+cpixelshift)*zooming+1;
ch1 = ch+zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 9;
ch1source = chsource+zooming-1;
rhsource = 9;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
end
%left-top corner
rl=1;
cl=1;
rh = 1;
rh1 = rh+3*zooming-1;
ch = 1;
ch1 = ch+3*zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 1;
ch1source = chsource+3*zooming-1;
rhsource = 1;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
%right-top corner
rl=1;
cl=w_lr-patchsize_lr+1;
rh = (rl-1)*zooming+1;
rh1 = rh+3*zooming-1;
ch = (cl-1+cpixelshift)*zooming+1;
ch1 = ch+3*zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 9;
ch1source = chsource+3*zooming-1;
rhsource = 1;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
%left-bottom corner
rl=h_lr-patchsize_lr+1;
cl=1;
rh = (rl-1+rpixelshift)*zooming+1;
rh1 = rh+3*zooming-1;
ch = (cl-1)*zooming+1;
ch1 = ch+3*zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 1;
ch1source = chsource+3*zooming-1;
rhsource = 9;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
%right-bottom corner
rl=h_lr-patchsize_lr+1;
cl=w_lr-patchsize_lr+1;
rh = (rl-1+rpixelshift)*zooming+1;
rh1 = rh+3*zooming-1;
ch = (cl-1+cpixelshift)*zooming+1;
ch1 = ch+3*zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 9;
ch1source = chsource+3*zooming-1;
rhsource = 9;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
end
|
github
|
Liusifei/Face-Hallucination-master
|
U11_ExtractEyeRangeFeature.m
|
.m
|
Face-Hallucination-master/Code/Ours2/U11_ExtractEyeRangeFeature.m
| 529 |
utf_8
|
8b0d1120b08d30f10d422b87054bd2bf
|
%Chih-Yuan Yang
%08/31/12
function eyerangefeature = U11_ExtractEyeRangeFeature(eyerange)
[h w] = size(eyerange);
%the feature: gradient
%check type; an integer image cannot be used to compute the feature directly
if isa(eyerange,'double')
eyerange_double = eyerange;
else
eyerange_double = double(eyerange);
end
dx = eyerange_double(:,2:end) - eyerange_double(:,1:end-1);
dy = eyerange_double(2:end,:) - eyerange_double(1:end-1,:);
eyerangefeature = cat(1,reshape(dx,[h*(w-1) 1]), reshape(dy,[(h-1)*w 1]));
end
|
github
|
Liusifei/Face-Hallucination-master
|
F41_ComputePatchSimilarity.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F41_ComputePatchSimilarity.m
| 483 |
utf_8
|
9a56cadc176880ec786d047551d619eb
|
%Chih-Yuan Yang
%10/12/12
%For nnmex() in descriptor mode
function l2norm = F41_ComputePatchSimilarity(A,B,xy)
[h w d] = size(A);
retrieveddescriptor = zeros(h,w,d);
for r=1:h
for c=1:w
x = xy(r,c,1);
y = xy(r,c,2);
r_source = y+1;
c_source = x+1;
retrieveddescriptor(r,c,:) = B(r_source,c_source,:);
end
end
diff = A-retrieveddescriptor;
l2norm = sqrt(sum(diff.^2,3));
end
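%Example (my sketch; it assumes the PatchMatch MEX nnmex used elsewhere in this folder, with
%A and B being descriptor images of identical depth):
%   xyandl2norm = nnmex(A, B, 'cpu', 5, 5, [], [], [], [], 1);   %nearest-neighbor field, int32
%   l2norm = F41_ComputePatchSimilarity(A, B, xyandl2norm(:,:,1:2));
%Note that this measures the per-pixel descriptor distance at the matched location, whereas the
%third channel returned by nnmex accumulates the distance over the whole patch.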
|
github
|
Liusifei/Face-Hallucination-master
|
T1_Img2Grad.m
|
.m
|
Face-Hallucination-master/Code/Ours2/T1_Img2Grad.m
| 743 |
utf_8
|
dfeb710d5ccd6e72fc1185c0d2ad270b
|
function Grad = T1_Img2Grad(img)
[h w] = size(img);
Grad = zeros(h,w,8);
DiffOp = RetGradientKernel();
for i=1:8
Grad(:,:,i) = imfilter(img,DiffOp{i},'replicate');
end
end
function f = RetGradientKernel()
f = cell(8,1);
f{1} = [0 0 0;
0 -1 1;
0 0 0];
f{2} = [0 0 1;
0 -1 0;
0 0 0];
f{3} = [0 1 0;
0 -1 0;
0 0 0];
f{4} = [1 0 0;
0 -1 0;
0 0 0];
f{5} = [0 0 0;
1 -1 0;
0 0 0];
f{6} = [0 0 0;
0 -1 0;
1 0 0];
f{7} = [0 0 0;
0 -1 0;
0 1 0];
f{8} = [0 0 0;
0 -1 0;
0 0 1];
end
|
github
|
Liusifei/Face-Hallucination-master
|
F37a_GetTexturePatchMatchSimilarityFilter.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F37a_GetTexturePatchMatchSimilarityFilter.m
| 15,700 |
utf_8
|
a3d7b006dc2ed233234aae66f7da2cbe
|
%Chih-Yuan Yang
%10/05/12
%Use patchmatch to retrieve a texture background
function [gradients_texture img_texture img_texture_backprojection] = F37a_GetTexturePatchMatchSimilarityFilter(img_y, ...
hrexampleimages, lrexampleimages)
%parameter
numberofHcandidate = 10;
%start
[h_lr, w_lr, exampleimagenumber] = size(lrexampleimages);
[h_hr, w_hr, ~] = size(hrexampleimages);
zooming = h_hr/h_lr;
if zooming == 4
Gau_sigma = 1.6;
elseif zooming == 3
Gau_sigma = 1.2;
end
cores = 2; % Use more cores for more speed
if cores==1
algo = 'cpu';
else
algo = 'cputiled';
end
patchsize_lr = 5;
nn_iters = 5;
%A =F38_ExtractFeatureFromAnImage(img_y);
A = repmat(img_y,[1 1 3]);
testnumber = exampleimagenumber;
xyandl2norm = zeros(h_lr,w_lr,3,testnumber,'int32');
disp('patchmatching');
parfor i=1:testnumber;
%run patchmatch
B = repmat(lrexampleimages(:,:,i),[1 1 3]);
xyandl2norm(:,:,:,i) = nnmex(A, B, algo, patchsize_lr, nn_iters, [], [], [], [], cores); %the return is in int32
end
l2norm_double = double(xyandl2norm(:,:,3,:));
[sortedl2norm, ix] = sort(l2norm_double,4);
hrpatchextractdata = zeros(h_lr-patchsize_lr+1,w_lr-patchsize_lr+1,numberofHcandidate,3); %ii,r_lr_src,c_lr_src
%here
hrpatchsimilarity = zeros(h_lr-patchsize_lr+1,w_lr-patchsize_lr+1,numberofHcandidate);
parameter_l2normtosimilarity = 625;
for rl = 1:h_lr-patchsize_lr+1
for cl = 1:w_lr-patchsize_lr+1
for k=1:numberofHcandidate
knnidx = ix(rl,cl,1,k);
x = xyandl2norm(rl,cl,1,knnidx); %start from 0
y = xyandl2norm(rl,cl,2,knnidx);
clsource = x+1;
rlsource = y+1;
hrpatchextractdata(rl,cl,k,:) = reshape([knnidx rlsource clsource],[1 1 1 3]);
hrpatchsimilarity(rl,cl,k) = exp(-sortedl2norm(rl,cl,1,knnidx)/parameter_l2normtosimilarity);
end
end
end
hrpatch = IF1_ExtractAllHrPatches(img_y, patchsize_lr,zooming, hrpatchextractdata,hrexampleimages,lrexampleimages);
mostsimilarinputpatchrecord = IF2_SearchForSelfSimilarPatchesL2Norm(img_y,patchsize_lr);
hrpatch_filtered = IF3_SimilarityFilter(hrpatch,hrpatchsimilarity,mostsimilarinputpatchrecord);
img_texture = IF4_BuildHRimagefromHRPatches(hrpatch_filtered,zooming);
iternum = 1000;
Tolf = 0.0001;
breport = false;
disp('backprojection for img_texture');
img_texture_backprojection = F11d_BackProjection_GaussianKernel(img_y, img_texture, Gau_sigma, iternum,breport,Tolf);
%extract the graident
gradients_texture = F14_Img2Grad(img_texture_backprojection);
end
function scanresult = IF2_SearchForSelfSimilarPatchesL2Norm(img_y,patchsize_lr)
%out:
%scanresult: 3 x numberofFcandidate x (h_lr-patchsize+1) x (w_lr-patchsize+1)
patcharea = patchsize_lr^2;
[lh lw] = size(img_y);
%Find self similar patches
numberofFcandidate = 10;
scanresult = zeros(3,numberofFcandidate,lh-patchsize_lr+1,lw-patchsize_lr+1); %scan results: r,c, similarity
totalpatchnumber = (lh-patchsize_lr+1)*(lw-patchsize_lr+1);
featurematrix = zeros(patcharea,totalpatchnumber);
rec = zeros(2,totalpatchnumber);
idx = 0;
for rl=1:lh-patchsize_lr+1
rl1 = rl+patchsize_lr-1;
for cl=1:lw-patchsize_lr+1
cl1 = cl+patchsize_lr-1;
idx = idx + 1;
rec(:,idx) = [rl;cl];
featurematrix(:,idx) = reshape(img_y(rl:rl1,cl:cl1),patcharea,1);
end
end
%search
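    %a vectorized alternative to the query loop below (a sketch, assuming the
    %Statistics Toolbox function pdist2 is available):
    % D = pdist2(featurematrix',featurematrix');   %totalpatchnumber x totalpatchnumber L2 norms
    % [sortedD, ix_all] = sort(D,2);               %candidate ranking per query patch
    %the loop below produces the same ranking one query patch at a time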
idx = 0;
for rl=1:lh-patchsize_lr+1
for cl=1:lw-patchsize_lr+1
idx = idx + 1;
fprintf('idx %d totalpatchnumber %d\n',idx,totalpatchnumber);
queryfeature = featurematrix(:,idx);
diff = featurematrix - repmat(queryfeature,1,totalpatchnumber);
sqr = sum(diff.^2);
[ssqr ix] = sort(sqr);
saveidx = 0;
for j=1:numberofFcandidate+1 %add one to prevent find itself
indexinsort = ix(j);
sr = rec(1,indexinsort);
sc = rec(2,indexinsort);
                %explanation: it is possible that there are 11 lr patches with the same appearance
                %and the query patch is sorted at an index greater than 11, so checking sr and sc alone
                %is still insufficient to fully prevent matching the query patch with itself
if sr ~= rl || sc ~= cl
saveidx = saveidx + 1;
if saveidx <= numberofFcandidate
l2norm = sqrt(ssqr(j));
similarity = exp(-l2norm/25);
scanresult(1:3,saveidx,rl,cl) = [sr;sc;similarity];
end
end
end
end
end
end
function hrpatch_filtered = IF3_SimilarityFilter(hrpatch,hrpatchsimilarity,mostsimilarinputpatches)
%in:
%hrpatch: patchsize_hr x patchsize_hr x (h_lr-patchsize_lr+1) x (w_lr-patchsize_lr+1) x numberofHcandidate
%hrpatchsimilarity: (h_lr-patchsize_lr+1) x (w_lr-patchsize_lr+1) x numberofHcandidate
%mostsimilarinputpatches: 3 x numberofFcandidate x (h_lr-patchsize_lr+1) x (w_lr-patchsize_lr+1)
%out
%hrpatch_filtered: patchsize_hr x patchsize_hr x (h_lr-patchsize_lr+1) x (w_lr-patchsize_lr+1)
zooming = 4;
patchsize_hr = size(hrpatch,1);
patchsize_lr = patchsize_hr /zooming;
h_lr = size(hrpatch,3) + patchsize_lr -1;
w_lr = size(hrpatch,4) + patchsize_lr -1;
numberofHcandidate = size(hrpatch,5);
numberofFcandidate = size(mostsimilarinputpatches,2);
%allocate for out
hrpatch_filtered = zeros(patchsize_hr,patchsize_hr,h_lr-patchsize_lr+1,w_lr-patchsize_lr+1);
for rl= 1:h_lr-patchsize_lr+1
fprintf('rl:%d total:%d\n',rl,h_lr-patchsize_lr+1);
for cl = 1:w_lr-patchsize_lr+1
%load candidates
H = zeros(patchsize_hr,patchsize_hr,numberofHcandidate);
similarityHtolrpatch = zeros(numberofHcandidate,1);
for j=1:numberofHcandidate
H(:,:,j) = hrpatch(:,:,rl,cl,j); %H
similarityHtolrpatch(j) = hrpatchsimilarity(rl,cl,j);
end
%self similar patch instance number
similarityFtolrpatch = reshape( mostsimilarinputpatches(3,:,rl,cl) , [numberofFcandidate , 1]);
%load all of the two step patches
R = zeros(patchsize_hr,patchsize_hr,numberofFcandidate,numberofHcandidate);
RSimbasedonF = zeros(numberofFcandidate,numberofHcandidate);
for i=1:numberofFcandidate
sr = mostsimilarinputpatches(1,i,rl,cl);
sc = mostsimilarinputpatches(2,i,rl,cl);
%hr candidate number
for j=1:numberofHcandidate
R(:,:,i,j) = hrpatch(:,:,sr,sc,j);
RSimbasedonF(i,j) = hrpatchsimilarity(sr,sc,j);
end
end
%here is a question, how to define the similarity between H and R?
%L2norm?
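            %what the loops below compute, for each HR candidate i:
            % hscore(i) = sum over j,k of sim(H_i, LR query) * sim(R_{j,k} given F_j)
            %                            * exp(-||H_i - R_{j,k}||_2 / 25) * sim(F_j, LR query)
            %so an H candidate scores high when it is also supported by the HR candidates
            %of the self-similar LR patches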
hscore = zeros(numberofHcandidate,1);
for i=1:numberofHcandidate
theH = H(:,:,i);
for j=1:numberofFcandidate
for k=1:numberofHcandidate
theR = R(:,:,j,k);
similarityRbasedonF = RSimbasedonF(j,k);
%similarity between H and R
diff = theH - theR;
L2N = norm(diff(:));
                    similarityRtoH = exp(- L2N/25); %the 25 is a parameter that needs to be tuned in the future
hscore(i) = hscore(i) + similarityHtolrpatch(i) * similarityRbasedonF * similarityRtoH * similarityFtolrpatch(j);
end
end
end
[~, idx] = max(hscore);
hrpatch_filtered(:,:,rl,cl) = hrpatch(:,:,rl,cl,idx(1));
end
end
end
function hrpatch = IF1_ExtractAllHrPatches(img_y, patchsize_lr,zooming,hrpatchextractdata,allHRexampleimages,allLRexampleimages)
%question: if the hrpatch does not need to be compensated, the input parameters img_y and allLRexampleimages can be ignored
%in:
%hrpatchextractdata: (h_lr-patchsize_lr+1) x (w_lr-patchsize_lr+1) x numberofHcandidate * 3
%the last 3 dim: ii, r_lr_src, c_lr_src
disp('extracting HR patches');
patchsize_hr = patchsize_lr * zooming;
[h_lr_active, w_lr_active, numberofHcandidate, ~] = size(hrpatchextractdata);
hrpatch = zeros(patchsize_hr,patchsize_hr,h_lr_active,w_lr_active,numberofHcandidate);
%analyze which images need to be loaded
alliiset = hrpatchextractdata(:,:,:,1);
alliiset_uni = unique(alliiset(:));
for i = 1:length(alliiset_uni)
ii = alliiset_uni(i);
fprintf('extracting image %d\n',ii);
exampleimage_hr = im2double(allHRexampleimages(:,:,ii));
exampleimage_lr = allLRexampleimages(:,:,ii);
match_4D = alliiset == ii;
match_3D = reshape(match_4D,h_lr_active,w_lr_active,numberofHcandidate); %remove the last dimension
[rlset clandkset] = find(match_3D);
setsize = length(rlset);
for j = 1:setsize
rl = rlset(j);
clandklinearindex = clandkset(j);
        %the relationship: clandklinearindex = w_lr_active * (k-1) + cl
        k = floor( (clandklinearindex-1)/w_lr_active) +1; %the relationship: possum = (pos3-1) * d2 + pos2, pos2 in (1,d2)
cl = clandklinearindex - (k-1)*w_lr_active;
sr = hrpatchextractdata(rl,cl,k,2);
sc = hrpatchextractdata(rl,cl,k,3);
srh = (sr-1)*zooming+1;
srh1 = srh + patchsize_hr -1;
sch = (sc-1)*zooming+1;
sch1 = sch + patchsize_hr-1;
%compensate the HR patch to match the LR query patch
hrp = exampleimage_hr(srh:srh1,sch:sch1); %HR patch
%lrq = img_y(rl:rl+patchsize_lr-1,cl:cl+patchsize_lr-1); %LR query patch
%lrr = exampleimage_lr(sr:sr+patchsize_lr-1,sc:sc+patchsize_lr-1); %LR retrieved patch
        %the imresize makes the process very slow
%chrp = hrp + imresize(lrq - lrr,zooming,'bilinear'); %compensate HR patch
%hrpatch(:,:,rl,cl,k) = chrp;
hrpatch(:,:,rl,cl,k) = hrp;
if 0
bVisuallyCheck = false;
if bVisuallyCheck
if ~exist('hfig','var')
hfig = figure;
else
figure(hfig);
end
subplot(1,4,1);
imshow(hrp/255);
title('hrp');
subplot(1,4,2);
imshow(lrr/255);
title('lrr');
subplot(1,4,3);
imshow(lrq/255);
title('lrq');
subplot(1,4,4);
imshow(chrp/255);
title('chrp');
keyboard
end
end
end
end
end
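%index bookkeeping sketch: with w_lr_active = 5, k = 2 and cl = 3 the linear index is
% clandklinearindex = w_lr_active*(k-1) + cl = 8
%and the recovery above gives k = floor((8-1)/5)+1 = 2 and cl = 8 - (2-1)*5 = 3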
function img_texture = IF4_BuildHRimagefromHRPatches(hrpatch,zooming)
%reconstruct the high-resolution image
patchsize_hr = size(hrpatch,1);
patchsize_lr = patchsize_hr/zooming;
h_lr = size(hrpatch,3) + patchsize_lr - 1;
w_lr = size(hrpatch,4) + patchsize_lr - 1;
h_expected = h_lr * zooming;
w_expected = w_lr * zooming;
img_texture = zeros(h_expected,w_expected);
%most cases
rpixelshift = 2; %this should be modified according to patchsize_lr
cpixelshift = 2;
for rl = 2:h_lr - patchsize_lr
rh = (rl-1+rpixelshift)*zooming+1;
rh1 = rh+zooming-1;
for cl = 2:w_lr - patchsize_lr
ch = (cl-1+cpixelshift)*zooming+1;
ch1 = ch+zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
img_texture(rh:rh1,ch:ch1) = usedhrpatch(9:12,9:12);
end
end
%left
cl = 1;
ch = 1;
ch1 = ch+3*zooming-1;
for rl=2:h_lr-patchsize_lr
rh = (rl-1+rpixelshift)*zooming+1;
rh1 = rh+zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 1;
ch1source = chsource+3*zooming-1;
rhsource = 9;
rh1source = rhsource+zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
end
%right
cl = w_lr - patchsize_lr+1;
ch = w_expected - 3*zooming+1;
ch1 = w_expected;
for rl=2:h_lr-patchsize_lr
rh = (rl-1+rpixelshift)*zooming+1;
rh1 = rh+zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 9;
ch1source = chsource+3*zooming-1;
rhsource = 9;
rh1source = rhsource+zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
end
%top
rl = 1;
rh = 1;
rh1 = rh+3*zooming-1;
for cl=2:w_lr-patchsize_lr
ch = (cl-1+cpixelshift)*zooming+1;
ch1 = ch+zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 9;
ch1source = chsource+zooming-1;
rhsource = 1;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
end
%bottom
rl = h_lr-patchsize_lr+1;
rh = h_expected - 3*zooming+1;
rh1 = h_expected;
for cl=2:w_lr-patchsize_lr
ch = (cl-1+cpixelshift)*zooming+1;
ch1 = ch+zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 9;
ch1source = chsource+zooming-1;
rhsource = 9;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
end
%left-top corner
rl=1;
cl=1;
rh = 1;
rh1 = rh+3*zooming-1;
ch = 1;
ch1 = ch+3*zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 1;
ch1source = chsource+3*zooming-1;
rhsource = 1;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
%right-top corner
rl=1;
cl=w_lr-patchsize_lr+1;
rh = (rl-1)*zooming+1;
rh1 = rh+3*zooming-1;
ch = (cl-1+cpixelshift)*zooming+1;
ch1 = ch+3*zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 9;
ch1source = chsource+3*zooming-1;
rhsource = 1;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
%left-bottom corner
rl=h_lr-patchsize_lr+1;
cl=1;
rh = (rl-1+rpixelshift)*zooming+1;
rh1 = rh+3*zooming-1;
ch = (cl-1)*zooming+1;
ch1 = ch+3*zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 1;
ch1source = chsource+3*zooming-1;
rhsource = 9;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
%right-bottom corner
rl=h_lr-patchsize_lr+1;
cl=w_lr-patchsize_lr+1;
rh = (rl-1+rpixelshift)*zooming+1;
rh1 = rh+3*zooming-1;
ch = (cl-1+cpixelshift)*zooming+1;
ch1 = ch+3*zooming-1;
usedhrpatch = hrpatch(:,:,rl,cl);
chsource = 9;
ch1source = chsource+3*zooming-1;
rhsource = 9;
rh1source = rhsource+3*zooming-1;
img_texture(rh:rh1,ch:ch1) = usedhrpatch(rhsource:rh1source,chsource:ch1source);
end
|
github
|
Liusifei/Face-Hallucination-master
|
F39_ExtractAllHrPatches.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F39_ExtractAllHrPatches.m
| 2,478 |
utf_8
|
2b17a373baa50a6548520ff944833a50
|
%Chih-Yuan Yang
%10/07/12
%Separate the internal function into an external one
function hrpatch = F39_ExtractAllHrPatches(patchsize_lr,zooming,hrpatchextractdata,allHRexampleimages)
%question: if the hrpatch does not need to be compensated, the input parameters img_y and allLRexampleimages can be ignored
%in:
%hrpatchextractdata: (h_lr-patchsize_lr+1) x (w_lr-patchsize_lr+1) x numberofHcandidate * 3
%the last 3 dim: ii, r_lr_src, c_lr_src
disp('extracting HR patches');
patchsize_hr = patchsize_lr * zooming;
[h_lr_active, w_lr_active, numberofHcandidate, ~] = size(hrpatchextractdata);
hrpatch = zeros(patchsize_hr,patchsize_hr,h_lr_active,w_lr_active,numberofHcandidate);
%analyze which images need to be loaded
alliiset = hrpatchextractdata(:,:,:,1);
alliiset_uni = unique(alliiset(:));
for i = 1:length(alliiset_uni)
ii = alliiset_uni(i);
fprintf('extracting image %d in function F39\n',ii);
exampleimage_hr = im2double(allHRexampleimages(:,:,ii));
match_4D = alliiset == ii;
match_3D = reshape(match_4D,h_lr_active,w_lr_active,numberofHcandidate); %remove the last dimension
[rlset clandkset] = find(match_3D);
setsize = length(rlset);
for j = 1:setsize
rl = rlset(j);
clandklinearindex = clandkset(j);
        %the relationship: clandklinearindex = w_lr_active * (k-1) + cl
        k = floor( (clandklinearindex-1)/w_lr_active) +1; %the relationship: possum = (pos3-1) * d2 + pos2, pos2 in (1,d2)
cl = clandklinearindex - (k-1)*w_lr_active;
sr = hrpatchextractdata(rl,cl,k,2);
sc = hrpatchextractdata(rl,cl,k,3);
srh = (sr-1)*zooming+1;
srh1 = srh + patchsize_hr -1;
sch = (sc-1)*zooming+1;
sch1 = sch + patchsize_hr-1;
%compensate the HR patch to match the LR query patch
hrp = exampleimage_hr(srh:srh1,sch:sch1); %HR patch
%lrq = img_y(rl:rl+patchsize_lr-1,cl:cl+patchsize_lr-1); %LR query patch
%lrr = exampleimage_lr(sr:sr+patchsize_lr-1,sc:sc+patchsize_lr-1); %LR retrieved patch
%the imresize make the process very slow
%chrp = hrp + imresize(lrq - lrr,zooming,'bilinear'); %compensate HR patch
%hrpatch(:,:,rl,cl,k) = chrp;
hrpatch(:,:,rl,cl,k) = hrp;
end
end
end
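%Usage sketch (assumed values; hrpatchextractdata is expected to come from a patch-match step):
% hrpatch = F39_ExtractAllHrPatches(5, 4, hrpatchextractdata, allHRexampleimages);
%for patchsize_lr = 5 and zooming = 4 the result is 20 x 20 x (h_lr-4) x (w_lr-4) x numberofHcandidate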
|
github
|
Liusifei/Face-Hallucination-master
|
F20_Sigma2Kernel.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F20_Sigma2Kernel.m
| 174 |
utf_8
|
5dd9a9e790fe6562963dad18ce1f632d
|
%Chih-Yuan Yang
%09/20/12
function Kernel = F20_Sigma2Kernel(Gau_sigma)
KernelSize = ceil(Gau_sigma * 3)*2+1;
Kernel = fspecial('gaussian',KernelSize,Gau_sigma);
end
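%Usage sketch: for Gau_sigma = 1.6 the kernel size is ceil(1.6*3)*2+1 = 11, so the
%call below returns an 11 x 11 normalized Gaussian kernel
% Kernel = F20_Sigma2Kernel(1.6);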
|
github
|
Liusifei/Face-Hallucination-master
|
F6e_RetriveImage_DrawFlowChart.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F6e_RetriveImage_DrawFlowChart.m
| 2,799 |
utf_8
|
90151785621128e11aefa5cdc589c2fe
|
%Chih-Yuan Yang
%6/12/13
%F6d: return the aligned images to draw the flowchart
%F6e: return the aligned landmarks so that I can draw the new figure for PAMI
function [retrievedhrimage, retrievedlrimage, retrievedidx, alignedexampleimage_hr, alignedexampleimage_lr, ...
alignedlandmarks] = ...
F6e_RetriveImage_DrawFlowChart(testimage_lr, ...
rawexampleimage, inputpoints, basepoints, mask_lr, zooming, Gau_sigma, glasslist, bglassavoid)
%the rawexampleimage should be double
if ~isa(rawexampleimage,'uint8')
error('wrong class');
end
[h_hr, w_hr, exampleimagenumber] = size(rawexampleimage);
[h_lr, w_lr] = size(testimage_lr);
%find the transform matrix by solving an optimization problem
alignedexampleimage_hr = zeros(h_hr,w_hr,exampleimagenumber,'uint8'); %set as uint8 to reduce memory demand
alignedexampleimage_lr = zeros(h_lr,w_lr,exampleimagenumber);
arr_alignedlandmarks = cell(exampleimagenumber,1);
parfor i=1:exampleimagenumber
[alignedexampleimage_hr(:,:,i) , arr_alignedlandmarks{i}]= F18b_AlignExampleImageByLandmarkSet(rawexampleimage(:,:,i),inputpoints(:,:,i),basepoints);
%F19 automatically convert uint8 input to double
alignedexampleimage_lr(:,:,i) = F19a_GenerateLRImage_GaussianKernel(alignedexampleimage_hr(:,:,i),zooming,Gau_sigma);
end
[r_set, c_set] = find(mask_lr);
top = min(r_set);
bottom = max(r_set);
left = min(c_set);
right = max(c_set);
area_test = im2double(testimage_lr(top:bottom,left:right));
area_mask = mask_lr(top:bottom,left:right);
area_test_aftermask = area_test .* area_mask;
%extract feature from the eyerange, the features are the gradient of LR eye region
feature_test = F24_ExtractFeatureFromArea(area_test_aftermask); %the unit is double
%search the thousand example images to find the most similar eyerange
normvalue = zeros(exampleimagenumber,1);
parfor j=1:exampleimagenumber
examplearea_lr = alignedexampleimage_lr(top:bottom,left:right,j);
examplearea_lr_aftermask = examplearea_lr .* area_mask;
feature_example_lr = F24_ExtractFeatureFromArea(examplearea_lr_aftermask); %the unit is double
normvalue(j) = norm(feature_test - feature_example_lr);
end
%find the small norm
[sortnorm, ix] = sort(normvalue);
%some of them are very similar
%only return the 1nn
if bglassavoid
for k=1:exampleimagenumber
if glasslist(ix(k)) == false
break
end
end
else
k =1;
end
retrievedhrimage = alignedexampleimage_hr(:,:,ix(k));
retrievedlrimage = alignedexampleimage_lr(:,:,ix(k));
alignedlandmarks = arr_alignedlandmarks{ix(k)};
retrievedidx = ix(k);
end
|
github
|
Liusifei/Face-Hallucination-master
|
F21f_EdgePreserving_GaussianKernel.m
|
.m
|
Face-Hallucination-master/Code/Ours2/F21f_EdgePreserving_GaussianKernel.m
| 10,261 |
utf_8
|
c49afa98a0822dec5155f604e7daf9f1
|
%Chih-Yuan Yang
%10/27/12
%F21b: Based on F21a, but change the square kernel to Gaussian, to see whether the square pattern disappears
%F21c: remove the para argument
%F21d: try to use large beta0 and small beta1 to see whether it can save the computational time
%F21e: return gradient_expect to save time
%F21f: change the function F27 to F27a used in this function
function [gradient_expected, weightmap_edge] = F21f_EdgePreserving_GaussianKernel(img_y,zooming,Gau_sigma)
LowMagSuppression = 0; %the three parameters should be adjusted later
DistanceUpperBound = 2.0;
ContrastEnhenceCoef = 1.0;
I_s = F27a_SmoothnessPreserving(img_y,zooming,Gau_sigma);
folder_output = fullfile('Result','Test17_GenerateFigureForPAMI15');
imwrite(I_s,fullfile(folder_output,'H_s.png'));
T = F15_ComputeSRSSD(I_s);
hfig = figure;
imagesc(T);
axis image off
caxis([0 0.7121]);
saveas(hfig, fullfile(folder_output,'mag.png'));
close(hfig);
Dissimilarity = EvaluateDissimilarity8(I_s);
Grad_high_initial = Img2Grad(I_s);
[h w] = size(T);
StatisticsFolder = fullfile('EdgePriors');
LoadFileName = sprintf('Statistics_Sc%d_Si%0.1f.mat',zooming,Gau_sigma);
LoadData = load(fullfile(StatisticsFolder,LoadFileName));
Statistics = LoadData.Statistics;
RidgeMap = edge(I_s,'canny',[0 0.01],0.05);
%Draw the canny detected edges to the cropped region
mag_plus_edge_whole_image = T;
% mag_plus_edge_whole_image(RidgeMap) = 0.2111;
CropRegion = mag_plus_edge_whole_image(257+1:257+1+12-1,50+1:50+1+9-1);
hfig = figure;
imagesc(CropRegion);
axis image off
caxis([0 0.7121]);
saveas(hfig, fullfile(folder_output, 'mag_region.png'));
close(hfig);
RidgeMap_inverted = ~RidgeMap;
imwrite(RidgeMap_inverted,fullfile(folder_output,'Canny.png'));
%filter out small ridges and non-maximum ridges
RidgeMap_filtered = RidgeMap;
[r_set c_set] = find(RidgeMap);
SetLength = length(r_set);
for j=1:SetLength
r = r_set(j);
c = c_set(j);
CenterMagValue = T(r,c);
if CenterMagValue < LowMagSuppression
RidgeMap_filtered(r,c) = false;
end
end
[r_set c_set] = find(RidgeMap_filtered);
SetLength = length(r_set);
[X Y] = meshgrid(1:11,1:11);
DistPatch = sqrt((X-6).^2 + (Y-6).^2);
DistMap = inf(h,w);
UsedPixel = false(h,w);
CenterCoor = zeros(h,w,2);
%Compute DistMap and CenterCoor
[r_set c_set] = find(RidgeMap_filtered);
for j=1:SetLength
r = r_set(j);
r1 = r-5;
r2 = r+5;
c = c_set(j);
c1 = c-5;
c2 = c+5;
    if r1>=1 && r2<=h && c1>=1 && c2<=w %discard boundary?
MapPatch = DistMap(r1:r2,c1:c2);
MinPatch = min(MapPatch, DistPatch);
DistMap(r1:r2,c1:c2) = MinPatch;
UsedPixel(r1:r2,c1:c2) = true;
ChangedPixels = MinPatch < MapPatch;
OriginalCenterCoorPatch = CenterCoor(r1:r2,c1:c2,:);
NewCoor = cat(3,r*ones(11), c*ones(11));
NewCenterCoorPatch = OriginalCenterCoorPatch .* repmat(1-ChangedPixels,[1,1,2]) + NewCoor .* repmat(ChangedPixels,[1,1,2]);
CenterCoor(r1:r2,c1:c2,:) = NewCenterCoorPatch;
end
end
%Convert dist to table index
TableIndexMap = zeros(h,w);
b = unique(DistPatch(:));
for i=1:length(b)
SetPixels = DistMap == b(i);
TableIndexMap(SetPixels) = i;
end
%mapping (T_p, T_r, d) to S_p
[r_set c_set] = find(UsedPixel);
SetLength = length(r_set);
UpdatedPixel = false(h,w);
S = zeros(h,w);
for i=1:SetLength
r = r_set(i);
c = c_set(i);
r_Center = CenterCoor(r,c,1);
c_Center = CenterCoor(r,c,2);
CurrentMagValue = T(r,c);
BinIdx_Current = ceil(CurrentMagValue /0.005);
        %Zebra images have super strong Mag
if BinIdx_Current > 100
BinIdx_Current = 100;
end
TableIndex = TableIndexMap(r,c);
if TableIndex > DistanceUpperBound
continue
end
CenterMagValue = T(r_Center,c_Center);
%Low Mag Edge suppresion
if CenterMagValue < LowMagSuppression
continue
end
BinIdx_Center = ceil(CenterMagValue /0.005);
if BinIdx_Center > 100
BinIdx_Center = 100;
end
%consult the table
if TableIndex == 1 %1 is the index of b(1) where dist = 0, enhance the contrast of pixel on edge
S_p = ContrastEnhenceCoef * Statistics(TableIndex).EstimatedMag(BinIdx_Current,BinIdx_Center);
else
S_p = Statistics(TableIndex).EstimatedMag(BinIdx_Current,BinIdx_Center);
end
if isnan(S_p)
else
UpdatedPixel(r,c) = true;
S(r,c) = S_p;
end
end
    %Record the RidgeMapMagValue, for computing the ProbOfMag
    %the Mag is the consulted Mag
    %here is the problem: when S is very strong, the affected range of ProbMagOut exceeds 1 pixel
RidgeMapMagValue = zeros(h,w);
for i=1:SetLength
r = r_set(i);
c = c_set(i);
r_Center = CenterCoor(r,c,1);
c_Center = CenterCoor(r,c,2);
RidgeMapMagValue(r,c) = S(r_Center,c_Center);
end
S(~UpdatedPixel) = T(~UpdatedPixel);
img_in = I_s;
if min(Dissimilarity(:)) == 0
d = Dissimilarity + 1e-6; %avoid 0 case; some images may have d(:,:,1) as 0
else
d = Dissimilarity;
end
ratio = d ./ repmat(d(:,:,1),[1,1,8]);
%here is the problem, I need to amplify the gradient directionally
Grad_in = Img2Grad(img_in);
Product = Grad_in .* ratio;
Sqr = Product.^2;
Sum = sum(Sqr,3);
Sqrt = sqrt(Sum); %the Sqrt might be 0, because Grad_in may be pure 0;
r1 = S ./Sqrt;
r1(isnan(r1)) = 0;
Grad_exp = Grad_high_initial .*( ratio .*(repmat(r1,[1,1,8])));
    %consolidate inconsistent gradients
NewGrad_exp = zeros(h,w,8);
for k=1:4
switch k
case 1
ShiftOp = [0 -1];
case 2
ShiftOp = [1 -1];
case 3
ShiftOp = [1 0];
case 4
ShiftOp = [1 1];
end
k2 =k+4;
Grad1 = Grad_exp(:,:,k);
Grad2 = Grad_exp(:,:,k2);
Grad2Shift = circshift(Grad2,ShiftOp);
Grad1Abs = abs(Grad1);
Grad2AbsShift = abs(Grad2Shift);
Grad1Larger = Grad1Abs > Grad2AbsShift;
Grad2Larger = Grad2AbsShift > Grad1Abs;
NewGrad1 = Grad1 .* Grad1Larger + (-Grad2Shift) .* Grad2Larger;
NewGrad2Shift = Grad2Shift .* Grad2Larger + (-Grad1) .* Grad1Larger;
NewGrad2 = circshift(NewGrad2Shift,-ShiftOp);
NewGrad_exp(:,:,k) = NewGrad1;
NewGrad_exp(:,:,k2) = NewGrad2;
end
%current problem is the over-enhanced gradient (NewMagExp too large)
gradient_expected = NewGrad_exp;
lambda_m = 2;
m0 = 0;
ProbMagOut = lambda_m * RidgeMapMagValue + m0;
lambda_d = 0.25;
d0 = 0.25;
    ProbDistMap = exp(- (lambda_d * DistMap + d0) ); %this coefficient should be decided by the zooming factor
Product = ProbMagOut .* ProbDistMap;
    weightmap_edge = min(Product,1); %the two terms are not sufficient; direction is not taken into consideration
if 1
bReport = true;
updatenumber = 0;
loopnumber = 1000;
linesearchstepnumber = 10;
beta0 = 1;
beta1 = 0.5^8;
tolf = 0.001;
img_edge = F4e_GenerateIntensityFromGradient(img_y,img_in,NewGrad_exp,Gau_sigma,bReport,...
loopnumber,updatenumber,linesearchstepnumber,beta0,beta1,tolf);
imwrite(img_edge, fullfile(folder_output, 'img_edge.png'));
        %Compute the new magnitude of gradients
mog_new = sqrt(sum(NewGrad_exp.^2,3));
hfig = figure;
imagesc(mog_new);
axis image off
saveas(hfig, fullfile(folder_output,'mog_new.png'));
range = caxis;
disp(range);
%img_edge = F4b_GenerateIntensityFromGradient(img_y,img_in,NewGrad_exp,Gau_sigma,bReport);
%gradient_actual = Img2Grad(img_edge);
%compute the Map of edge weight
end
end
function Grad = Img2Grad(img)
[h w] = size(img);
Grad = zeros(h,w,8);
DiffOp = RetGradientKernel();
for i=1:8
Grad(:,:,i) = imfilter(img,DiffOp{i},'replicate');
end
end
function f = RetGradientKernel()
f = cell(8,1);
f{1} = [0 0 0;
0 -1 1;
0 0 0];
f{2} = [0 0 1;
0 -1 0;
0 0 0];
f{3} = [0 1 0;
0 -1 0;
0 0 0];
f{4} = [1 0 0;
0 -1 0;
0 0 0];
f{5} = [0 0 0;
1 -1 0;
0 0 0];
f{6} = [0 0 0;
0 -1 0;
1 0 0];
f{7} = [0 0 0;
0 -1 0;
0 1 0];
f{8} = [0 0 0;
0 -1 0;
0 0 1];
end
function Dissimilarity = EvaluateDissimilarity8(Img_in,PatchSize)
if ~exist('PatchSize','var');
PatchSize = 3;
end
[h w] = size(Img_in);
Dissimilarity = zeros(h,w,8);
f3x3 = ones(PatchSize)/(PatchSize^2);
for i = 1:8
DiffOp = RetGradientKernel8(i);
Diff = imfilter(Img_in,DiffOp,'symmetric');
Sqr = Diff.^2;
Sum = imfilter(Sqr,f3x3,'replicate');
Dissimilarity(:,:,i) = sqrt(Sum);
end
end
function DiffOp = RetGradientKernel8(dir)
f{1} = [0 0 0;
0 -1 1;
0 0 0];
f{2} = [0 0 1;
0 -1 0;
0 0 0];
f{3} = [0 1 0;
0 -1 0;
0 0 0];
f{4} = [1 0 0;
0 -1 0;
0 0 0];
f{5} = [0 0 0;
1 -1 0;
0 0 0];
f{6} = [0 0 0;
0 -1 0;
1 0 0];
f{7} = [0 0 0;
0 -1 0;
0 1 0];
f{8} = [0 0 0;
0 -1 0;
0 0 1];
DiffOp = f{dir};
end
function f = ComputeFunctionValue_Grad(img, Grad_exp)
Grad = Img2Grad(img);
Diff = Grad - Grad_exp;
Sqrt = Diff .^2;
f = sqrt(sum(Sqrt(:)));
end
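%Usage sketch (assumed values; note that the function also writes intermediate figures
%into the Result folder referenced above, which must exist):
% [grad_exp, w_edge] = F21f_EdgePreserving_GaussianKernel(img_y, 4, 1.6);
%grad_exp holds the expected 8-direction gradients of the upsampled image and w_edge
%the per-pixel edge weight map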
|
github
|
Liusifei/Face-Hallucination-master
|
im2patches.m
|
.m
|
Face-Hallucination-master/Code/Ma10/im2patches.m
| 1,616 |
utf_8
|
cc1828fb7cc7e88a54b135f8bd1c17a6
|
% function to convert an image into patches according to a possible mask
% note that for now im has to be a grayscale image
function [patches,max_x,max_y] = im2patches(im,patchSize,intervalSize)
if exist('boundarySize','var')~=1
boundarySize = ceil(patchSize/2);
end
% if boundarySize < patchSize
% error('The boundary size must be equal to or greater than the patch size!');
% end
% the grid of a patch
[p_xx,p_yy]=meshgrid(-patchSize/2:patchSize/2-1,-patchSize/2:patchSize/2-1);
nDim = numel(p_xx);
[height,width]=size(im);
[grid_xx,grid_yy]=meshgrid(boundarySize+1:intervalSize:width-boundarySize+1,boundarySize+1:intervalSize:height-boundarySize+1);
% [ind_xx,ind_yy]=meshgrid(1:size(grid_xx,2),1:size(grid_xx,1));
if nargin > 1
max_x = size(grid_xx,2);
max_y = size(grid_xx,1);
end
grid_xx = grid_xx(:); grid_yy = grid_yy(:);
% ind_xx = ind_xx(:); ind_yy = ind_yy(:);
% if exist('mask','var')==1
% if ~isempty(mask)
% index = mask(sub2ind([height,width],grid_yy(:),grid_xx(:)))>0.5;
% grid_xx = grid_xx(index);
% grid_yy = grid_yy(index);
% end
% end
nPatches = numel(grid_xx);
Patches = struct('x',{},'y',{},'vec',{[]});
xx = repmat(p_xx(:)',[nPatches,1]) + repmat(grid_xx(:),[1,nDim]);
yy = repmat(p_yy(:)',[nPatches,1]) + repmat(grid_yy(:),[1,nDim]);
index = sub2ind([height,width],yy(:),xx(:));
patches = reshape(im(index),[nPatches,nDim]);
% for ii = 1:nPatches
% Patches(ii).x = grid_xx(ii);
% Patches(ii).y = grid_yy(ii);
% % Patches(ii).indx = ind_xx(ii);
% % Patches(ii).indy = ind_yy(ii);
% Patches(ii).vec = patches(ii,:);
% end
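%Usage sketch (assumed values): 8x8 patches sampled every 4 pixels
% [patches, max_x, max_y] = im2patches(im, 8, 4);
%each row of patches is one vectorized 8x8 patch; max_x and max_y are the numbers of
%patch positions along x and y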
|
github
|
Liusifei/Face-Hallucination-master
|
F1a_rnd_smp_dictionary.m
|
.m
|
Face-Hallucination-master/Code/Jianchao08/F1a_rnd_smp_dictionary.m
| 1,202 |
utf_8
|
efe29029302db06579a4ca72784e60a1
|
%Chih-Yuan Yang
%10/29/12
%F1: Change the original file to load '*.png'
%F1a: according to the ICIP paper of Jianchao, the Xl and Xh are both high-resolution patch
function [Xh, Xl] = F1a_rnd_smp_dictionary(folder_exampleimages, folder_reconstructedimages, patch_size, num_patch)
fpath = fullfile(folder_reconstructedimages, '*.png');
filelist = dir(fpath);
Xh = [];
Xl = [];
filenumber = length(filelist);
nums = zeros(1, filenumber);
for num = 1:length(filelist),
img_reconstructed = imread(fullfile(folder_reconstructedimages, filelist(num).name));
nums(num) = prod(size(img_reconstructed));
end;
nums = floor(nums*num_patch/sum(nums));
for ii = 1:filenumber,
patch_num = nums(ii);
fn_load_recon = filelist(ii).name;
fn_short = fn_load_recon(1:end-10);
fn_original = [fn_short '.png'];
img_reconstructed = im2double(imread(fullfile(folder_reconstructedimages, fn_load_recon)));
img_original = rgb2gray(im2double(imread(fullfile(folder_exampleimages, fn_original))));
[H, L] = F5_sample_patches(img_reconstructed, img_original, patch_size, patch_num);
Xh = [Xh, H];
Xl = [Xl, L];
fprintf('Sampled...%d\n', size(Xh, 2));
end;
|
github
|
Liusifei/Face-Hallucination-master
|
F2_coupled_dic_train.m
|
.m
|
Face-Hallucination-master/Code/Jianchao08/F2_coupled_dic_train.m
| 1,084 |
utf_8
|
142a3e5792ad61dc4db24ef3a0d90175
|
%Chih-Yuan Yang
%10/19/12
%reduce the iteration number to reduce the computation time
%this function needs to be further improved, to pass the temp folder of dictionary to F3_sparse_coding
%and remove the hard-coded folder in F3_sparse_coding
function [Dh, Dl] = F2_coupled_dic_train(Xh, Xl, codebook_size, lambda, iterationnumber)
addpath('Sparse coding/sc2');
hDim = size(Xh, 1);
lDim = size(Xl, 1);
% joint learning of the dictionary
X = [1/sqrt(hDim)*Xh; 1/sqrt(lDim)*Xl];
if size(X,2) > 80000
X = X(:, 1:80000);
end
Xnorm = sqrt(sum(X.^2, 1));
clear Xh Xl;
X = X(:, Xnorm > 1e-5);
X = X./repmat(sqrt(sum(X.^2, 1)), hDim+lDim, 1);
idx = randperm(size(X, 2));
Binit = X(:, idx(1:codebook_size));
%why is this lambda/2?
fn_save_temp = 'Dictionary_temp';
[D] = F3_sparse_coding(X, codebook_size, lambda/2, 'L1', [], iterationnumber, 5000, fn_save_temp, [], Binit);
Dh = D(1:hDim, :);
Dl = D(hDim+1:end, :);
% normalize the dictionary
Dh = Dh./repmat(sqrt(sum(Dh.^2, 1)), hDim, 1);
Dl = Dl./repmat(sqrt(sum(Dl.^2, 1)), lDim, 1);
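%Usage sketch (the codebook size, lambda and iteration number below are assumed values):
% [Dh, Dl] = F2_coupled_dic_train(Xh, Xl, 512, 0.15, 20);
%the 1/sqrt(hDim) and 1/sqrt(lDim) weights above keep either feature space from
%dominating the joint sparse coding objective merely because it has more dimensions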
|
github
|
Liusifei/Face-Hallucination-master
|
rnd_smp_dictionary.m
|
.m
|
Face-Hallucination-master/Code/Jianchao08/rnd_smp_dictionary.m
| 2,852 |
utf_8
|
212d5ad367742fb65a07fe925569d9ec
|
function [Xh, Xl] = rnd_smp_dictionary(tr_dir, patch_size, zooming, num_patch)
fpath = fullfile(tr_dir, '*.bmp');
img_dir = dir(fpath);
Xh = [];
Xl = [];
img_num = length(img_dir);
nums = zeros(1, img_num);
for num = 1:length(img_dir),
im = imread(fullfile(tr_dir, img_dir(num).name));
nums(num) = prod(size(im));
end;
nums = floor(nums*num_patch/sum(nums));
for ii = 1:img_num,
patch_num = nums(ii);
im = imread(fullfile(tr_dir, img_dir(ii).name));
[H, L] = sample_patches(im, patch_size, zooming, patch_num);
Xh = [Xh, H];
Xl = [Xl, L];
fprintf('Sampled...%d\n', size(Xh, 2));
end;
function [HP, LP] = sample_patches(im, patch_size, zooming, patch_num)
lz = 2;
if size(im, 3) == 3,
hIm = rgb2gray(im);
else
hIm = im;
end;
if rem(size(hIm,1),zooming)
nrow = floor(size(hIm,1)/zooming)*zooming;
hIm = hIm(1:nrow,:);
end;
if rem(size(hIm,2),zooming)
ncol = floor(size(hIm,2)/zooming)*zooming;
hIm = hIm(:,1:ncol);
end;
lIm = imresize(hIm,1/zooming);
[nrow, ncol] = size(lIm);
x = randperm(nrow-patch_size-lz-1);
y = randperm(ncol-patch_size-lz-1);
[X,Y] = meshgrid(x,y);
xrow = X(:);
ycol = Y(:);
xrow = xrow(1:patch_num);
ycol = ycol(1:patch_num);
% zoom the original image
lIm = imresize(lIm, lz,'bicubic');
hIm = double(hIm);
lIm = double(lIm);
H = zeros(zooming^2*patch_size^2,patch_num);
L = zeros(lz^2*4*patch_size^2,patch_num);
% compute the first and second order gradients
hf1 = [-1,0,1];
vf1 = [-1,0,1]';
lImG11 = conv2(lIm,hf1,'same');
lImG12 = conv2(lIm,vf1,'same');
hf2 = [1,0,-2,0,1];
vf2 = [1,0,-2,0,1]';
lImG21 = conv2(lIm,hf2,'same');
lImG22 = conv2(lIm,vf2,'same');
count = 1;
for pnum = 1:patch_num,
hrow = (xrow(pnum)-1)*zooming + 1;
hcol = (ycol(pnum)-1)*zooming + 1;
Hpatch = hIm(hrow:hrow+zooming*patch_size-1,hcol:hcol+zooming*patch_size-1);
lrow = (xrow(pnum)-1)*lz + 1;
lcol = (ycol(pnum)-1)*lz + 1;
% fprintf('(%d, %d), %d, [%d, %d]\n', lrow, lcol, lz*patch_size,
% size(lImG11));
Lpatch1 = lImG11(lrow:lrow+lz*patch_size-1,lcol:lcol+lz*patch_size-1);
Lpatch2 = lImG12(lrow:lrow+lz*patch_size-1,lcol:lcol+lz*patch_size-1);
Lpatch3 = lImG21(lrow:lrow+lz*patch_size-1,lcol:lcol+lz*patch_size-1);
Lpatch4 = lImG22(lrow:lrow+lz*patch_size-1,lcol:lcol+lz*patch_size-1);
Lpatch = [Lpatch1(:),Lpatch2(:),Lpatch3(:),Lpatch4(:)];
Lpatch = Lpatch(:);
HP(:,count) = Hpatch(:)-mean(Hpatch(:));
LP(:,count) = Lpatch;
count = count + 1;
Hpatch = Hpatch';
Lpatch1 = Lpatch1';
Lpatch2 = Lpatch2';
Lpatch3 = Lpatch3';
Lpatch4 = Lpatch4';
Lpatch = [Lpatch1(:),Lpatch2(:),Lpatch3(:),Lpatch4(:)];
HP(:,count) = Hpatch(:)-mean(Hpatch(:));
LP(:,count) = Lpatch(:);
count = count + 1;
end;
|
github
|
Liusifei/Face-Hallucination-master
|
F3_L1SR.m
|
.m
|
Face-Hallucination-master/Code/Jianchao08/F3_L1SR.m
| 4,861 |
utf_8
|
f698e5b227c276d06019af58f299e8b5
|
%Chih-Yuan yang
%10/24/12
%remove a bug where the upsampled image size is not always 3x, but should be controlled by zooming
%remove a bug where the boundary can not be well filled in Jianchao's original code
%note: the format of the first argument lIm is double but the range is 0~255
function [hIm, ww] = F3_L1SR(lIm, zooming, patch_size, overlap, Dh, Dl, lambda, regres)
% Use sparse representation as the prior for image super-resolution
% Usage
% [hIm] = L1SR(lIm, zooming, patch_size, overlap, Dh, Dl, lambda)
%
% Inputs
% -lIm: low resolution input image, single channel, e.g.
% illuminance
% -zooming: zooming factor, e.g. 3
% -patch_size: patch size for the low resolution image
% -overlap: overlap among patches, e.g. 1
% -Dh: dictionary for the high resolution patches
% -Dl: dictionary for the low resolution patches
% -regres: 'L1' use the sparse representation directly to high
% resolution dictionary;
% 'L2' use the supports found by sparse representation
% and apply least square regression coefficients to high
% resolution dictionary.
% Outputs
% -hIm: the recovered image, single channel
%
% Written by Jianchao Yang @ IFP UIUC
% April, 2009
% Webpage: http://www.ifp.illinois.edu/~jyang29/
% For any questions, please email me by [email protected]
%
% Reference
% Jianchao Yang, John Wright, Thomas Huang and Yi Ma. Image superresolution
% as sparse representation of raw image patches. IEEE Computer Society
% Conference on Computer Vision and Pattern Recognition (CVPR), 2008.
%
[lhg, lwd] = size(lIm);
hhg = lhg*zooming;
hwd = lwd*zooming;
mIm = imresize(lIm, 2,'bicubic');
[mhg, mwd] = size(mIm);
hpatch_size = patch_size*zooming;
mpatch_size = patch_size*2;
% extract gradient feature from lIm
hf1 = [-1,0,1];
vf1 = [-1,0,1]';
hf2 = [1,0,-2,0,1];
vf2 = [1,0,-2,0,1]';
lImG11 = conv2(mIm,hf1,'same');
lImG12 = conv2(mIm,vf1,'same');
lImG21 = conv2(mIm,hf2,'same');
lImG22 = conv2(mIm,vf2,'same');
lImfea(:,:,1) = lImG11;
lImfea(:,:,2) = lImG12;
lImfea(:,:,3) = lImG21;
lImfea(:,:,4) = lImG22;
lgridx = 2:patch_size-overlap:lwd-patch_size;
lgridx = [lgridx, lwd-patch_size];
lgridy = 2:patch_size-overlap:lhg-patch_size;
lgridy = [lgridy, lhg-patch_size];
mgridx = (lgridx - 1)*2 + 1;
mgridy = (lgridy - 1)*2 + 1;
% using linear programming to find sparse solution
bhIm = imresize(lIm, zooming, 'bicubic');
hIm = zeros([hhg, hwd]);
nrml_mat = zeros([hhg, hwd]);
hgridx = (lgridx-1)*zooming + 1;
hgridy = (lgridy-1)*zooming + 1;
disp('Processing the patches sequentially...');
count = 0;
% loop to recover each patch
for xx = 1:length(mgridx),
for yy = 1:length(mgridy),
mcolx = mgridx(xx);
mrowy = mgridy(yy);
count = count + 1;
if ~mod(count, 100),
fprintf('.\n');
else
fprintf('.');
end;
mpatch = mIm(mrowy:mrowy+mpatch_size-1, mcolx:mcolx+mpatch_size-1);
mmean = mean(mpatch(:));
mpatchfea = lImfea(mrowy:mrowy+mpatch_size-1, mcolx:mcolx+mpatch_size-1, :);
mpatchfea = mpatchfea(:);
mnorm = sqrt(sum(mpatchfea.^2));
if mnorm > 1,
y = mpatchfea./mnorm;
else
y = mpatchfea;
end;
w = SolveLasso(Dl, y, size(Dl, 2), 'nnlasso', [], lambda);
% w = feature_sign(Dl, y, lambda);
if isempty(w),
w = zeros(size(Dl, 2), 1);
end;
switch regres,
case 'L1'
if mnorm > 1,
hpatch = Dh*w*mnorm;
else
hpatch = Dh*w;
end;
case 'L2'
idx = find(w);
lsups = Dl(:, idx);
hsups = Dh(:, idx);
w = inv(lsups'*lsups)*lsups'*mpatchfea;
hpatch = hsups*w;
otherwise
error('Unknown fitting!');
end;
hpatch = reshape(hpatch, [hpatch_size, hpatch_size]);
hpatch = hpatch + mmean;
hcolx = hgridx(xx);
hrowy = hgridy(yy);
hIm(hrowy:hrowy+hpatch_size-1, hcolx:hcolx+hpatch_size-1)...
= hIm(hrowy:hrowy+hpatch_size-1, hcolx:hcolx+hpatch_size-1) + hpatch;
nrml_mat(hrowy:hrowy+hpatch_size-1, hcolx:hcolx+hpatch_size-1)...
= nrml_mat(hrowy:hrowy+hpatch_size-1, hcolx:hcolx+hpatch_size-1) + 1;
end;
end;
fprintf('done!\n');
% fill the empty
hIm(1:zooming, :) = bhIm(1:zooming, :);
hIm(:, 1:zooming) = bhIm(:, 1:zooming);
hIm(end-zooming+1:end, :) = bhIm(end-zooming+1:end, :);
hIm(:, end-zooming+1:end) = bhIm(:, end-zooming+1:end);
nrml_mat(nrml_mat < 1) = 1;
hIm = hIm./nrml_mat;
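%Usage sketch (the patch size, overlap and lambda below are assumed values;
%lIm must be double in the range 0~255):
% img_hr = F3_L1SR(255*im2double(img_lr), 3, 5, 1, Dh, Dl, 0.1, 'L1');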
|
github
|
Liusifei/Face-Hallucination-master
|
F3a_L1SR.m
|
.m
|
Face-Hallucination-master/Code/Jianchao08/F3a_L1SR.m
| 7,950 |
utf_8
|
c517a83c8a2019640fb35dc497f554b6
|
%Chih-Yuan yang
%10/24/12
%remove a bug where the upsampled image size is not always 3x, but should be controlled by zooming
%remove a bug where the boundary can not be well filled in Jianchao's original code
%note: the format of the first argument lIm is double but the range is 0~255
%F3a: dynamically change the dictionary so that the overlapped region can be taken into consideration
function [img_hr, ww] = F3a_L1SR(lIm, zooming, patch_size, overlap, Dh, Dl, lambda, regres)
% Use sparse representation as the prior for image super-resolution
% Usage
% [img_hr] = L1SR(lIm, zooming, patch_size, overlap, Dh, Dl, lambda)
%
% Inputs
% -lIm: low resolution input image, single channel, e.g.
% illuminance
% -zooming: zooming factor, e.g. 3
% -patch_size: patch size for the low resolution image
% -overlap: overlap among patches, e.g. 1
% -Dh: dictionary for the high resolution patches
% -Dl: dictionary for the low resolution patches
% -regres: 'L1' use the sparse representation directly to high
% resolution dictionary;
% 'L2' use the supports found by sparse representation
% and apply least square regression coefficients to high
% resolution dictionary.
% Outputs
% -img_hr: the recovered image, single channel
%
% Written by Jianchao Yang @ IFP UIUC
% April, 2009
% Webpage: http://www.ifp.illinois.edu/~jyang29/
% For any questions, please email me by [email protected]
%
% Reference
% Jianchao Yang, John Wright, Thomas Huang and Yi Ma. Image superresolution
% as sparse representation of raw image patches. IEEE Computer Society
% Conference on Computer Vision and Pattern Recognition (CVPR), 2008.
%
[lhg, lwd] = size(lIm);
hhg = lhg*zooming;
hwd = lwd*zooming;
mIm = imresize(lIm, 2,'bicubic');
[mhg, mwd] = size(mIm);
patchsize_hr = patch_size*zooming;
patcharea_hr = patchsize_hr^2;
mpatch_size = patch_size*2;
% extract gradient feature from lIm
hf1 = [-1,0,1];
vf1 = [-1,0,1]';
hf2 = [1,0,-2,0,1];
vf2 = [1,0,-2,0,1]';
lImG11 = conv2(mIm,hf1,'same');
lImG12 = conv2(mIm,vf1,'same');
lImG21 = conv2(mIm,hf2,'same');
lImG22 = conv2(mIm,vf2,'same');
lImfea(:,:,1) = lImG11;
lImfea(:,:,2) = lImG12;
lImfea(:,:,3) = lImG21;
lImfea(:,:,4) = lImG22;
%it is very weird that the index starts from 2; change it to one
lgridx = 1:patch_size-overlap:lwd-patch_size+1;
if lgridx(end) ~= lwd-patch_size+1
lgridx = [lgridx, lwd-patch_size+1]; %fill the last one
end
lgridy = 1:patch_size-overlap:lhg-patch_size+1;
if lgridy(end) ~= lhg-patch_size+1
lgridy = [lgridy, lhg-patch_size+1];
end
%the index of extract features from middle resolution
mgridx = (lgridx - 1)*2 + 1; %the horizontal sample coordinate of the x2 interpolated image
mgridy = (lgridy - 1)*2 + 1; %the vertical
% using linear programming to find sparse solution
bimg_hr = imresize(lIm, zooming, 'bicubic');
img_hr = zeros([hhg, hwd]);
%nrml_mat = zeros([hhg, hwd]);
h_hr = hhg;
w_hr = hwd;
filledmap = false(h_hr,w_hr);
reconfeature = zeros(h_hr,w_hr);
nrml_mat = zeros(h_hr,w_hr);
hgridx = (lgridx-1)*zooming + 1; %the destination in HR
hgridy = (lgridy-1)*zooming + 1;
disp('Processing the patches sequentially...');
count = 0;
% loop to recover each patch
for cidx = 1:length(mgridx) %cidx is the index of array, not the coordinate
for ridx = 1:length(mgridy)
%the index in LR is disregarded because it is irrelevant to feature extraction
c_mr = mgridx(cidx); %cidx is the coordinate of the index in middle resolution
r_mr = mgridy(ridx);
c_hr = hgridx(cidx);
r_hr = hgridy(ridx);
count = count + 1;
if ~mod(count, 100),
fprintf('.\n');
else
fprintf('.');
end;
%mpatch_size: the patchsize * 2
mpatch = mIm(r_mr:r_mr+mpatch_size-1, c_mr:c_mr+mpatch_size-1); %mIm: the middle image,
mmean = mean(mpatch(:));
        %here the feature changes: not only the gradient but also the filled HR intensity
mpatchfea = lImfea(r_mr:r_mr+mpatch_size-1, c_mr:c_mr+mpatch_size-1, :);
mpatchfea = mpatchfea(:);
%consider the overlapped region
processregion_hr = zeros(h_hr,w_hr);
processregion_hr(r_hr:r_hr+patchsize_hr-1,c_hr:c_hr+patchsize_hr-1) = 1;
overlapregion_hr = filledmap .* processregion_hr;
overlapregion_inpatch = overlapregion_hr(r_hr:r_hr+patchsize_hr-1,c_hr:c_hr+patchsize_hr-1);
overlapregion_hr_logical = logical(overlapregion_hr);
%extract the intensity of filled hr
if nnz(overlapregion_hr) == 0
overlapreconfeature = [];
Dh_partial = [];
else
overlapreconfeature = reconfeature(overlapregion_hr_logical); %reshape already
usedpixels = reshape(overlapregion_inpatch,[patcharea_hr,1]);
Dh_partial = Dh(logical(usedpixels),:);
end
%create the new y, new Dl, and new Dh, assuming beta is 1, the same as described in paper
y_concatenated = cat(1,mpatchfea,overlapreconfeature);
D_concatenated = cat(1,Dl,Dh_partial);
thenorm = sqrt(sum(y_concatenated.^2));
if thenorm > 1,
y_input = y_concatenated./thenorm;
else
y_input = y_concatenated;
end;
w = SolveLasso(D_concatenated, y_input, size(D_concatenated, 2), 'nnlasso', [], lambda);
if isempty(w),
w = zeros(size(Dl, 2), 1);
end;
if thenorm > 1,
reconfeature_hr_concatenated = Dh*w*thenorm;
else
reconfeature_hr_concatenated = Dh*w; %this is the reconstructed
end
reconfeature_hr = reconfeature_hr_concatenated(1:patcharea_hr);
%mnorm = sqrt(sum(mpatchfea.^2));
%the extracted feature, which is also dynamically change
%if mnorm > 1,
% y = mpatchfea./mnorm;
%else
% y = mpatchfea;
%end;
        %here, the Dl and the corresponding Dh will be dynamically changed
%w = SolveLasso(Dl, y, size(Dl, 2), 'nnlasso', [], lambda);
% w = feature_sign(Dl, y, lambda);
%if isempty(w),
%
%w = zeros(size(Dl, 2), 1);
%end;
%switch regres,
% case 'L1'
% if mnorm > 1,
% reconfeature_hr = Dh*w*mnorm;
% else
% reconfeature_hr = Dh*w; %this is the reconstructed
% end;
% case 'L2'
% idx = find(w);
% lsups = Dl(:, idx);
% hsups = Dh(:, idx);
% w = inv(lsups'*lsups)*lsups'*mpatchfea;
% reconfeature_hr = hsups*w;
% otherwise
% error('Unknown fitting!');
%end;
patchdiff_hr = reshape(reconfeature_hr, [patchsize_hr, patchsize_hr]);
reconfeature(r_hr:r_hr+patchsize_hr-1, c_hr:c_hr+patchsize_hr-1) = patchdiff_hr;
reconintensity_hr = patchdiff_hr + mmean;
img_hr(r_hr:r_hr+patchsize_hr-1, c_hr:c_hr+patchsize_hr-1)...
= img_hr(r_hr:r_hr+patchsize_hr-1, c_hr:c_hr+patchsize_hr-1) + reconintensity_hr;
nrml_mat(r_hr:r_hr+patchsize_hr-1, c_hr:c_hr+patchsize_hr-1)...
= nrml_mat(r_hr:r_hr+patchsize_hr-1, c_hr:c_hr+patchsize_hr-1) + 1;
filledmap(r_hr:r_hr+patchsize_hr-1, c_hr:c_hr+patchsize_hr-1) = 1;
end
end
fprintf('done!\n');
% fill the empty
%img_hr(1:zooming, :) = bimg_hr(1:zooming, :);
%img_hr(:, 1:zooming) = bimg_hr(:, 1:zooming);
%img_hr(end-zooming+1:end, :) = bimg_hr(end-zooming+1:end, :);
%img_hr(:, end-zooming+1:end) = bimg_hr(:, end-zooming+1:end);
nrml_mat(nrml_mat < 1) = 1;
img_hr = img_hr./nrml_mat;
|
github
|
Liusifei/Face-Hallucination-master
|
F4_OptimizationTerm.m
|
.m
|
Face-Hallucination-master/Code/Jianchao08/F4_OptimizationTerm.m
| 857 |
utf_8
|
15d8711641b99d84c55235c45fb90590
|
%Chih-Yuan Yang
%10/29/12
%solve the optimization problem
function termvalue = F4_OptimizationTerm(c, basisW,img_y,sigma)
[h_lr, w_lr] = size(img_y);
vector_hr = basisW * c;
zooming = round(sqrt(size(vector_hr,1)/(h_lr* w_lr)));
h_hr = h_lr * zooming;
w_hr = w_lr * zooming;
img_hr = reshape(vector_hr,[h_hr,w_hr]);
img_downsample = F19a_GenerateLRImage_GaussianKernel(img_hr,zooming,sigma);
diff = img_downsample - img_y;
term1 = sum(sum(diff.^2));
%what is the high pass filter? the difference of two Gaussian functions?
kernel1 = fspecial('gaussian',11,1.6);
kernel2 = fspecial('gaussian',11,1.2);
img_Gau1 = imfilter(img_hr,kernel1,'replicate');
img_Gau2 = imfilter(img_hr,kernel2,'replicate');
diff = img_Gau1 - img_Gau2;
term2 = sqrt(sum(sum(diff.^2)));
termvalue = term1 + term2;
end
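%Usage sketch (a minimal example of minimizing the two terms over the basis coefficients;
%basisW, img_y, sigma and the initial guess c0 are assumptions):
% c0 = zeros(size(basisW,2),1);
% objective = @(c) F4_OptimizationTerm(c, basisW, img_y, sigma);
% c_opt = fminsearch(objective, c0);
% zooming = round(sqrt(size(basisW,1)/numel(img_y)));
% img_hr = reshape(basisW*c_opt, size(img_y)*zooming);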
|
github
|
Liusifei/Face-Hallucination-master
|
F3b_L1SR.m
|
.m
|
Face-Hallucination-master/Code/Jianchao08/F3b_L1SR.m
| 7,366 |
utf_8
|
1e041e1c5630403bb833d728f9d3828b
|
%Chih-Yuan yang
%10/24/12
%remove a bug where the upsampled image size is not always 3x, but should be controlled by zooming
%remove a bug where the boundary can not be well filled in Jianchao's original code
%note: the format of the first argument lIm is double but the range is 0~255
%F3a: dynamically change the dictionary so that the overlapped region can be taken into consideration
%F3b: Jianchao does not mention how to handle the overlap; use the average and see how it looks
function [img_hr, ww] = F3b_L1SR(lIm, zooming, patch_size, overlap, Dh, Dl, lambda, regres)
% Use sparse representation as the prior for image super-resolution
% Usage
% [img_hr] = L1SR(lIm, zooming, patch_size, overlap, Dh, Dl, lambda)
%
% Inputs
% -lIm: low resolution input image, single channel, e.g.
% illuminance
% -zooming: zooming factor, e.g. 3
% -patch_size: patch size for the low resolution image
% -overlap: overlap among patches, e.g. 1
% -Dh: dictionary for the high resolution patches
% -Dl: dictionary for the low resolution patches
% -regres: 'L1' use the sparse representation directly to high
% resolution dictionary;
% 'L2' use the supports found by sparse representation
% and apply least square regression coefficients to high
% resolution dictionary.
% Outputs
% -img_hr: the recovered image, single channel
%
% Written by Jianchao Yang @ IFP UIUC
% April, 2009
% Webpage: http://www.ifp.illinois.edu/~jyang29/
% For any questions, please email me by [email protected]
%
% Reference
% Jianchao Yang, John Wright, Thomas Huang and Yi Ma. Image superresolution
% as sparse representation of raw image patches. IEEE Computer Society
% Conference on Computer Vision and Pattern Recognition (CVPR), 2008.
%
[lhg, lwd] = size(lIm);
hhg = lhg*zooming;
hwd = lwd*zooming;
mIm = imresize(lIm, 2,'bicubic');
[mhg, mwd] = size(mIm);
patchsize_hr = patch_size*zooming;
patcharea_hr = patchsize_hr^2;
mpatch_size = patch_size*2;
% extract gradient feature from lIm
hf1 = [-1,0,1];
vf1 = [-1,0,1]';
hf2 = [1,0,-2,0,1];
vf2 = [1,0,-2,0,1]';
lImG11 = conv2(mIm,hf1,'same');
lImG12 = conv2(mIm,vf1,'same');
lImG21 = conv2(mIm,hf2,'same');
lImG22 = conv2(mIm,vf2,'same');
lImfea(:,:,1) = lImG11;
lImfea(:,:,2) = lImG12;
lImfea(:,:,3) = lImG21;
lImfea(:,:,4) = lImG22;
%it is very weird that the index starts from 2; change it to one
lgridx = 1:patch_size-overlap:lwd-patch_size+1;
if lgridx(end) ~= lwd-patch_size+1
lgridx = [lgridx, lwd-patch_size+1]; %fill the last one
end
lgridy = 1:patch_size-overlap:lhg-patch_size+1;
if lgridy(end) ~= lhg-patch_size+1
lgridy = [lgridy, lhg-patch_size+1];
end
%the index of extract features from middle resolution
mgridx = (lgridx - 1)*2 + 1; %the horizontal sample coordinate of the x2 interpolated image
mgridy = (lgridy - 1)*2 + 1; %the vertical
% using linear programming to find sparse solution
bimg_hr = imresize(lIm, zooming, 'bicubic');
img_hr = zeros([hhg, hwd]);
%nrml_mat = zeros([hhg, hwd]);
h_hr = hhg;
w_hr = hwd;
filledmap = false(h_hr,w_hr);
reconfeature = zeros(h_hr,w_hr);
nrml_mat = zeros(h_hr,w_hr);
hgridx = (lgridx-1)*zooming + 1; %the destination in HR
hgridy = (lgridy-1)*zooming + 1;
disp('Processing the patches sequentially...');
count = 0;
% loop to recover each patch
for cidx = 1:length(mgridx) %cidx is the index of array, not the coordinate
for ridx = 1:length(mgridy)
%the index in LR is disregarded because it is irrelevant to feature extraction
c_mr = mgridx(cidx); %cidx is the coordinate of the index in middle resolution
r_mr = mgridy(ridx);
c_hr = hgridx(cidx);
r_hr = hgridy(ridx);
count = count + 1;
if ~mod(count, 100),
fprintf('.\n');
else
fprintf('.');
end;
%mpatch_size: the patchsize * 2
mpatch = mIm(r_mr:r_mr+mpatch_size-1, c_mr:c_mr+mpatch_size-1); %mIm: the middle image,
mmean = mean(mpatch(:));
        %here the feature changes: not only the gradient but also the filled HR intensity
mpatchfea = lImfea(r_mr:r_mr+mpatch_size-1, c_mr:c_mr+mpatch_size-1, :);
mpatchfea = mpatchfea(:);
%consider the overlapped region
processregion_image = false(h_hr,w_hr);
processregion_image(r_hr:r_hr+patchsize_hr-1,c_hr:c_hr+patchsize_hr-1) = true;
overlapregion_image = filledmap & processregion_image;
nonoverlapregion_image = processregion_image & ~overlapregion_image;
overlapregion_patch = overlapregion_image(r_hr:r_hr+patchsize_hr-1,c_hr:c_hr+patchsize_hr-1);
overlapregion_itensity = img_hr(r_hr:r_hr+patchsize_hr-1,c_hr:c_hr+patchsize_hr-1);
expectedfeature_patch = overlapregion_itensity - mmean;
expectedfeature_overlap_linearize = expectedfeature_patch(overlapregion_patch);
nonoverlapregion_patch = true(patchsize_hr) & ~overlapregion_patch;
%extract the intensity of filled hr
if nnz(overlapregion_image) == 0
overlapreconfeature = [];
Dh_partial = [];
else
overlapreconfeature = expectedfeature_overlap_linearize;
usedpixels = reshape(overlapregion_patch,[patcharea_hr,1]);
Dh_partial = Dh(usedpixels,:);
end
        %create the new y, new Dl, and new Dh, assuming beta is 1, the same as described in the paper
y_concatenated = cat(1,mpatchfea,overlapreconfeature);
Dl_concatenated = cat(1,Dl,Dh_partial);
Dh_concatenated = cat(1,Dh,Dh_partial);
%norm_y = sqrt(sum(mpatchfea.^2));
norm_concatenated = sqrt(sum(y_concatenated.^2));
if norm_concatenated > 1,
y_input = y_concatenated./norm_concatenated;
else
y_input = y_concatenated;
end;
w = SolveLasso(Dl_concatenated, y_input, size(Dl_concatenated, 2), 'lasso', [], lambda);
if isempty(w),
w = zeros(size(Dl, 2), 1);
end;
if norm_concatenated > 1,
reconfeature_hr_concatenated = Dh_concatenated*w*norm_concatenated;
else
reconfeature_hr_concatenated = Dh*w; %this is the reconstructed
end
reconfeature_hr = reconfeature_hr_concatenated(1:patcharea_hr);
patchdiff_hr = reshape(reconfeature_hr, [patchsize_hr, patchsize_hr]);
reconfeature(r_hr:r_hr+patchsize_hr-1, c_hr:c_hr+patchsize_hr-1) = patchdiff_hr;
reconintensity_hr = patchdiff_hr + mmean;
        %try without overlapping
%img_hr(r_hr:r_hr+patchsize_hr-1, c_hr:c_hr+patchsize_hr-1)...
% = img_hr(r_hr:r_hr+patchsize_hr-1, c_hr:c_hr+patchsize_hr-1) + reconintensity_hr;
img_hr(nonoverlapregion_image) = reconintensity_hr(nonoverlapregion_patch);
nrml_mat(nonoverlapregion_image) = nrml_mat(nonoverlapregion_image) + 1;
%nrml_mat(r_hr:r_hr+patchsize_hr-1, c_hr:c_hr+patchsize_hr-1)...
% = nrml_mat(r_hr:r_hr+patchsize_hr-1, c_hr:c_hr+patchsize_hr-1) + 1;
filledmap(r_hr:r_hr+patchsize_hr-1, c_hr:c_hr+patchsize_hr-1) = 1;
end
end
fprintf('done!\n');
nrml_mat(nrml_mat < 1) = 1;
img_hr = img_hr./nrml_mat;
|
github
|
Liusifei/Face-Hallucination-master
|
F5_sample_patches.m
|
.m
|
Face-Hallucination-master/Code/Jianchao08/F5_sample_patches.m
| 1,400 |
utf_8
|
4eb7f1f373a3a48d367c73424626b0bb
|
%Chih-Yuan Yang
%10/29/12
%Export this function from F1a_rnd_smp_dictionary
function [HP, LP] = F5_sample_patches(img_reconstructed, img_original, patchsize, patch_num)
[nrow, ncol] = size(img_reconstructed);
x = randperm(nrow-patchsize+1);
y = randperm(ncol-patchsize+1);
[X,Y] = meshgrid(x,y);
xrow = X(:);
ycol = Y(:);
xrow = xrow(1:patch_num);
ycol = ycol(1:patch_num);
% compute the first and second order gradients
hf1 = [-1,0,1];
vf1 = [-1,0,1]';
grad1 = conv2(img_reconstructed,hf1,'same');
grad2 = conv2(img_reconstructed,vf1,'same');
hf2 = [1,0,-2,0,1];
vf2 = [1,0,-2,0,1]';
grad3 = conv2(img_reconstructed,hf2,'same');
grad4 = conv2(img_reconstructed,vf2,'same');
featurelength_hr = patchsize^2;
featurelength_lr = 4*patchsize^2;
HP = zeros(featurelength_hr,patch_num);
LP = zeros(featurelength_lr,patch_num);
for idx = 1:patch_num,
hrow = xrow(idx);
hcol = ycol(idx);
Hpatch = img_original(hrow:hrow+patchsize-1,hcol:hcol+patchsize-1);
Lpatch1 = grad1(hrow:hrow+patchsize-1,hcol:hcol+patchsize-1);
Lpatch2 = grad2(hrow:hrow+patchsize-1,hcol:hcol+patchsize-1);
Lpatch3 = grad3(hrow:hrow+patchsize-1,hcol:hcol+patchsize-1);
Lpatch4 = grad4(hrow:hrow+patchsize-1,hcol:hcol+patchsize-1);
Lpatch = [Lpatch1(:);Lpatch2(:);Lpatch3(:);Lpatch4(:)];
HP(:,idx) = Hpatch(:)-mean(Hpatch(:));
LP(:,idx) = Lpatch;
end
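%Usage sketch (assumed patch size and sample count):
% [HP, LP] = F5_sample_patches(img_reconstructed, img_original, 5, 1000);
%HP holds mean-removed 5x5 intensity patches (25 x 1000) and LP the stacked first- and
%second-order gradient features (100 x 1000)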
|
github
|
Liusifei/Face-Hallucination-master
|
F1_rnd_smp_dictionary.m
|
.m
|
Face-Hallucination-master/Code/Jianchao08/F1_rnd_smp_dictionary.m
| 2,927 |
utf_8
|
eb2f78ed0c406dcc46fb5cc253b0b1ef
|
%Chih-Yuan Yang
%10/17/12
%F1: Change the original file to load '*.png'
function [Xh, Xl] = F1_rnd_smp_dictionary(tr_dir, patch_size, zooming, num_patch)
fpath = fullfile(tr_dir, '*.png');
img_dir = dir(fpath);
Xh = [];
Xl = [];
img_num = length(img_dir);
nums = zeros(1, img_num);
for num = 1:length(img_dir),
im = imread(fullfile(tr_dir, img_dir(num).name));
nums(num) = prod(size(im));
end;
nums = floor(nums*num_patch/sum(nums));
for ii = 1:img_num,
patch_num = nums(ii);
im = imread(fullfile(tr_dir, img_dir(ii).name));
[H, L] = sample_patches(im, patch_size, zooming, patch_num);
Xh = [Xh, H];
Xl = [Xl, L];
fprintf('Sampled...%d\n', size(Xh, 2));
end;
function [HP, LP] = sample_patches(im, patch_size, zooming, patch_num)
lz = 2;
if size(im, 3) == 3,
hIm = rgb2gray(im);
else
hIm = im;
end;
if rem(size(hIm,1),zooming)
nrow = floor(size(hIm,1)/zooming)*zooming;
hIm = hIm(1:nrow,:);
end;
if rem(size(hIm,2),zooming)
ncol = floor(size(hIm,2)/zooming)*zooming;
hIm = hIm(:,1:ncol);
end;
lIm = imresize(hIm,1/zooming);
[nrow, ncol] = size(lIm);
x = randperm(nrow-patch_size-lz-1);
y = randperm(ncol-patch_size-lz-1);
[X,Y] = meshgrid(x,y);
xrow = X(:);
ycol = Y(:);
xrow = xrow(1:patch_num);
ycol = ycol(1:patch_num);
% zoom the original image
lIm = imresize(lIm, lz,'bicubic');
hIm = double(hIm);
lIm = double(lIm);
H = zeros(zooming^2*patch_size^2,patch_num);
L = zeros(lz^2*4*patch_size^2,patch_num);
% compute the first and second order gradients
hf1 = [-1,0,1];
vf1 = [-1,0,1]';
lImG11 = conv2(lIm,hf1,'same');
lImG12 = conv2(lIm,vf1,'same');
hf2 = [1,0,-2,0,1];
vf2 = [1,0,-2,0,1]';
lImG21 = conv2(lIm,hf2,'same');
lImG22 = conv2(lIm,vf2,'same');
count = 1;
for pnum = 1:patch_num,
hrow = (xrow(pnum)-1)*zooming + 1;
hcol = (ycol(pnum)-1)*zooming + 1;
Hpatch = hIm(hrow:hrow+zooming*patch_size-1,hcol:hcol+zooming*patch_size-1);
lrow = (xrow(pnum)-1)*lz + 1;
lcol = (ycol(pnum)-1)*lz + 1;
% fprintf('(%d, %d), %d, [%d, %d]\n', lrow, lcol, lz*patch_size,
% size(lImG11));
Lpatch1 = lImG11(lrow:lrow+lz*patch_size-1,lcol:lcol+lz*patch_size-1);
Lpatch2 = lImG12(lrow:lrow+lz*patch_size-1,lcol:lcol+lz*patch_size-1);
Lpatch3 = lImG21(lrow:lrow+lz*patch_size-1,lcol:lcol+lz*patch_size-1);
Lpatch4 = lImG22(lrow:lrow+lz*patch_size-1,lcol:lcol+lz*patch_size-1);
Lpatch = [Lpatch1(:),Lpatch2(:),Lpatch3(:),Lpatch4(:)];
Lpatch = Lpatch(:);
HP(:,count) = Hpatch(:)-mean(Hpatch(:));
LP(:,count) = Lpatch;
count = count + 1;
Hpatch = Hpatch';
Lpatch1 = Lpatch1';
Lpatch2 = Lpatch2';
Lpatch3 = Lpatch3';
Lpatch4 = Lpatch4';
Lpatch = [Lpatch1(:),Lpatch2(:),Lpatch3(:),Lpatch4(:)];
HP(:,count) = Hpatch(:)-mean(Hpatch(:));
LP(:,count) = Lpatch(:);
count = count + 1;
end;
|
github
|
Liusifei/Face-Hallucination-master
|
L1SR.m
|
.m
|
Face-Hallucination-master/Code/Jianchao08/L1SR.m
| 4,642 |
utf_8
|
6a6cc4d000b3a0e623ebe08066cce5ad
|
%Chih-Yuan yang
%10/22/12
%remove a bug where the upsampled image size is not always 3x, but should be controlled by zooming
function [hIm, ww] = L1SR(lIm, zooming, patch_size, overlap, Dh, Dl, lambda, regres)
% Use sparse representation as the prior for image super-resolution
% Usage
% [hIm] = L1SR(lIm, zooming, patch_size, overlap, Dh, Dl, lambda)
%
% Inputs
% -lIm: low resolution input image, single channel, e.g.
% illuminance
% -zooming: zooming factor, e.g. 3
% -patch_size: patch size for the low resolution image
% -overlap: overlap among patches, e.g. 1
% -Dh: dictionary for the high resolution patches
% -Dl: dictionary for the low resolution patches
% -regres: 'L1' use the sparse representation directly to high
% resolution dictionary;
% 'L2' use the supports found by sparse representation
% and apply least square regression coefficients to high
% resolution dictionary.
% Outputs
% -hIm: the recovered image, single channel
%
% Written by Jianchao Yang @ IFP UIUC
% April, 2009
% Webpage: http://www.ifp.illinois.edu/~jyang29/
% For any questions, please email me by [email protected]
%
% Reference
% Jianchao Yang, John Wright, Thomas Huang and Yi Ma. Image superresolution
% as sparse representation of raw image patches. IEEE Computer Society
% Conference on Computer Vision and Pattern Recognition (CVPR), 2008.
%
[lhg, lwd] = size(lIm);
hhg = lhg*zooming;
hwd = lwd*zooming;
mIm = imresize(lIm, 2,'bicubic');
[mhg, mwd] = size(mIm);
hpatch_size = patch_size*zooming;
mpatch_size = patch_size*2;
% extract gradient feature from lIm
hf1 = [-1,0,1];
vf1 = [-1,0,1]';
hf2 = [1,0,-2,0,1];
vf2 = [1,0,-2,0,1]';
lImG11 = conv2(mIm,hf1,'same');
lImG12 = conv2(mIm,vf1,'same');
lImG21 = conv2(mIm,hf2,'same');
lImG22 = conv2(mIm,vf2,'same');
lImfea(:,:,1) = lImG11;
lImfea(:,:,2) = lImG12;
lImfea(:,:,3) = lImG21;
lImfea(:,:,4) = lImG22;
lgridx = 2:patch_size-overlap:lwd-patch_size;
lgridx = [lgridx, lwd-patch_size];
lgridy = 2:patch_size-overlap:lhg-patch_size;
lgridy = [lgridy, lhg-patch_size];
mgridx = (lgridx - 1)*2 + 1;
mgridy = (lgridy - 1)*2 + 1;
% using linear programming to find sparse solution
bhIm = imresize(lIm, zooming, 'bicubic');
hIm = zeros([hhg, hwd]);
nrml_mat = zeros([hhg, hwd]);
hgridx = (lgridx-1)*zooming + 1;
hgridy = (lgridy-1)*zooming + 1;
disp('Processing the patches sequentially...');
count = 0;
% loop to recover each patch
for xx = 1:length(mgridx),
for yy = 1:length(mgridy),
mcolx = mgridx(xx);
mrowy = mgridy(yy);
count = count + 1;
if ~mod(count, 100),
fprintf('.\n');
else
fprintf('.');
end;
mpatch = mIm(mrowy:mrowy+mpatch_size-1, mcolx:mcolx+mpatch_size-1);
mmean = mean(mpatch(:));
mpatchfea = lImfea(mrowy:mrowy+mpatch_size-1, mcolx:mcolx+mpatch_size-1, :);
mpatchfea = mpatchfea(:);
mnorm = sqrt(sum(mpatchfea.^2));
if mnorm > 1,
y = mpatchfea./mnorm;
else
y = mpatchfea;
end;
w = SolveLasso(Dl, y, size(Dl, 2), 'nnlasso', [], lambda);
% w = feature_sign(Dl, y, lambda);
if isempty(w),
w = zeros(size(Dl, 2), 1);
end;
switch regres,
case 'L1'
if mnorm > 1,
hpatch = Dh*w*mnorm;
else
hpatch = Dh*w;
end;
case 'L2'
idx = find(w);
lsups = Dl(:, idx);
hsups = Dh(:, idx);
w = inv(lsups'*lsups)*lsups'*mpatchfea;
hpatch = hsups*w;
otherwise
error('Unknown fitting!');
end;
hpatch = reshape(hpatch, [hpatch_size, hpatch_size]);
hpatch = hpatch + mmean;
hcolx = hgridx(xx);
hrowy = hgridy(yy);
hIm(hrowy:hrowy+hpatch_size-1, hcolx:hcolx+hpatch_size-1)...
= hIm(hrowy:hrowy+hpatch_size-1, hcolx:hcolx+hpatch_size-1) + hpatch;
nrml_mat(hrowy:hrowy+hpatch_size-1, hcolx:hcolx+hpatch_size-1)...
= nrml_mat(hrowy:hrowy+hpatch_size-1, hcolx:hcolx+hpatch_size-1) + 1;
end;
end;
fprintf('done!\n');
% fill the empty
hIm(1:3, :) = bhIm(1:3, :);
hIm(:, 1:3) = bhIm(:, 1:3);
hIm(end-2:end, :) = bhIm(end-2:end, :);
hIm(:, end-2:end) = bhIm(:, end-2:end);
nrml_mat(nrml_mat < 1) = 1;
hIm = hIm./nrml_mat;
| github | Liusifei/Face-Hallucination-master | F3c_L1SR_HRHR_Dictionary.m | .m | Face-Hallucination-master/Code/Jianchao08/F3c_L1SR_HRHR_Dictionary.m | 5,822 | utf_8 | a24f9782d6dab41b0c2b7d6e052d10cd |
%Chih-Yuan Yang
%10/24/12
%remove a bug where the upsampled image size is not always 3x; it should be controlled by zooming
%remove a bug where the boundary cannot be well filled in Jianchao's original code
%note: the format of the first argument img_recon is double but the range is 0~255
%F3a: dynamically change the dictionary so that the overlapped region can be taken into consideration
%F3b: Jianchao does not mention how to handle the overlap; the overlapped regions simply look averaged
%F3c: The new dictionary maps HR patches to HR patches, so the code in this file changes, too.
function img_hr = F3c_L1SR_HRHR_Dictionary(img_recon, patchsize, overlap, Dh, Dl, lambda, regres)
% Use sparse representation as the prior for image super-resolution
% Usage
% [img_hr] = L1SR(img_recon, zooming, patchsize, overlap, Dh, Dl, lambda)
%
% Inputs
% -img_recon: low resolution input image, single channel, e.g.
% illuminance
% -zooming: zooming factor, e.g. 3
% -patchsize: patch size for the low resolution image
% -overlap: overlap among patches, e.g. 1
% -Dh: dictionary for the high resolution patches
% -Dl: dictionary for the low resolution patches
% -regres: 'L1' use the sparse representation directly to high
% resolution dictionary;
% 'L2' use the supports found by sparse representation
% and apply least square regression coefficients to high
% resolution dictionary.
% Outputs
% -img_hr: the recovered image, single channel
%
% Written by Jianchao Yang @ IFP UIUC
% April, 2009
% Webpage: http://www.ifp.illinois.edu/~jyang29/
% For any questions, please email me by [email protected]
%
% Reference
% Jianchao Yang, John Wright, Thomas Huang and Yi Ma. Image superresolution
% as sparse representation of raw image patches. IEEE Computer Society
% Conference on Computer Vision and Pattern Recognition (CVPR), 2008.
%
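% The overlap handling below can be read as a single lasso problem on
% concatenated vectors (a sketch, assuming the overlap weight beta is 1 as
% noted further down):
%   minimize over w:  ||[Dl; Dh_partial]*w - [y_feature; y_overlap]||_2^2 + lambda*||w||_1
% where Dh_partial keeps only the rows of Dh that fall on already reconstructed
% (overlapped) pixels and y_overlap is the mean-removed intensity of those pixels.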
patcharea = patchsize^2;
[h_hr, w_hr] = size(img_recon);
hf1 = [-1,0,1];
vf1 = [-1,0,1]';
hf2 = [1,0,-2,0,1];
vf2 = [1,0,-2,0,1]';
grad1 = imfilter(img_recon,hf1,'conv','same','replicate');
grad2 = imfilter(img_recon,vf1,'conv','same','replicate');
grad3 = imfilter(img_recon,hf2,'conv','same','replicate');
grad4 = imfilter(img_recon,vf2,'conv','same','replicate');
gradall(:,:,1) = grad1;
gradall(:,:,2) = grad2;
gradall(:,:,3) = grad3;
gradall(:,:,4) = grad4;
gridx = 1:patchsize-overlap:w_hr-patchsize+1;
if gridx(end) ~= w_hr-patchsize+1
gridx = [gridx, w_hr-patchsize+1]; %fill the last one
end
gridy = 1:patchsize-overlap:h_hr-patchsize+1;
if gridy(end) ~= h_hr-patchsize+1
gridy = [gridy, h_hr-patchsize+1];
end
img_hr = zeros([h_hr, w_hr]);
filledmap = false(h_hr,w_hr);
reconfeature = zeros(h_hr,w_hr);
%the boundary
for cidx = 1:length(gridx) %cidx is the index of array, not the coordinate
fprintf('cidx = %d out of %d\n',cidx, length(gridx)); %sprintf alone discards the progress message
for ridx = 1:length(gridy)
c = gridx(cidx);
c1 = c + patchsize-1;
r = gridy(ridx);
r1 = r+ patchsize-1;
%mpatch_size: the patchsize * 2
patch_intensity = img_recon(r:r1, c:c1); %mIm: the middle image,
patch_intensity_mean = mean2(patch_intensity);
%here, the feature changes, not only the gradient, but also the filled HR intensity, too,
patch_feature = gradall(r:r1, c:c1, :);
vector_feature = patch_feature(:);
%consider the overlapped region
processregion_image = false(h_hr,w_hr);
processregion_image(r:r1,c:c1) = true;
overlapregion_image = filledmap & processregion_image;
nonoverlapregion_image = processregion_image & ~overlapregion_image;
overlapregion_patch = overlapregion_image(r:r1,c:c1);
overlapregion_itensity = img_hr(r:r1,c:c1);
expectedfeature_patch = overlapregion_itensity - patch_intensity_mean;
expectedfeature_overlap_linearize = expectedfeature_patch(overlapregion_patch);
nonoverlapregion_patch = true(patchsize) & ~overlapregion_patch;
%extract the intensity of filled hr
if nnz(overlapregion_image) == 0
overlapreconfeature = [];
Dh_partial = [];
else
overlapreconfeature = expectedfeature_overlap_linearize;
usedpixels = reshape(overlapregion_patch,[patcharea,1]);
Dh_partial = Dh(usedpixels,:);
end
%create the new y, new Dl, and new Dh, assuming beta is 1, the same as described in paper
y_concatenated = cat(1,vector_feature,overlapreconfeature);
Dl_concatenated = cat(1,Dl,Dh_partial);
Dh_concatenated = cat(1,Dh,Dh_partial);
%norm_y = sqrt(sum(mpatchfea.^2));
norm_concatenated = sqrt(sum(y_concatenated.^2));
if norm_concatenated > 1,
y_input = y_concatenated./norm_concatenated;
else
y_input = y_concatenated;
end;
w = SolveLasso(Dl_concatenated, y_input, size(Dl_concatenated, 2), 'lasso', [], lambda);
if isempty(w),
w = zeros(size(Dl, 2), 1);
end
if norm_concatenated > 1,
reconfeature_hr_concatenated = Dh_concatenated*w*norm_concatenated;
else
reconfeature_hr_concatenated = Dh*w; %this is the reconstructed
end
reconfeature_hr = reconfeature_hr_concatenated(1:patcharea);
patchdiff_hr = reshape(reconfeature_hr, [patchsize, patchsize]);
reconfeature(r:r1, c:c1) = patchdiff_hr;
reconintensity_hr = patchdiff_hr + patch_intensity_mean;
img_hr(nonoverlapregion_image) = reconintensity_hr(nonoverlapregion_patch);
filledmap(r:r1, c:c1) = 1;
end
end
fprintf('done!\n');
| github | Liusifei/Face-Hallucination-master | immaxproduct.m | .m | Face-Hallucination-master/Code/Liu07IJCV/immaxproduct.m | 4,991 | utf_8 | 63134b108b43613ee53f95414d8be926 |
% max-product belief propagation on image lattice (2D matrix)
% This implementation contains no product. The compatibility function
% should be changed to exp{-E} where E is energy or error. Here E is the
% input. Output is the MAP estimation of the graph
%
% This implementation is based on W.T. Freeman et al's IJCV paper
% http://www.merl.com/reports/docs/TR2000-05.pdf
%
% Input arguments:
% CO: [nstates x height x width] -- energy function phi, connecting to
% the observation
% CM_h: [nstates x nstates x height x (width-1)]--compatibility function
% psi, connecting horizontal neighbors. Each matrix is
% [index of left x index of right]
% CM_v: [nstates x nstates x (height-1) x width]--compatibility function
% psi, connecting vertical neighbors. Each matrix is
% [index of top x index of bottom]
% nIterations: scalar-- the number of iterations in BP. The default value
% is max(height,width)/2
% alpha: scalar-- It's better to smooth the message update in each
% iteration. Weight alpha is used to weight the new
% messages, and (1-alpha) is to weight the old ones.
%
% Output arguments:
% IDX [height x width]-- Bayesian MAP estimation of the graph, each
% element is an integer between 1 and nstates
%
% Ce Liu
% CSAIL,MIT, [email protected]
% Feb, 2006
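% Usage sketch (illustrative; the sizes and random energies below are assumptions):
%   nstates = 8; height = 16; width = 16;
%   CO   = rand(nstates,height,width);             %unary energies
%   CM_h = rand(nstates,nstates,height,width-1);   %horizontal pairwise energies
%   CM_v = rand(nstates,nstates,height-1,width);   %vertical pairwise energies
%   [IDX,En] = immaxproduct(CO,CM_h,CM_v,20,0.6);  %20 iterations, message damping 0.6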
function [IDX,En]=immaxproduct(CO,CM_h,CM_v,nIterations,alpha)
% get the dimensions of the unary term CO
[nstates,height,width]=size(CO);
% sanity check for the dimensions
if ~isequal(size(CM_h),[nstates nstates height width-1])
    error('The dimension of CM_h is incorrect!');
end
if ~isequal(size(CM_v),[nstates nstates height-1 width])
    error('The dimension of CM_v is incorrect!');
end
% default values for nIterations and alpha
if exist('nIterations','var')~=1
nIterations=round(max(height,width)/2);
end
if exist('alpha','var')~=1
alpha=0.6;
end
% compatibility function psi has to be permuted for bottom to top and right
% to left.
CMtb=reshape(CM_v,nstates,nstates*(height-1)*width);
CMbt=reshape(permute(CM_v,[2,1,3,4]),nstates,nstates*(height-1)*width);
CMlr=reshape(CM_h,nstates,nstates*height*(width-1));
CMrl=reshape(permute(CM_h,[2,1,3,4]),nstates,nstates*height*(width-1));
% initialize messages
% Mtb: top to bottom
% Mbt: bottom to top
% Mlr: left to right
% Mrl: right to left
Mtb=zeros(nstates,(height-1),width);
Mbt=Mtb;
Mlr=zeros(nstates,height,width-1);
Mrl=Mlr;
[foo,IDX]=min(CO,[],1);
En(1)=imgraphen(IDX,CO,CM_h,CM_v);
for i=1:nIterations
% update message from top to bottom
Mtb1=zeros(nstates,height-1,width);
Mtb1(:,2:end,:)=Mtb1(:,2:end,:)+Mtb(:,1:end-1,:);
Mtb1(:,:,1:end-1)=Mtb1(:,:,1:width-1)+Mrl(:,1:end-1,:);
Mtb1(:,:,2:end)=Mtb1(:,:,2:end)+Mlr(:,1:end-1,:);
Mtb1=Mtb1+CO(:,1:end-1,:);
Mtb1=kron(reshape(Mtb1,nstates,(height-1)*width),ones(1,nstates))+CMtb;
Mtb1=reshape(min(Mtb1,[],1),[nstates,height-1,width]);
% update message from bottom to top
Mbt1=zeros(nstates,height-1,width);
Mbt1(:,1:end-1,:)=Mbt1(:,1:end-1,:)+Mbt(:,2:end,:);
Mbt1(:,:,1:end-1)=Mbt1(:,:,1:end-1)+Mrl(:,2:end,:);
Mbt1(:,:,2:end)=Mbt1(:,:,2:end)+Mlr(:,2:end,:);
Mbt1=Mbt1+CO(:,2:end,:);
Mbt1=kron(reshape(Mbt1,nstates,(height-1)*width),ones(1,nstates))+CMbt;
Mbt1=reshape(min(Mbt1,[],1),[nstates,height-1,width]);
% update message from left to right
Mlr1=zeros(nstates,height,width-1);
Mlr1(:,:,2:end)=Mlr1(:,:,2:end)+Mlr(:,:,1:end-1);
Mlr1(:,1:end-1,:)=Mlr1(:,1:end-1,:)+Mbt(:,:,1:end-1);
Mlr1(:,2:end,:)=Mlr1(:,2:end,:)+Mtb(:,:,1:end-1);
Mlr1=Mlr1+CO(:,:,1:end-1);
Mlr1=kron(reshape(Mlr1,nstates,height*(width-1)),ones(1,nstates))+CMlr;
Mlr1=reshape(min(Mlr1,[],1),[nstates,height,width-1]);
% update message from right to left
Mrl1=zeros(nstates,height,width-1);
Mrl1(:,:,1:end-1)=Mrl1(:,:,1:end-1)+Mrl(:,:,2:end);
Mrl1(:,1:end-1,:)=Mrl1(:,1:end-1,:)+Mbt(:,:,2:end);
Mrl1(:,2:end,:)=Mrl1(:,2:end,:)+Mtb(:,:,2:end);
Mrl1=Mrl1+CO(:,:,2:end);
Mrl1=kron(reshape(Mrl1,nstates,height*(width-1)),ones(1,nstates))+CMrl;
Mrl1=reshape(min(Mrl1,[],1),[nstates,height,width-1]);
% reassign message
Mtb=Mtb1*alpha+Mtb*(1-alpha);
Mbt=Mbt1*alpha+Mbt*(1-alpha);
Mlr=Mlr1*alpha+Mlr*(1-alpha);
Mrl=Mrl1*alpha+Mrl*(1-alpha);
% Bayesian MAP inference
M=zeros(nstates,height,width);
M(:,2:end,:)=M(:,2:end,:)+Mtb;
M(:,1:end-1,:)=M(:,1:end-1,:)+Mbt;
M(:,:,2:end)=M(:,:,2:end)+Mlr;
M(:,:,1:end-1)=M(:,:,1:end-1)+Mrl;
M=M+CO;
[foo,IDX]=min(M,[],1);
En(i+1)=imgraphen(IDX,CO,CM_h,CM_v);
end
%figure;plot(En);
% step 2. Bayesian MAP inference
M=zeros(nstates,height,width);
M(:,2:end,:)=M(:,2:end,:)+Mtb;
M(:,1:end-1,:)=M(:,1:end-1,:)+Mbt;
M(:,:,2:end)=M(:,:,2:end)+Mlr;
M(:,:,1:end-1)=M(:,:,1:end-1)+Mrl;
M=M+CO;
[foo,IDX]=min(M,[],1);
IDX=squeeze(IDX);
| github | Liusifei/Face-Hallucination-master | Sigma2Kernel.m | .m | Face-Hallucination-master/Code/Liu07IJCV/Sigma2Kernel.m | 154 | utf_8 | 139abf654633c4b0fd0c1146ade54f2c |
%10/23/11
function Kernel = Sigma2Kernel(Gau_sigma)
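%The kernel support spans roughly +/- 3 standard deviations, and the window size is always odd so the kernel stays centered on a pixel.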
KernelSize = ceil(Gau_sigma * 3)*2+1;
Kernel = fspecial('gaussian',KernelSize,Gau_sigma);
end
| github | Liusifei/Face-Hallucination-master | F2_FindSimilarPatch_CSH.m | .m | Face-Hallucination-master/Code/Liu07IJCV/F2_FindSimilarPatch_CSH.m | 3,398 | utf_8 | b326c59409096453749c6affa18ca294 |
%Chih-Yuan Yang, EECS, UC Merced
%Last Modified: 08/23/12
%Find similar patches from many images using CSH
%Question: I do not need to use all patches cropped from the query image.
%Only parts are required. How to handle it?
%However, do not consider it now.
%And what I need are the coordinate and norm, not the patch. How to do it?
function F2_FindSimilarPatch_CSH(queryimage,middlebandimage,para) %para is assumed as a third argument because the body reads para.SourceFile, para.iistart, etc.
addpath('Utility');
addpath(genpath(fullfile('Lib','CSH_code_v2')));
%set randseed
seed = RandStream('mcg16807','Seed',0);
RandStream.setGlobalStream(seed)
im8 = imread( para.SourceFile);
im8y = rgb2gray(im8);
%find the CSH nn
width = 8; %assumed value: the original line read "width = ps" before ps was defined; should I use 4 or 8? Problem: CSH does not support patch size as 6
iteration = 5;
nnk = 20;
[lh lw d] = size(im8);
recin = 20;
normcurr = zeros(lh,lw,nnk);
A = im8y;
ps = width;
eh = lh-ps+1;
ew = lw-ps+1; %effective w
scanr = zeros(eh,ew,recin,4); %scan results, norm, ii, sr, sc
bigvalue = 255*width*width;
scanr(:,:,:,1) = bigvalue;
iistart = para.iistart;
iiend = para.iiend;
for ii=iistart:iiend
%if ii == 2
% keyboard
%end
fn = sprintf('%05d.png',ii);
fprintf('csh fn: %s\n',fn);
ime = imread(fullfile(para.DatasetFolder,fn)); %the channel number is 1; DatasetFolder is assumed to be a field of para since it is not defined elsewhere
B = ime;
idxhead = (ii-1)*nnk+1;
idxend = idxhead + nnk-1;
retres = CSH_nn(A,B,width,iteration,nnk); %x,y <==> c,r $retrived results
%dimension: w,h,2,nnk
for l = 1:nnk
colMap = retres(:,:,1,l);
rowMap = retres(:,:,2,l);
br_boundary_to_ignore = width -1;
%GetAnnError_GrayLevel_C1 is a funciton in CHS lab. It can compute very fast
normcurr(:,:,l) = GetAnnError_GrayLevel_C1(A,B,uint16(rowMap),uint16(colMap),uint16(0),uint16(br_boundary_to_ignore), uint16(width));
end
%update scanr
normcurrmin = min(normcurr,[],3);
checkmap = normcurrmin(1:eh,1:ew) < scanr(:,:,recin,1); %the last one has the largest norm
[rset cset] = find(checkmap);
setin = length(rset);
for j=1:setin
rl = rset(j);
cl = cset(j);
[normcurrsort ixcurr] = sort(normcurr(rl,cl,:));
for i=1:nnk
%update the smaller norm
compidx = recin-i+1;
if normcurrsort(i) < scanr(rl,cl,compidx,1)
%update
oriidx = ixcurr(i);
scanr(rl,cl,compidx,1) = normcurrsort(i);
scanr(rl,cl,compidx,2) = ii;
scanr(rl,cl,compidx,3) = retres(rl,cl,2,oriidx); %rowmap
scanr(rl,cl,compidx,4) = retres(rl,cl,1,oriidx); %colmap
else
break
end
end
%sort again the updated data
[normnewsort ixnew] = sort(scanr(rl,cl,:,1));
tempdata = scanr(rl,cl,:,:);
for i=1:recin
if ixnew(i) ~= i
scanr(rl,cl,i,:) = tempdata(1,1,ixnew(i),:);
end
end
end
end
sn = sprintf('%s_csh_scanr_%d_%d.mat',para.SaveName,iistart,iiend);
save(fullfile(para.tuningfolder,sn),'scanr');
end
| github | Liusifei/Face-Hallucination-master | imgraphen.m | .m | Face-Hallucination-master/Code/Liu07IJCV/imgraphen.m | 505 | utf_8 | fcf09b9d17018fce6c5fefc7e27f85ce |
% function to compute the energy of the graph defined on the image lattice
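% For a labeling given by IDX, the energy accumulated below is
%   E = sum_{i,j} CO(IDX(i,j),i,j) + sum_{i,j<nw} CM_h(IDX(i,j),IDX(i,j+1),i,j) + sum_{i<nh,j} CM_v(IDX(i,j),IDX(i+1,j),i,j)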
function E=imgraphen(IDX,CO,CM_h,CM_v)
IDX=squeeze(IDX);
[nh,nw]=size(IDX);
E=0;
for i=1:nh
    for j=1:nw
        p=IDX(i,j);
        % unary (observation) energy
        E=E+CO(p,i,j);
        % horizontal pairwise energy
        if j<nw
            q=IDX(i,j+1);
            E=E+CM_h(p,q,i,j);
        end
        % vertical pairwise energy
        if i<nh
            q=IDX(i+1,j);
            E=E+CM_v(p,q,i,j);
        end
    end
end
| github | Liusifei/Face-Hallucination-master | PP5_GenerateLRImage.m | .m | Face-Hallucination-master/Code/Liu07IJCV/PP5_GenerateLRImage.m | 1,026 | utf_8 | c33aa33f548f868504cdf06cbc52b199 |
%08/22/12
%Generate low-resolution images by blurring and subsampling
function PP5_GenerateLRImage()
%for test images
%sourcefolder = fullfile('Examples','TestFaces_Color');
%dstfolder = fullfile('Source','TestFaces');
%GenerateLRImagesWholeFolder(sourcefolder,dstfolder);
%for training images;
sourcefolder = fullfile('Examples','TrainingFaces_Gray');
dstfolder = fullfile('Examples','TrainingFaces_LRGray');
GenerateLRImagesWholeFolder(sourcefolder,dstfolder);
end
function GenerateLRImagesWholeFolder(sourcefolder,dstfolder)
if ~exist(dstfolder,'dir')
mkdir(dstfolder);
end
filelist = dir(fullfile(sourcefolder,'*.png'));
filenumber = length(filelist);
for i=1:filenumber
fn_image = filelist(i).name;
% fn_image_short = fn_image(1:end-4);
img_hr = im2double(imread(fullfile(sourcefolder,fn_image)));
img_lr = U3_GenerateLRImage_BlurSubSample(img_hr,4,1.6);
% fn_save = sprintf('%s.png',fn_image);
imwrite( img_lr, fullfile(dstfolder,fn_image));
end
end
| github | Liusifei/Face-Hallucination-master | mergePatches.m | .m | Face-Hallucination-master/Code/Liu07IJCV/mergePatches.m | 1,075 | utf_8 | 8d3cffb2b6e9bb2b635f4cf5851a5680 |
% function to merge all the patches onto one image
function im = mergePatches(Patches,overlapSize,width,height)
[nDim,h,w]=size(Patches);
patchDim = sqrt(nDim);
patchSize = (patchDim-1)/2;
intervalSize = patchSize*2-overlapSize;
% set the dimension for the output image
if exist('width','var')~=1
width = patchDim*w - (overlapSize+1)*(w-1);
end
if exist('height','var')~=1
height = patchDim*h - (overlapSize+1)*(h-1);
end
im = zeros([height,width]);
weight = zeros([height,width]);
% set the individual mask
foo = linspace(0,1,overlapSize+2);
foo = foo(2:end-1);
mask = [foo ones(1,patchDim-overlapSize*2) foo(end:-1:1)];
mask = kron(mask,mask');
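% e.g. patchDim = 7 and overlapSize = 2 give the 1D profile [1/3 2/3 1 1 1 2/3 1/3],
% so patches blend linearly across the overlap and carry full weight in the center.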
for i=1:h
for j=1:w
x = (j-1)*intervalSize+patchSize+1;
y = (i-1)*intervalSize+patchSize+1;
xindex = x-patchSize:x+patchSize;
yindex = y-patchSize:y+patchSize;
im(yindex,xindex) = im(yindex,xindex) + reshape(Patches(:,i,j),[patchDim,patchDim]).*mask;
weight(yindex,xindex) = weight(yindex,xindex) + mask;
end
end
index = (im==0); %pixels not covered by any patch
im = im./weight;
im(index)=0; %reset uncovered pixels so the 0/0 division does not leave NaN
| github | Liusifei/Face-Hallucination-master | U2_GenerateImagebbnn.m | .m | Face-Hallucination-master/Code/Liu07IJCV/U2_GenerateImagebbnn.m | 894 | utf_8 | 5944a59cbd22e978e8e8c8f45d36d84b |
%date of last edit: 08/10/12, rename Zooming to zooming
function U2_GenerateImagebbnn(img_y, para)
s = para.zooming;
imb255 = imresize(img_y,s) * 255;
imgnn= imresize(img_y,s,'nearest');
imwrite(imb255/255,fullfile(para.tuningfolder,[para.SaveName '_bb.png']));
imwrite(imgnn,fullfile(para.tuningfolder,[para.SaveName '_nn.png']));
imgbb = imresize(img_y,s);
imgbb(imgbb > 1) = 1;
imgbb(imgbb < 0) = 0;
img_out_yiq = imgbb;
img_out_yiq(:,:,2:3) = para.IQLayer_upsampled;
img_out_rgb = YIQ2RGB(img_out_yiq);
fn = sprintf('%s_bbcolor.png',para.SaveName);
imwrite(img_out_rgb, fullfile(para.tuningfolder, fn));
img_out_yiq = imgnn;
img_out_yiq(:,:,2:3) = para.IQLayer_upsampled;
img_out_rgb = YIQ2RGB(img_out_yiq);
fn = sprintf('%s_nncolor.png',para.SaveName);
imwrite(img_out_rgb, fullfile(para.tuningfolder, fn));
end
| github | Liusifei/Face-Hallucination-master | F15_ComputePSNR_RMSE_SSIM_DIIVINE.m | .m | Face-Hallucination-master/Code/Liu07IJCV/F15_ComputePSNR_RMSE_SSIM_DIIVINE.m | 1,065 | utf_8 | b4057938b919964cf07810f9cf5fca3d |
%08/09/12
%Chih-Yuan Yang, EECS, UC Merced
%Compute PSNR, SSIM, DIIVINE
%If you encounter a MATLAB error: Undefined function 'buildSFpyr' for input arguments of type 'double'.
%You need to install libraries which are dependencies of DIIVINE
%Steerable Pyramid Toolbox, Download from: http://www.cns.nyu.edu/~eero/steerpyr/
% action ==>compile mex in MEX subfolder, copy the pointOp.mexw64 to matlabPyrTools folder
% ==>addpath('matlabPyrTools')
%LibSVM package for MATLAB, Download from: http://www.csie.ntu.edu.tw/~cjlin/libsvm/
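%Usage sketch (file names are illustrative assumptions; para is unused at present, see the note below):
% img_test = im2double(imread('result.png'));
% img_gt = im2double(imread('groundtruth.png'));
% [PSNR SSIM DIIVINE] = F15_ComputePSNR_RMSE_SSIM_DIIVINE(img_test, img_gt, para);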
function [PSNR SSIM DIIVINE] = F15_ComputePSNR_RMSE_SSIM_DIIVINE(img_test, img_gt, para)
%in the future, pass the library paths through para
addpath(fullfile('Lib','SSIM'))
addpath(genpath(fullfile('Lib','matlabPyrTools')));
addpath(genpath(fullfile('Lib','libsvm-3.12')));
addpath(fullfile('Lib','DIIVINE')) %DIIVINE
img_test255 = img_test * 255;
img_gt255 = img_gt * 255;
PSNR = measerr(img_gt255, img_test255);
SSIM = ssim( img_gt255, img_test255);
DIIVINE = divine(img_test255);
end
| github | tanvir002700/Coursera-Introduction-to-Programming-with-MATLAB-master | roman2.m | .m | Coursera-Introduction-to-Programming-with-MATLAB-master/Lab08/roman2.m | 1,861 | utf_8 | a5646f76cbee04af3dd08f0530e14808 |
function A = roman2 (R)
% This function initially assumes the supplied input is valid. If it is not valid,
% the result, when converted back to Roman, will differ from the original input.
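% Worked example: R = 'XC' is scanned backward. 'C' gives V = 100 (added, A = 100);
% then 'X' gives V = 10, which is less than the last value 100, so it is
% subtracted (A = 90). The final check confirms A2R(90) returns 'XC', so 90 is kept.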
Roman = 'IVXLC';
Arabic = {1 5 10 50 100};
LastValue = 0; % V is value, LastValue is last V
A = uint16(0);
for k = length(R):-1:1 % scan backward from last character
P = strfind(Roman,R(k)); % search list of valid Roman characters
if isempty(P) % if invalid
V = 0; % value is zero
else % else
V = Arabic{P}; % value is Arabic equivalent
end
if V<LastValue % if subtractive situation
A = A-V; % subtract this value
else % else
A = A+V; % add this value
end % (in either case, V=0 did nothing)
LastValue = V; % update last value used
end
if A>=400 || ~strcmp(R,A2R(A)) % if out of range or result does
A = uint16(0); % not generate original string
end % send back zero
end
% convert Arabic to Roman
function R = A2R (A)
% Remove subtraction by including secondary moduli.
Roman = {'I' 'IV' 'V' 'IX' 'X' 'XL' 'L' 'XC' 'C'};
Arabic = {1 4 5 9 10 40 50 90 100};
R = ''; k = 9;
while k>0 % remove larger moduli first
if A>=Arabic{k} % if value is at least current modulus
A = A-Arabic{k}; % remove modulus from value
R = [R Roman{k}]; % append Roman character
else % else
k = k-1; % consider next smaller modulus
end
end
end
| github | libDirectional/libDirectional-master | mvnpdfbench.m | .m | libDirectional-master/examples/mvnpdfbench.m | 2,336 | utf_8 | aaba49ec24c7c9856decfeeb2472618e |
% This function performs a benchmark of the mvnpdf and the mvnpdffast
% functions.
function mvnpdfbench
figure(1)
benchmark();
figure(2)
benchmarkSingle();
%bench(1) ;
%benchSingle(1);
end
function benchmark
dims = 1:20;
evals = 1000000;
repeats = 10;
t1 = zeros(1,repeats);
t2 = zeros(1,repeats);
times = zeros(2, length(dims));
for i = dims
for j=1:repeats
[t1(j), t2(j)]= bench(i, evals);
end
times (1,i) = median(t1);
times (2,i) = median(t2);
end
benchPlot(dims, times, sprintf('time for %i evaluations with one call', evals))
end
function benchmarkSingle
dims = 1:20;
evals = 1000;
repeats = 10;
t1 = zeros(1,repeats);
t2 = zeros(1,repeats);
times = zeros(2, length(dims));
for i = dims
for j=1:repeats
[t1(j), t2(j)]= benchSingle(i, evals);
end
times (1,i) = median(t1);
times (2,i) = median(t2);
end
benchPlot(dims, times, sprintf('time for %i evaluations with individual calls', evals))
end
function benchPlot(dims, times, titletext)
subplot(2,1,1);
plot(dims, times(1,:));
hold on
plot(dims, times(2,:));
hold off
legend('mvnpdf', 'mvnpdffast', 'location', 'northwest')
xlabel('dimension');
ylabel('time (s)');
title(titletext);
subplot(2,1,2);
plot(dims, times(1,:)./times(2,:));
hold on
plot(dims, times(1,:)./times(1,:), 'b--');
hold off
xlabel('dimension');
ylabel('speedup');
end
function [t1, t2]= bench(n, evals)
% n-D
rng default
mu = rand(1,n);
C = rand(n,n);
C=C*C';
x = rand(evals,n);
tic
r = mvnpdf(x, mu, C); %#ok<NASGU>
t1 = toc;
tic
r = mvnpdffast(x, mu, C); %#ok<NASGU>
t2 = toc;
fprintf('%fs\n%fs\n', t1, t2);
end
function [t1, t2]= benchSingle(n,evals)
% n-D
rng default
mu = rand(1,n);
C = rand(n,n);
C=C*C';
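% C*C' is symmetric positive semidefinite, hence a valid covariance matrix;
% with random entries it is almost surely full rank for these small n.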
x = rand(evals,n);
tic
for i=1:size(x,1)
    r = mvnpdf(x(i,:), mu, C); %#ok<NASGU> %evaluate one query point per call
end
t1 = toc;
tic
for i=1:size(x,1)
    r = mvnpdffast(x(i,:), mu, C); %#ok<NASGU> %evaluate one query point per call
end
t2 = toc;
fprintf('%fs\n%fs\n', t1, t2);
end
|