# sbi_generator.py
# Duplicated from pyesonekyaw/faceforgerydetection (commit 2f99bb4).
from imutils import face_utils
import numpy as np
import random
import albumentations as alb
from .DeepFakeMask import dfl_full, extended, components, facehull
import cv2
def IoUfrom2bboxes(boxA, boxB):
    """Return the intersection-over-union of two [x0, y0, x1, y1] boxes.

    Coordinates are treated as inclusive pixel indices, hence the +1 terms
    when converting corner differences into widths/heights.
    """
    # Corners of the overlapping rectangle (empty if the boxes are disjoint).
    left = max(boxA[0], boxB[0])
    top = max(boxA[1], boxB[1])
    right = min(boxA[2], boxB[2])
    bottom = min(boxA[3], boxB[3])
    overlap = max(0, right - left + 1) * max(0, bottom - top + 1)
    area_a = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
    area_b = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
    # Union = sum of both areas minus the doubly-counted overlap.
    return overlap / float(area_a + area_b - overlap)
def reorder_landmark(landmark):
    """Rearrange the 13 extra points (indices 68..80) of an 81-point landmark set.

    Mutates `landmark` in place: rows 68..80 are overwritten with the rows
    previously found at the indices below, then the same array is returned.
    """
    new_order = (77, 75, 76, 68, 69, 70, 71, 80, 72, 73, 79, 74, 78)
    reordered = np.zeros((13, 2))
    for dst, src in enumerate(new_order):
        reordered[dst] = landmark[src]
    landmark[68:] = reordered
    return landmark
def get_dlib_landmarks(inp, dlib_face_detector, dlib_face_predictor):
    """Detect faces with dlib and return their landmark arrays, ordered
    from the largest detected face (by landmark bounding-box area) to the
    smallest.

    Raises:
        Exception: when no face is detected in `inp`.
    """
    faces = dlib_face_detector(inp, 1)
    if len(faces) == 0:
        raise Exception("No faces detected")
    all_landmarks = []
    areas = []
    for face in faces:
        pts = face_utils.shape_to_np(dlib_face_predictor(inp, face))
        x0, y0 = pts[:, 0].min(), pts[:, 1].min()
        x1, y1 = pts[:, 0].max(), pts[:, 1].max()
        areas.append((x1 - x0) * (y1 - y0))
        all_landmarks.append(pts)
    stacked = np.concatenate(all_landmarks).reshape((len(areas),) + pts.shape)
    # Sort faces by bounding-box area, biggest first.
    return stacked[np.argsort(np.array(areas))[::-1]]
def get_retina_bbox(inp, face_detector):
    """Run a RetinaFace-style detector and return per-face point arrays.

    Each returned entry stacks the two bbox corners on top of the detected
    landmark points; faces are ordered from largest bbox area to smallest.
    """
    faces = face_detector.predict_jsons(inp)
    per_face = []
    areas = []
    for face in faces:
        x0, y0, x1, y1 = face['bbox']
        pts = np.array([[x0, y0], [x1, y1]] + face['landmarks'])
        areas.append((x1 - x0) * (y1 - y0))
        per_face.append(pts)
    stacked = np.concatenate(per_face).reshape((len(areas),) + pts.shape)
    # Sort faces by bounding-box area, biggest first.
    return stacked[np.argsort(np.array(areas))[::-1]]
def random_get_hull(landmark, img, face_region):
    """Build a face-region mask (values scaled into [0, 1]) for the landmarks.

    `face_region` selects the hull style: 1 = dfl_full, 2 = extended,
    3 = components, anything else = convex facehull.
    """
    pts = landmark.astype('int32')
    region = int(face_region)
    if region == 1:
        hull = dfl_full(landmarks=pts, face=img, channels=3)
    elif region == 2:
        hull = extended(landmarks=pts, face=img, channels=3)
    elif region == 3:
        hull = components(landmarks=pts, face=img, channels=3)
    else:
        hull = facehull(landmarks=pts, face=img, channels=3)
    # Masks come back in 0..255; normalise to 0..1.
    return hull.mask / 255
class RandomDownScale(alb.core.transforms_interface.ImageOnlyTransform):
    """Albumentations transform: downscale by a random factor (2x or 4x,
    nearest neighbour), then resize back to the input size (bilinear),
    simulating a low-resolution source image.
    """

    def apply(self, img, **params):
        return self.randomdownscale(img)

    def randomdownscale(self, img):
        """Downscale `img` by a random factor and restore its original size."""
        # NOTE: the original code also defined an unused `keep_ratio` flag;
        # it has been removed, behavior is unchanged.
        keep_input_shape = True
        H, W, C = img.shape
        ratio_list = [2, 4]
        # Pick the downscale factor uniformly from ratio_list.
        r = ratio_list[np.random.randint(len(ratio_list))]
        img_ds = cv2.resize(img, (int(W / r), int(H / r)),
                            interpolation=cv2.INTER_NEAREST)
        if keep_input_shape:
            # Resize back so downstream code sees the original resolution.
            img_ds = cv2.resize(img_ds, (W, H), interpolation=cv2.INTER_LINEAR)
        return img_ds
def get_source_transforms():
    """Augmentations applied to the source image before self-blending:
    colour jitter, followed by either random downscaling or sharpening.
    """
    colour_jitter = alb.Compose([
        alb.RGBShift((-20, 20), (-20, 20), (-20, 20), p=0.3),
        alb.HueSaturationValue(
            hue_shift_limit=(-0.3, 0.3),
            sat_shift_limit=(-0.3, 0.3),
            val_shift_limit=(-0.3, 0.3),
            p=1),
        alb.RandomBrightnessContrast(
            brightness_limit=(-0.1, 0.1),
            contrast_limit=(-0.1, 0.1),
            p=1),
    ], p=1)
    quality_change = alb.OneOf([
        RandomDownScale(p=1),
        alb.Sharpen(alpha=(0.2, 0.5), lightness=(0.5, 1.0), p=1),
    ], p=1)
    return alb.Compose([colour_jitter, quality_change], p=1.)
def randaffine(img, mask):
    """Jointly apply a small random affine warp to (img, mask), then an
    elastic deformation to the mask alone.

    Only the mask picks up the elastic warp (the image keeps the
    affine-only result), so the blend boundary wobbles relative to the face.
    """
    affine = alb.Affine(
        translate_percent={'x': (-0.03, 0.03), 'y': (-0.015, 0.015)},
        scale=[0.95, 1 / 0.95],
        fit_output=False,
        p=1)
    elastic = alb.ElasticTransform(
        alpha=50,
        sigma=7,
        alpha_affine=0,
        p=1,
    )
    warped = affine(image=img, mask=mask)
    img, mask = warped['image'], warped['mask']
    # Deliberately discard the elastically-warped image: the elastic
    # transform perturbs the mask only.
    mask = elastic(image=img, mask=mask)['mask']
    return img, mask
def get_blend_mask(mask):
    """Randomly soften a 2-D mask for blending.

    The mask is resized to a random intermediate resolution, eroded via a
    hard-thresholded Gaussian blur, blurred again with a random kernel and
    sigma, then resized back. Returns an (H, W, 1) float mask whose maximum
    is normalised to 1.
    """
    H, W = mask.shape
    # Random working resolution so the effective blur scale varies.
    rand_h = np.random.randint(192, 257)
    rand_w = np.random.randint(192, 257)
    resized = cv2.resize(mask, (rand_w, rand_h))
    # Odd kernel sizes in [5, 25].
    k1 = random.randrange(5, 26, 2)
    k2 = random.randrange(5, 26, 2)
    blurred = cv2.GaussianBlur(resized, (k1, k1), 0)
    blurred = blurred / blurred.max()
    # Hard threshold: keep only the fully-saturated core (erosion effect).
    blurred[blurred < 1] = 0
    blurred = cv2.GaussianBlur(blurred, (k2, k2), np.random.randint(5, 46))
    blurred = blurred / blurred.max()
    blurred = cv2.resize(blurred, (W, H))
    return blurred.reshape(blurred.shape + (1,))
def dynamic_blend(source, target, mask, blending_type,
                  mixup_ratio=(0.25, 0.5, 0.75, 1, 1, 1)):
    """Blend `source` into `target` inside the (blurred) `mask` region.

    Args:
        source: source image (H, W, C).
        target: target image (H, W, C).
        mask: 2-D blending mask; it is randomly blurred internally.
        blending_type: "Poisson" for OpenCV seamless cloning, anything else
            for alpha mix-up blending.
        mixup_ratio: candidate alpha values for mix-up blending; one is
            drawn uniformly at random. (A tuple now, so the default can
            never be accidentally mutated across calls — the old mutable
            list default was an anti-pattern.)

    Returns:
        (img_blended, mask_blured): the blended image and the blurred mask
        actually used for blending.
    """
    mask_blured = get_blend_mask(mask)
    if blending_type == "Poisson":
        # Poisson (seamless-clone) blending centred on the mask's bbox.
        b_mask = (mask_blured * 255).astype(np.uint8)
        l, t, w, h = cv2.boundingRect(b_mask)
        center = (int(l + w / 2), int(t + h / 2))
        img_blended = cv2.seamlessClone(source, target, b_mask, center,
                                        cv2.NORMAL_CLONE)
    else:
        # Mix-up blending with a randomly chosen alpha; work on a scaled
        # copy so the returned mask_blured stays unscaled.
        blend_ratio = mixup_ratio[np.random.randint(len(mixup_ratio))]
        alpha = mask_blured * blend_ratio
        img_blended = alpha * source + (1 - alpha) * target
    return img_blended, mask_blured
def get_transforms():
    """Photometric augmentations applied identically to a pair of images
    (the second image is supplied via the `image1` target).
    """
    return alb.Compose(
        [
            alb.RGBShift((-20, 20), (-20, 20), (-20, 20), p=0.3),
            alb.HueSaturationValue(
                hue_shift_limit=(-0.3, 0.3),
                sat_shift_limit=(-0.3, 0.3),
                val_shift_limit=(-0.3, 0.3),
                p=0.3),
            alb.RandomBrightnessContrast(
                brightness_limit=(-0.3, 0.3),
                contrast_limit=(-0.3, 0.3),
                p=0.3),
            alb.ImageCompression(quality_lower=40, quality_upper=100, p=0.5),
        ],
        # Replay the same transform on a second image passed as `image1`.
        additional_targets={'image1': 'image'},
        p=1.)
def self_blending(img, landmark, blending_type, face_region):
    """Produce a self-blended pseudo-fake from a single real image.

    A photometrically transformed copy of `img` (the "source") is warped and
    blended back onto the original inside a face-region hull mask.

    Returns:
        (img, img_blended, mask, mask_copy,
         source_before_affine, mask_before_affine,
         source_after_affine, mask_after_affine)
        — the intermediate source/mask pairs are kept for visualisation.
    """
    # Occasionally drop the extra (68+) landmarks to vary the hull shape.
    if np.random.rand() < 0.25:
        landmark = landmark[:68]
    mask = random_get_hull(landmark, img, face_region)
    if mask.shape[-1] == 3:
        mask = mask[:, :, 0]
    hull_mask = mask
    source = get_source_transforms()(image=img.copy().astype(np.uint8))['image']
    pre_affine_source, pre_affine_mask = source, mask
    source, mask = randaffine(source, mask)
    post_affine_source, post_affine_mask = source, mask
    img_blended, mask = dynamic_blend(source, img, mask, blending_type)
    return (img.astype(np.uint8), img_blended.astype(np.uint8), mask,
            hull_mask, pre_affine_source, pre_affine_mask,
            post_affine_source, post_affine_mask)