Update app.py
app.py CHANGED

@@ -24,7 +24,7 @@ except (ImportError, AttributeError):
 
 
 
-def get_face_mask_box(img, feather, padding=0):
+def get_face_mask_box(img, feather_pct, padding_pct):
     h, w = img.shape[:2]
     mask = np.zeros((h, w), dtype=np.uint8)
     results = face_mesh.process(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
@@ -34,16 +34,18 @@ def get_face_mask_box(img, feather, padding=0):
     hull = cv2.convexHull(pts)
     cv2.fillConvexPoly(mask, hull, 255)
     x, y, bw, bh = cv2.boundingRect(hull)
-    #
-
-
-
-
+    # calculate padding and feather in pixels
+    pad = int(max(bw, bh) * padding_pct)
+    x_pad = max(x - pad, 0)
+    y_pad = max(y - pad, 0)
+    x2 = min(x + bw + pad, w)
+    y2 = min(y + bh + pad, h)
     mask_roi = mask[y_pad:y2, x_pad:x2]
-    # inside feather
-    if
-        k = int(
-
+    # inside feather: kernel proportional to face size
+    if feather_pct > 0 and mask_roi.size > 0:
+        k = int(min(mask_roi.shape[0], mask_roi.shape[1]) * feather_pct)
+        if k % 2 == 0: k += 1
+        mask_roi = cv2.GaussianBlur(mask_roi, (k, k), 0)
     return mask_roi, (x_pad, y_pad, x2 - x_pad, y2 - y_pad)
 
 
@@ -166,14 +168,14 @@ def morph_faces(img1, img2, alpha, dim, step):
     return (out*255).astype(np.uint8)
 
 
-def process_video(video_path, ref_img, trans, res, step, feather, padding):
+def process_video(video_path, ref_img, trans, res, step, feather_pct, padding_pct):
     cap = cv2.VideoCapture(video_path)
     fps = cap.get(cv2.CAP_PROP_FPS) or 24
     total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
 
     # Prepare masked reference
     ref_bgr = cv2.cvtColor(ref_img, cv2.COLOR_RGB2BGR)
-    mask_ref, ref_box = get_face_mask_box(ref_bgr,
+    mask_ref, ref_box = get_face_mask_box(ref_bgr, feather_pct, padding_pct)
     if mask_ref is None:
         return None, None, None, None
     x_r, y_r, w_r, h_r = ref_box
@@ -196,26 +198,21 @@ def process_video(video_path, ref_img, trans, res, step, feather, padding):
     for i in range(total):
         ret, frame = cap.read()
         if not ret: break
-        mask_roi, box = get_face_mask_box(frame,
+        mask_roi, box = get_face_mask_box(frame, feather_pct, padding_pct)
         if mask_roi is None:
             out_vid.write(frame)
             continue
         x, y, w, h = box
-        # Crop and resize original ROI
         crop = frame[y:y+h, x:x+w]
         crop_resized = cv2.resize(crop, (res, res))
-        # Morph
         alpha = float(np.clip((trans+1)/2, 0, 1))
         mor = morph_faces(crop_resized, ref_morph, alpha, res, step)
-        # Store first
         if i == 0:
             first_crop = crop_resized.copy()
             first_ref = ref_morph.copy()
             first_mask = cv2.resize(mask_roi, (res, res), interpolation=cv2.INTER_LINEAR)
             first_morphed = mor.copy()
-        # Resize morphed back
         mor_back = cv2.resize(mor, (w, h))
-        # Composite with shape mask
         mask_n = (mask_roi.astype(np.float32)[..., None] / 255.0)
         region = frame[y:y+h, x:x+w].astype(np.float32)
         blended = region * (1-mask_n) + mor_back.astype(np.float32) * mask_n
@@ -224,13 +221,12 @@ def process_video(video_path, ref_img, trans, res, step, feather, padding):
 
     cap.release(); out_vid.release()
 
-    # Apply mask to first_morphed for preview
     if first_morphed is not None and first_mask is not None:
         mask_n0 = first_mask.astype(np.float32)[..., None] / 255.0
         first_morphed = (first_morphed.astype(np.float32) * mask_n0).astype(np.uint8)
     else:
-        first_morphed = np.zeros((res, res,
-    first_crop = first_crop if first_crop is not None else np.zeros((res, res,3),np.uint8)
+        first_morphed = np.zeros((res, res,3),dtype=np.uint8)
+    first_crop = first_crop if first_crop is not None else np.zeros((res, res,3),dtype=np.uint8)
     first_ref = first_ref if first_ref is not None else ref_morph.copy()
 
     # Convert for Gradio
@@ -239,15 +235,15 @@ def process_video(video_path, ref_img, trans, res, step, feather, padding):
 # --- Gradio App ---
 css = """video, img { object-fit: contain !important; }"""
 with gr.Blocks(css=css) as iface:
-    gr.Markdown("# Morph with Face-Shaped Composite and Padding")
+    gr.Markdown("# Morph with Face-Shaped Composite and Padding Percentage")
     with gr.Row():
         vid = gr.Video(label='Input Video')
         ref = gr.Image(type='numpy', label='Reference Image')
     with gr.Row():
         res = gr.Dropdown([256,384,512,768], value=512, label='Resolution')
         step = gr.Slider(1,4,value=4,step=1,label='Landmark Sub-sampling')
-        feather = gr.Slider(0,
-        padding = gr.Slider(0,
+        feather = gr.Slider(0.0,0.5,value=0.1,step=0.01,label='Feather (%)')
+        padding = gr.Slider(0.0,0.5,value=0.24,step=0.01,label='Padding (%)')
         trans = gr.Slider(-1.0,1.0,value=-0.35,step=0.05,label='Transition Level')
     btn = gr.Button('Generate Morph π')
     out_vid = gr.Video(label='Morphed Video')
@@ -261,7 +257,7 @@ with gr.Blocks(css=css) as iface:
         outputs=[out_vid,out_crop,out_ref,out_morph],
         show_progress=True
     )
-    gr.Markdown("---\n*
+    gr.Markdown("---\n*Default values set and feather/padding are now relative percentages.*")
 
 if __name__=='__main__':
     iface.launch(debug=True)
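For context, the functional change in this commit is that feather and padding are no longer fixed pixel values but fractions of the detected face box, so the crop margin and edge blur scale with face size and resolution. Below is a minimal, self-contained sketch of that logic, isolated from MediaPipe so it can be run on any binary mask; the helper name pad_and_feather, the synthetic square mask, and the toy inputs are illustrative only, while the arithmetic mirrors the new get_face_mask_box body.

```python
# Sketch (not part of the commit) of the percentage-based padding/feather logic.
import cv2
import numpy as np

def pad_and_feather(mask, face_box, feather_pct=0.1, padding_pct=0.24):
    """Crop a padded ROI around face_box and feather the mask edge."""
    h, w = mask.shape[:2]
    x, y, bw, bh = face_box
    # Padding is a fraction of the larger face dimension, clamped to the frame.
    pad = int(max(bw, bh) * padding_pct)
    x_pad, y_pad = max(x - pad, 0), max(y - pad, 0)
    x2, y2 = min(x + bw + pad, w), min(y + bh + pad, h)
    mask_roi = mask[y_pad:y2, x_pad:x2]
    # Feather with a Gaussian kernel proportional to the ROI's smaller side;
    # the kernel size must be odd for cv2.GaussianBlur.
    if feather_pct > 0 and mask_roi.size > 0:
        k = int(min(mask_roi.shape[:2]) * feather_pct)
        if k % 2 == 0:
            k += 1
        mask_roi = cv2.GaussianBlur(mask_roi, (k, k), 0)
    return mask_roi, (x_pad, y_pad, x2 - x_pad, y2 - y_pad)

# Toy usage: a square "face" mask in a 512x512 frame.
mask = np.zeros((512, 512), dtype=np.uint8)
cv2.rectangle(mask, (180, 180), (330, 330), 255, -1)
roi, box = pad_and_feather(mask, (180, 180, 150, 150))
print(box, roi.shape)  # (144, 144, 222, 222) (222, 222)
```

Note that although the new sliders are labelled "(%)", their 0.0 to 0.5 values are used directly as fractions (0.1 is roughly a 10% feather kernel, 0.24 roughly a 24% padding margin of the face box).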