Update app.py

app.py CHANGED
@@ -28,20 +28,26 @@ def cut_and_feather(img, feather):
     mask = np.zeros((h, w), dtype=np.uint8)
     results = face_mesh.process(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
     if not results.multi_face_landmarks:
-        return np.zeros_like(img)
-
-    pts = np.array([(int(p.x*w), int(p.y*h)) for p in lm.landmark], np.int32)
+        return np.zeros_like(img), None, None
+    pts = np.array([(int(p.x * w), int(p.y * h)) for p in results.multi_face_landmarks[0].landmark], np.int32)
     hull = cv2.convexHull(pts)
     cv2.fillConvexPoly(mask, hull, 255)
+    # bounding box
+    x, y, bw, bh = cv2.boundingRect(hull)
     # feather mask
     k = int(feather)
-    if k>0:
+    if k > 0:
         mask = cv2.GaussianBlur(mask, (k*2+1, k*2+1), 0)
-
-
-
-
-
+    # extract face ROI
+    face_roi = img[y:y+bh, x:x+bw]
+    mask_roi = mask[y:y+bh, x:x+bw]
+    # apply mask
+    fg = cv2.bitwise_and(face_roi, face_roi, mask=mask_roi)
+    # prepare alpha
+    alpha = mask_roi.astype(np.float32) / 255.0
+    # composite onto transparent background same size
+    out = (fg.astype(np.float32) * alpha[..., None]).astype(np.uint8)
+    return out, mask_roi, (x, y, bw, bh)
 
 def get_landmarks(img, landmark_step=1):
     if img is None or face_mesh is None:
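For context on what the reworked `cut_and_feather` does: it rasterizes the landmark hull into a hard mask, softens the edge with a Gaussian blur of kernel size `2k+1`, and premultiplies the cropped face by the resulting soft alpha. A minimal self-contained sketch of that feather-and-premultiply step, with a synthetic circle standing in for the MediaPipe hull (the circle and the flat grey image are illustrative stand-ins, not part of the app):

```python
import cv2
import numpy as np

h, w, feather = 200, 200, 10
img = np.full((h, w, 3), 128, np.uint8)        # stand-in for the face crop
mask = np.zeros((h, w), np.uint8)
cv2.circle(mask, (100, 100), 60, 255, -1)      # stand-in for the convex hull

# Feather exactly as in the patch: odd Gaussian kernel of size 2k+1.
k = int(feather)
if k > 0:
    mask = cv2.GaussianBlur(mask, (k * 2 + 1, k * 2 + 1), 0)

# Premultiply by the soft alpha so the cutout fades to black at the edge.
alpha = mask.astype(np.float32) / 255.0
out = (img.astype(np.float32) * alpha[..., None]).astype(np.uint8)
print(out.shape, mask.min(), mask.max())       # (200, 200, 3) 0 255
```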
@@ -130,44 +136,66 @@ def process_video(video_path, ref_img, trans, res, step, feather):
     cap = cv2.VideoCapture(video_path)
     fps = cap.get(cv2.CAP_PROP_FPS) or 24
     total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
-    # prepare
+    # prepare reference cut
     ref_bgr = cv2.cvtColor(ref_img, cv2.COLOR_RGB2BGR)
-
-
-
+    ref_cut, ref_mask, ref_box = cut_and_feather(ref_bgr, feather)
+    if ref_cut is None:
+        return None, None, None, None
+    # resize ref for morph
+    ref_morph = cv2.resize(ref_cut, (res, res))
+    # setup output video writer for original size
+    w_orig = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+    h_orig = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
     tmp_vid = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4').name
-    out_vid = cv2.VideoWriter(tmp_vid, cv2.VideoWriter_fourcc(*'mp4v'), fps, (
-    first_mod_frame = None
+    out_vid = cv2.VideoWriter(tmp_vid, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w_orig, h_orig))
+    first_mod_frame = None
+    first_morphed = None
     for i in range(total):
         ret, frame = cap.read()
-        if not ret:
-
-
-
-
-
-
-
-
-
-
-
+        if not ret:
+            break
+        # cut current frame face
+        cut, mask_roi, box = cut_and_feather(frame, feather)
+        if mask_roi is None:
+            out_vid.write(frame)
+            continue
+        x, y, bw, bh = box
+        # prepare for morph
+        cut_resized = cv2.resize(cut, (res, res))
+        alpha = float(np.clip((trans + 1) / 2, 0, 1))
+        morphed = morph_faces(cut_resized, ref_morph, alpha, res, step)
+        # store first outputs
+        if i == 0:
+            first_mod_frame = cut_resized.copy()
+            first_morphed = morphed.copy()
+        # composite morphed back to original frame
+        mor_back = cv2.resize(morphed, (bw, bh))
+        # normalize mask_roi
+        mask_norm = (mask_roi.astype(np.float32) / 255.0)[..., None]
+        # blend
+        region = frame[y:y+bh, x:x+bw].astype(np.float32)
+        blended = region * (1 - mask_norm) + mor_back.astype(np.float32) * mask_norm
+        frame[y:y+bh, x:x+bw] = blended.astype(np.uint8)
+        out_vid.write(frame)
+    cap.release()
+    out_vid.release()
+    # convert for Gradio
+    return tmp_vid, cv2.cvtColor(first_mod_frame, cv2.COLOR_BGR2RGB), cv2.cvtColor(ref_cut, cv2.COLOR_BGR2RGB), cv2.cvtColor(first_morphed, cv2.COLOR_BGR2RGB)
 
 
 # --- Gradio App ---
 css = """video, img { object-fit: contain !important; }"""
 with gr.Blocks(css=css) as iface:
-    gr.Markdown("# Enhanced Face Morph with
+    gr.Markdown("# Enhanced Face Morph with Composite Placement")
     with gr.Row():
         vid = gr.Video(label='Input Video')
         ref = gr.Image(type='numpy', label='Reference Face Image')
     with gr.Row():
-        res = gr.Dropdown([256,384,512,768], value=512, label='Resolution')
-        step = gr.Slider(1,4, value=1, step=1, label='Landmark Sub-sampling')
-        feather = gr.Slider(0,50, value=10, step=1, label='Feather Radius')
-        trans = gr.Slider(-1.0,1.0, value=0.0, step=0.05, label='Transition Level')
+        res = gr.Dropdown([256, 384, 512, 768], value=512, label='Morph Resolution')
+        step = gr.Slider(1, 4, value=1, step=1, label='Landmark Sub-sampling')
+        feather = gr.Slider(0, 50, value=10, step=1, label='Feather Radius')
+        trans = gr.Slider(-1.0, 1.0, value=0.0, step=0.05, label='Transition Level')
     btn = gr.Button('Generate Morph π')
-    progress = gr.Progress()
     out_vid = gr.Video(label='Morphed Video')
     out_mod_frame = gr.Image(label='Modified Frame[0]')
     out_mod_ref = gr.Image(label='Modified Reference')
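The core of the new compositing path is the per-pixel linear blend `region * (1 - mask_norm) + mor_back * mask_norm`, which keeps the original frame pixels where the feathered mask is 0 and takes morphed pixels where it is 255. A toy check of the formula on a 2x2 region (all values illustrative):

```python
import numpy as np

region  = np.full((2, 2, 3), 100, np.float32)   # original frame ROI
morphed = np.full((2, 2, 3), 200, np.float32)   # morphed face, resized to ROI
mask    = np.array([[0, 128], [255, 64]], np.uint8)

# Same normalization and blend as in process_video.
mask_norm = (mask.astype(np.float32) / 255.0)[..., None]
blended = region * (1 - mask_norm) + morphed * mask_norm
print(blended[..., 0].round(1))
# [[100.  150.2]
#  [200.  125.1]]
```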
@@ -179,8 +207,9 @@ with gr.Blocks(css=css) as iface:
         outputs=[out_vid, out_mod_frame, out_mod_ref, out_morph0],
         show_progress=True
     )
-    gr.Markdown("---\n*
+    gr.Markdown("---\n*Composite the morphed face back onto the original video.*")
 
-if __name__=='__main__':
+if __name__ == '__main__':
     iface.launch(debug=True)
 
+
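One detail worth noting: the Transition Level slider spans [-1.0, 1.0], but the blend weight handed to `morph_faces` is clipped to [0, 1], so the loop remaps it with `np.clip((trans + 1) / 2, 0, 1)`. A quick check of that mapping:

```python
import numpy as np

for trans in (-1.0, -0.5, 0.0, 0.5, 1.0):
    alpha = float(np.clip((trans + 1) / 2, 0, 1))  # same remap as the patch
    print(f"trans={trans:+.2f} -> alpha={alpha:.2f}")
# trans=-1.00 -> alpha=0.00
# trans=-0.50 -> alpha=0.25
# trans=+0.00 -> alpha=0.50
# trans=+0.50 -> alpha=0.75
# trans=+1.00 -> alpha=1.00
```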