NihalGazi commited on
Commit
aeb2829
Β·
verified Β·
1 Parent(s): 5f19da0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +40 -20
app.py CHANGED
@@ -23,6 +23,24 @@ except (ImportError, AttributeError):
23
  # --- Helper Functions ---
24
 
25
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26
  def cut_and_feather(img, feather):
27
  h, w = img.shape[:2]
28
  mask = np.zeros((h, w), dtype=np.uint8)
@@ -138,50 +156,51 @@ def process_video(video_path, ref_img, trans, res, step, feather):
138
  total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
139
  # prepare reference cut
140
  ref_bgr = cv2.cvtColor(ref_img, cv2.COLOR_RGB2BGR)
141
- ref_cut, ref_mask, ref_box = cut_and_feather(ref_bgr, feather)
142
- if ref_cut is None:
143
- return None, None, None, None
144
- # resize ref for morph
145
  ref_morph = cv2.resize(ref_cut, (res, res))
146
  # setup output video writer for original size
147
  w_orig = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
148
  h_orig = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
149
  tmp_vid = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4').name
150
  out_vid = cv2.VideoWriter(tmp_vid, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w_orig, h_orig))
151
- first_mod_frame = None
152
  first_morphed = None
 
153
  for i in range(total):
154
  ret, frame = cap.read()
155
  if not ret:
156
  break
157
- # cut current frame face
158
- cut, mask_roi, box = cut_and_feather(frame, feather)
159
  if mask_roi is None:
160
  out_vid.write(frame)
161
  continue
162
  x, y, bw, bh = box
163
- # prepare for morph
164
- cut_resized = cv2.resize(cut, (res, res))
165
  alpha = float(np.clip((trans + 1) / 2, 0, 1))
166
- morphed = morph_faces(cut_resized, ref_morph, alpha, res, step)
167
  # store first outputs
168
  if i == 0:
169
- first_mod_frame = cut_resized.copy()
170
  first_morphed = morphed.copy()
171
- # composite morphed back to original frame
172
  mor_back = cv2.resize(morphed, (bw, bh))
173
- # normalize mask_roi
174
  mask_norm = (mask_roi.astype(np.float32) / 255.0)[..., None]
175
- # blend
176
  region = frame[y:y+bh, x:x+bw].astype(np.float32)
177
  blended = region * (1 - mask_norm) + mor_back.astype(np.float32) * mask_norm
178
  frame[y:y+bh, x:x+bw] = blended.astype(np.uint8)
179
  out_vid.write(frame)
 
180
  cap.release()
181
  out_vid.release()
182
  # convert for Gradio
183
- return tmp_vid, cv2.cvtColor(first_mod_frame, cv2.COLOR_BGR2RGB), cv2.cvtColor(ref_cut, cv2.COLOR_BGR2RGB), cv2.cvtColor(first_morphed, cv2.COLOR_BGR2RGB)
184
-
 
 
185
 
186
  # --- Gradio App ---
187
  css = """video, img { object-fit: contain !important; }"""
@@ -197,19 +216,20 @@ with gr.Blocks(css=css) as iface:
197
  trans = gr.Slider(-1.0, 1.0, value=0.0, step=0.05, label='Transition Level')
198
  btn = gr.Button('Generate Morph πŸš€')
199
  out_vid = gr.Video(label='Morphed Video')
200
- out_mod_frame = gr.Image(label='Modified Frame[0]')
201
- out_mod_ref = gr.Image(label='Modified Reference')
202
  out_morph0 = gr.Image(label='Morphed Frame[0]')
203
 
204
  btn.click(
205
  fn=process_video,
206
  inputs=[vid, ref, trans, res, step, feather],
207
- outputs=[out_vid, out_mod_frame, out_mod_ref, out_morph0],
208
  show_progress=True
209
  )
210
- gr.Markdown("---\n*Composite the morphed face back onto the original video.*")
211
 
212
  if __name__ == '__main__':
213
  iface.launch(debug=True)
214
 
215
 
 
 
23
  # --- Helper Functions ---
24
 
25
 
26
def get_face_mask_box(img, feather):
    """Locate the face in a BGR image and return a feathered mask ROI plus its box.

    Runs the module-level MediaPipe ``face_mesh`` on the image, fills the
    convex hull of the detected landmarks into a binary mask, optionally
    Gaussian-blurs the mask edges ("feathering"), and crops the mask to the
    hull's bounding rectangle.

    Args:
        img: BGR image array of shape (H, W, 3).
        feather: feather radius in pixels; values <= 0 disable blurring.

    Returns:
        Tuple ``(mask_roi, (x, y, w, h))`` on success, where ``mask_roi`` is
        the uint8 mask cropped to the bounding box, or ``(None, None)`` when
        no face landmarks are detected.
    """
    height, width = img.shape[:2]
    face_mask = np.zeros((height, width), dtype=np.uint8)
    detection = face_mesh.process(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    if not detection.multi_face_landmarks:
        return None, None
    landmarks = detection.multi_face_landmarks[0].landmark
    points = np.array(
        [(int(lm.x * width), int(lm.y * height)) for lm in landmarks],
        np.int32,
    )
    hull = cv2.convexHull(points)
    cv2.fillConvexPoly(face_mask, hull, 255)
    x, y, bw, bh = cv2.boundingRect(hull)
    if feather > 0:
        # GaussianBlur requires an odd kernel size; 2k+1 guarantees that.
        radius = int(feather)
        ksize = radius * 2 + 1
        face_mask = cv2.GaussianBlur(face_mask, (ksize, ksize), 0)
    return face_mask[y:y + bh, x:x + bw], (x, y, bw, bh)
42
+
43
+
44
  def cut_and_feather(img, feather):
45
  h, w = img.shape[:2]
46
  mask = np.zeros((h, w), dtype=np.uint8)
 
156
  total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
157
  # prepare reference cut
158
  ref_bgr = cv2.cvtColor(ref_img, cv2.COLOR_RGB2BGR)
159
+ # cut and feather only reference for morph input
160
+ _, ref_mask_box = get_face_mask_box(ref_bgr, feather)
+ if ref_mask_box is None:
+ return None, None, None, None
161
+ ref_cut = ref_bgr[ref_mask_box[1]:ref_mask_box[1]+ref_mask_box[3], ref_mask_box[0]:ref_mask_box[0]+ref_mask_box[2]]
 
162
  ref_morph = cv2.resize(ref_cut, (res, res))
163
  # setup output video writer for original size
164
  w_orig = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
165
  h_orig = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
166
  tmp_vid = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4').name
167
  out_vid = cv2.VideoWriter(tmp_vid, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w_orig, h_orig))
168
+ first_frame_central = None
169
  first_morphed = None
170
+
171
  for i in range(total):
172
  ret, frame = cap.read()
173
  if not ret:
174
  break
175
+ # get mask and box for current frame (no cropping for morph input)
176
+ mask_roi, box = get_face_mask_box(frame, feather)
177
  if mask_roi is None:
178
  out_vid.write(frame)
179
  continue
180
  x, y, bw, bh = box
181
+ # morph using centralized full frame
182
+ frame_resized = cv2.resize(frame, (res, res))
183
  alpha = float(np.clip((trans + 1) / 2, 0, 1))
184
+ morphed = morph_faces(frame_resized, ref_morph, alpha, res, step)
185
  # store first outputs
186
  if i == 0:
187
+ first_frame_central = frame_resized.copy()
188
  first_morphed = morphed.copy()
189
+ # composite morphed face back to original position
190
  mor_back = cv2.resize(morphed, (bw, bh))
 
191
  mask_norm = (mask_roi.astype(np.float32) / 255.0)[..., None]
 
192
  region = frame[y:y+bh, x:x+bw].astype(np.float32)
193
  blended = region * (1 - mask_norm) + mor_back.astype(np.float32) * mask_norm
194
  frame[y:y+bh, x:x+bw] = blended.astype(np.uint8)
195
  out_vid.write(frame)
196
+
197
  cap.release()
198
  out_vid.release()
199
  # convert for Gradio
200
+ return (tmp_vid,
201
+ cv2.cvtColor(first_frame_central, cv2.COLOR_BGR2RGB),
202
+ cv2.cvtColor(ref_cut, cv2.COLOR_BGR2RGB),
203
+ cv2.cvtColor(first_morphed, cv2.COLOR_BGR2RGB))
204
 
205
  # --- Gradio App ---
206
  css = """video, img { object-fit: contain !important; }"""
 
216
  trans = gr.Slider(-1.0, 1.0, value=0.0, step=0.05, label='Transition Level')
217
  btn = gr.Button('Generate Morph πŸš€')
218
  out_vid = gr.Video(label='Morphed Video')
219
+ out_central = gr.Image(label='Centralized Frame[0]')
220
+ out_ref = gr.Image(label='Cropped Reference')
221
  out_morph0 = gr.Image(label='Morphed Frame[0]')
222
 
223
  btn.click(
224
  fn=process_video,
225
  inputs=[vid, ref, trans, res, step, feather],
226
+ outputs=[out_vid, out_central, out_ref, out_morph0],
227
  show_progress=True
228
  )
229
+ gr.Markdown("---\n*Now using full frame for morph input and compositing only reference overlay.*")
230
 
231
  if __name__ == '__main__':
232
  iface.launch(debug=True)
233
 
234
 
235
+