NihalGazi committed on
Commit 53352ed · verified
1 Parent(s): 4c06029

Update app.py

Files changed (1)
  1. app.py +80 -100
app.py CHANGED
@@ -21,11 +21,8 @@ except (ImportError, AttributeError):
     face_mesh = None
 
 # --- Helper Functions ---
-
 def get_landmarks(img, landmark_step=1):
-    if img is None:
-        return None
-    if face_mesh is None:
+    if img is None or face_mesh is None:
         return None
     img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
     try:
@@ -36,132 +33,115 @@ def get_landmarks(img, landmark_step=1):
         return None
     landmarks_mp = results.multi_face_landmarks[0]
     h, w, _ = img.shape
-    full_landmarks = np.array([(pt.x * w, pt.y * h) for pt in landmarks_mp.landmark], dtype=np.float32)
-    landmarks = full_landmarks[::landmark_step] if landmark_step > 1 else full_landmarks
+    pts = np.array([(pt.x * w, pt.y * h) for pt in landmarks_mp.landmark], dtype=np.float32)
+    landmarks = pts[::landmark_step] if landmark_step > 1 else pts
     if not np.all(np.isfinite(landmarks)):
         return None
-    corners = np.array([[0, 0], [w - 1, 0], [0, h - 1], [w - 1, h - 1]], dtype=np.float32)
+    corners = np.array([[0,0],[w-1,0],[0,h-1],[w-1,h-1]], dtype=np.float32)
     return np.vstack((landmarks, corners))
 
-
 def calculate_delaunay_triangles(rect, points):
-    if points is None or len(points) < 3:
+    if points is None or len(points)<3:
         return []
-    points[:, 0] = np.clip(points[:, 0], rect[0], rect[0] + rect[2] - 1)
-    points[:, 1] = np.clip(points[:, 1], rect[1], rect[1] + rect[3] - 1)
+    points[:,0] = np.clip(points[:,0], rect[0], rect[0]+rect[2]-1)
+    points[:,1] = np.clip(points[:,1], rect[1], rect[1]+rect[3]-1)
     subdiv = cv2.Subdiv2D(rect)
     inserted = {}
-    for i, p in enumerate(points):
-        tup = (int(p[0]), int(p[1]))
-        if tup not in inserted:
+    for i,p in enumerate(points):
+        key = (int(p[0]), int(p[1]))
+        if key not in inserted:
             try:
-                subdiv.insert(tup)
-                inserted[tup] = i
+                subdiv.insert(key)
+                inserted[key]=i
             except cv2.error:
                 continue
-    triangles = subdiv.getTriangleList()
-    delaunay = []
-    for t in triangles:
-        coords = [(int(t[0]), int(t[1])), (int(t[2]), int(t[3])), (int(t[4]), int(t[5]))]
-        if all(rect[0] <= x < rect[0] + rect[2] and rect[1] <= y < rect[1] + rect[3] for x, y in coords):
-            idxs = [inserted.get(c) for c in coords]
-            if all(i is not None for i in idxs) and len(set(idxs)) == 3:
+    tris = subdiv.getTriangleList()
+    delaunay=[]
+    for t in tris:
+        coords=[(int(t[0]),int(t[1])),(int(t[2]),int(t[3])),(int(t[4]),int(t[5]))]
+        if all(rect[0]<=x<rect[0]+rect[2] and rect[1]<=y<rect[1]+rect[3] for x,y in coords):
+            idxs=[inserted.get(c) for c in coords]
+            if all(i is not None for i in idxs) and len(set(idxs))==3:
                 delaunay.append(idxs)
     return delaunay
 
-
-def warp_triangle(img1, img2, t1, t2):
-    if len(t1) != 3 or len(t2) != 3:
+def warp_triangle(img1,img2,t1,t2):
+    if len(t1)!=3 or len(t2)!=3:
         return
-    r1 = cv2.boundingRect(np.float32([t1]))
-    r2 = cv2.boundingRect(np.float32([t2]))
-    if r1[2] == 0 or r1[3] == 0 or r2[2] == 0 or r2[3] == 0:
+    r1=cv2.boundingRect(np.float32([t1])); r2=cv2.boundingRect(np.float32([t2]))
+    if r1[2]==0 or r1[3]==0 or r2[2]==0 or r2[3]==0:
         return
-    t1_rect = [(t1[i][0] - r1[0], t1[i][1] - r1[1]) for i in range(3)]
-    t2_rect = [(t2[i][0] - r2[0], t2[i][1] - r2[1]) for i in range(3)]
-    mask = np.zeros((r2[3], r2[2], 3), dtype=np.float32)
-    cv2.fillConvexPoly(mask, np.int32(t2_rect), (1.0, 1.0, 1.0), 16, 0)
-    img1_rect = img1[r1[1]:r1[1]+r1[3], r1[0]:r1[0]+r1[2]]
-    if img1_rect.size == 0:
-        return
-    warp_mat = cv2.getAffineTransform(np.float32(t1_rect), np.float32(t2_rect))
-    img2_rect = cv2.warpAffine(img1_rect, warp_mat, (r2[2], r2[3]), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)
-    img2_rect *= mask
-    y1, y2 = r2[1], r2[1] + r2[3]
-    x1, x2 = r2[0], r2[0] + r2[2]
-    img2[y1:y2, x1:x2] = img2[y1:y2, x1:x2] * (1 - mask) + img2_rect
-
+    t1r=[(t1[i][0]-r1[0],t1[i][1]-r1[1]) for i in range(3)]
+    t2r=[(t2[i][0]-r2[0],t2[i][1]-r2[1]) for i in range(3)]
+    mask=np.zeros((r2[3],r2[2],3),dtype=np.float32)
+    cv2.fillConvexPoly(mask,np.int32(t2r),(1,1,1),16)
+    src=img1[r1[1]:r1[1]+r1[3],r1[0]:r1[0]+r1[2]]
+    M=cv2.getAffineTransform(np.float32(t1r),np.float32(t2r))
+    warped=cv2.warpAffine(src,M,(r2[2],r2[3]),flags=cv2.INTER_LINEAR,borderMode=cv2.BORDER_REFLECT_101)
+    warped*=mask
+    y1,y2=r2[1],r2[1]+r2[3]; x1,x2=r2[0],r2[0]+r2[2]
+    img2[y1:y2,x1:x2]=img2[y1:y2,x1:x2]*(1-mask)+warped
 
-def morph_faces(img1_orig, img2_orig, alpha, resize_dim, landmark_step):
-    if img1_orig is None or img2_orig is None:
-        return np.zeros((resize_dim, resize_dim, 3), dtype=np.uint8)
-    img1 = cv2.resize(img1_orig, (resize_dim, resize_dim))
-    img2 = cv2.resize(img2_orig, (resize_dim, resize_dim))
-    landmarks1 = get_landmarks(img1, landmark_step)
-    landmarks2 = get_landmarks(img2, landmark_step)
-    if landmarks1 is None or landmarks2 is None or landmarks1.shape != landmarks2.shape:
-        return cv2.addWeighted(img1, 1-alpha, img2, alpha, 0)
-    morphed_pts = (1-alpha)*landmarks1 + alpha*landmarks2
-    rect = (0, 0, resize_dim, resize_dim)
-    tris = calculate_delaunay_triangles(rect, morphed_pts)
+def morph_faces(img1, img2, alpha, dim, step):
+    if img1 is None or img2 is None:
+        return np.zeros((dim,dim,3),dtype=np.uint8)
+    a=cv2.resize(img1,(dim,dim)); b=cv2.resize(img2,(dim,dim))
+    l1=get_landmarks(a,step); l2=get_landmarks(b,step)
+    if l1 is None or l2 is None or l1.shape!=l2.shape:
+        return cv2.addWeighted(a,1-alpha,b,alpha,0)
+    m=(1-alpha)*l1+alpha*l2
+    tris=calculate_delaunay_triangles((0,0,dim,dim),m)
     if not tris:
-        return cv2.addWeighted(img1, 1-alpha, img2, alpha, 0)
-    img1_f = img1.astype(np.float32)/255.0
-    img2_f = img2.astype(np.float32)/255.0
-    w1 = np.zeros_like(img1_f)
-    w2 = np.zeros_like(img2_f)
+        return cv2.addWeighted(a,1-alpha,b,alpha,0)
+    A=a.astype(np.float32)/255; B=b.astype(np.float32)/255
+    Wa=np.zeros_like(A); Wb=np.zeros_like(B)
    for ids in tris:
-        t1 = landmarks1[ids]; t2 = landmarks2[ids]; tm = morphed_pts[ids]
-        warp_triangle(img1_f, w1, t1, tm)
-        warp_triangle(img2_f, w2, t2, tm)
-    morph = (1-alpha)*w1 + alpha*w2
-    return (morph*255).astype(np.uint8)
+        warp_triangle(A,Wa,l1[ids],m[ids]); warp_triangle(B,Wb,l2[ids],m[ids])
+    out=(1-alpha)*Wa+alpha*Wb
+    return (out*255).astype(np.uint8)
 
-
-def process_video(video_path, target_img, transition_level, resolution, landmark_sampling):
+def process_video(video_path, target_img, trans, res, step, progress=gr.Progress()):
     if video_path is None or target_img is None:
-        dummy = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4").name
-        out = cv2.VideoWriter(dummy, cv2.VideoWriter_fourcc(*'mp4v'), 24, (resolution, resolution))
-        out.release()
-        return dummy
-    target_bgr = cv2.cvtColor(target_img, cv2.COLOR_RGB2BGR)
-    alpha = float(np.clip((transition_level+1)/2,0,1))
-    cap = cv2.VideoCapture(video_path)
-    fps = cap.get(cv2.CAP_PROP_FPS) or 24
-    out_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4").name
-    out = cv2.VideoWriter(out_file, cv2.VideoWriter_fourcc(*'mp4v'), fps, (resolution, resolution))
+        tmp=tempfile.NamedTemporaryFile(delete=False,suffix='.mp4').name
+        cv2.VideoWriter(tmp,cv2.VideoWriter_fourcc(*'mp4v'),24,(res,res)).release();return tmp
+    tgt=cv2.cvtColor(target_img,cv2.COLOR_RGB2BGR)
+    alpha=float(np.clip((trans+1)/2,0,1))
+    cap=cv2.VideoCapture(video_path)
+    n=int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+    fps=cap.get(cv2.CAP_PROP_FPS) or 24
+    tmp=tempfile.NamedTemporaryFile(delete=False,suffix='.mp4').name
+    out=cv2.VideoWriter(tmp,cv2.VideoWriter_fourcc(*'mp4v'),fps,(res,res))
+    i=0
     while True:
-        ret, frame = cap.read()
+        ret,frame=cap.read()
         if not ret: break
-        mor = morph_faces(frame, target_bgr, alpha, resolution, landmark_sampling)
+        i+=1
+        progress(i/n,desc=f'Frame {i}/{n}')
+        mor=morph_faces(frame,tgt,alpha,res,step)
         out.write(mor)
-    cap.release(); out.release()
-    return out_file
+    cap.release();out.release();progress(1,desc='Done')
+    return tmp
 
 # --- Gradio App ---
-css = """video, img { object-fit: contain !important; }"""
+css="""video, img{object-fit:contain!important;}"""
 with gr.Blocks(css=css) as iface:
     gr.Markdown("# Real-Time Video Face Morph 🚀")
-    gr.Markdown("Use the button below to generate and show a progress bar during processing.")
+    gr.Markdown("Click 'Generate Morph' and watch the progress bar during processing.")
     with gr.Row():
-        video_input = gr.Video(label="Input Video")
-        img_input = gr.Image(type="numpy", label="Target Face Image")
+        vid=gr.Video(label='Input Video')
+        img=gr.Image(type='numpy',label='Target Face Image')
     with gr.Row():
-        resolution_slider = gr.Dropdown([256,384,512,768], value=512, label="Resolution")
-        landmark_slider = gr.Slider(1,4,value=1,step=1, label="Landmark Sub-sampling")
-        transition_slider = gr.Slider(-1.0,1.0,value=0.0,step=0.05, label="Transition Level")
-    generate_btn = gr.Button("Generate Morph 🚀", variant="primary")
-    progress_bar = gr.Progress()
-    video_output = gr.Video(label="Morphed Video")
-
-    generate_btn.click(
+        res=gr.Dropdown([256,384,512,768],value=512,label='Resolution')
+        step=gr.Slider(1,4,value=1,step=1,label='Landmark Sub-sampling')
+        trans=gr.Slider(-1,1,value=0,step=0.05,label='Transition Level')
+    btn=gr.Button('Generate Morph 🚀',variant='primary')
+    prog=gr.Progress()
+    out=gr.Video(label='Morphed Video')
+    btn.click(
         fn=process_video,
-        inputs=[video_input, img_input, transition_slider, resolution_slider, landmark_slider],
-        outputs=video_output,
-        show_progress=True
+        inputs=[vid,img,trans,res,step],
+        outputs=out,
+        progress=prog
    )
-
     gr.Markdown("---\n*Built with Gradio, OpenCV & MediaPipe.*")
-
-if __name__ == "__main__":
-    iface.launch(debug=True)
+if __name__=='__main__': iface.launch(debug=True)
 
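The substance of this commit is that `process_video` now accepts a `progress=gr.Progress()` argument and reports per-frame progress while it writes the morphed video. For reference, a minimal, self-contained sketch of that Gradio progress pattern follows; `slow_task` and its UI are illustrative stand-ins, not code from this repository:

```python
# Minimal sketch of Gradio's progress-tracking pattern (illustrative, not the app's code).
import time

import gradio as gr


def slow_task(n_steps, progress=gr.Progress()):
    # Gradio injects a live tracker into `progress` when the event fires.
    n = int(n_steps)
    for i in range(n):
        progress((i + 1) / n, desc=f"Step {i + 1}/{n}")  # update the visible progress bar
        time.sleep(0.1)  # stand-in for the per-frame morphing work
    return f"Finished {n} steps"


with gr.Blocks() as demo:
    steps = gr.Slider(1, 100, value=20, step=1, label="Steps")
    run_btn = gr.Button("Run")
    result = gr.Textbox(label="Result")
    run_btn.click(fn=slow_task, inputs=steps, outputs=result)

if __name__ == "__main__":
    demo.queue()  # progress updates travel over the request queue; newer Gradio enables it by default
    demo.launch()
```

The bar is rendered on the event's output component, so nothing beyond the `gr.Progress()` parameter in the handler signature is strictly required to display it.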
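To exercise the refactored morphing helpers outside the Gradio UI, here is a rough usage sketch. It assumes the updated `app.py` is importable and that `opencv-python`, `mediapipe`, and `gradio` are installed; the image paths below are placeholders, not files from the repo:

```python
# Quick local check of morph_faces from the updated app.py (paths are placeholders).
import cv2

from app import morph_faces

face_a = cv2.imread("face_a.jpg")  # OpenCV loads BGR, which morph_faces expects
face_b = cv2.imread("face_b.jpg")

# 50/50 blend at 512x512, using every detected landmark (step=1)
blend = morph_faces(face_a, face_b, alpha=0.5, dim=512, step=1)
cv2.imwrite("blend.png", blend)
```

Because `get_landmarks` returns `None` when MediaPipe is unavailable or no face is detected, `morph_faces` degrades to a plain `cv2.addWeighted` cross-fade in that case, so the call above still yields a `dim`-by-`dim` image.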