Update app.py
app.py CHANGED
@@ -8,23 +8,41 @@ import time
 RESIZE_DIM = 512 # Resize images for faster processing
 
 # --- MediaPipe Initialization ---
+# Use try-except block for robustness if mediapipe is not installed correctly
+try:
+    mp_face_mesh = mp.solutions.face_mesh
+    face_mesh = mp_face_mesh.FaceMesh(
+        static_image_mode=True,       # Process static images
+        max_num_faces=1,              # Detect only one face for simplicity
+        refine_landmarks=True,        # Get more refined landmarks (lips, eyes, iris)
+        min_detection_confidence=0.5  # Default detection confidence
+    )
+    print("MediaPipe Face Mesh initialized successfully.")
+except AttributeError:
+    print("Error: Could not initialize MediaPipe Face Mesh. Is mediapipe installed correctly?")
+    # Provide a dummy object or exit if mediapipe is critical and missing
+    face_mesh = None  # Or raise an exception
+
 
 # --- Helper Functions ---
 
 def get_landmarks(img):
     """Detects face landmarks using MediaPipe Face Mesh."""
     if img is None:
+        print("Warning: Input image is None in get_landmarks.")
+        return None
+    # Ensure mediapipe is available
+    if face_mesh is None:
+        print("Error: MediaPipe Face Mesh not available.")
         return None
 
     img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+    try:
+        results = face_mesh.process(img_rgb)
+    except Exception as e:
+        print(f"Error processing image with MediaPipe: {e}")
+        return None
+
 
     if not results.multi_face_landmarks:
         print("Warning: No face detected.")
@@ -32,11 +50,16 @@ def get_landmarks(img):
 
     # Assuming only one face
     landmarks_mp = results.multi_face_landmarks[0]
+
     # Convert landmarks to numpy array of (x, y) coordinates
     h, w, _ = img.shape
+    landmarks = np.array([(pt.x * w, pt.y * h) for pt in landmarks_mp.landmark], dtype=np.float32)
+
+    # Check if landmarks are valid numbers (sometimes mediapipe might return NaN/inf?)
+    if not np.all(np.isfinite(landmarks)):
+        print("Warning: Invalid landmark coordinates detected (NaN/inf).")
+        return None
+
     # Add image corners as landmarks for better warping at edges
     corners = np.array([
         [0, 0],          # Top-left
@@ -44,45 +67,102 @@ def get_landmarks(img):
         [0, h - 1],      # Bottom-left
         [w - 1, h - 1]   # Bottom-right
     ], dtype=np.float32)
+
+    # Use vstack only if landmarks were successfully found
+    if landmarks.size > 0:
+        landmarks = np.vstack((landmarks, corners))
+    else:  # Should not happen if multi_face_landmarks check passed, but defensive coding
+        print("Warning: Landmarks array was empty unexpectedly.")
+        return None
+
+
     return landmarks
 
 def calculate_delaunay_triangles(rect, points):
     """Calculates Delaunay triangulation for a set of points."""
+    # Check for sufficient points
+    if points is None or len(points) < 3:
+        print("Warning: Not enough points for triangulation.")
+        return []
+
+    # Ensure points are finite
+    if not np.all(np.isfinite(points)):
+        print("Warning: Non-finite points passed to calculate_delaunay_triangles.")
+        # Attempt to filter out non-finite points
+        points = points[np.all(np.isfinite(points), axis=1)]
+        if len(points) < 3:
+            print("Warning: Not enough finite points left for triangulation.")
+            return []
+
+    # Ensure points are within reasonable bounds of the rect if possible
+    # This can prevent issues with Subdiv2D if coordinates are wildly off
+    points[:, 0] = np.clip(points[:, 0], rect[0], rect[0] + rect[2] - 1)
+    points[:, 1] = np.clip(points[:, 1], rect[1], rect[1] + rect[3] - 1)
+
+
     subdiv = cv2.Subdiv2D(rect)
+
+    # Create a point map *before* inserting for reliable index lookup
+    # Use tuple representation as dict keys
+    point_map = {(int(p[0]), int(p[1])): i for i, p in enumerate(points)}
+    inserted_points_map = {}  # Keep track of points actually inserted
+
+    for i, p in enumerate(points):
+        point_tuple = (int(p[0]), int(p[1]))
+        # Avoid inserting duplicate points which can cause issues
+        if point_tuple not in inserted_points_map:
+            try:
+                subdiv.insert(point_tuple)
+                inserted_points_map[point_tuple] = i  # Map inserted tuple back to original index
+            except cv2.error as e:
+                # This might happen if points are outside the rect despite clipping, or very close
+                print(f"Warning: Could not insert point {point_tuple} into Subdiv2D: {e}")
+                continue  # Skip this point
+
 
     triangle_list = subdiv.getTriangleList()
 
     # Map triangle vertex coordinates back to indices in the points array
     delaunay_triangles = []
+
 
     for t in triangle_list:
+        # Get points as tuples of integers
+        pt1_coord = (int(t[0]), int(t[1]))
+        pt2_coord = (int(t[2]), int(t[3]))
+        pt3_coord = (int(t[4]), int(t[5]))
+
+        pts_coords = [pt1_coord, pt2_coord, pt3_coord]
+
+        # Check if triangle vertices are within the rectangle (Subdiv2D can return triangles outside)
+        in_rect = all(
+            rect[0] <= p[0] < rect[0] + rect[2] and rect[1] <= p[1] < rect[1] + rect[3]
+            for p in pts_coords
+        )
+
+        if in_rect:
             indices = []
+            all_indices_found = True
+            for coord in pts_coords:
+                # Find the original index using the inserted_points_map
+                original_index = inserted_points_map.get(coord, None)
+                if original_index is not None:
+                    indices.append(original_index)
+                else:
+                    # This means a vertex coordinate returned by getTriangleList
+                    # doesn't match any *exactly* inserted point's integer tuple.
+                    # This can occasionally happen due to floating point inaccuracies or
+                    # if subdiv creates intermediate points. We should skip these triangles.
+                    # print(f"Warning: Could not map triangle vertex {coord} back to an original point index.")
+                    all_indices_found = False
+                    break  # Stop processing this triangle
+
+            if all_indices_found and len(indices) == 3:
+                # Check if we got 3 unique indices
+                if len(set(indices)) == 3:
+                    delaunay_triangles.append(indices)
+                # else:
+                #     print(f"Warning: Triangle mapping resulted in duplicate indices: {indices}")
 
 
     return delaunay_triangles
@@ -90,54 +170,103 @@ def calculate_delaunay_triangles(rect, points):
 
 def warp_triangle(img1, img2, t1, t2):
     """Warps a triangle from img1 to img2."""
+    # Ensure triangles have 3 points
+    if len(t1) != 3 or len(t2) != 3:
+        print("Warning: Invalid triangle vertex count in warp_triangle.")
+        return
+
+    # Ensure points are finite
+    if not np.all(np.isfinite(t1)) or not np.all(np.isfinite(t2)):
+        print("Warning: Non-finite triangle vertices in warp_triangle.")
+        return
+
+    try:
+        # Find bounding box for each triangle
+        r1 = cv2.boundingRect(np.float32([t1]))
+        r2 = cv2.boundingRect(np.float32([t2]))
+
+        # Check for valid bounding boxes (width and height > 0)
+        if r1[2] <= 0 or r1[3] <= 0 or r2[2] <= 0 or r2[3] <= 0:
+            # print("Warning: Skipping triangle due to zero-area bounding box.")
+            return
+
+        # Offset points by left-top corner of the respective rectangles
+        t1_rect = [(t1[i][0] - r1[0], t1[i][1] - r1[1]) for i in range(3)]
+        t2_rect = [(t2[i][0] - r2[0], t2[i][1] - r2[1]) for i in range(3)]
+        t2_rect_int = [(int(p[0]), int(p[1])) for p in t2_rect]  # for fillConvexPoly
+
+        # Get mask by filling triangle
+        mask = np.zeros((r2[3], r2[2], 3), dtype=np.float32)
+        cv2.fillConvexPoly(mask, np.int32(t2_rect_int), (1.0, 1.0, 1.0), 16, 0)
+
+        # Crop image patch
+        img1_rect = img1[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]
+
+        # Handle cases where bounding box is empty or has zero dimension after slicing
+        if img1_rect.shape[0] == 0 or img1_rect.shape[1] == 0:
+            # print("Warning: Skipping triangle due to empty source patch after slicing.")
+            return  # Skip this triangle if the source patch is invalid
+
+        size = (r2[2], r2[3])
+        # Affine Transform
+        warp_mat = cv2.getAffineTransform(np.float32(t1_rect), np.float32(t2_rect))
+
+        # Apply Affine Transformation
+        img2_rect = cv2.warpAffine(img1_rect, warp_mat, size, None,
+                                   flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)
+
+        # Handle potential size mismatch after warpAffine if bounding boxes were different
+        h_mask, w_mask, _ = mask.shape
+        h_warp, w_warp, _ = img2_rect.shape
+        if h_mask != h_warp or w_mask != w_warp:
+            # print(f"Adjusting warp size {img2_rect.shape} to mask size {mask.shape}")
+            img2_rect = cv2.resize(img2_rect, (w_mask, h_mask))
+
+
+        # Apply mask
+        img2_rect = img2_rect * mask
+
+        # Copy triangular region to the output image
+        # Ensure destination slice indices are valid
+        y_start, y_end = r2[1], r2[1] + r2[3]
+        x_start, x_end = r2[0], r2[0] + r2[2]
+
+        # Clip indices to be within the bounds of img2
+        h_img2, w_img2, _ = img2.shape
+        y_start = max(0, y_start)
+        y_end = min(h_img2, y_end)
+        x_start = max(0, x_start)
+        x_end = min(w_img2, x_end)
+
+        # Adjust mask and warped rect if clipping occurred
+        off_y_start = y_start - r2[1]
+        off_y_end = off_y_start + (y_end - y_start)
+        off_x_start = x_start - r2[0]
+        off_x_end = off_x_start + (x_end - x_start)
+
+        # Check if the clipped area is valid
+        if off_y_end <= off_y_start or off_x_end <= off_x_start:
+            # print("Warning: Skipping triangle blend due to invalid clipped area.")
+            return
+
+        clipped_mask = mask[off_y_start:off_y_end, off_x_start:off_x_end]
+        clipped_img2_rect = img2_rect[off_y_start:off_y_end, off_x_start:off_x_end]
+
+
+        img2_part = img2[y_start:y_end, x_start:x_end]
+
+        # Ensure shapes match before blending
+        if img2_part.shape == clipped_img2_rect.shape and img2_part.shape == clipped_mask.shape:
+            img2[y_start:y_end, x_start:x_end] = img2_part * (1.0 - clipped_mask) + clipped_img2_rect
+        # else:
+        #     This indicates an issue with clipping or resizing logic, should be investigated if it occurs often
+        #     print(f"Shape mismatch during blend: Part={img2_part.shape}, Rect={clipped_img2_rect.shape}, Mask={clipped_mask.shape}")
+
+    except cv2.error as e:
+        print(f"OpenCV Error during warp_triangle (possibly degenerate triangle): {e}")
+    except Exception as e:
+        print(f"Unexpected error during warp_triangle: {e}")
+
 
 # --- Main Morphing Function ---
 
@@ -150,12 +279,21 @@ def morph_faces(img1_orig, img2_orig, alpha):
 
     # --- Input Validation and Preprocessing ---
     if img1_orig is None or img2_orig is None:
+        print("Error: One or both input images are None in morph_faces.")
+        # Return a black image of standard size
+        black_img = np.zeros((RESIZE_DIM, RESIZE_DIM, 3), dtype=np.uint8)
+        return black_img
+
 
     # Resize images for consistency and speed
+    try:
+        img1 = cv2.resize(img1_orig, (RESIZE_DIM, RESIZE_DIM), interpolation=cv2.INTER_LINEAR)
+        img2 = cv2.resize(img2_orig, (RESIZE_DIM, RESIZE_DIM), interpolation=cv2.INTER_LINEAR)
+    except cv2.error as e:
+        print(f"Error resizing images: {e}")
+        black_img = np.zeros((RESIZE_DIM, RESIZE_DIM, 3), dtype=np.uint8)
+        return black_img  # Return black image if resize fails
+
     h, w, _ = img1.shape
     rect = (0, 0, w, h) # Bounding rectangle for triangulation
 
@@ -163,12 +301,29 @@ def morph_faces(img1_orig, img2_orig, alpha):
     landmarks1 = get_landmarks(img1)
     landmarks2 = get_landmarks(img2)
 
+    # Handle landmark detection failure more robustly
+    if landmarks1 is None or landmarks2 is None:
+        print("Error: Landmark detection failed for one or both images. Blending directly.")
+        # Fallback: blend images directly (alpha blending)
+        try:
+            blended_img = cv2.addWeighted(img1, 1 - alpha, img2, alpha, 0)
+            return blended_img
+        except cv2.error as e:
+            print(f"Error during fallback alpha blending: {e}")
+            # If blending also fails, return one of the images or black
+            return img1 if landmarks1 is not None else (img2 if landmarks2 is not None else np.zeros_like(img1))
+
+
+    # Ensure landmarks have the same number of points before interpolation
+    if landmarks1.shape != landmarks2.shape:
+        print(f"Error: Landmark count mismatch! Img1: {landmarks1.shape}, Img2: {landmarks2.shape}. Blending directly.")
+        # Fallback: blend images directly
+        try:
+            blended_img = cv2.addWeighted(img1, 1 - alpha, img2, alpha, 0)
+            return blended_img
+        except cv2.error as e:
+            print(f"Error during fallback alpha blending after landmark mismatch: {e}")
+            return img1  # Or some default
 
     # --- Landmark Interpolation ---
     landmarks_morphed = (1 - alpha) * landmarks1 + alpha * landmarks2
@@ -176,54 +331,147 @@ def morph_faces(img1_orig, img2_orig, alpha):
 
     # --- Delaunay Triangulation (based on morphed landmarks) ---
     try:
+        triangles_indices = calculate_delaunay_triangles(rect, landmarks_morphed.copy())  # Pass copy to avoid modification
         if not triangles_indices:
+            print("Warning: Delaunay triangulation resulted in 0 triangles. Blending directly.")
             # Fallback: blend images directly
+            blended_img = cv2.addWeighted(img1, 1 - alpha, img2, alpha, 0)
+            return blended_img
 
     except Exception as e:
+        print(f"Error during triangulation: {e}. Blending directly.")
         # Fallback: blend images directly
+        blended_img = cv2.addWeighted(img1, 1 - alpha, img2, alpha, 0)
+        return blended_img
 
 
     # --- Image Warping and Blending ---
     # Convert images to float32 for intermediate calculations to avoid overflow/clipping
+    # Handle potential conversion errors if images are not standard BGR uint8
+    try:
+        img1_float = img1.astype(np.float32) / 255.0
+        img2_float = img2.astype(np.float32) / 255.0
+    except ValueError as e:
+        print(f"Error converting images to float32: {e}. Returning img1.")
+        return img1
+
     morphed_img_float = np.zeros(img1.shape, dtype=np.float32)
 
 
     for indices in triangles_indices:
+        # Check if indices are valid before accessing landmarks
+        if any(idx >= len(landmarks1) or idx < 0 for idx in indices):
+            print(f"Warning: Invalid triangle index found: {indices}. Max index: {len(landmarks1)-1}. Skipping triangle.")
+            continue
+
+        # Get vertices for each triangle
+        t1 = landmarks1[indices]
+        t2 = landmarks2[indices]
+        t_morphed = landmarks_morphed[indices]
+
+        # Warp triangles using the helper function that accumulates onto the final image
+        warp_triangle(img1_float, morphed_img_float, t1, (1-alpha) * t_morphed)  # Warping img1 part
+        warp_triangle(img2_float, morphed_img_float, t2, alpha * t_morphed)      # Warping img2 part
+        # Note: This simplified approach might lead to artifacts compared to warping both fully
+        # and then alpha blending the warped results pixel-wise within the triangle mask.
+        # Let's refine this part for better blending.
+
+    # --- Refined Image Warping and Blending (More Accurate) ---
+    morphed_img_float_refined = np.zeros(img1.shape, dtype=np.float32)
 
+    for indices in triangles_indices:
+        if any(idx >= len(landmarks1) or idx < 0 for idx in indices):
+            # Already printed warning above
+            continue
+
+        t1 = landmarks1[indices]
+        t2 = landmarks2[indices]
+        t_morphed = landmarks_morphed[indices]
+
+        # Find bounding box for the morphed triangle
+        r_morphed = cv2.boundingRect(t_morphed)
+        x, y, w_box, h_box = r_morphed
+
+        # Check for valid box
+        if w_box <= 0 or h_box <= 0: continue
+
+        # Offset points
+        t_morphed_rect = [(t_morphed[i][0] - x, t_morphed[i][1] - y) for i in range(3)]
+        t1_rect = [(t1[i][0] - x, t1[i][1] - y) for i in range(3)]  # Not really used this way, use full t1, t2 for getAffineTransform
+        t2_rect = [(t2[i][0] - x, t2[i][1] - y) for i in range(3)]  # Not really used this way
+
+        # Create mask for the morphed triangle within its bounding box
+        mask = np.zeros((h_box, w_box), dtype=np.float32)
+        cv2.fillConvexPoly(mask, np.int32(t_morphed_rect), 1.0, 16, 0)
+        mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)  # Make 3 channels
+
+        # Warp source triangles TO the morphed triangle's bounding box shape
+        try:
+            warp_mat1 = cv2.getAffineTransform(np.float32(t1), np.float32(t_morphed))
+            warp_mat2 = cv2.getAffineTransform(np.float32(t2), np.float32(t_morphed))
+        except cv2.error:
+            # Likely degenerate triangle
+            continue  # Skip this triangle
+
+        img1_warped = cv2.warpAffine(img1_float, warp_mat1, (w, h), None, flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)
+        img2_warped = cv2.warpAffine(img2_float, warp_mat2, (w, h), None, flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)
+
+        # Blend the FULL warped images
+        blended_warped = (1.0 - alpha) * img1_warped + alpha * img2_warped
+
+        # Extract the relevant bounding box from the blended warped image
+        blended_rect = blended_warped[y:y + h_box, x:x + w_box]
+
+        # Ensure mask and blended_rect sizes match before applying mask
+        if mask.shape != blended_rect.shape:
+            # This might happen if the bounding box calculated goes slightly out of bounds
+            # Resize mask or rect - resizing mask is safer. Let's try resizing rect if needed.
+            h_mask, w_mask, _ = mask.shape
+            blended_rect = cv2.resize(blended_rect, (w_mask, h_mask))
+            # print(f"Adjusting blended_rect size {blended_rect.shape} to mask size {mask.shape}")
+
+
+        # Combine using the mask within the bounding box of the final image
+        # Ensure destination slice indices are valid
+        y_start, y_end = y, y + h_box
+        x_start, x_end = x, x + w_box
+        # Clip indices to be within the bounds of the destination image
+        h_dest, w_dest, _ = morphed_img_float_refined.shape
+        y_start = max(0, y_start)
+        y_end = min(h_dest, y_end)
+        x_start = max(0, x_start)
+        x_end = min(w_dest, x_end)
+
+        # Calculate offsets for slicing the mask and blended_rect correctly
+        off_y_start = y_start - y
+        off_y_end = off_y_start + (y_end - y_start)
+        off_x_start = x_start - x
+        off_x_end = off_x_start + (x_end - x_start)
+
+        # Check if the clipped area is valid before slicing
+        if off_y_end <= off_y_start or off_x_end <= off_x_start:
+            continue
+
+        try:
+            clipped_mask = mask[off_y_start:off_y_end, off_x_start:off_x_end]
+            clipped_blended_rect = blended_rect[off_y_start:off_y_end, off_x_start:off_x_end]
+
+            dest_part = morphed_img_float_refined[y_start:y_end, x_start:x_end]
+
+            if dest_part.shape == clipped_blended_rect.shape and dest_part.shape == clipped_mask.shape:
+                morphed_img_float_refined[y_start:y_end, x_start:x_end] = \
+                    dest_part * (1.0 - clipped_mask) + clipped_blended_rect * clipped_mask
+            # else:
+            #     print(f"Shape mismatch during refined blend: Dest={dest_part.shape}, Rect={clipped_blended_rect.shape}, Mask={clipped_mask.shape}")
+
+        except IndexError as e:
+            print(f"IndexError during refined blend slicing: {e}. Indices: y={y_start}:{y_end}, x={x_start}:{x_end}. Shapes: mask={mask.shape}, rect={blended_rect.shape}, refined={morphed_img_float_refined.shape}")
+        except Exception as e:
+            print(f"Unexpected error during refined blend: {e}")
 
 
     # Convert back to uint8
+    morphed_img = (morphed_img_float_refined * 255.0).clip(0, 255).astype(np.uint8)
 
     end_time = time.time()
     print(f"Morphing took: {end_time - start_time:.4f} seconds")
@@ -234,31 +482,44 @@ def morph_faces(img1_orig, img2_orig, alpha):
 
 def gradio_morph(image1, image2, transition_level):
     """Wrapper function for Gradio interface."""
+    print(f"Gradio inputs: img1 type={type(image1)}, img2 type={type(image2)}, transition={transition_level}")
+
+    # Check if inputs are valid numpy arrays
+    if image1 is None or not isinstance(image1, np.ndarray):
+        print("Input image 1 is missing or not a numpy array.")
+        image1 = None  # Ensure it's None if invalid
+    if image2 is None or not isinstance(image2, np.ndarray):
+        print("Input image 2 is missing or not a numpy array.")
+        image2 = None  # Ensure it's None if invalid
+
     # Map transition level (-1.0 to 1.0) to alpha (0.0 to 1.0)
     alpha = (transition_level + 1.0) / 2.0
+    alpha = max(0.0, min(1.0, alpha))  # Clamp to [0, 1]
+
+    # Call the main morphing function
+    result_img = morph_faces(image1, image2, alpha)
 
+    # Ensure the output is always a valid numpy array for Gradio
+    if not isinstance(result_img, np.ndarray) or result_img.ndim != 3 or result_img.shape[2] != 3:
+        print(f"Warning: Output image is not a valid BGR numpy array (shape: {result_img.shape if isinstance(result_img, np.ndarray) else type(result_img)}). Returning black image.")
+        return np.zeros((RESIZE_DIM, RESIZE_DIM, 3), dtype=np.uint8)
 
+    return result_img
 
 # --- Gradio Interface Definition ---
 
 css = """
+img { object-fit: contain !important; }
 """
 
 with gr.Blocks(css=css) as iface:
     gr.Markdown("# Face Morphing App\nUpload two face images and adjust the slider to morph between them.")
+
     with gr.Row():
+        with gr.Column(scale=1):
             img_input1 = gr.Image(type="numpy", label="Face 1", height=RESIZE_DIM, width=RESIZE_DIM)
             img_input2 = gr.Image(type="numpy", label="Face 2", height=RESIZE_DIM, width=RESIZE_DIM)
+        with gr.Column(scale=1):
             img_output = gr.Image(type="numpy", label="Morphed Face", height=RESIZE_DIM, width=RESIZE_DIM, interactive=False)
 
     slider = gr.Slider(
@@ -269,16 +530,28 @@ with gr.Blocks(css=css) as iface:
         label="Transition Level",
         info="Slide from -1.0 (Face 1) to 1.0 (Face 2)"
     )
+
+    # --- CORRECTED EVENT WIRING ---
     inputs = [img_input1, img_input2, slider]
+    # Trigger morph when slider is released OR when either image is changed/cleared
+    slider.release(fn=gradio_morph, inputs=inputs, outputs=img_output, show_progress="minimal")
     img_input1.change(fn=gradio_morph, inputs=inputs, outputs=img_output, show_progress="minimal")
     img_input2.change(fn=gradio_morph, inputs=inputs, outputs=img_output, show_progress="minimal")
+    # Also consider adding listeners for .clear() if needed
+    # img_input1.clear(fn=gradio_morph, inputs=inputs, outputs=img_output, show_progress="minimal")
+    # img_input2.clear(fn=gradio_morph, inputs=inputs, outputs=img_output, show_progress="minimal")
+
 
     gr.Markdown("---")
     gr.Markdown("Built with Gradio, OpenCV, and MediaPipe.")
 
 # --- Launch the App ---
 if __name__ == "__main__":
+    if face_mesh is None:
+        print("\nERROR: MediaPipe could not be initialized. The application cannot run.")
+        print("Please ensure the 'mediapipe' library is installed correctly (`pip install mediapipe`).")
+    else:
+        print("\nLaunching Gradio Interface...")
+        # Add share=True if you want to create a public link (requires internet)
+        # Add debug=True for more detailed Gradio logs if needed
+        iface.launch()
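
As a quick sanity check of the updated pipeline outside the Gradio UI, a minimal smoke test along these lines should work; it is not part of the commit and assumes the file above is saved as app.py with two sample face photos, face1.jpg and face2.jpg (hypothetical names), in the same directory:

    # Minimal sketch, not part of this commit: exercise morph_faces() directly.
    import cv2
    import app  # importing app.py initializes MediaPipe and builds the Blocks UI, but does not launch it

    img1 = cv2.imread("face1.jpg")  # BGR uint8
    img2 = cv2.imread("face2.jpg")

    halfway = app.morph_faces(img1, img2, 0.5)  # alpha 0.5 = midpoint between the two faces
    cv2.imwrite("morph_halfway.png", halfway)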