Update app.py
app.py CHANGED
@@ -104,21 +104,27 @@ async def full_detection_pipeline(image: UploadFile = File(...)):
     if not detect_faces_roboflow(temp_image_path):
         return JSONResponse(status_code=400, content={"error": "No face detected."})
 
+    # --- This is the final corrected logic with logging ---
+
     image_to_process = raw_image
-    was_mirrored = False
+    was_mirrored = False  # Add a flag to track if we flipped the image
 
+    print("--- 1. Attempting detection on original image... ---")
     eye_crops, error_msg = detect_eyes_roboflow(temp_image_path, image_to_process)
+    print(f"--- 2. Found {len(eye_crops)} eyes in original image. ---")
 
     if len(eye_crops) != 2:
+        print("--- 3. Original failed. Attempting detection on mirrored image... ---")
         mirrored_image = cv2.flip(raw_image, 1)
         image_to_process = mirrored_image
-        was_mirrored = True
+        was_mirrored = True  # Set the flag to true
 
         with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as tmp_mirrored:
             cv2.imwrite(tmp_mirrored.name, mirrored_image)
             temp_mirrored_image_path = tmp_mirrored.name
         try:
             eye_crops, error_msg = detect_eyes_roboflow(temp_mirrored_image_path, image_to_process)
+            print(f"--- 4. Found {len(eye_crops)} eyes in mirrored image. ---")
         finally:
             os.remove(temp_mirrored_image_path)
 
@@ -128,15 +134,28 @@ async def full_detection_pipeline(image: UploadFile = File(...)):
             content={"error": "Could not detect exactly two eyes. Please try another photo."}
         )
 
+    # Get the bounding box coordinates before sorting
+    initial_boxes = [cv2.boundingRect(cv2.cvtColor(c, cv2.COLOR_BGR2GRAY)) for c in eye_crops]
+    print(f"--- 5. Initial eye coordinates (x,y,w,h): {initial_boxes} ---")
+
+    # Sort the eyes from left to right based on their position in the image
     eye_crops.sort(key=lambda c: cv2.boundingRect(cv2.cvtColor(c, cv2.COLOR_BGR2GRAY))[0])
 
+    sorted_boxes = [cv2.boundingRect(cv2.cvtColor(c, cv2.COLOR_BGR2GRAY)) for c in eye_crops]
+    print(f"--- 6. Sorted eye coordinates (x,y,w,h): {sorted_boxes} ---")
+
+    # --- THE CRITICAL FIX ---
     if was_mirrored:
+        print("--- 7. Image was mirrored, reversing eye order for correct labeling. ---")
         eye_crops.reverse()
+        reversed_boxes = [cv2.boundingRect(cv2.cvtColor(c, cv2.COLOR_BGR2GRAY)) for c in eye_crops]
+        print(f"--- 8. Reversed eye coordinates (x,y,w,h): {reversed_boxes} ---")
 
     flags = {}
     eye_images_b64 = {}
     for i, eye_crop in enumerate(eye_crops):
         side = "right" if i == 0 else "left"
+        print(f"--- 9. Processing loop index {i}, assigning to: {side} eye. ---")
 
         is_success, buffer = cv2.imencode(".jpg", eye_crop)
         if is_success:
@@ -152,8 +171,8 @@ async def full_detection_pipeline(image: UploadFile = File(...)):
         else:
             flags[side] = None
 
-
-
+    print("--- 10. Final generated flags:", flags, "---")
+
     is_success_main, buffer_main = cv2.imencode(".jpg", image_to_process)
     analyzed_image_b64 = ""
     if is_success_main: