added log messages to properly debug
app.py CHANGED
@@ -89,8 +89,6 @@ def run_leukocoria_prediction(iris_crop):
 # --- 3. FastAPI Application ---
 app = FastAPI()
 
-# In app.py, replace the existing function with this one
-
 @app.post("/detect/")
 async def full_detection_pipeline(image: UploadFile = File(...)):
     with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as tmp:
@@ -105,26 +103,26 @@ async def full_detection_pipeline(image: UploadFile = File(...)):
 
     if not detect_faces_roboflow(temp_image_path):
         return JSONResponse(status_code=400, content={"error": "No face detected."})
-
-    # --- This is the final corrected logic ---
 
     image_to_process = raw_image
-    was_mirrored = False
+    was_mirrored = False
 
-    print("--- Attempting detection on original image... ---")
+    print("--- 1. Attempting detection on original image... ---")
     eye_crops, error_msg = detect_eyes_roboflow(temp_image_path, image_to_process)
+    print(f"--- 2. Found {len(eye_crops)} eyes in original image. ---")
 
     if len(eye_crops) != 2:
-        print("--- Original failed. Attempting detection on mirrored image... ---")
+        print("--- 3. Original failed. Attempting detection on mirrored image... ---")
         mirrored_image = cv2.flip(raw_image, 1)
         image_to_process = mirrored_image
-        was_mirrored = True
+        was_mirrored = True
 
         with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as tmp_mirrored:
             cv2.imwrite(tmp_mirrored.name, mirrored_image)
             temp_mirrored_image_path = tmp_mirrored.name
         try:
             eye_crops, error_msg = detect_eyes_roboflow(temp_mirrored_image_path, image_to_process)
+            print(f"--- 4. Found {len(eye_crops)} eyes in mirrored image. ---")
         finally:
             os.remove(temp_mirrored_image_path)
 
@@ -134,26 +132,32 @@ async def full_detection_pipeline(image: UploadFile = File(...)):
             content={"error": "Could not detect exactly two eyes. Please try another photo."}
         )
 
+    # Get the bounding box coordinates before sorting
+    initial_boxes = [cv2.boundingRect(cv2.cvtColor(c, cv2.COLOR_BGR2GRAY)) for c in eye_crops]
+    print(f"--- 5. Initial eye coordinates (x,y,w,h): {initial_boxes} ---")
+
     # Sort the eyes from left to right based on their position in the image
     eye_crops.sort(key=lambda c: cv2.boundingRect(cv2.cvtColor(c, cv2.COLOR_BGR2GRAY))[0])
 
-
-
-
+    sorted_boxes = [cv2.boundingRect(cv2.cvtColor(c, cv2.COLOR_BGR2GRAY)) for c in eye_crops]
+    print(f"--- 6. Sorted eye coordinates (x,y,w,h): {sorted_boxes} ---")
+
     if was_mirrored:
-        print("--- Image was mirrored, reversing eye order for correct labeling. ---")
+        print("--- 7. Image was mirrored, reversing eye order for correct labeling. ---")
         eye_crops.reverse()
+        reversed_boxes = [cv2.boundingRect(cv2.cvtColor(c, cv2.COLOR_BGR2GRAY)) for c in eye_crops]
+        print(f"--- 8. Reversed eye coordinates (x,y,w,h): {reversed_boxes} ---")
 
     flags = {}
    eye_images_b64 = {}
     for i, eye_crop in enumerate(eye_crops):
-        # Because of the sort and potential reverse, i=0 is ALWAYS the person's right eye
         side = "right" if i == 0 else "left"
+        print(f"--- 9. Processing loop index {i}, assigning to: {side} eye. ---")
 
+        # ... (rest of the processing loop remains the same) ...
         is_success, buffer = cv2.imencode(".jpg", eye_crop)
         if is_success:
             eye_images_b64[side] = "data:image/jpeg;base64," + base64.b64encode(buffer).decode("utf-8")
-
         pred = get_largest_iris_prediction(eye_crop)
         if pred:
             x1, y1 = int(pred['x'] - pred['width'] / 2), int(pred['y'] - pred['height'] / 2)
@@ -164,6 +168,8 @@ async def full_detection_pipeline(image: UploadFile = File(...)):
         else:
             flags[side] = None
 
+    print("--- 10. Final generated flags:", flags, "---")
+
     is_success_main, buffer_main = cv2.imencode(".jpg", image_to_process)
     analyzed_image_b64 = ""
     if is_success_main: