Eric P. Nusbaum committed
Commit: b7bd655
Parent(s): c62ea31
Updated app.py
app.py
CHANGED
@@ -58,6 +58,7 @@ def preprocess_image(image):
     - Normalize pixel values
     - Convert RGB to BGR if required by the model
     """
+    image = image.convert('RGB')  # Ensure image is in RGB
     image = image.resize((TARGET_WIDTH, TARGET_HEIGHT))
     image_np = np.array(image).astype(np.float32)
     image_np = image_np / 255.0  # Normalize to [0,1]
@@ -84,9 +85,12 @@ def draw_boxes(image, boxes, classes, scores, threshold=0.5):
     except IOError:
         font = ImageFont.load_default()
 
+    detections = False  # Flag to check if any detections are above threshold
+
     for box, cls, score in zip(boxes[0], classes[0], scores[0]):
         if score < threshold:
             continue
+        detections = True
         # Convert box coordinates from normalized to absolute
         ymin, xmin, ymax, xmax = box
         left = xmin * image.width
@@ -111,6 +115,30 @@ def draw_boxes(image, boxes, classes, scores, threshold=0.5):
         # Draw text
         draw.text((left + 2, top - text_height - 2), label, fill="white", font=font)
 
+    if not detections:
+        # Optionally, you can add text indicating no detections were found
+        try:
+            font_large = ImageFont.truetype("arial.ttf", 20)
+        except IOError:
+            font_large = ImageFont.load_default()
+        no_detect_text = "No detections found."
+        text_bbox = draw.textbbox((0, 0), no_detect_text, font=font_large)
+        text_width = text_bbox[2] - text_bbox[0]
+        text_height = text_bbox[3] - text_bbox[1]
+        draw.rectangle(
+            [
+                ((image.width - text_width) / 2 - 10, (image.height - text_height) / 2 - 10),
+                ((image.width + text_width) / 2 + 10, (image.height + text_height) / 2 + 10)
+            ],
+            fill="black"
+        )
+        draw.text(
+            ((image.width - text_width) / 2, (image.height - text_height) / 2),
+            no_detect_text,
+            fill="white",
+            font=font_large
+        )
+
     return image
 
 def predict(image):
@@ -122,17 +150,25 @@ def predict(image):
         PIL.Image: Annotated image with bounding boxes and labels.
     """
     try:
+        print("Starting prediction...")
         # Preprocess the image
         input_array = preprocess_image(image)
+        print(f"Preprocessed image shape: {input_array.shape}")
 
         # Run inference
         boxes, classes, scores = sess.run(
             [detected_boxes, detected_classes, detected_scores],
             feed_dict={input_tensor: input_array}
         )
+        print(f"Inference completed. Number of detections: {len(boxes[0])}")
+
+        # Check if detections are present
+        if boxes.shape[1] == 0:
+            print("No detections returned by the model.")
 
         # Annotate the image with bounding boxes and labels
         annotated_image = draw_boxes(image.copy(), boxes, classes, scores, threshold=0.5)
+        print("Annotated image created successfully.")
 
         return annotated_image
 
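For reference, the centered "No detections found." banner drawn by the new `if not detections:` branch can be previewed on its own with a Pillow-only sketch such as the one below. The blank 640x480 gray canvas, the output filename, and running it outside of draw_boxes are illustrative assumptions, not part of app.py.

# Standalone sketch of the "no detections" banner added in this commit,
# drawn on a placeholder canvas so it can be tried without the TF session.
from PIL import Image, ImageDraw, ImageFont

image = Image.new("RGB", (640, 480), color="gray")  # placeholder input image (assumption)
draw = ImageDraw.Draw(image)

try:
    font_large = ImageFont.truetype("arial.ttf", 20)  # may be missing on Linux hosts
except IOError:
    font_large = ImageFont.load_default()

no_detect_text = "No detections found."
text_bbox = draw.textbbox((0, 0), no_detect_text, font=font_large)
text_width = text_bbox[2] - text_bbox[0]
text_height = text_bbox[3] - text_bbox[1]

# Black box centered on the image with 10 px of padding, then white text on top,
# mirroring the logic in the diff above.
draw.rectangle(
    [
        ((image.width - text_width) / 2 - 10, (image.height - text_height) / 2 - 10),
        ((image.width + text_width) / 2 + 10, (image.height + text_height) / 2 + 10),
    ],
    fill="black",
)
draw.text(
    ((image.width - text_width) / 2, (image.height - text_height) / 2),
    no_detect_text,
    fill="white",
    font=font_large,
)
image.save("no_detections_preview.png")  # hypothetical output path

Note that textbbox requires Pillow 8.0 or newer; on older versions textsize would be the equivalent call.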