Show the count of ripe and unripe tomatoes
Browse files
app.py
CHANGED
@@ -93,58 +93,66 @@ interface_image = gr.Interface(
|
|
93 |
)
|
94 |
|
95 |
def show_preds_video(video_path):
|
|
|
|
|
|
|
|
|
|
|
|
|
96 |
cap = cv2.VideoCapture(video_path)
|
97 |
-
|
98 |
-
|
99 |
-
|
100 |
-
|
101 |
-
|
102 |
-
|
103 |
-
|
104 |
-
|
105 |
-
|
106 |
-
|
107 |
names = model.model.names
|
|
|
|
|
|
|
108 |
|
109 |
-
for box,
|
110 |
x1, y1, x2, y2 = map(int, box)
|
111 |
-
|
112 |
-
|
113 |
-
|
114 |
-
if class_name.lower() ==
|
115 |
-
|
|
|
|
|
|
|
116 |
else:
|
117 |
-
|
118 |
-
|
119 |
-
|
120 |
-
|
121 |
-
|
122 |
-
|
123 |
-
|
124 |
-
|
125 |
-
|
126 |
-
|
127 |
-
|
128 |
-
|
129 |
-
|
130 |
-
|
131 |
-
|
132 |
-
|
133 |
-
|
134 |
-
|
135 |
-
|
136 |
-
|
137 |
-
|
138 |
-
|
139 |
-
|
140 |
-
|
141 |
-
|
142 |
-
|
143 |
-
|
144 |
-
else:
|
145 |
-
break
|
146 |
-
|
147 |
-
cap.release()
|
148 |
|
149 |
inputs_video = [
|
150 |
gr.components.Video(label="Input Video"),
|
|
|
93 |
)
|
94 |
|
95 |
def show_preds_video(video_path):
    """Run tracked tomato detection on a video and stream annotated frames.

    Uses ByteTrack IDs so each tomato is counted at most once across the
    whole clip (a set of track IDs de-duplicates re-detections). Ripe
    detections are boxed in red, everything else in green, and a running
    "Ripe: N | Unripe: M" banner is drawn top-center on every frame.

    Args:
        video_path: Path to the input video file.

    Yields:
        numpy.ndarray: Each annotated frame converted from OpenCV's BGR
        to RGB, which is what Gradio expects for display.
    """
    # stream=True makes track() return a lazy generator of per-frame results,
    # so frames are processed (and yielded) one at a time.
    results = model.track(source=video_path, persist=True,
                          tracker="bytetrack.yaml", verbose=False, stream=True)

    # Unique tracker IDs seen so far, per class.
    ripe_ids = set()
    unripe_ids = set()

    # Probe the video once for its width so the count banner can be centered.
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        print("Error: Could not open video.")
        return
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    cap.release()

    # The class-id -> name mapping is constant for the model; look it up once
    # here instead of re-reading it on every frame inside the loop.
    names = model.model.names

    for output in results:
        frame = output.orig_img

        # boxes.id is None on frames where the tracker has no confirmed IDs.
        if output.boxes and output.boxes.id is not None:
            boxes = output.boxes
            ids = boxes.id.cpu().numpy().astype(int)
            classes = boxes.cls.cpu().numpy().astype(int)

            for box, cls, track_id in zip(boxes.xyxy, classes, ids):
                x1, y1, x2, y2 = map(int, box)
                class_name = names[cls]

                # Colors are BGR here; after the BGR->RGB conversion below,
                # (0, 0, 255) renders as red and (0, 255, 0) as green.
                if class_name.lower() == "ripe":
                    color = (0, 0, 255)
                    ripe_ids.add(track_id)
                else:
                    color = (0, 255, 0)
                    unripe_ids.add(track_id)

                cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
                cv2.putText(frame, f"{class_name.capitalize()} ID:{track_id}",
                            (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

        # Running unique-ID counts, drawn even on frames with no detections.
        ripe_count_text = f"Ripe: {len(ripe_ids)}"
        unripe_count_text = f"Unripe: {len(unripe_ids)}"
        full_text = f"{ripe_count_text} | {unripe_count_text}"

        # Measure the rendered text so it can be horizontally centered.
        (text_width, text_height), baseline = cv2.getTextSize(
            full_text, cv2.FONT_HERSHEY_SIMPLEX, 1, 2)
        text_x = (frame_width - text_width) // 2
        text_y = 40  # fixed position near the top of the frame

        cv2.putText(frame, full_text, (text_x, text_y),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)

        # Gradio expects RGB; convert from OpenCV's BGR before yielding.
        yield cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    print(f"Final Counts → Ripe: {len(ripe_ids)}, Unripe: {len(unripe_ids)}")
|
|
|
|
|
|
|
|
|
156 |
|
157 |
inputs_video = [
|
158 |
gr.components.Video(label="Input Video"),
|