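"""Gradio Space for ripe and unripe tomato detection with a YOLO model.

The app exposes two tabs: single-image inference and frame-by-frame video
inference. Detected ripe tomatoes are outlined in red, unripe ones in green.
"""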
import gradio as gr
import cv2
import requests
import os
from ultralytics import YOLO
file_urls = [
    'https://huggingface.co/spaces/iamsuman/ripe-and-unripe-tomatoes-detection/resolve/main/samples/riped_tomato_93.jpeg?download=true',
    'https://huggingface.co/spaces/iamsuman/ripe-and-unripe-tomatoes-detection/resolve/main/samples/unriped_tomato_18.jpeg?download=true',
    'https://huggingface.co/spaces/iamsuman/ripe-and-unripe-tomatoes-detection/resolve/main/samples/tomatoes.mp4?download=true',
]
def download_file(url, save_name):
    # Download the file only if it is not already cached locally.
    if not os.path.exists(save_name):
        response = requests.get(url)
        with open(save_name, 'wb') as f:
            f.write(response.content)

# Fetch the sample images and video used as Gradio examples.
for i, url in enumerate(file_urls):
    if 'mp4' in url:
        download_file(url, "video.mp4")
    else:
        download_file(url, f"image_{i}.jpg")
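# Load the YOLO detection weights (best.pt) stored in this Space.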
model = YOLO('best.pt')
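# Example inputs listed in the Gradio interfaces (downloaded above).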
path = [['image_0.jpg'], ['image_1.jpg']]
video_path = [['video.mp4']]
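# Image tab: run the model on a single image and return an annotated copy.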
def show_preds_image(image_path):
    image = cv2.imread(image_path)
    outputs = model.predict(source=image_path)
    results = outputs[0].cpu().numpy()
    names = model.model.names
    boxes = results.boxes
    # Iterate over detections (class, coordinates, and confidence) and draw them
    for box, conf, cls in zip(boxes.xyxy, boxes.conf, boxes.cls):
        x1, y1, x2, y2 = map(int, box)
        class_name = names[int(cls)]
        if class_name.lower() == 'ripe':
            color = (0, 0, 255)  # Red for ripe
        else:
            color = (0, 255, 0)  # Green for unripe
        # Draw rectangle around object
        cv2.rectangle(
            image,
            (x1, y1),
            (x2, y2),
            color=color,
            thickness=2,
            lineType=cv2.LINE_AA
        )
        # Display class label on top of rectangle
        label = f"{class_name.capitalize()}: {conf:.2f}"
        cv2.putText(
            image,
            label,
            (x1, y1 - 10),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.9,
            color,  # Use the same color as the rectangle
            2,
            cv2.LINE_AA
        )
    # Convert image to RGB (Gradio expects RGB format)
    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
inputs_image = [
    gr.components.Image(type="filepath", label="Input Image"),
]
outputs_image = [
    gr.components.Image(type="numpy", label="Output Image"),
]
interface_image = gr.Interface(
    fn=show_preds_image,
    inputs=inputs_image,
    outputs=outputs_image,
    title="Ripe And Unripe Tomatoes Detection",
    examples=path,
    cache_examples=False,
)
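# Video tab: run the model on each frame and stream annotated frames to the UI.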
def show_preds_video(video_path):
    cap = cv2.VideoCapture(video_path)
    while cap.isOpened():
        ret, frame = cap.read()
        if ret:
            frame_copy = frame.copy()
            outputs = model.predict(source=frame)
            results = outputs[0].cpu().numpy()
            boxes = results.boxes
            confidences = boxes.conf
            classes = boxes.cls
            names = model.model.names
            for box, conf, cls in zip(boxes.xyxy, confidences, classes):
                x1, y1, x2, y2 = map(int, box)
                # Determine color based on class
                class_name = names[int(cls)]
                if class_name.lower() == 'ripe':
                    color = (0, 0, 255)  # Red for ripe
                else:
                    color = (0, 255, 0)  # Green for unripe
                # Draw rectangle around object
                cv2.rectangle(
                    frame_copy,
                    (x1, y1),
                    (x2, y2),
                    color=color,
                    thickness=2,
                    lineType=cv2.LINE_AA
                )
                # Display class label on top of rectangle with capitalized class name
                label = f"{class_name.capitalize()}: {conf:.2f}"
                cv2.putText(
                    frame_copy,
                    label,
                    (x1, y1 - 10),  # Position slightly above the top of the rectangle
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.5,
                    color,  # Use the same color as the rectangle
                    1,
                    cv2.LINE_AA
                )
            # Convert frame to RGB (Gradio expects RGB format)
            yield cv2.cvtColor(frame_copy, cv2.COLOR_BGR2RGB)
        else:
            break
    cap.release()
inputs_video = [
    gr.components.Video(label="Input Video"),
]
outputs_video = [
    gr.components.Image(type="numpy", label="Output Image"),
]
interface_video = gr.Interface(
    fn=show_preds_video,
    inputs=inputs_video,
    outputs=outputs_video,
    title="Ripe And Unripe Tomatoes Detection",
    examples=video_path,
    cache_examples=False,
)
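# Combine both interfaces into a tabbed app and launch it with queuing enabled.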
gr.TabbedInterface(
    [interface_image, interface_video],
    tab_names=['Image inference', 'Video inference']
).queue().launch()