Update app.py
app.py
CHANGED
@@ -1,24 +1,51 @@
 import torch
 import numpy as np
 import gradio as gr
+import cv2
+import time
 from PIL import Image

-
+# Check device availability
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

-
+# Load YOLOv5 model
+model = torch.hub.load("ultralytics/yolov5", "yolov5x", pretrained=True).to(device)

-if device.type ==
-    model.half()
+if device.type == "cuda":
+    model.half()  # Use FP16 for performance boost
+
+# Print available object classes
+print(f"Model loaded with {len(model.names)} classes: {model.names}")
+
+# Assign random colors to each class for bounding boxes
+colors = {i: [int(c) for c in np.random.randint(0, 255, 3)] for i in range(len(model.names))}

 def detect_objects(image):
+    start_time = time.time()  # Start FPS measurement
+
     image_pil = Image.fromarray(image)
-
+
     with torch.no_grad():
-        results = model(image_pil)
-
-    rendered_images = results.render()
-
-
+        results = model(image_pil, conf=0.3, iou=0.3)  # Apply NMS with IoU = 0.3
+
+    rendered_images = results.render()  # Get rendered image with default YOLOv5 visualization
+
+    # Get bounding boxes and draw color-coded boxes
+    img_cv = np.array(rendered_images[0]) if rendered_images else image
+
+    for det in results.xyxy[0]:  # Bounding box format: x1, y1, x2, y2, conf, cls
+        x1, y1, x2, y2, conf, cls = map(int, det[:6])
+        label = f"{model.names[cls]}: {conf:.2f}"
+
+        cv2.rectangle(img_cv, (x1, y1), (x2, y2), colors[cls], 2)
+        cv2.putText(img_cv, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, colors[cls], 2)
+
+    # FPS Calculation
+    end_time = time.time()
+    fps = 1 / (end_time - start_time)
+    print(f"FPS: {fps:.2f}")
+
+    return img_cv

 # Gradio interface
 iface = gr.Interface(
@@ -26,9 +53,9 @@ iface = gr.Interface(
     inputs=gr.Image(type="numpy", label="Upload Image"),
     outputs=gr.Image(type="numpy", label="Detected Objects"),
     title="Object Detection with YOLOv5",
-    description="Use webcam or upload an image to detect objects.",
+    description="Use webcam or upload an image to detect objects. Optimized for speed and accuracy!",
     allow_flagging="never",
-    examples=["spring_street_after.jpg", "pexels-hikaique-109919.jpg"]
+    examples=["spring_street_after.jpg", "pexels-hikaique-109919.jpg"],
 )

-iface.launch()
+iface.launch()
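
Review note: two details in the new detect_objects body may be worth double-checking. As far as I can tell, the AutoShape wrapper returned by torch.hub.load("ultralytics/yolov5", ...) takes its confidence and NMS IoU thresholds from the model.conf and model.iou attributes rather than from keyword arguments to the call, so model(image_pil, conf=0.3, iou=0.3) would likely raise a TypeError. Also, map(int, det[:6]) casts the confidence score to an integer, so the drawn label shows 0 or 1 instead of the actual score. Below is a minimal sketch of the same function with only those two spots adjusted; it reuses the imports, model, and colors defined earlier in this file and the 0.3 thresholds from the diff, and is not meant as the definitive implementation.

# Sketch under the assumptions above: thresholds set as model attributes,
# confidence kept as a float for the label.
model.conf = 0.3  # confidence threshold used by YOLOv5's built-in NMS
model.iou = 0.3   # NMS IoU threshold

def detect_objects(image):
    start_time = time.time()  # Start FPS measurement

    image_pil = Image.fromarray(image)

    with torch.no_grad():
        results = model(image_pil)

    rendered_images = results.render()  # YOLOv5's default visualization
    img_cv = np.array(rendered_images[0]) if rendered_images else image

    for det in results.xyxy[0]:  # x1, y1, x2, y2, conf, cls
        x1, y1, x2, y2 = map(int, det[:4])  # box corners as pixel ints
        conf = float(det[4])                # keep the score as a float
        cls = int(det[5])
        label = f"{model.names[cls]}: {conf:.2f}"

        cv2.rectangle(img_cv, (x1, y1), (x2, y2), colors[cls], 2)
        cv2.putText(img_cv, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, colors[cls], 2)

    fps = 1 / (time.time() - start_time)  # FPS for this single call
    print(f"FPS: {fps:.2f}")

    return img_cv

This keeps the rest of the behaviour unchanged: YOLOv5's own rendered image is still used as the base, and the colour-coded boxes and FPS print are applied on top of it.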