Update app.py
app.py CHANGED

@@ -1,5 +1,4 @@
 import torch
-import cv2
 import numpy as np
 import gradio as gr
 from PIL import Image
@@ -7,57 +6,34 @@ from PIL import Image
 # Device configuration
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 
-# Load
-model = torch.hub.load('ultralytics/yolov5', '
+# Load YOLOv5s model (smallest and fastest variant)
+model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True).to(device)
 
-#
-
-
-    for i, class_name in enumerate(model.names):
-        # Use HSV to generate evenly distributed hues
-        hue = (i * 255 // num_classes)
-        # Convert HSV to BGR (OpenCV uses BGR)
-        hsv_color = np.uint8([[[hue, 255, 255]]])
-        bgr_color = cv2.cvtColor(hsv_color, cv2.COLOR_HSV2BGR)[0][0]
-        # Store as tuple for easier use
-        colors[class_name] = tuple(map(int, bgr_color))
-    return colors
-
-# Generate colors once at startup
-CLASS_COLORS = generate_distinct_colors(len(model.names))
-
-def preprocess_image(image):
-    image = Image.fromarray(image)
-    image = image.convert("RGB")
-    return image
+# Enable half-precision for CUDA devices
+if device.type == 'cuda':
+    model.half()
 
 def detect_objects(image):
-
-
-
+    # Convert numpy array to PIL Image
+    image_pil = Image.fromarray(image)
+
+    # Perform inference without gradient calculation
+    with torch.no_grad():
+        results = model(image_pil)
 
-    #
-
-    for *box, conf, cls in detections:
-        x1, y1, x2, y2 = map(int, box)
-        class_name = model.names[int(cls)]
-        confidence = conf.item() * 100
-        color = CLASS_COLORS[class_name]
-
-        # Draw rectangle and label
-        cv2.rectangle(image, (x1, y1), (x2, y2), color, 4)
-        label = f"{class_name} ({confidence:.1f}%)"
-        cv2.putText(image, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, color, 3, cv2.LINE_AA)
+    # Render detections using optimized YOLOv5 method
+    rendered_images = results.render()
 
-
+    # Return the first rendered image as numpy array
+    return np.array(rendered_images[0]) if rendered_images else image
 
 # Gradio interface
 iface = gr.Interface(
     fn=detect_objects,
     inputs=gr.Image(type="numpy", label="Upload Image"),
     outputs=gr.Image(type="numpy", label="Detected Objects"),
-    title="Object Detection with
-    description="
+    title="High-Speed Object Detection with YOLOv5s",
+    description="Optimized for speed using YOLOv5s and GPU acceleration.",
     allow_flagging="never",
     examples=["spring_street_after.jpg", "pexels-hikaique-109919.jpg"]
 )
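
Note on the change: the new detect_objects leans entirely on the YOLOv5 hub API, since results.render() returns the input image with boxes and labels already drawn; that is why the manual cv2 drawing loop and the per-class color table could be removed. A minimal standalone sketch of that API, assuming the hub weights download succeeds and using one of the example images as an illustrative path:

import torch
from PIL import Image

# Same hub call as the updated app.py (downloads the yolov5s weights on first use)
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)

img = Image.open("pexels-hikaique-109919.jpg")  # illustrative: any RGB image works
with torch.no_grad():
    results = model(img)

# Annotated copies of the inputs as numpy arrays; this is what detect_objects returns
annotated = results.render()[0]
print(annotated.shape)

# The raw detections remain available if needed, e.g. as a pandas DataFrame
# with xmin, ymin, xmax, ymax, confidence, class and name columns
print(results.pandas().xyxy[0])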
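
Note on the removed code: the left-hand side of the diff is truncated by the viewer around the old color helper. Judging from the surviving loop body and the call generate_distinct_colors(len(model.names)), the removed helper plausibly read as sketched below; the signature and the empty-dict initialization are reconstructions, not text taken from the diff.

import cv2
import numpy as np

def generate_distinct_colors(num_classes):
    # Reconstruction: only the loop body is visible in the diff above;
    # the signature and the colors = {} line are assumed.
    colors = {}
    for i, class_name in enumerate(model.names):  # model is the module-level YOLOv5 model
        # Use HSV to generate evenly distributed hues
        hue = (i * 255 // num_classes)
        # Convert HSV to BGR (OpenCV uses BGR)
        hsv_color = np.uint8([[[hue, 255, 255]]])
        bgr_color = cv2.cvtColor(hsv_color, cv2.COLOR_HSV2BGR)[0][0]
        # Store as tuple for easier use
        colors[class_name] = tuple(map(int, bgr_color))
    return colors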
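
Note on launching: the hunks stop at the gr.Interface(...) constructor, so the code that actually starts the app is not shown and is presumably unchanged further down app.py. Assuming the usual Gradio pattern, that tail would look like:

# Assumed, not part of the shown diff: start the Gradio server when the script is run.
if __name__ == "__main__":
    iface.launch()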