Update app.py
app.py CHANGED
@@ -3,50 +3,55 @@ import cv2
 import numpy as np
 import gradio as gr
 from PIL import Image
-import random
 
-#
+# Device configuration
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True).to(device)
 
-#
-
+# Load YOLOv5x model
+model = torch.hub.load('ultralytics/yolov5', 'yolov5x', pretrained=True).to(device)
 
-# Generate
-
-
+# Generate distinct colors for each class using HSV color space
+def generate_distinct_colors(num_classes):
+    colors = {}
+    for i, class_name in enumerate(model.names):
+        # Use HSV to generate evenly distributed hues
+        hue = (i * 255 // num_classes)
+        # Convert HSV to BGR (OpenCV uses BGR)
+        hsv_color = np.uint8([[[hue, 255, 255]]])
+        bgr_color = cv2.cvtColor(hsv_color, cv2.COLOR_HSV2BGR)[0][0]
+        # Store as tuple for easier use
+        colors[class_name] = tuple(map(int, bgr_color))
+    return colors
+
+# Generate colors once at startup
+CLASS_COLORS = generate_distinct_colors(len(model.names))
 
 def preprocess_image(image):
-    """Convert numpy image to PIL format for YOLOv5 processing."""
     image = Image.fromarray(image)
     image = image.convert("RGB")
     return image
 
 def detect_objects(image):
-    """Detect objects in the image and draw bounding boxes with consistent colors."""
     image = preprocess_image(image)
-    results = model(
-    image = np.array(image)
-
-
+    results = model(image)
+    image = np.array(image)
+
+    # Process all detections at once
+    detections = results.xyxy[0]
+    for *box, conf, cls in detections:
         x1, y1, x2, y2 = map(int, box)
-        class_name =
-        confidence = conf.item() * 100
-
-
-
-
-        cv2.rectangle(image, (x1, y1), (x2, y2), color, 4)
-
-        # Display class label with confidence score
+        class_name = model.names[int(cls)]
+        confidence = conf.item() * 100
+        color = CLASS_COLORS[class_name]
+
+        # Draw rectangle and label
+        cv2.rectangle(image, (x1, y1), (x2, y2), color, 4)
         label = f"{class_name} ({confidence:.1f}%)"
-        cv2.putText(image, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX,
-
-
+        cv2.putText(image, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, color, 3, cv2.LINE_AA)
+
     return image
 
-
-# Create Gradio Interface
+# Gradio interface
 iface = gr.Interface(
    fn=detect_objects,
    inputs=gr.Image(type="numpy", label="Upload Image"),
@@ -57,5 +62,4 @@ iface = gr.Interface(
    examples=["spring_street_after.jpg", "pexels-hikaique-109919.jpg"]
 )
 
-
-iface.launch()
+iface.launch()
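For reference, a minimal standalone sketch of the per-class color idea used above: spread hues evenly in HSV and convert to BGR so every class keeps one fixed box color across images. It assumes only numpy and OpenCV (no model download); the class list and the helper name distinct_bgr_colors are illustrative, not part of app.py, and this sketch spreads hues over OpenCV's 8-bit hue range of 0-179.

import cv2
import numpy as np

def distinct_bgr_colors(class_names):
    """Assign each class a fixed BGR color by spreading hues evenly in HSV space."""
    colors = {}
    n = max(len(class_names), 1)
    for i, name in enumerate(class_names):
        hue = i * 180 // n                    # OpenCV's 8-bit hue channel spans 0-179
        hsv = np.uint8([[[hue, 255, 255]]])   # fully saturated, full brightness
        bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)[0][0]
        colors[name] = tuple(map(int, bgr))   # plain ints so cv2.rectangle accepts them
    return colors

# Illustrative names; the app itself iterates over model.names (the 80 COCO classes).
print(distinct_bgr_colors(["person", "bicycle", "car"]))
# {'person': (0, 0, 255), 'bicycle': (0, 255, 0), 'car': (255, 0, 0)}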