from ultralytics import YOLO
import torch
import cv2
import numpy as np
import gradio as gr
from PIL import Image

# Load YOLOv5 model
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = YOLO("yolov5s.pt")  # Load pre-trained YOLOv5s model
model.to(device)
model.eval()  # switch to inference mode (affects dropout / batch-norm layers)

# Load COCO class labels
CLASS_NAMES = model.names  # YOLOv5's built-in class names

def preprocess_image(image):
    """Convert the NumPy array received from Gradio into an RGB PIL image."""
    image = Image.fromarray(image)
    image = image.convert("RGB")
    return image

def detect_objects(image):
    """Run YOLOv5 on the input image and return it with detections drawn on."""
    image = preprocess_image(image)
    results = model.predict(image)  # run YOLOv5 inference

    # Draw each detection on a NumPy copy of the image
    image = np.array(image)
    for result in results:
        for box, cls in zip(result.boxes.xyxy, result.boxes.cls):
            x1, y1, x2, y2 = map(int, box[:4])
            class_name = CLASS_NAMES[int(cls)]  # Get class name

            # Draw bounding box
            cv2.rectangle(image, (x1, y1), (x2, y2), (255, 0, 0), 2)

            # Put class label
            cv2.putText(image, class_name, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 
                        0.5, (255, 0, 0), 2, cv2.LINE_AA)

    return image

# Gradio UI: upload an image and view the annotated detections
iface = gr.Interface(
    fn=detect_objects,
    inputs=gr.Image(type="numpy"),
    outputs=gr.Image(type="numpy"),
    live=True,
)

if __name__ == "__main__":
    iface.launch()
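
# Optional headless check (a minimal sketch, not part of the original script;
# "sample.jpg" is a placeholder path you would supply yourself):
#
#   img = np.array(Image.open("sample.jpg").convert("RGB"))
#   annotated = detect_objects(img)
#   cv2.imwrite("annotated.jpg", cv2.cvtColor(annotated, cv2.COLOR_RGB2BGR))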