import torch
import cv2
import numpy as np
import gradio as gr
from PIL import Image
import random
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = torch.hub.load('ultralytics/yolov5', 'yolov5x', pretrained=True).to(device) # Load YOLOv5x model
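# Note: torch.hub.load downloads and caches the YOLOv5x weights on first run, so startup may take a moment.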
CLASS_NAMES = model.names
random.seed(42)
# model.names can be a list or an index-keyed dict depending on the YOLOv5 release,
# so key the colour table by class index to stay compatible with both.
CLASS_COLORS = {i: (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) for i in range(len(CLASS_NAMES))}
def preprocess_image(image):
    # Gradio supplies the upload as a NumPy array; convert it to an RGB PIL image for YOLOv5.
    image = Image.fromarray(image)
    image = image.convert("RGB")
    return image
def detect_objects(image):
    image = preprocess_image(image)
    results = model(image)  # run inference
    image = np.array(image)
    # results.xyxy[0] holds one row per detection: x1, y1, x2, y2, confidence, class index
    for *box, conf, cls in results.xyxy[0]:
        x1, y1, x2, y2 = map(int, box)
        class_name = CLASS_NAMES[int(cls)]
        confidence = conf.item() * 100
        color = CLASS_COLORS[int(cls)]
        cv2.rectangle(image, (x1, y1), (x2, y2), color, 4)
        label = f"{class_name} ({confidence:.1f}%)"
        cv2.putText(image, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX,
                    1, color, 3, cv2.LINE_AA)  # larger text for readability
    return image
iface = gr.Interface(
    fn=detect_objects,
    inputs=gr.Image(type="numpy", label="Upload Image"),
    outputs=gr.Image(type="numpy", label="Detected Objects"),
    title="Object Detection with YOLOv5",
    description="Use webcam or upload an image to detect objects.",
    allow_flagging="never",
    examples=["spring_street_after.jpg", "pexels-hikaique-109919.jpg"],
)
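# Note: the example image files listed above are assumed to sit next to this script
# (e.g. in the Space repository); Gradio's examples gallery will not load them otherwise.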
iface.launch()