Spaces:
Runtime error
Runtime error
File size: 1,077 Bytes
4f23172 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 |
import torch
from torchvision import transforms
from PIL import Image
import gradio as gr
# Path to the trained YOLOv5 weights; adjust if the checkpoint lives elsewhere.
MODEL_PATH = 'best.pt'

# Fetch the YOLOv5 hub wrapper and load the custom checkpoint.
# NOTE(review): this downloads the ultralytics/yolov5 repo on first run — requires network.
model = torch.hub.load('ultralytics/yolov5', 'custom', path=MODEL_PATH)
# Define a function to process the image and make predictions
def detect_objects(image):
    """Run YOLOv5 inference on an image and return it with boxes drawn.

    Args:
        image: input image as a ``PIL.Image`` (what Gradio supplies for an
            image input with ``type="pil"``).

    Returns:
        ``PIL.Image`` copy of the input with detected bounding boxes and
        labels rendered on it.
    """
    # BUG FIX: the torch.hub YOLOv5 model is wrapped in AutoShape, which
    # performs its own resizing/normalization and accepts PIL images
    # directly. Converting to a normalized tensor first bypassed AutoShape,
    # so ``results`` was a raw prediction tensor without ``.render()`` —
    # causing the runtime error. Pass the PIL image straight through.
    results = model(image)

    # render() draws boxes/labels and returns a list of numpy arrays,
    # one per input image; we sent a single image, so take index 0.
    bbox_img = results.render()[0]
    return Image.fromarray(bbox_img)
# Create the Gradio interface.
# BUG FIX: ``gr.inputs`` / ``gr.outputs`` (and the ``shape=`` kwarg) were
# removed in Gradio 3.x/4.x, so the old code raised AttributeError at
# startup. Use the top-level component classes instead. ``type="pil"`` on
# the input ensures detect_objects receives a PIL.Image, matching the
# original preprocessing intent.
gr_interface = gr.Interface(
    fn=detect_objects,
    inputs=gr.Image(type="pil"),
    outputs=gr.Image(type="pil"),
    title="YOLO Object Detection",
    description="Upload an image to detect objects using a YOLO model.",
)

# Run the Gradio app only when executed as a script (not on import).
if __name__ == "__main__":
    gr_interface.launch()
|