# Real-time object detection demo: YOLOv5s + Gradio (webcam stream and image upload).
import torch
import numpy as np
import gradio as gr
from PIL import Image
# Device configuration: prefer GPU when available, otherwise run on CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Load YOLOv5s model from torch.hub (downloads weights on first run; needs network).
# The hub model is wrapped in AutoShape, so it accepts PIL images / numpy arrays directly.
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True).to(device)
# Performance optimizations
model.conf = 0.5 # Confidence threshold (adjust for speed/accuracy balance)
if device.type == 'cuda':
    # FP16 precision halves memory and speeds up inference on GPU.
    # NOTE(review): only safe on CUDA — half precision on CPU is very slow/unsupported.
    model.half() # FP16 precision
def process_frame(image):
    """Run YOLOv5 inference on one frame and return the annotated image.

    Returns None when no frame was supplied, the annotated frame as a
    numpy array on success, or the untouched input frame if inference
    fails for any reason (best-effort behavior for a live feed).
    """
    if image is None:
        return None
    try:
        frame = Image.fromarray(image)
        # Inference only — no gradients needed.
        with torch.no_grad():
            detections = model(frame)
        annotated = detections.render()
        if not annotated:
            return image
        return np.array(annotated[0])
    except Exception as e:
        # Best-effort: log and fall back to the raw frame rather than
        # breaking the stream.
        print(f"Processing error: {e}")
        return image
def _set_confidence(conf):
    """Apply the slider value to the detector's confidence threshold."""
    model.conf = float(conf)

# Build the Gradio UI.
# FIX(review): the original used gr.Video(...).change(...) for the live feed,
# but Video events deliver a file path while process_frame expects a numpy
# frame — every live frame fell into the error fallback. A streaming webcam
# gr.Image with a .stream event delivers numpy frames directly. The original
# output gr.Image(streaming=True) was also not a valid output configuration.
with gr.Blocks(title="Real-Time Object Detection") as app:
    gr.Markdown("# Real-Time Object Detection with Dual Input")
    gr.Markdown("Supports live webcam streaming and image uploads")
    with gr.Tabs():
        with gr.TabItem("📷 Live Camera"):
            with gr.Row():
                # Webcam source streams numpy frames to process_frame.
                webcam_input = gr.Image(sources=["webcam"], streaming=True,
                                        type="numpy", label="Live Feed")
                live_output = gr.Image(label="Processed Feed")
            # .stream fires per frame while the webcam is active.
            webcam_input.stream(process_frame, webcam_input, live_output)
        with gr.TabItem("🖼️ Image Upload"):
            with gr.Row():
                upload_input = gr.Image(type="numpy", label="Upload Image")
                upload_output = gr.Image(label="Detection Result")
            upload_input.change(process_frame, upload_input, upload_output)
    gr.Markdown("Performance Settings")
    with gr.Accordion("Advanced Settings", open=False):
        conf_slider = gr.Slider(minimum=0.1, maximum=0.9, value=0.5,
                                label="Confidence Threshold", interactive=True)
        # FIX(review): the slider was dead UI — wire it to model.conf.
        conf_slider.change(_set_confidence, conf_slider, None)
        # NOTE(review): FP16 is chosen once at model load; toggling at runtime
        # would require reloading the model, so this checkbox is display-only.
        gr.Checkbox(label="Enable FP16 Acceleration", value=True)

# Configure queue and launch on all interfaces, default Gradio port.
app.queue().launch(
    server_name="0.0.0.0",
    server_port=7860,
    share=False,
)