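# Real-time object detection Space: YOLOv5s (loaded via torch.hub) behind a Gradio Blocks UI
# with a streaming webcam tab and an image-upload tab.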
import torch
import numpy as np
import gradio as gr
from PIL import Image
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True).to(device)
model.conf = 0.5  # confidence threshold for reported detections
if device.type == 'cuda':
    model.half()  # use FP16 weights on GPU for faster inference
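# Note: the torch.hub model is wrapped in YOLOv5's AutoShape, which moves and casts inputs
# to the model's device and dtype, so FP16 weights work without converting frames by hand.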
def process_frame(image):
    # Run YOLOv5 detection on a single frame (numpy array from Gradio) and return the annotated frame.
    if image is None:
        print("No image received!")
        return None
    try:
        print("Processing frame...")
        image_pil = Image.fromarray(image)
        with torch.no_grad():
            results = model(image_pil)
        rendered_images = results.render()  # draws boxes in place; returns a list of numpy arrays
        processed_image = np.array(rendered_images[0]) if rendered_images else image
        print("Frame processed successfully!")
        return processed_image
    except Exception as e:
        print(f"Processing error: {e}")
        return image
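# --- Gradio UI: both tabs feed frames into the same process_frame callback ---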
with gr.Blocks(title="Real-Time Object Detection") as app:
    gr.Markdown("# Real-Time Object Detection with Dual Input")
    with gr.Tabs():
        with gr.TabItem("📷 Live Camera"):
            with gr.Row():
                webcam_input = gr.Image(source="webcam", streaming=True, label="Live Feed")  # ✅ FIXED
                live_output = gr.Image(label="Processed Feed")
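            # Stream event: re-runs process_frame on each new webcam frame while the feed is live.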
            webcam_input.stream(process_frame, inputs=webcam_input, outputs=live_output)  # ✅ FIXED
        with gr.TabItem("🖼️ Image Upload"):
            with gr.Row():
                upload_input = gr.Image(type="numpy", label="Upload Image")
                upload_output = gr.Image(label="Detection Result")
            upload_input.change(process_frame, upload_input, upload_output)
app.queue().launch(server_name="0.0.0.0", server_port=7860, share=False)
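# Note: source="webcam" / streaming=True is the Gradio 3.x Image signature; Gradio 4+ renamed the
# parameter to sources=["webcam"], so pin gradio<4 (e.g. in the Space's requirements.txt) if this
# code is kept as-is.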