import torch
import numpy as np
import gradio as gr
from PIL import Image
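
# Select the GPU when available and load a pretrained YOLOv5s model via torch.hub.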
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True).to(device)
model.conf = 0.5  # confidence threshold for detections
if device.type == 'cuda':
    model.half()  # half precision on GPU for faster inference
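
# Run YOLOv5 inference on a single frame (NumPy array from Gradio) and return the
# annotated image; on failure, fall back to returning the unmodified frame.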
def process_frame(image):
    if image is None:
        print("No image received!")
        return None
    try:
        print("Processing frame...")
        image_pil = Image.fromarray(image)
        with torch.no_grad():
            results = model(image_pil)
        rendered_images = results.render()
        processed_image = np.array(rendered_images[0]) if rendered_images else image
        print("Frame processed successfully!")
        return processed_image
    except Exception as e:
        print(f"Processing error: {e}")
        return image
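
# Gradio UI: one tab streams webcam frames through process_frame, the other runs
# detection on a single uploaded image.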
with gr.Blocks(title="Real-Time Object Detection") as app:
    gr.Markdown("# Real-Time Object Detection with Dual Input")
    with gr.Tabs():
        with gr.TabItem("📷 Live Camera"):
            with gr.Row():
                webcam_input = gr.Image(source="webcam", streaming=True, label="Live Feed")  # ✅ FIXED
                live_output = gr.Image(label="Processed Feed")
            webcam_input.stream(process_frame, inputs=webcam_input, outputs=live_output)  # ✅ FIXED
        with gr.TabItem("🖼️ Image Upload"):
            with gr.Row():
                upload_input = gr.Image(type="numpy", label="Upload Image")
                upload_output = gr.Image(label="Detection Result")
            upload_input.change(process_frame, upload_input, upload_output)

app.queue().launch(server_name="0.0.0.0", server_port=7860, share=False)