import gradio as gr
from ultralytics import YOLO
import cv2
import os
import torch
import numpy as np
# Load the pre-trained YOLOv8 nano model
model = YOLO("yolov8n.pt")

# Object detection on a single image
def detect_objects_image(img):
    results = model(img)  # Run inference
    annotated_frame = results[0].plot()  # Draw boxes and labels; plot() returns a BGR array
    return cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB)  # Convert to RGB for the Gradio image output
import tempfile

# Object detection on a video, processed frame by frame
def detect_objects_video(video):
    # Gradio may pass a file object or a plain path string; handle both cases
    video_path = video.name if hasattr(video, 'name') else video
    temp_output = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
    cap = cv2.VideoCapture(video_path)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(temp_output.name, fourcc, fps, (width, height))
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        results = model(frame)               # Run inference on the BGR frame
        annotated_frame = results[0].plot()  # Annotated frame stays BGR, as VideoWriter expects
        out.write(annotated_frame)
    cap.release()
    out.release()
    return temp_output.name
# Gradio interface
demo = gr.Blocks(theme='NoCrypt/miku')

image_input = gr.Image(type='numpy', label="Image to analyze")
image_output = gr.Image(type='numpy', label="Annotated image")
video_input = gr.Video(label="Video to analyze")
video_output = gr.Video(label="Annotated video")

interface1 = gr.Interface(fn=detect_objects_image, inputs=image_input, outputs=image_output, title="Image Detection")
interface2 = gr.Interface(fn=detect_objects_video, inputs=video_input, outputs=video_output, title="Video Detection")

with demo:
    gr.TabbedInterface([interface1, interface2], ['Image detection', 'Video detection'])

demo.launch()
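
# Assumed local setup, inferred from the imports above (no requirements file is shown here):
#   pip install gradio ultralytics opencv-python
#   python app.py   # assuming this script is saved as app.py, per the usual Spaces convention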