import gradio as gr
import torch
import cv2
from ultralytics import YOLO
from ultralytics.nn.tasks import DetectionModel

# PyTorch >= 2.6 defaults torch.load to weights_only=True, which rejects
# un-allowlisted pickled classes. Allowlisting DetectionModel lets the
# Ultralytics checkpoints deserialize safely.
def safe_load_yolo_model(path):
    torch.serialization.add_safe_globals([DetectionModel])
    return YOLO(path)

# Dictionary of model paths
model_paths = {
    'YOLOv11': './data/yolo11n.pt',
    'Crack & Pothole Detector': './data/best.pt',
    'Toll gates': './data/best2.pt'
}

# Load models into memory
models = {name: safe_load_yolo_model(path) for name, path in model_paths.items()}

# Per-model box colors (OpenCV uses BGR channel order)
model_colors = {
    'YOLOv11': (0, 255, 0),
    'Crack & Pothole Detector': (255, 0, 0),
    'Toll gates': (0, 0, 255)
}

def process_video(video, selected_model):
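    """Run the selected YOLO model(s) over every frame and save an annotated copy."""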
    cap = cv2.VideoCapture(video)
    fps = cap.get(cv2.CAP_PROP_FPS) or 30  # fall back to 30 if FPS metadata is missing
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    out = cv2.VideoWriter('output_video.mp4', cv2.VideoWriter_fourcc(*'mp4v'), fps, (frame_width, frame_height))
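    # Note: 'mp4v'-encoded MP4s may not preview inline in some browsers;
    # re-encoding to H.264 is a common workaround if playback fails.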

    # Run every model when "All" is selected, otherwise only the chosen one
    use_models = models if selected_model == 'All' else {selected_model: models[selected_model]}

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        for model_name, model in use_models.items():
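            # Run detection on the frame; each call returns a list of Results objects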
            results = model(frame)

            for result in results:
                for box in result.boxes:
                    x1, y1, x2, y2 = map(int, box.xyxy[0].tolist())
                    class_id = int(box.cls[0])
                    label = f"{model.names[class_id]} - {box.conf[0]:.2f}"
                    color = model_colors.get(model_name, (0, 255, 255))
                    cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
                    cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, color, 2)

        out.write(frame)

    cap.release()
    out.release()
    return 'output_video.mp4'

# Gradio Interface
iface = gr.Interface(
    fn=process_video,
    inputs=[
        gr.Video(label="Upload a Video"),
        gr.Dropdown(
            choices=["All"] + list(model_paths.keys()),
            label="Select Model(s)",
            value="All"
        )
    ],
    outputs=gr.Video(label="Processed Output"),
    live=False,
    title="Multi-Model YOLO Video Inference"
)

iface.launch()
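
# To expose the demo via a temporary public URL (e.g. when running in a
# notebook or on a remote host), launch with: iface.launch(share=True)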