File size: 3,185 Bytes
9457184
c5cff55
9457184
1b65107
2c6959d
 
c5cff55
 
9457184
c5cff55
 
41ba03c
3e7703e
 
 
 
 
 
 
 
 
 
 
 
6cb50ff
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
from ultralytics import YOLO
import os

# Path to the dataset config YAML consumed by Ultralytics (train/val paths, class names).
config_path = './config.yaml'  # Adjust based on the actual path to your config.yaml

# Build a fresh (untrained) model from the architecture YAML.
model = YOLO("yolo11n.yaml")  # You can choose a different model type like yolo5n, yolo6n, etc.

# Train the model. NOTE(review): Ultralytics itself also writes best.pt/last.pt
# under runs/detect/train/weights during training; the explicit save below just
# pins a known filename.
results = model.train(data=config_path, epochs=1)

# Directory where the trained weights are written.
save_dir = './runs/detect/train/weights'

# exist_ok=True avoids the check-then-create race of `if not exists: makedirs`.
os.makedirs(save_dir, exist_ok=True)

# Save the model weights to a single, known location.
weights_path = os.path.join(save_dir, 'best.pt')
model.save(weights_path)

# Print confirmation
print("Model saved to:", weights_path)
from ultralytics import YOLO
import gradio as gr
import cv2
import os
import tempfile

# Load the trained YOLO model from the weights written by the training run
# (see the save path used after model.train above).
model = YOLO("./runs/detect/train/weights/best.pt")  # Path to your trained model

def process_video(video_path):
    """
    Run the YOLO model over every frame of the input video and write an
    annotated copy (bounding boxes drawn) to a temporary MP4 file.

    Args:
        video_path: Path to the input video file.

    Returns:
        Path to the annotated output video.

    Raises:
        ValueError: If the input video cannot be opened.
    """
    # Create a named temp file for the output and close the handle right away:
    # NamedTemporaryFile keeps the file open, which leaks a descriptor and can
    # block cv2.VideoWriter from reopening the path on Windows.
    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmp:
        output_path = tmp.name

    # Open the input video
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise ValueError("Error opening video file")

    try:
        # Source geometry/rate; the writer must match the input frame size.
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        # Some containers report 0 fps; fall back to a sane default so the
        # output file is still playable.
        fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30

        # Define the codec and create VideoWriter object
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # Use 'mp4v' for MP4 format
        out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
        try:
            # Process each frame until the stream is exhausted.
            while True:
                ret, frame = cap.read()
                if not ret:
                    break

                # Perform YOLO inference on the frame.
                results = model(frame)

                # Ultralytics' plot() returns the frame with boxes/labels drawn;
                # write it straight to the output video.
                out.write(results[0].plot())
        finally:
            out.release()
    finally:
        # Always release the capture, even if inference or writing failed.
        # (cv2.destroyAllWindows() was dropped: no GUI windows are created here.)
        cap.release()

    return output_path

def gradio_interface(video):
    """
    Adapter between the Gradio UI and `process_video`.

    Returns the annotated video's path on success, or a human-readable
    message string when no file was uploaded or processing failed.
    """
    # Nothing uploaded yet — report rather than crash.
    if video is None:
        return "Please upload a video file."

    # NOTE(review): on failure this returns a str into a gr.Video output;
    # Gradio may not render plain text there — confirm, or raise gr.Error.
    try:
        return process_video(video)
    except Exception as e:
        return f"Error processing video: {str(e)}"

# Wire the processing function into a simple upload -> annotated-video UI.
video_input = gr.Video(label="Upload Video")
video_output = gr.Video(label="Processed Video with Detections")

iface = gr.Interface(
    fn=gradio_interface,
    inputs=video_input,
    outputs=video_output,
    title="YOLOv11 Object Detection on Video",
    description="Upload a video to run object detection using a trained YOLOv11 model.",
)

# Start the local web server (blocks until interrupted).
iface.launch()