Update app.py
app.py CHANGED
@@ -1,6 +1,7 @@
 import cv2
 import torch
 import gradio as gr
+import numpy as np
 from ultralytics import YOLO
 
 # Load YOLOv8 model
@@ -16,10 +17,8 @@ def process_video(video):
     frame_height = int(input_video.get(cv2.CAP_PROP_FRAME_HEIGHT))
     fps = input_video.get(cv2.CAP_PROP_FPS)
 
-    #
-
-    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
-    output_video = cv2.VideoWriter(output_video_path, fourcc, fps, (frame_width, frame_height))
+    # Create an empty list to store processed frames
+    processed_frames = []
 
     while True:
         # Read a frame from the video
@@ -33,19 +32,22 @@ def process_video(video):
         # The results object contains annotations for the frame
         annotated_frame = results[0].plot()  # Plot the frame with bounding boxes
 
-        #
-
+        # Convert the annotated frame to RGB format
+        annotated_frame_rgb = cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB)
+
+        # Append the frame to the list
+        processed_frames.append(annotated_frame_rgb)
 
     # Release resources
     input_video.release()
-    output_video.release()
 
-
+    # Return the processed frames as an output video in Gradio
+    return processed_frames
 
 # Create a Gradio interface for video upload
 iface = gr.Interface(fn=process_video,
                      inputs=gr.Video(label="Upload Video"),  # Updated line
-                     outputs=
+                     outputs=gr.Video(),  # This will display the output video directly
                      title="YOLOv8 Object Detection on Video",
                      description="Upload a video for object detection using YOLOv8")
 
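Note: Gradio's gr.Video output component generally expects a path to a playable video file rather than a raw list of NumPy frames, so a change like the one above may still need to encode the annotated frames before returning them. A minimal sketch of that encoding step, assuming the processed_frames list built in process_video; the helper name frames_to_video and the output_path default are illustrative and not part of this commit:

import cv2

def frames_to_video(frames, fps, output_path="annotated_output.mp4"):
    # Write a list of RGB frames back to an .mp4 file and return its path,
    # which a gr.Video output can then display.
    height, width = frames[0].shape[:2]
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    writer = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
    for frame in frames:
        # VideoWriter expects BGR, so convert back from the RGB frames.
        writer.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
    writer.release()
    return output_path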