Update app.py
app.py
CHANGED
@@ -1,49 +1,53 @@
-import torch
-import cv2
-import
-model = torch.load('./data/model.pt', weights_only=False)
-model
-# Get
-frame_width = int(input_video.get(cv2.CAP_PROP_FRAME_WIDTH))
-frame_height = int(input_video.get(cv2.CAP_PROP_FRAME_HEIGHT))
-fps = input_video.get(cv2.CAP_PROP_FPS)
-# Define
-while True:
-frame_tensor = torch.tensor(frame).float().unsqueeze(0)  # Add batch dimension
-output = model(frame_tensor)  # Adjust based on your model's requirements
-output_video.write(output_frame)
+import cv2
+import torch
+import gradio as gr
+from ultralytics import YOLO
+
+# Load YOLOv8 model
+model = YOLO('./data/model.pt')  # Path to your model
+
+# Define the function that processes the uploaded video
+def process_video(video):
+    # Open the uploaded video; Gradio passes the path of the uploaded file
+    input_video = cv2.VideoCapture(video)
+
+    # Get frame width, height, and fps from the input video
+    frame_width = int(input_video.get(cv2.CAP_PROP_FRAME_WIDTH))
+    frame_height = int(input_video.get(cv2.CAP_PROP_FRAME_HEIGHT))
+    fps = input_video.get(cv2.CAP_PROP_FPS)
+
+    # Define the output video writer
+    output_video_path = "output_video.mp4"  # Path to save the annotated video
+    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+    output_video = cv2.VideoWriter(output_video_path, fourcc, fps, (frame_width, frame_height))
+
+    while True:
+        # Read a frame from the video
+        ret, frame = input_video.read()
+        if not ret:
+            break  # End of video
+
+        # Perform inference on the frame
+        results = model(frame)
+
+        # Extract the annotated image (frame with bounding boxes) from the results
+        annotated_frame = results[0].plot()
+
+        # Write the annotated frame to the output video
+        output_video.write(annotated_frame)
+
+    # Release resources
+    input_video.release()
+    output_video.release()
+
+    return output_video_path
+
+# Create a Gradio interface for video upload
+iface = gr.Interface(fn=process_video,
+                     inputs=gr.Video(label="Upload Video"),
+                     outputs="file",
+                     title="YOLOv8 Object Detection on Video",
+                     description="Upload a video for object detection using YOLOv8")
+
+# Launch the interface
+iface.launch()