nagasurendra committed on
Commit 04f4d0b · verified · 1 Parent(s): 5099bc6

Update app.py

Files changed (1): app.py (+29, -15)
app.py CHANGED
@@ -1,10 +1,13 @@
 import cv2
 import torch
 import gradio as gr
+import numpy as np
 from ultralytics import YOLO

-# Load YOLOv8 model
+# Load YOLOv8 model and set device (GPU if available)
+device = "cuda" if torch.cuda.is_available() else "cpu"
 model = YOLO('./data/best.pt')  # Path to your model
+model.to(device)

 # Define the function that processes the uploaded video
 def process_video(video):
@@ -16,10 +19,9 @@ def process_video(video):
     frame_height = int(input_video.get(cv2.CAP_PROP_FRAME_HEIGHT))
     fps = input_video.get(cv2.CAP_PROP_FPS)

-    # Define output video writer
-    output_video_path = "/mnt/data/output_video.mp4"  # Path to save the output video
-    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
-    output_video = cv2.VideoWriter(output_video_path, fourcc, fps, (frame_width, frame_height))
+    # Resize to reduce computation (optional)
+    new_width, new_height = 640, 480  # Resize to 640x480 resolution
+    frame_width, frame_height = new_width, new_height

     while True:
         # Read a frame from the video
@@ -27,27 +29,39 @@ def process_video(video):
         if not ret:
             break  # End of video

+        # Resize the frame to reduce computational load
+        frame = cv2.resize(frame, (new_width, new_height))
+
         # Perform inference on the frame
-        results = model(frame)
+        results = model(frame)  # Automatically uses GPU if available
+
+        # Check if any object was detected
+        if len(results[0].boxes) > 0:  # If there are detected objects
+            # Annotate the frame with bounding boxes
+            annotated_frame = results[0].plot()  # Plot the frame with bounding boxes
+
+            # Convert the annotated frame to RGB format for displaying
+            annotated_frame_rgb = cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB)

-        # The results object contains annotations for the frame
-        annotated_frame = results[0].plot()  # Plot the frame with bounding boxes
+            # Display the frame with detections
+            cv2.imshow("Detected Frame", annotated_frame_rgb)

-        # Write the annotated frame to the output video
-        output_video.write(annotated_frame)
+            # Wait for a key press (optional: press 'q' to quit early)
+            if cv2.waitKey(1) & 0xFF == ord('q'):
+                break

     # Release resources
     input_video.release()
-    output_video.release()
+    cv2.destroyAllWindows()

-    return output_video_path
+    return "Video processing complete!"

 # Create a Gradio interface for video upload
 iface = gr.Interface(fn=process_video,
                      inputs=gr.Video(label="Upload Video"),  # Updated line
-                     outputs="file",
-                     title="YOLOv8 Object Detection on Video",
-                     description="Upload a video for object detection using YOLOv8")
+                     outputs=gr.Textbox(label="Processing Status"),  # Output text showing processing status
+                     title="YOLOv8 Object Detection - Real-Time Display",
+                     description="Upload a video for object detection using YOLOv8. The frames with detections will be shown in real-time.")

 # Launch the interface
 iface.launch()
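
Note: cv2.imshow and cv2.waitKey need a desktop GUI session, so on a headless host (a typical Hugging Face Space, for example) the frames added in this commit will not actually be displayed and the calls may fail. A headless-friendly alternative is to stream the annotated frames back through Gradio itself. The sketch below is only an illustration of that idea, not part of this commit: the stream_video function name and the gr.Image output are choices made here for the example, and it assumes the same ./data/best.pt weights and Gradio's generator-based (yield) output streaming.

import cv2
import torch
import gradio as gr
from ultralytics import YOLO

# Assumed weights path, same as in the commit above
device = "cuda" if torch.cuda.is_available() else "cpu"
model = YOLO('./data/best.pt')
model.to(device)

def stream_video(video):
    # gr.Video passes the uploaded file's path as a string
    input_video = cv2.VideoCapture(video)
    while True:
        ret, frame = input_video.read()
        if not ret:
            break  # End of video
        frame = cv2.resize(frame, (640, 480))  # Same 640x480 downscale as the commit
        results = model(frame)
        if len(results[0].boxes) > 0:
            annotated = results[0].plot()  # BGR numpy array from Ultralytics
            # Yield each annotated frame; Gradio updates the Image output per yield
            yield cv2.cvtColor(annotated, cv2.COLOR_BGR2RGB)
    input_video.release()

iface = gr.Interface(fn=stream_video,
                     inputs=gr.Video(label="Upload Video"),
                     outputs=gr.Image(label="Detected Frame"),
                     title="YOLOv8 Object Detection - Streaming Frames",
                     description="Upload a video; frames with detections are streamed back as they are processed.")

iface.launch()

Keeping the function a generator also avoids writing any file to disk, sidestepping the /mnt/data/output_video.mp4 path that the removed VideoWriter version depended on.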