nagasurendra committed
Commit a03d512 · verified · 1 Parent(s): a668c53

Update app.py

Files changed (1)
  1. app.py +40 -36
app.py CHANGED
@@ -1,49 +1,53 @@
- import torch
  import cv2
- import numpy as np
-
- # Load your model (assuming it is a PyTorch model)
- model = torch.load('./data/model.pt', weights_only=False)
-
- model.eval()
-
- # Open video file (input video)
- input_video = cv2.VideoCapture('input_video.mp4')
-
- # Get the frame width, height, and frames per second (fps) from the input video
- frame_width = int(input_video.get(cv2.CAP_PROP_FRAME_WIDTH))
- frame_height = int(input_video.get(cv2.CAP_PROP_FRAME_HEIGHT))
- fps = input_video.get(cv2.CAP_PROP_FPS)
-
- # Define the output video writer
- fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # You can change this to any codec
- output_video = cv2.VideoWriter('output_video.mp4', fourcc, fps, (frame_width, frame_height))
-
- while True:
-     # Read a frame from the input video
-     ret, frame = input_video.read()
-     if not ret:
-         break  # End of video
-
-     # Preprocess the frame if necessary (depends on your model)
-     # For example, convert to tensor and normalize if required
-     frame_tensor = torch.tensor(frame).float().unsqueeze(0)  # Add batch dimension
-
-     # Pass the frame through the model
-     with torch.no_grad():
-         output = model(frame_tensor)  # Adjust based on your model's requirements
-
-     # Postprocess the output if necessary (depends on your model's output format)
-     output_frame = output.squeeze(0).cpu().numpy()  # Remove batch dimension and convert to NumPy
-
-     # Convert the model output to a valid image format (if necessary)
-     output_frame = np.uint8(output_frame)
-
-     # Write the frame to the output video
-     output_video.write(output_frame)
-
- # Release resources
- input_video.release()
- output_video.release()
-
- cv2.destroyAllWindows()
  import cv2
+ import torch
+ import gradio as gr
+ from ultralytics import YOLO
+
+ # Load YOLOv8 model
+ model = YOLO('./data/model.pt')  # Path to your model
+
+ # Define the function that processes the uploaded video
+ def process_video(video):
+     # Gradio's Video input provides the uploaded file as a filepath
+     input_video = cv2.VideoCapture(video)
+
+     # Get frame width, height, and fps from input video
+     frame_width = int(input_video.get(cv2.CAP_PROP_FRAME_WIDTH))
+     frame_height = int(input_video.get(cv2.CAP_PROP_FRAME_HEIGHT))
+     fps = input_video.get(cv2.CAP_PROP_FPS)
+
+     # Define output video writer
+     output_video_path = "output_video.mp4"  # Path to save the output video
+     fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+     output_video = cv2.VideoWriter(output_video_path, fourcc, fps, (frame_width, frame_height))
+
+     while True:
+         # Read a frame from the video
+         ret, frame = input_video.read()
+         if not ret:
+             break  # End of video
+
+         # Perform inference on the frame
+         results = model(frame)
+
+         # Extract the annotated frame (Results.plot draws the bounding boxes)
+         annotated_frame = results[0].plot()
+
+         # Write the annotated frame to the output video
+         output_video.write(annotated_frame)
+
+     # Release resources
+     input_video.release()
+     output_video.release()
+
+     return output_video_path
+
+ # Create a Gradio interface for video upload
+ iface = gr.Interface(fn=process_video,
+                      inputs=gr.Video(label="Upload Video"),
+                      outputs="file",
+                      title="YOLOv8 Object Detection on Video",
+                      description="Upload a video for object detection using YOLOv8")
+
+ # Launch the interface
+ iface.launch()
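
For reference, a minimal standalone sketch of the Ultralytics inference and annotation step that process_video() relies on, runnable without Gradio. The weights path comes from the commit; frame.jpg and annotated.jpg are hypothetical local files used only for this check.

# Standalone check of the frame-annotation step used in the new app.py.
# Assumes the same weights at ./data/model.pt and a test image frame.jpg
# (hypothetical) in the working directory.
import cv2
from ultralytics import YOLO

model = YOLO('./data/model.pt')           # same weights the app loads
frame = cv2.imread('frame.jpg')           # BGR ndarray, like a decoded video frame
results = model(frame)                    # returns a list of ultralytics Results
annotated = results[0].plot()             # ndarray with boxes and labels drawn
print(annotated.shape)                    # same height/width as the input frame
for box in results[0].boxes:              # per-detection class, confidence, coords
    print(int(box.cls), float(box.conf), box.xyxy.tolist())
cv2.imwrite('annotated.jpg', annotated)

Since plot() returns a BGR image matching the input frame's size, writing the annotated frames back out with cv2.VideoWriter behaves exactly as in the loop above.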