nagasurendra committed
Commit 54d292a · verified · 1 Parent(s): 238ed07

Update app.py

Files changed (1)
app.py +35 -92
app.py CHANGED
@@ -1,105 +1,48 @@
-from ultralytics import YOLO
-import os
-
-# Define the correct path to config.yaml (in the root directory)
-config_path = './config.yaml'  # Adjust based on the actual path to your config.yaml
-
-# Load YOLO model
-model = YOLO("yolo11n.yaml")  # You can choose a different model type like yolo5n, yolo6n, etc.
-
-# Train the model
-results = model.train(data=config_path, epochs=1)
-
-# Define the save directory
-save_dir = './runs/detect/train/weights'
-
-# Create directory if it doesn't exist
-if not os.path.exists(save_dir):
-    os.makedirs(save_dir)
-
-# Save the model
-model.save(os.path.join(save_dir, 'best.pt'))
-
-# Print confirmation
-print("Model saved to:", os.path.join(save_dir, 'best.pt'))
-from ultralytics import YOLO
-import gradio as gr
+import torch
 import cv2
-import os
-import tempfile
+import numpy as np
 
-# Load the trained YOLO model
-model = YOLO("./runs/detect/train/weights/best.pt")  # Path to your trained model
-
-def process_video(video_path):
-    """
-    Process the input video using the YOLO model and save the output with bounding boxes.
-    Returns the path to the output video.
-    """
-    # Create a temporary file for the output video
-    output_path = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name
+# Load your model (assuming it is a PyTorch model)
+model = torch.load('.data/model.pt')
+model.eval()
 
-    # Open the input video
-    cap = cv2.VideoCapture(video_path)
-    if not cap.isOpened():
-        raise ValueError("Error opening video file")
+# Open video file (input video)
+input_video = cv2.VideoCapture('input_video.mp4')
 
-    # Get video properties
-    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-    fps = int(cap.get(cv2.CAP_PROP_FPS))
-    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+# Get the frame width, height, and frames per second (fps) from the input video
+frame_width = int(input_video.get(cv2.CAP_PROP_FRAME_WIDTH))
+frame_height = int(input_video.get(cv2.CAP_PROP_FRAME_HEIGHT))
+fps = input_video.get(cv2.CAP_PROP_FPS)
 
-    # Define the codec and create VideoWriter object
-    fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # Use 'mp4v' for MP4 format
-    out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
+# Define the output video writer
+fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # You can change this to any codec
+output_video = cv2.VideoWriter('output_video.mp4', fourcc, fps, (frame_width, frame_height))
 
-    # Process each frame
-    while cap.isOpened():
-        ret, frame = cap.read()
-        if not ret:
-            break
+while True:
+    # Read a frame from the input video
+    ret, frame = input_video.read()
+    if not ret:
+        break  # End of video
 
-        # Perform YOLO inference on the frame
-        results = model(frame)
+    # Preprocess the frame if necessary (depends on your model)
+    # For example, convert to tensor and normalize if required
+    frame_tensor = torch.tensor(frame).float().unsqueeze(0)  # Add batch dimension
 
-        # Draw bounding boxes and labels on the frame
-        annotated_frame = results[0].plot()  # Ultralytics provides a plot method to draw boxes
+    # Pass the frame through the model
+    with torch.no_grad():
+        output = model(frame_tensor)  # Adjust based on your model's requirements
 
-        # Write the annotated frame to the output video
-        out.write(annotated_frame)
+    # Postprocess the output if necessary (depends on your model's output format)
+    output_frame = output.squeeze(0).cpu().numpy()  # Remove batch dimension and convert to NumPy
 
-    # Release resources
-    cap.release()
-    out.release()
-    cv2.destroyAllWindows()
+    # Convert the model output to a valid image format (if necessary)
+    output_frame = np.uint8(output_frame)
 
-    return output_path
+    # Write the frame to the output video
+    output_video.write(output_frame)
 
-def gradio_interface(video):
-    """
-    Gradio interface function to handle video input and return the processed video.
-    """
-    if video is None:
-        return "Please upload a video file."
-
-    try:
-        # Process the video and get the output path
-        output_video_path = process_video(video)
-
-        # Return the output video for Gradio to display
-        return output_video_path
-    except Exception as e:
-        return f"Error processing video: {str(e)}"
-
-# Create Gradio interface
-iface = gr.Interface(
-    fn=gradio_interface,
-    inputs=gr.Video(label="Upload Video"),
-    outputs=gr.Video(label="Processed Video with Detections"),
-    title="YOLOv11 Object Detection on Video",
-    description="Upload a video to run object detection using a trained YOLOv11 model."
-)
+# Release resources
+input_video.release()
+output_video.release()
 
-# Launch the Gradio interface
-iface.launch()
+cv2.destroyAllWindows()
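
A note on the new inference loop: torch.load('.data/model.pt') unpickles a whole model object, which only works when the class defining the model is importable at load time (saving and loading a state_dict is the more portable pattern). The loop also feeds the model a raw (1, H, W, 3) float tensor in OpenCV's BGR channel order, while most PyTorch vision models expect a normalized (1, 3, H, W) RGB tensor. Below is a minimal sketch of that conventional pre/post-processing, assuming an image-to-image model working in [0, 1]; the layouts and scaling here are assumptions, not something this commit specifies.

import cv2
import numpy as np
import torch

def preprocess(frame: np.ndarray) -> torch.Tensor:
    # BGR uint8 HxWx3 frame -> 1x3xHxW float tensor in [0, 1] (assumed input layout)
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)    # OpenCV decodes frames as BGR
    tensor = torch.from_numpy(rgb).float() / 255.0  # scale pixel values to [0, 1]
    return tensor.permute(2, 0, 1).unsqueeze(0)     # HWC -> CHW, add batch dimension

def postprocess(output: torch.Tensor) -> np.ndarray:
    # 1x3xHxW float tensor in [0, 1] -> BGR uint8 frame for VideoWriter (assumed output layout)
    array = output.squeeze(0).permute(1, 2, 0).cpu().numpy()  # drop batch, CHW -> HWC
    array = np.clip(array * 255.0, 0, 255).astype(np.uint8)   # clamp before casting to uint8
    return cv2.cvtColor(array, cv2.COLOR_RGB2BGR)             # back to BGR for OpenCV

With these helpers the loop body reduces to output_video.write(postprocess(model(preprocess(frame)))) inside the torch.no_grad() block. Note also that the bare np.uint8(...) cast in the committed code wraps out-of-range values rather than clamping them.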