Aumkeshchy2003 committed (verified)
Commit ae0ee90 · 1 Parent(s): 5b68bf2

Update app.py

Files changed (1)
  1. app.py +46 -44
app.py CHANGED
@@ -12,7 +12,7 @@ os.makedirs("models", exist_ok=True)
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 print(f"Using device: {device}")
 
-# Load YOLOv5 Nano model
+# Load YOLOv5 Model
 model_path = Path("models/yolov5n.pt")
 if model_path.exists():
     print(f"Loading model from cache: {model_path}")
@@ -22,54 +22,57 @@ else:
     model = torch.hub.load("ultralytics/yolov5", "yolov5n", pretrained=True).to(device)
     torch.save(model.state_dict(), model_path)
 
-# Optimize model for speed
-model.conf = 0.3  # Lower confidence threshold
-model.iou = 0.3  # Non-Maximum Suppression IoU threshold
-model.classes = None  # Detect all classes
-
+# Configure model
+model.conf = 0.5
+model.iou = 0.5
+model.classes = None
 if device.type == "cuda":
-    model.half()  # Use FP16 for faster inference
+    model.half()
 else:
     torch.set_num_threads(os.cpu_count())
-
 model.eval()
 
-# Pre-generate colors for bounding boxes
+# Generate colors for bounding boxes
np.random.seed(42)
 colors = np.random.uniform(0, 255, size=(len(model.names), 3))
 
+def detect_objects(image):
+    if image is None:
+        return None
+
+    output_image = image.copy()
+    results = model(image, size=640)
+    detections = results.pred[0].cpu().numpy()
+
+    for *xyxy, conf, cls in detections:
+        x1, y1, x2, y2 = map(int, xyxy)
+        class_id = int(cls)
+        color = colors[class_id].tolist()
+        cv2.rectangle(output_image, (x1, y1), (x2, y2), color, 3, lineType=cv2.LINE_AA)
+        label = f"{model.names[class_id]} {conf:.2f}"
+        cv2.putText(output_image, label, (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 255, 255), 2)
+
+    return output_image
+
 def process_video(video_path):
     cap = cv2.VideoCapture(video_path)
-
     if not cap.isOpened():
         return "Error: Could not open video file."
-
+
     frame_width = int(cap.get(3))
     frame_height = int(cap.get(4))
     fps = cap.get(cv2.CAP_PROP_FPS)
-
     fourcc = cv2.VideoWriter_fourcc(*'mp4v')
     output_path = "output_video.mp4"
     out = cv2.VideoWriter(output_path, fourcc, fps, (frame_width, frame_height))
-
-    total_frames = 0
-    total_time = 0
-
+
     while cap.isOpened():
         ret, frame = cap.read()
         if not ret:
-            break  # Break if no more frames
-
-        start_time = time.time()
+            break
 
-        # Convert frame for YOLOv5
         img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
         results = model(img, size=640)
-
-        inference_time = time.time() - start_time
-        total_time += inference_time
-        total_frames += 1
-
         detections = results.pred[0].cpu().numpy()
 
         for *xyxy, conf, cls in detections:
@@ -79,28 +82,27 @@ def process_video(video_path):
             cv2.rectangle(frame, (x1, y1), (x2, y2), color, 3, lineType=cv2.LINE_AA)
             label = f"{model.names[class_id]} {conf:.2f}"
             cv2.putText(frame, label, (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 255, 255), 2)
-
-        # Calculate FPS
-        avg_fps = total_frames / total_time if total_time > 0 else 0
-        cv2.putText(frame, f"FPS: {avg_fps:.2f}", (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
-
+
         out.write(frame)
-
+
     cap.release()
     out.release()
-
     return output_path
 
 # Gradio Interface
-with gr.Blocks(title="Real-Time YOLOv5 Video Detection") as demo:
-    gr.Markdown("# Real-Time YOLOv5 Video Detection (30+ FPS)")
-
-    with gr.Row():
-        video_input = gr.Video(label="Upload Video")
-        process_button = gr.Button("Process Video")
-
-    video_output = gr.Video(label="Processed Video")
-
-    process_button.click(fn=process_video, inputs=video_input, outputs=video_output)
-
-    demo.launch()
+with gr.Blocks(title="YOLOv5 Object Detection") as demo:
+    gr.Markdown("# YOLOv5 Object Detection (Image & Video)")
+
+    with gr.Tab("Image Detection"):
+        img_input = gr.Image(label="Upload Image", type="numpy")
+        img_output = gr.Image(label="Detected Objects", type="numpy")
+        img_submit = gr.Button("Detect Objects")
+        img_submit.click(fn=detect_objects, inputs=img_input, outputs=img_output)
+
+    with gr.Tab("Video Detection"):
+        vid_input = gr.Video(label="Upload Video")
+        vid_output = gr.Video(label="Processed Video")
+        vid_submit = gr.Button("Process Video")
+        vid_submit.click(fn=process_video, inputs=vid_input, outputs=vid_output)
+
+    demo.launch()
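
A few reviewer-style notes on this change follow; none of them are code from the commit itself.

First, the model cache: both sides of the diff keep torch.save(model.state_dict(), model_path), and the branch that actually loads from models/yolov5n.pt falls outside the hunk context, so it is not visible here. A state_dict stores weights without the architecture, so any loader has to rebuild the model first and then restore the weights into it. A minimal sketch of such a branch, assuming the cached weights are restored into a freshly built hub model:

import torch
from pathlib import Path

# Sketch only: the real cache-load branch is not shown in this diff.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_path = Path("models/yolov5n.pt")

# pretrained=False builds the yolov5n architecture without downloading
# weights; load_state_dict then restores the locally cached ones.
model = torch.hub.load("ultralytics/yolov5", "yolov5n", pretrained=False).to(device)
model.load_state_dict(torch.load(model_path, map_location=device))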
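Second, colors across the two tabs: gr.Image(type="numpy") hands the new detect_objects an RGB array, while process_video draws on OpenCV's BGR frames, so the same row of colors renders as two different hues depending on the tab. If consistent colors matter, one option is to draw in BGR and convert back; draw_box_rgb below is a hypothetical helper sketching that idea, not code from this commit:

import cv2
import numpy as np

def draw_box_rgb(image_rgb, xyxy, color_bgr, label):
    # Convert once so the BGR color tuples used by the video path render
    # identically here; draw; then hand an RGB array back to Gradio.
    bgr = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2BGR)
    x1, y1, x2, y2 = map(int, xyxy)
    cv2.rectangle(bgr, (x1, y1), (x2, y2), color_bgr, 3, lineType=cv2.LINE_AA)
    cv2.putText(bgr, label, (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 255, 255), 2)
    return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)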
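Third, video playback: cv2.VideoWriter_fourcc(*'mp4v') writes MPEG-4 Part 2, which many browsers will not decode inside gr.Video's HTML player, so the processed file can come back as a blank player even though processing succeeded. Where the local OpenCV build ships an H.264 encoder, switching the fourcc usually fixes this; a sketch under that assumption:

import cv2

# Assumption: this OpenCV build has an H.264 ('avc1') encoder; if it
# does not, the writer fails to open and isOpened() returns False.
fourcc = cv2.VideoWriter_fourcc(*"avc1")
out = cv2.VideoWriter("output_video.mp4", fourcc, 30.0, (640, 480))
print("H.264 writer available:", out.isOpened())
out.release()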