AbhishekShrimali committed on
Commit
e4b2521
·
verified ·
1 Parent(s): 3e795e7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +62 -38
app.py CHANGED
@@ -4,51 +4,50 @@ import streamlit as st
4
  from datetime import datetime
5
  from tempfile import NamedTemporaryFile
6
  from ultralytics import YOLO
7
- import os
8
- os.system("pip install opencv-python")
9
- import cv2
10
 
11
-
12
- # Initialize YOLOv8 model
13
- model = YOLO("best.pt") # Replace with your YOLOv8 model path
 
 
 
 
 
 
 
 
 
 
 
 
 
14
 
15
  # Output folder for storing detected clips
16
  output_folder = "detected_clips"
17
  os.makedirs(output_folder, exist_ok=True)
18
 
19
- # Initialize session state variables
 
 
 
 
 
20
  if "log" not in st.session_state:
21
  st.session_state.log = ""
22
  if "stop_camera" not in st.session_state:
23
  st.session_state.stop_camera = False
24
 
25
- # Streamlit UI
26
- st.title("Weapon Detection System")
27
- st.write("This is a weapon detection system using YOLOv8 and OpenCV.")
28
-
29
- # Placeholder for video feed (Camera Stream)
30
- frame_placeholder = st.empty()
31
-
32
- # Placeholder for logs (⚠ Now defined before use)
33
- log_placeholder = st.empty()
34
-
35
- # Function to write log messages in real-time
36
  def write_log(message):
37
  st.session_state.log += message + "\n"
38
- # Display logs in a scrolling text area
39
  log_placeholder.text_area("Detection Log", value=st.session_state.log, height=300, max_chars=5000, disabled=True)
40
 
41
- # Function to process video feed and detect weapons
42
  def process_video_feed(video_file=None):
43
  st.session_state.stop_camera = False
44
 
45
- # Open webcam or video file
46
  if video_file:
47
- # Save the uploaded video to a temporary file
48
  with NamedTemporaryFile(delete=False, suffix=".mp4") as temp_file:
49
  temp_file.write(video_file.read())
50
  temp_file_path = temp_file.name
51
-
52
  cap = cv2.VideoCapture(temp_file_path)
53
  else:
54
  cap = cv2.VideoCapture(0)
@@ -67,24 +66,51 @@ def process_video_feed(video_file=None):
67
  if not ret:
68
  break
69
 
70
- # Run YOLOv8 inference
71
- results = model(frame, verbose=False)
72
- annotated_frame = results[0].plot()
73
 
 
74
  detected = False
75
  weapon_name = ""
76
  now = datetime.now()
77
 
78
- # Check for weapon detection
79
- for result in results[0].boxes:
80
- class_id = int(result.cls[0])
81
- confidence = float(result.conf[0])
82
- if class_id == 0 and confidence > 0.5:
83
- detected = True
84
- weapon_name = "Weapon"
85
- break
86
-
87
- # If weapon detected, start saving the video
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
88
  if detected:
89
  if not recording:
90
  formatted_date = now.strftime("%d-%m-%y")
@@ -102,8 +128,6 @@ def process_video_feed(video_file=None):
102
  write_log(f"🎥 Recording started for {output_video_path}")
103
 
104
  out.write(frame)
105
-
106
- # Add detection log
107
  timestamp = now.strftime("%d/%m/%y %I:%M:%S %p")
108
  write_log(f"⚠️ {weapon_name} detected at {timestamp}")
109
 
 
4
  from datetime import datetime
5
  from tempfile import NamedTemporaryFile
6
  from ultralytics import YOLO
 
 
 
7
 
8
+ # Initialize YOLO models
9
+ weapon_model = YOLO("best.pt") # Weapon detection model
10
+ effect_model = YOLO("yolov8n.pt") # Pretrained YOLO model
11
+
12
+ # Load class labels for effect_model (YOLOv8n)
13
+ coco_labels = [
14
+ "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light",
15
+ "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
16
+ "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee",
17
+ "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard",
18
+ "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple",
19
+ "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch",
20
+ "potted plant", "bed", "dining table", "toilet", "TV", "laptop", "mouse", "remote", "keyboard",
21
+ "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase",
22
+ "scissors", "teddy bear", "hair drier", "toothbrush"
23
+ ]
24
 
25
  # Output folder for storing detected clips
26
  output_folder = "detected_clips"
27
  os.makedirs(output_folder, exist_ok=True)
28
 
29
+ # Streamlit UI
30
+ st.title("Weapon Detection System with Object Recognition")
31
+ frame_placeholder = st.empty()
32
+ log_placeholder = st.empty()
33
+
34
+ # Session state variables
35
  if "log" not in st.session_state:
36
  st.session_state.log = ""
37
  if "stop_camera" not in st.session_state:
38
  st.session_state.stop_camera = False
39
 
 
 
 
 
 
 
 
 
 
 
 
40
def write_log(message):
    """Append *message* to the session-state log and redraw the log text area."""
    # Accumulate all messages in session state so the log survives reruns.
    st.session_state.log = st.session_state.log + message + "\n"
    # Re-render the (read-only) scrolling log widget with the updated text.
    log_placeholder.text_area(
        "Detection Log",
        value=st.session_state.log,
        height=300,
        max_chars=5000,
        disabled=True,
    )
43
 
 
44
  def process_video_feed(video_file=None):
45
  st.session_state.stop_camera = False
46
 
 
47
  if video_file:
 
48
  with NamedTemporaryFile(delete=False, suffix=".mp4") as temp_file:
49
  temp_file.write(video_file.read())
50
  temp_file_path = temp_file.name
 
51
  cap = cv2.VideoCapture(temp_file_path)
52
  else:
53
  cap = cv2.VideoCapture(0)
 
66
  if not ret:
67
  break
68
 
69
+ # Run YOLO inference on both models
70
+ weapon_results = weapon_model(frame, verbose=False)
71
+ effect_results = effect_model(frame, verbose=False)
72
 
73
+ annotated_frame = frame.copy()
74
  detected = False
75
  weapon_name = ""
76
  now = datetime.now()
77
 
78
+ # 🟥 **Weapon Model Bounding Boxes**
79
+ for result in weapon_results:
80
+ if result.boxes is not None and len(result.boxes) > 0:
81
+ for box in result.boxes:
82
+ class_id = int(box.cls.item())
83
+ confidence = float(box.conf.item())
84
+
85
+ if class_id == 0 and confidence > 0.5: # Assuming class 0 is weapon
86
+ detected = True
87
+ weapon_name = "Weapon"
88
+
89
+ x1, y1, x2, y2 = map(int, box.xyxy[0].tolist())
90
+ cv2.rectangle(annotated_frame, (x1, y1), (x2, y2), (0, 0, 255), 2) # Red
91
+ cv2.putText(
92
+ annotated_frame, f"{weapon_name} ({confidence:.2f})",
93
+ (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2
94
+ )
95
+
96
+ # 🔵 **Effect Model Bounding Boxes**
97
+ for result in effect_results:
98
+ if result.boxes is not None and len(result.boxes) > 0:
99
+ for box in result.boxes:
100
+ class_id = int(box.cls.item())
101
+ confidence = float(box.conf.item())
102
+
103
+ if confidence > 0.5: # General detection for all objects
104
+ object_name = coco_labels[class_id] if class_id < len(coco_labels) else f"Object-{class_id}"
105
+
106
+ x1, y1, x2, y2 = map(int, box.xyxy[0].tolist())
107
+ cv2.rectangle(annotated_frame, (x1, y1), (x2, y2), (255, 0, 0), 2) # Blue
108
+ cv2.putText(
109
+ annotated_frame, f"{object_name} ({confidence:.2f})",
110
+ (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2
111
+ )
112
+
113
+ # **Start Recording if Weapon is Detected**
114
  if detected:
115
  if not recording:
116
  formatted_date = now.strftime("%d-%m-%y")
 
128
  write_log(f"🎥 Recording started for {output_video_path}")
129
 
130
  out.write(frame)
 
 
131
  timestamp = now.strftime("%d/%m/%y %I:%M:%S %p")
132
  write_log(f"⚠️ {weapon_name} detected at {timestamp}")
133