ntsc207 committed
Commit a355dad · verified · 1 Parent(s): 1f32ffc

Update detect.py

Files changed (1): detect.py (+26 -3)
detect.py CHANGED
@@ -3,7 +3,7 @@ import os
 import platform
 import sys
 from pathlib import Path
-
+import pandas as pd
 import torch
 
 FILE = Path(__file__).resolve()
@@ -20,6 +20,9 @@ from utils.plots import Annotator, colors, save_one_box
 from utils.torch_utils import select_device, smart_inference_mode
 
 
+def convert_to_int(tensor):
+    return tensor.type(torch.int16).item()
+
 @smart_inference_mode()
 def run(
     weights=ROOT / 'yolo.pt',  # model path or triton URL
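The new convert_to_int helper narrows a 0-d count tensor to torch.int16 before extracting a Python int. A minimal standalone check of that behavior (an editorial sketch, not part of the commit; values above 32767 wrap around in int16, so int(tensor.item()) is an equivalent form without that ceiling):

import torch

n = (torch.tensor([0, 0, 2, 2, 2]) == 2).sum()  # 0-d tensor, like a class count in run()
print(n.type(torch.int16).item())  # 3 -- plain Python int, as convert_to_int returns
print(int(n.item()))               # same result without the int16 cast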
@@ -84,6 +87,7 @@ def run(
     # Run inference
     model.warmup(imgsz=(1 if pt or model.triton else bs, 3, *imgsz))  # warmup
     seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
+    frame_counts = []
     for path, im, im0s, vid_cap, s in dataset:
         with dt[0]:
             im = torch.from_numpy(im).to(model.device)
@@ -106,6 +110,7 @@ def run(
 
         # Second-stage classifier (optional)
         # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
+        counts = {}
 
         # Process predictions
        for i, det in enumerate(pred):  # per image
@@ -131,6 +136,7 @@ def run(
                 for c in det[:, 5].unique():
                     n = (det[:, 5] == c).sum()  # detections per class
                     s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
+                    counts[names[int(c)]] = n
 
                 # Write results
                 for *xyxy, conf, cls in reversed(det):
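The new counts dict records the same per-class totals that this loop already formats into the log string s. A self-contained sketch of the idiom on a dummy detections tensor (hypothetical class names; column 5 holds the class index, as in detect.py):

import torch

names = ['person', 'car']                       # hypothetical class names
det = torch.tensor([[0., 0., 1., 1., 0.9, 0.],  # x1, y1, x2, y2, conf, cls
                    [0., 0., 1., 1., 0.8, 0.],
                    [0., 0., 1., 1., 0.7, 1.]])

counts = {}
for c in det[:, 5].unique():
    counts[names[int(c)]] = (det[:, 5] == c).sum()  # value is a 0-d tensor

print(counts)  # {'person': tensor(2), 'car': tensor(1)}

The values stay 0-d tensors at this point, which is why the commit later routes them through convert_to_int before building the DataFrames.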
@@ -146,7 +152,7 @@ def run(
                         annotator.box_label(xyxy, label, color=colors(c, True))
                     if save_crop:
                         save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)
-
+                    label_name = names[int(cls)]
             # Stream results
             im0 = annotator.result()
             if view_img:
@@ -177,8 +183,25 @@ def run(
                     vid_writer[i].write(im0)
 
             # Print time (inference-only)
+
             LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms")
+            frame_counts.append((frame, counts))  # Append the counts for each frame
+    transformed_data = []
+
+    # Iterate over frame_counts and transform each entry into a row in the DataFrame
+    for frame, counts_dict in frame_counts:
+        for label, count in counts_dict.items():
+            transformed_data.append((frame, label.capitalize(), count))
+
+    # Create a DataFrame from the transformed data
+    df = pd.DataFrame(transformed_data, columns=['frame', 'label', 'count'])
+
+    # Convert count column from tensors to integers
+    df['count'] = df['count'].apply(convert_to_int)
 
+    counts_df = pd.DataFrame(counts.items(), columns=['label', 'count'])
+    counts_df['count'] = counts_df['count'].apply(convert_to_int)
+    counts_df['label'] = counts_df['label'].astype(str)
     #vid_writer.release()
     # Print results
     t = tuple(x.t / seen * 1E3 for x in dt)  # speeds per image
@@ -188,7 +211,7 @@ def run(
     LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
     if update:
         strip_optimizer(weights[0])  # update model (to fix SourceChangeWarning)
-    return save_path
+    return save_path, counts_df, df
 
 def parse_opt():
     parser = argparse.ArgumentParser()
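Taken together, the additions in the last two hunks accumulate one (frame, counts) pair per processed image, then flatten the pairs into a long-format table with one row per frame/label combination. A sketch of that reshaping in isolation (hypothetical frame numbers and counts, mirroring the committed code):

import pandas as pd
import torch

def convert_to_int(tensor):
    return tensor.type(torch.int16).item()

frame_counts = [(1, {'person': torch.tensor(2), 'car': torch.tensor(1)}),
                (2, {'person': torch.tensor(1)})]

transformed_data = []
for frame, counts_dict in frame_counts:
    for label, count in counts_dict.items():
        transformed_data.append((frame, label.capitalize(), count))

df = pd.DataFrame(transformed_data, columns=['frame', 'label', 'count'])
df['count'] = df['count'].apply(convert_to_int)
print(df)
#    frame   label  count
# 0      1  Person      2
# 1      1     Car      1
# 2      2  Person      1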
 
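With this commit, run() returns a triple rather than a single path: the save path, counts_df (rebuilt each iteration from counts, so it reflects the last processed frame), and df (per-frame counts). A hedged calling sketch (the weights and source paths are placeholders; the source keyword follows the standard YOLOv5-style run() signature):

from detect import run

save_path, counts_df, df = run(weights='yolo.pt', source='video.mp4')

print(save_path)                           # where the annotated output was written
print(counts_df)                           # class counts from the final frame
print(df.groupby('label')['count'].sum())  # e.g. total detections per class across frames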