kisa-misa committed
Commit ae8baa1 · 1 Parent(s): 367c50e

Update app.py

Files changed (1):
  1. app.py +346 -3
app.py CHANGED
@@ -1,9 +1,352 @@
  import cv2
  import numpy as np
- from glob import glob
- from ultralytics.yolo.v8.detect import predict
- #from models import Yolov4
  import gradio as gr
  #model = Yolov4(weight_path="yolov4.weights", class_name_path='coco_classes.txt')
  def gradio_wrapper(img):
      result = predict(model="YOLOv8-real-time/ultralytics/yolo/v8/detect/yolov8x6.pt", source=img)
 
+
+ from glob import glob
+ import hydra
+ import argparse
+ import time
+ from pathlib import Path
+ import math
  import cv2
+ import torch
+ import torch.backends.cudnn as cudnn
+ from numpy import random
+ from ultralytics.yolo.engine.predictor import BasePredictor
+ from ultralytics.yolo.utils import DEFAULT_CONFIG, ROOT, ops
+ from ultralytics.yolo.utils.checks import check_imgsz
+ from ultralytics.yolo.utils.plotting import Annotator, colors, save_one_box
+ import pandas as pd
+ from deep_sort_pytorch.utils.parser import get_config
+ from deep_sort_pytorch.deep_sort import DeepSort
+ from collections import deque
  import numpy as np
+ import csv
+ import matplotlib.pyplot as plt
+ import seaborn as sns
  import gradio as gr
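+
+ # Pipeline: YOLOv8 detections are tracked with DeepSORT; per-frame vehicle
+ # speeds are estimated, a weighted "pulse" value is appended to pulse.csv,
+ # and the pulse time series is re-plotted with seaborn after every frame.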
+
+
+ palette = (2 ** 11 - 1, 2 ** 15 - 1, 2 ** 20 - 1)
+ deq = {}
+ indices = [0] * 100
+ c = 0
+ num = 1
+ # truncate the pulse log, then write the CSV header
+ f = open('/content/pulse.csv', "w+")
+ f.close()
+ with open('/content/pulse.csv', 'a') as f:
+     # create the csv writer
+     writer = csv.writer(f)
+
+     header = ['time', 'pulse']
+     writer.writerow(header)
+
+
+ deepsort = None
+
+ object_counter = {}
+
+ speed_line_queue = {}
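+ # Heuristic speed estimate from the pixel displacement between the last two
+ # tracked positions; the pixels-per-meter ratio and the 15 fps assumption
+ # are rough approximations, not calibrated values.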
+ def estimatespeed(Location1, Location2, h, w):
+     # Euclidean distance between the two positions, in pixels
+     d_pixel = math.sqrt(math.pow(Location2[0] - Location1[0], 2) + math.pow(Location2[1] - Location1[1], 2))
+     # define the pixels per meter (rough heuristic from the box size)
+     ppm = max(max(h, w) // 10, 1)  # guard against ppm == 0 for very small boxes
+     d_meters = d_pixel / ppm
+     # speed = distance / time: ~15 fps assumed, 3.6 converts m/s to km/h
+     time_constant = 15 * 3.6
+     speed = d_meters * time_constant
+
+     return int(speed)
+ def init_tracker():
+     global deepsort
+     cfg_deep = get_config()
+     cfg_deep.merge_from_file("deep_sort_pytorch/configs/deep_sort.yaml")
+
+     deepsort = DeepSort(cfg_deep.DEEPSORT.REID_CKPT,
+                         max_dist=cfg_deep.DEEPSORT.MAX_DIST, min_confidence=cfg_deep.DEEPSORT.MIN_CONFIDENCE,
+                         nms_max_overlap=cfg_deep.DEEPSORT.NMS_MAX_OVERLAP, max_iou_distance=cfg_deep.DEEPSORT.MAX_IOU_DISTANCE,
+                         max_age=cfg_deep.DEEPSORT.MAX_AGE, n_init=cfg_deep.DEEPSORT.N_INIT, nn_budget=cfg_deep.DEEPSORT.NN_BUDGET,
+                         use_cuda=True)
+ ##########################################################################################
+ def xyxy_to_xywh(*xyxy):
+     """Convert absolute (x1, y1, x2, y2) pixel coordinates to center-based (x_c, y_c, w, h)."""
+     bbox_left = min([xyxy[0].item(), xyxy[2].item()])
+     bbox_top = min([xyxy[1].item(), xyxy[3].item()])
+     bbox_w = abs(xyxy[0].item() - xyxy[2].item())
+     bbox_h = abs(xyxy[1].item() - xyxy[3].item())
+     x_c = (bbox_left + bbox_w / 2)
+     y_c = (bbox_top + bbox_h / 2)
+     w = bbox_w
+     h = bbox_h
+     return x_c, y_c, w, h
+
+
+ def compute_color_for_labels(label):
+     """
+     Simple function that adds fixed color depending on the class
+     """
+     if label == 7:  # truck
+         color = (85, 45, 255)
+     elif label == 2:  # car
+         color = (222, 82, 175)
+     elif label == 3:  # motorcycle
+         color = (0, 204, 255)
+     elif label == 5:  # bus
+         color = (0, 149, 255)
+     else:
+         color = [int((p * (label ** 2 - label + 1)) % 255) for p in palette]
+     return tuple(color)
+
+ def draw_border(img, pt1, pt2, color, thickness, r, d):
+     x1, y1 = pt1
+     x2, y2 = pt2
+     # Top left
+     cv2.line(img, (x1 + r, y1), (x1 + r + d, y1), color, thickness)
+     cv2.line(img, (x1, y1 + r), (x1, y1 + r + d), color, thickness)
+     cv2.ellipse(img, (x1 + r, y1 + r), (r, r), 180, 0, 90, color, thickness)
+     # Top right
+     cv2.line(img, (x2 - r, y1), (x2 - r - d, y1), color, thickness)
+     cv2.line(img, (x2, y1 + r), (x2, y1 + r + d), color, thickness)
+     cv2.ellipse(img, (x2 - r, y1 + r), (r, r), 270, 0, 90, color, thickness)
+     # Bottom left
+     cv2.line(img, (x1 + r, y2), (x1 + r + d, y2), color, thickness)
+     cv2.line(img, (x1, y2 - r), (x1, y2 - r - d), color, thickness)
+     cv2.ellipse(img, (x1 + r, y2 - r), (r, r), 90, 0, 90, color, thickness)
+     # Bottom right
+     cv2.line(img, (x2 - r, y2), (x2 - r - d, y2), color, thickness)
+     cv2.line(img, (x2, y2 - r), (x2, y2 - r - d), color, thickness)
+     cv2.ellipse(img, (x2 - r, y2 - r), (r, r), 0, 0, 90, color, thickness)
+
+     cv2.rectangle(img, (x1 + r, y1), (x2 - r, y2), color, -1, cv2.LINE_AA)
+     cv2.rectangle(img, (x1, y1 + r), (x2, y2 - r - d), color, -1, cv2.LINE_AA)
+
+     cv2.circle(img, (x1 + r, y1 + r), 2, color, 12)
+     cv2.circle(img, (x2 - r, y1 + r), 2, color, 12)
+     cv2.circle(img, (x1 + r, y2 - r), 2, color, 12)
+     cv2.circle(img, (x2 - r, y2 - r), 2, color, 12)
+
+     return img
+
+ def UI_box(x, img, color=None, label=None, line_thickness=None):
+     # Plots one bounding box on image img
+     tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1  # line/font thickness
+     color = color or [random.randint(0, 255) for _ in range(3)]
+     c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
+     cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
+     if label:
+         tf = max(tl - 1, 1)  # font thickness
+         t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
+
+         img = draw_border(img, (c1[0], c1[1] - t_size[1] - 3), (c1[0] + t_size[0], c1[1] + 3), color, 1, 8, 2)
+
+         cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
+
+
+ def ccw(A, B, C):
+     return (C[1] - A[1]) * (B[0] - A[0]) > (B[1] - A[1]) * (C[0] - A[0])
+
+
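+ # Per frame: pulse = sum over classes of (accumulated average speed) *
+ # (class weight); the weights roughly track vehicle mass (see the commented
+ # reference values inside the loop).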
+ def draw_boxes(img, bbox, names, object_id, writer, writer2, identities=None, offset=(0, 0)):
+     height, width, _ = img.shape
+     global c
+
+     # remove tracked point from buffer if object is lost
+     for key in list(deq):
+         if key not in identities:
+             deq.pop(key)
+
+     weights = [0, 0, int(6.72), int(1.638), 0, 30, 0, int(18.75)]
+     speeds = [0] * 8
+
+     for i, box in enumerate(bbox):
+         obj_name = names[object_id[i]]
+         x1, y1, x2, y2 = [int(i) for i in box]
+         x1 += offset[0]
+         x2 += offset[0]
+         y1 += offset[1]
+         y2 += offset[1]
+
+         # center of the bottom edge
+         center = (int((x2 + x1) / 2), y2)
+
+         # get ID of object
+         id = int(identities[i]) if identities is not None else 0
+
+         # create new buffer for new object
+         if id not in deq:
+             deq[id] = deque(maxlen=64)
+             if object_id[i] in [2, 3, 5, 7]:
+                 c += 1
+             indices[id] = c
+             speed_line_queue[id] = []
+         color = compute_color_for_labels(object_id[i])
+
+         label = f"{indices[id]}:{obj_name}"
+
+         # add center to buffer
+         deq[id].appendleft(center)
+         if len(deq[id]) >= 2:
+             object_speed = estimatespeed(deq[id][1], deq[id][0], x2 - x1, y2 - y1)
+             speed_line_queue[id].append(object_speed)
+         if obj_name not in object_counter:
+             object_counter[obj_name] = 1
+
+         # motorcycle_weight = 1.638
+         # car_weight = 6.72
+         # truck_weight = 18.75
+         # bus_weight = 30
+
+         try:
+             spd = sum(speed_line_queue[id]) // len(speed_line_queue[id])
+             speeds[object_id[i]] += spd
+             label = label + " v=" + str(spd) + " m=" + str(weights[object_id[i]])
+             writer2.writerow([str(indices[id]), obj_name, str(spd), str(weights[object_id[i]])])
+         except Exception:
+             pass
+         UI_box(box, img, label=label, color=color, line_thickness=2)
+         # cv2.putText(img, f"{speeds}", (500, 50), 0, 1, [0, 255, 0], thickness=2, lineType=cv2.LINE_AA)
+     t = time.localtime()
+     current_time = time.strftime("%H:%M:%S %d.%m.%Y", t)
+     pulse = sum(np.multiply(speeds, weights))
+
+     # write a row to the csv file
+     writer.writerow([f"{current_time}", f"{pulse}"])
+
+     cv2.putText(img, f"pulse: {pulse}", (500, 50), 0, 1, [0, 255, 0], thickness=2, lineType=cv2.LINE_AA)
+     # for i, object_speed in enumerate(speeds):
+     #     object_speed = sum(object_speed) * weights[i]
+
+     return img
+
+
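+ # BasePredictor subclass: preprocess/postprocess override the YOLOv8 hooks,
+ # and write_results pipes the NMS output through DeepSORT before drawing.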
+ class DetectionPredictor(BasePredictor):
+
+     def get_annotator(self, img):
+         return Annotator(img, line_width=self.args.line_thickness, example=str(self.model.names))
+
+     def preprocess(self, img):
+         img = torch.from_numpy(img).to(self.model.device)
+         img = img.half() if self.model.fp16 else img.float()  # uint8 to fp16/32
+         img /= 255  # 0 - 255 to 0.0 - 1.0
+         return img
+
+     def postprocess(self, preds, img, orig_img):
+         preds = ops.non_max_suppression(preds,
+                                         self.args.conf,
+                                         self.args.iou,
+                                         classes=[2, 3, 5, 7],  # car, motorcycle, bus, truck (COCO ids)
+                                         agnostic=self.args.agnostic_nms,
+                                         max_det=self.args.max_det)
+
+         for i, pred in enumerate(preds):
+             shape = orig_img[i].shape if self.webcam else orig_img.shape
+             pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], shape).round()
+
+         return preds
+
+     def write_results(self, idx, preds, batch):
+         global num
+         p, im, im0 = batch
+         all_outputs = []
+         log_string = ""
+         if len(im.shape) == 3:
+             im = im[None]  # expand for batch dim
+         self.seen += 1
+         im0 = im0.copy()
+         if self.webcam:  # batch_size >= 1
+             log_string += f'{idx}: '
+             frame = self.dataset.count
+         else:
+             frame = getattr(self.dataset, 'frame', 0)
+
+         self.data_path = p
+         save_path = str(self.save_dir / p.name)  # im.jpg
+         self.txt_path = str(self.save_dir / 'labels' / p.stem) + ('' if self.dataset.mode == 'image' else f'_{frame}')
+         log_string += '%gx%g ' % im.shape[2:]  # print string
+         self.annotator = self.get_annotator(im0)
+
+         det = preds[idx]
+         all_outputs.append(det)
+         if len(det) == 0:
+             return log_string
+
+         count = 0
+         for cls_id in det[:, 5].unique():
+             count += 1
+             n = (det[:, 5] == cls_id).sum()  # detections per class
+             cv2.putText(im0, f"{n} {self.model.names[int(cls_id)]}", (11, count * 50), 0, 1, [0, 255, 0], thickness=2, lineType=cv2.LINE_AA)
+             log_string += f"{n} {self.model.names[int(cls_id)]}{'s' * (n > 1)}, "
+         # write
+         gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
+         xywh_bboxs = []
+         confs = []
+         oids = []
+         outputs = []
+         for *xyxy, conf, cls in reversed(det):
+             x_c, y_c, bbox_w, bbox_h = xyxy_to_xywh(*xyxy)
+             xywh_obj = [x_c, y_c, bbox_w, bbox_h]
+             xywh_bboxs.append(xywh_obj)
+             confs.append([conf.item()])
+             oids.append(int(cls))
+         xywhs = torch.Tensor(xywh_bboxs)
+         confss = torch.Tensor(confs)
+
+         outputs = deepsort.update(xywhs, confss, oids, im0)
+
+         with open('/content/pulse.csv', 'a') as f:
+             # create the csv writer
+             writer = csv.writer(f)
+             if len(outputs) > 0:
+                 bbox_xyxy = outputs[:, :4]
+                 identities = outputs[:, -2]
+                 object_id = outputs[:, -1]
+
+                 # truncate the per-vehicle log, then re-open it for this frame
+                 f2 = open('/content/vehicles_data.csv', "w+")
+                 f2.close()
+                 with open('/content/vehicles_data.csv', 'a') as f2:
+                     writer2 = csv.writer(f2)
+                     header = ['id', 'class', 'speed', 'weight']
+                     writer2.writerow(header)
+                     draw_boxes(im0, bbox_xyxy, self.model.names, object_id, writer, writer2, identities)
+         df = pd.read_csv("/content/pulse.csv")
+         df['time'] = pd.to_datetime(df['time'], format='%H:%M:%S %d.%m.%Y')
+
+         df.index = df['time']
+         del df['time']
+
+         try:
+             fig, ax = plt.subplots()
+             # plt.clf()
+             sns.lineplot(df)
+             # ax.set_xticklabels([t.get_text().split(".")[0] for t in ax.get_xticklabels()])
+             ax.set_xticklabels([pd.to_datetime(t.get_text()).strftime('%H:%M:%S') for t in ax.get_xticklabels()])
+             plt.ylabel('Pulse')
+             plt.xlabel('time')
+             plt.savefig(f'/content/time_series/figure_{num:010d}.png')
+             num += 1
+         except Exception:
+             log_string += f'An error occurred while saving figure_{num:010d}.png, '
+         finally:
+             plt.close('all')  # avoid leaking figures across frames
+
+         return log_string
+
+
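+ # Hydra-driven entry point: loads the default YOLO config, falling back to
+ # yolov8n.pt and the bundled assets when model/source are not set.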
+ @hydra.main(version_base=None, config_path=str(DEFAULT_CONFIG.parent), config_name=DEFAULT_CONFIG.name)
+ def predict(cfg):
+     init_tracker()
+     cfg.model = cfg.model or "yolov8n.pt"
+     cfg.imgsz = check_imgsz(cfg.imgsz, min_dim=2)  # check image size
+     cfg.source = cfg.source if cfg.source is not None else ROOT / "assets"
+     predictor = DetectionPredictor(cfg)
+     predictor()
+
+
  #model = Yolov4(weight_path="yolov4.weights", class_name_path='coco_classes.txt')
  def gradio_wrapper(img):
      result = predict(model="YOLOv8-real-time/ultralytics/yolo/v8/detect/yolov8x6.pt", source=img)
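
For reference, a minimal standalone sketch of the "pulse" metric computed in draw_boxes above. The helper name and the example frame are illustrative; only the class ids, truncated weights, and speed-weighting logic come from this diff:

import numpy as np

# COCO class ids used above: 2=car, 3=motorcycle, 5=bus, 7=truck
WEIGHTS = [0, 0, int(6.72), int(1.638), 0, 30, 0, int(18.75)]

def frame_pulse(speeds):
    """speeds: per-class accumulated average speeds (km/h) for one frame, length 8."""
    return sum(np.multiply(speeds, WEIGHTS))

# One car averaging 40 km/h and one truck averaging 30 km/h:
speeds = [0] * 8
speeds[2] = 40  # car
speeds[7] = 30  # truck
print(frame_pulse(speeds))  # 40*6 + 30*18 = 780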