ismot khaclinh committed on
Commit dd090c5 · 0 Parent(s):

Duplicate from khaclinh/self-driving-anonymization

Co-authored-by: Linh Trinh <[email protected]>

.gitattributes ADDED
@@ -0,0 +1,31 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ title: Self Driving Anonymization
+ emoji: 📈
+ colorFrom: yellow
+ colorTo: gray
+ sdk: gradio
+ sdk_version: 3.4.1
+ app_file: app.py
+ pinned: false
+ license: cc-by-nc-4.0
+ duplicated_from: khaclinh/self-driving-anonymization
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,133 @@
+ # Author: khaclinh
+ import os
+
+ os.system('pip install yolox')  # yolox is installed at runtime; it is not pinned in requirements.txt
+
+ import gradio as gr
+ import torch
+ import numpy as np
+ from PIL import Image
+ import importlib
+
+ import cv2
+
+ from yolox.utils import postprocess
+ from yolox.data.data_augment import ValTransform
+
+ ckpt_file = 'model_weights/best_ckpt.pth'
+
+ # get the YOLOX experiment definition
+ current_exp = importlib.import_module('pp4av_exp')
+ exp = current_exp.Exp()
+
+ # set inference parameters
+ test_size = (800, 800)
+ num_classes = 2
+ nmsthre = 0.3
+
+ GDPR_CLASSES = (
+     "Face",
+     "Plate"
+ )
+
+
+ # build the YOLOX model
+ model = exp.get_model()
+ # model.cuda()  # uncomment to run on GPU
+ model.eval()
+
+ # load the custom trained checkpoint
+ ckpt = torch.load(ckpt_file, map_location="cpu")
+ model.load_state_dict(ckpt["model"])
+
+
+ def yolox_inference(img, model, prob_threshold, test_size):
+     bboxes = []
+     bbclasses = []
+     scores = []
+
+     preproc = ValTransform(legacy=False)
+
+     tensor_img, _ = preproc(img, None, test_size)
+     tensor_img = torch.from_numpy(tensor_img).unsqueeze(0)
+     tensor_img = tensor_img.float()
+     # tensor_img = tensor_img.cuda()  # uncomment to run on GPU
+
+     with torch.no_grad():
+         outputs = model(tensor_img)
+         outputs = postprocess(
+             outputs, num_classes, prob_threshold,
+             nmsthre, class_agnostic=True
+         )
+
+     if outputs[0] is None:
+         return [], [], []
+
+     outputs = outputs[0].cpu()
+     bboxes = outputs[:, 0:4]
+
+     bboxes /= min(test_size[0] / img.shape[0], test_size[1] / img.shape[1])  # undo the resize ratio applied by ValTransform
+     bbclasses = outputs[:, 6]
+     scores = outputs[:, 4] * outputs[:, 5]
+
+     return bboxes, bbclasses, scores
+
+
+ def draw_yolox_predictions(img, bboxes, scores, bbclasses, prob_threshold, classes_dict):
+     for i in range(len(bboxes)):
+         box = bboxes[i]
+         cls_id = int(bbclasses[i])
+         score = scores[i]
+         if score < prob_threshold:
+             continue
+         x0 = int(box[0])
+         y0 = int(box[1])
+         x1 = int(box[2])
+         y1 = int(box[3])
+         if cls_id == 0:
+             # faces: green boxes
+             cv2.rectangle(img, (x0, y0), (x1, y1), (0, 255, 0), 2)
+             cv2.putText(img, '{}:{:.1f}%'.format(classes_dict[cls_id], score * 100), (x0, y0 - 3), cv2.FONT_HERSHEY_PLAIN, 0.8, (0, 255, 0), thickness=1)
+         else:
+             # license plates: red boxes
+             cv2.rectangle(img, (x0, y0), (x1, y1), (255, 0, 0), 2)
+             cv2.putText(img, '{}:{:.1f}%'.format(classes_dict[cls_id], score * 100), (x0, y0 - 3), cv2.FONT_HERSHEY_PLAIN, 0.8, (255, 0, 0), thickness=1)
+
+     return img
+
+
+ def pp4av_detect(img, prob_threshold=0.1):
+     # Convert PIL image to CV2
+     open_cv_image = np.array(img)
+     # Convert RGB to BGR
+     open_cv_image = open_cv_image[:, :, ::-1].copy()
+
+     bboxes, bbclasses, scores = yolox_inference(open_cv_image, model, prob_threshold, test_size)
+
+     out = cv2.cvtColor(open_cv_image, cv2.COLOR_BGR2RGB)
+     # Draw predictions
+     out_image = draw_yolox_predictions(out, bboxes, scores, bbclasses, prob_threshold, GDPR_CLASSES)
+
+     return Image.fromarray(out_image)
+
+
+ img_input = gr.inputs.Image(type='pil', label="Original Image")
+ img_output = gr.outputs.Image(type="pil", label="Output Image")
+
+ prob_threshold_slider = gr.Slider(minimum=0, maximum=1.0, step=0.01, value=0.1, label="Confidence Threshold")
+
+ title = "PP4AV: Deep Learning model for Data Anonymization in Autonomous Driving"
+ description = "Detecting faces and license plates in image data from self-driving cars. Take a picture, upload an image, or click an example image to use."
+ article = ""
+
+ examples = [['data/fisheye.jpg'], ['data/zurich.jpg'], ['data/stuttgart.jpg'], ['data/strasbourg.jpg']]
+ gr.Interface(
+     fn=pp4av_detect,
+     inputs=[img_input, prob_threshold_slider],
+     outputs=img_output,
+     title=title,
+     description=description,
+     article=article,
+     examples=examples,
+     theme="huggingface"
+ ).launch(enable_queue=True)
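
A quick sanity check outside the Gradio UI is to call `pp4av_detect` directly on one of the bundled example images (a minimal sketch; the output path and threshold value are illustrative only):

    from PIL import Image
    result = pp4av_detect(Image.open('data/zurich.jpg'), prob_threshold=0.3)  # returns an annotated PIL image
    result.save('zurich_annotated.jpg')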
data/fisheye.jpg ADDED
data/strasbourg.jpg ADDED
data/stuttgart.jpg ADDED
data/zurich.jpg ADDED
model_weights/.keep ADDED
@@ -0,0 +1 @@
+
model_weights/best_ckpt.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3f2abeffba9454f8c88a1cb42dd1358ce054f5a4656bed4c9f1542911f5e5f99
+ size 433859563
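
The checkpoint binary lives in Git LFS; the file committed here is only the pointer above. After `git lfs pull`, the download can be checked against the pointer's oid (a sketch assuming the file sits at `model_weights/best_ckpt.pth`):

    import hashlib
    h = hashlib.sha256()
    with open('model_weights/best_ckpt.pth', 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):  # hash in 1 MiB chunks
            h.update(chunk)
    assert h.hexdigest() == '3f2abeffba9454f8c88a1cb42dd1358ce054f5a4656bed4c9f1542911f5e5f99'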
pp4av_exp.py ADDED
@@ -0,0 +1,48 @@
+ #!/usr/bin/env python3
+ # -*- coding:utf-8 -*-
+ # Copyright (c) Megvii, Inc. and its affiliates.
+
+ import os
+
+ from yolox.exp import Exp as MyExp
+
+ class Exp(MyExp):
+     def __init__(self):
+         super(Exp, self).__init__()
+         self.depth = 1.0  # depth/width multipliers of 1.0 correspond to YOLOX-L
+         self.width = 1.0
+         self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
+
+         self.data_dir = ''
+         self.train_ann = ''
+         self.val_ann = ''
+         self.test_ann = ''
+
+         self.num_classes = 2
+         self.data_num_workers = 32  # number of dataloader worker processes
+
+         self.input_size = (800, 800)
+         self.print_interval = 100
+         self.eval_interval = 1
+         self.test_size = (800, 800)
+         self.enable_mixup = True
+         self.mosaic_scale = (0.5, 1.5)
+         self.max_epoch = 300
+         self.hsv_prob = 1.0
+
+         self.degrees = 20.0
+         self.translate = 0.2
+         self.shear = 2.0
+         # Mosaic augmentation always on (prob = 1.0)
+         self.mosaic_prob = 1.0
+         # Mixup augmentation always on (prob = 1.0)
+         self.mixup_prob = 1.0
+         # Author's note: SGD was to be replaced by Adam (no optimizer is overridden here)
+
+         self.basic_lr_per_img = 0.01 / 28.0
+         self.no_aug_epochs = 15
+         self.min_lr_ratio = 0.05
+         self.ema = True
+
+         self.nmsthre = 0.3
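
app.py loads this experiment dynamically through `importlib`; the same pattern works in a standalone script for inspecting the configuration or rebuilding the network (a sketch that assumes `yolox` is installed and `pp4av_exp.py` is on the import path):

    import importlib

    exp = importlib.import_module('pp4av_exp').Exp()
    print(exp.num_classes, exp.test_size, exp.nmsthre)  # 2 (800, 800) 0.3
    model = exp.get_model()  # builds the depth=width=1.0 (L-sized) YOLOX network
    model.eval()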
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ gradio
+ torch
+ torchvision
+ numpy
+ opencv-python
+ seaborn
+ tabulate
+ loguru
+ thop