JASON123454321 committed
Commit d3e394f · verified · Parent: 422ed8b

Update src/streamlit_app.py

Files changed (1)
  1. src/streamlit_app.py +42 -78
src/streamlit_app.py CHANGED
@@ -15,146 +15,110 @@ import sys
 sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
 
 from models.experimental import attempt_load
-from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \
-    scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path
+from utils.general import check_img_size, non_max_suppression, scale_coords
 from utils.plots import plot_one_box
 
 def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
-    # Resize and pad image while meeting stride-multiple constraints
     shape = img.shape[:2]  # current shape [height, width]
     if isinstance(new_shape, int):
         new_shape = (new_shape, new_shape)
 
-    # Scale ratio (new / old)
     r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
-    if not scaleup:  # only scale down, do not scale up (for better test mAP)
+    if not scaleup:
         r = min(r, 1.0)
 
-    # Compute padding
-    ratio = r, r  # width, height ratios
+    ratio = r, r
     new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
-    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
-    if auto:  # minimum rectangle
-        dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
-    elif scaleFill:  # stretch
+    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]
+    if auto:
+        dw, dh = np.mod(dw, stride), np.mod(dh, stride)
+    elif scaleFill:
         dw, dh = 0.0, 0.0
         new_unpad = (new_shape[1], new_shape[0])
-        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios
+        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]
 
-    dw /= 2  # divide padding into 2 sides
+    dw /= 2
     dh /= 2
 
-    if shape[::-1] != new_unpad:  # resize
+    if shape[::-1] != new_unpad:
         img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
     top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
     left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
-    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
+    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
     return img, ratio, (dw, dh)
 
-def detect_modify(img0, model, conf=0.4, imgsz=640, conf_thres=0.25, iou_thres=0.45):
+def detect_modify(img0, model, device, conf=0.4, imgsz=640, conf_thres=0.25, iou_thres=0.45):
     st.image(img0, caption="Your image", use_column_width=True)
 
-    stride = int(model.stride.max())  # model stride
-    imgsz = check_img_size(imgsz, s=stride)  # check img_size
+    stride = int(model.stride.max())
+    imgsz = check_img_size(imgsz, s=stride)
 
-    # Padded resize
     img0 = cv2.cvtColor(np.asarray(img0), cv2.COLOR_RGB2BGR)
     img = letterbox(img0, imgsz, stride=stride)[0]
-    # Convert
-    img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
+    img = img[:, :, ::-1].transpose(2, 0, 1)
     img = np.ascontiguousarray(img)
 
-    # Get names and colors
     names = model.module.names if hasattr(model, 'module') else model.names
     colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]
 
-    # Run inference
-    old_img_w = old_img_h = imgsz
-    old_img_b = 1
-
-    t0 = time.time()
-    img = torch.from_numpy(img).to(device)
-    # img /= 255.0  # 0 - 255 to 0.0 - 1.0
-    img = img / 255.0
+    img = torch.from_numpy(img).to(device).float() / 255.0
     if img.ndimension() == 3:
         img = img.unsqueeze(0)
 
-    # Inference
-    # t1 = time_synchronized()
-    with torch.no_grad():  # calculating gradients would cause a GPU memory leak
+    with torch.no_grad():
         pred = model(img)[0]
-    # t2 = time_synchronized()
 
-    # Apply NMS
     pred = non_max_suppression(pred, conf_thres, iou_thres)
-    # t3 = time_synchronized()
 
-    # Process detections
-    # for i, det in enumerate(pred):  # detections per image
-
-    gn = torch.tensor(img0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
+    gn = torch.tensor(img0.shape)[[1, 0, 1, 0]]
 
     det = pred[0]
     if len(det):
-        # Rescale boxes from img_size to im0 size
         det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()
-
-        # Print results
-        s = ''
-        for c in det[:, -1].unique():
-            n = (det[:, -1] == c).sum()  # detections per class
-            s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
-
-        # Write results
         for *xyxy, conf, cls in reversed(det):
             label = f'{names[int(cls)]} {conf:.2f}'
             plot_one_box(xyxy, img0, label=label, color=colors[int(cls)], line_thickness=1)
 
-    f"""
-    ### Prediction result:
-    """
-    img0 = cv2.cvtColor(np.asarray(img0), cv2.COLOR_BGR2RGB)
+    img0 = cv2.cvtColor(img0, cv2.COLOR_BGR2RGB)
     st.image(img0, caption="Prediction Result", use_column_width=True)
 
-# set parameters
-
-# Get the directory containing the current file (streamlit_app.py)
+# Get the directory containing the current file
 current_dir = os.path.dirname(os.path.abspath(__file__))
 
-# Build the path to the .pkl file from the root directory
+# Path to the model weights
 weight_path = os.path.join(current_dir, 'best.pt')
 
+# Parameters
 imgsz = 640
 conf = 0.4
 conf_thres = 0.25
-iou_thres=0.45
+iou_thres = 0.45
 device = torch.device("cpu")
-path = "./"
 
 # Load model
-# model = attempt_load(weight_path, map_location=torch.device('cpu'))  # load FP32 model
-ckpt = torch.load(weight_path, map_location=torch.device('cpu'), weights_only=False)
+ckpt = torch.load(weight_path, map_location=device, weights_only=False)
 model = ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()
 
-"""
-# YOLOv7
-Detect whether a person is wearing a face mask or not.
-"""
-option = st.radio("", ["Upload Image", "Image URL"])
+# Streamlit UI
+st.title("YOLOv7 Mask Detection")
+st.write("Detect whether a person is wearing a face mask or not.")
+
+option = st.radio("Select Input Method", ["Upload Image", "Image URL"])
 
 if option == "Upload Image":
-    uploaded_file = st.file_uploader("Please upload an image.")
+    uploaded_file = st.file_uploader("Please upload an image.", type=["jpg", "jpeg", "png"])
     if uploaded_file is not None:
         img = PILImage.create(uploaded_file)
-        detect_modify(img, model, conf=conf, imgsz=imgsz, conf_thres=conf_thres, iou_thres=iou_thres)
-else:
-    url = st.text_input("Please input a url.")
-    if url != "":
+        detect_modify(img, model, device, conf=conf, imgsz=imgsz, conf_thres=conf_thres, iou_thres=iou_thres)
+
+elif option == "Image URL":
+    url = st.text_input("Please input an image URL.")
+    if url:
         try:
             response = requests.get(url)
+            response.raise_for_status()  # check HTTP status
             pil_img = PILImage.create(BytesIO(response.content))
-            detect_modify(pil_img, model, conf=conf, imgsz=imgsz, conf_thres=conf_thres, iou_thres=iou_thres)
-        except:
-            st.text("Problem reading image from", url)
+            detect_modify(pil_img, model, device, conf=conf, imgsz=imgsz, conf_thres=conf_thres, iou_thres=iou_thres)
+        except Exception as e:
+            st.error(f"Problem reading image from URL: {url}")
+            st.error(str(e))
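Note for reviewers: the hunk starts at line 15, so the file's import block is not part of this diff. Judging from the names the code uses (os, sys, random, cv2, np, torch, st, requests, BytesIO, PILImage), the imports above the hunk presumably look roughly like the sketch below; the actual block in the repo may differ.

# Presumed import block (not shown in the diff) -- reconstructed from usage,
# not copied from the repo.
import os
import random
import sys
from io import BytesIO

import cv2
import numpy as np
import requests
import streamlit as st
import torch
from fastai.vision.all import PILImage  # PILImage.create is fastai's image loader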
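As a quick sanity check of the letterbox() preprocessing this commit keeps, here is a minimal sketch; the 720x1280 input size is an arbitrary choice for illustration.

import numpy as np

# Dummy 720x1280 BGR frame (arbitrary size, for illustration only)
img = np.zeros((720, 1280, 3), dtype=np.uint8)

# r = min(640/720, 640/1280) = 0.5, so the frame resizes to 360x640;
# auto=True then pads the height up to the next multiple of stride 32:
# dh = (640 - 360) % 32 = 24 pixels, split 12 top / 12 bottom.
padded, ratio, (dw, dh) = letterbox(img, new_shape=640, stride=32)
print(padded.shape)   # (384, 640, 3)
print(ratio, dw, dh)  # (0.5, 0.5) 0.0 12.0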