JASON123454321 committed
Commit a722ea2 · verified · 1 parent: a42f937

Update src/streamlit_app.py

Files changed (1):
  1. src/streamlit_app.py +119 -65
src/streamlit_app.py CHANGED
@@ -1,4 +1,8 @@
-from PIL import Image
+from fastai.vision.all import *
+from io import BytesIO
+import requests
+import streamlit as st
+
 import numpy as np
 import torch
 import time
@@ -6,101 +10,151 @@ import cv2
 from numpy import random
 import os
 import sys
-from io import BytesIO
-import requests
-import streamlit as st
 
-# Add the yolov7 submodule path to the module search path
-sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), 'yolov7')))
+# Add the parent directory to the module search path
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
 
 from models.experimental import attempt_load
-from utils.general import check_img_size, non_max_suppression, scale_coords
+from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \
+    scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path
 from utils.plots import plot_one_box
 
-# Device setup
-device = torch.device("cpu")
-
-# Image resize (YOLO-specific)
-def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=False, scaleFill=False, scaleup=True, stride=32):
-    shape = img.shape[:2]
+def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
+    # Resize and pad image while meeting stride-multiple constraints
+    shape = img.shape[:2]  # current shape [height, width]
     if isinstance(new_shape, int):
         new_shape = (new_shape, new_shape)
+
+    # Scale ratio (new / old)
     r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
-    if not scaleup:
+    if not scaleup:  # only scale down, do not scale up (for better test mAP)
         r = min(r, 1.0)
-    ratio = r, r
+
+    # Compute padding
+    ratio = r, r  # width, height ratios
     new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
-    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]
-    dw /= 2
+    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
+    if auto:  # minimum rectangle
+        dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
+    elif scaleFill:  # stretch
+        dw, dh = 0.0, 0.0
+        new_unpad = (new_shape[1], new_shape[0])
+        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios
+
+    dw /= 2  # divide padding into 2 sides
     dh /= 2
-    img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
+
+    if shape[::-1] != new_unpad:  # resize
+        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
     top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
     left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
-    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
+    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
     return img, ratio, (dw, dh)
-# Model path (make sure best.pt sits in the same directory)
-weight_path = os.path.join(os.path.dirname(__file__), "best.pt")
 
-# Load the model
-ckpt = torch.load(weight_path, map_location=device)
-model = ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()
-
-# Set custom class names (required, otherwise the boxes cannot be labelled)
-model.names = ['WithMask', 'WithoutMask']  # replace with your own classes
-
-# Main inference function
-def detect(img_pil, conf_thres=0.25, iou_thres=0.45, imgsz=640):
-    img0 = np.array(img_pil.convert('RGB'))
-    img = cv2.cvtColor(img0, cv2.COLOR_RGB2BGR)
-    img, _, _ = letterbox(img, new_shape=imgsz)
-    img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, HWC to CHW
+def detect_modify(img0, model, conf=0.4, imgsz=640, conf_thres=0.25, iou_thres=0.45):
+    st.image(img0, caption="Your image", use_column_width=True)
+
+    stride = int(model.stride.max())  # model stride
+    imgsz = check_img_size(imgsz, s=stride)  # check img_size
+
+    # Padded resize
+    img0 = cv2.cvtColor(np.asarray(img0), cv2.COLOR_RGB2BGR)
+    img = letterbox(img0, imgsz, stride=stride)[0]
+    # Convert
+    img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, HWC to CHW
     img = np.ascontiguousarray(img)
-    img = torch.from_numpy(img).to(device).float() / 255.0
+
+    # Get names and colors
+    names = model.module.names if hasattr(model, 'module') else model.names
+    colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]
+
+    # Run inference
+    old_img_w = old_img_h = imgsz
+    old_img_b = 1
+
+    t0 = time.time()
+    img = torch.from_numpy(img).to(device)
+    # img /= 255.0  # 0 - 255 to 0.0 - 1.0
+    img = img / 255.0
     if img.ndimension() == 3:
         img = img.unsqueeze(0)
 
-    with torch.no_grad():
+    # Inference
+    # t1 = time_synchronized()
+    with torch.no_grad():  # calculating gradients would cause a GPU memory leak
        pred = model(img)[0]
-    pred = non_max_suppression(pred, conf_thres, iou_thres)
+    # t2 = time_synchronized()
 
-    det = pred[0]
-    names = model.names
-    colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]
+    # Apply NMS
+    pred = non_max_suppression(pred, conf_thres, iou_thres)
+    # t3 = time_synchronized()
 
-    if det is not None and len(det):
+    # Process detections
+    # for i, det in enumerate(pred):  # detections per image
+
+    gn = torch.tensor(img0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
+
+    det = pred[0]
+    if len(det):
+        # Rescale boxes from img_size to im0 size
        det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()
-        for *xyxy, conf, cls in det:
+
+        # Print results
+        s = ''
+        for c in det[:, -1].unique():
+            n = (det[:, -1] == c).sum()  # detections per class
+            s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
+
+        # Write results
+        for *xyxy, conf, cls in reversed(det):
            label = f'{names[int(cls)]} {conf:.2f}'
-            plot_one_box(xyxy, img0, label=label, color=colors[int(cls)], line_thickness=2)
+            plot_one_box(xyxy, img0, label=label, color=colors[int(cls)], line_thickness=1)
 
-    return Image.fromarray(img0)
-st.set_page_config(page_title="YOLOv7 Mask Detection", layout="centered")
-st.title("🛡️ YOLOv7 Mask Detection")
-st.write("Please upload an image or provide an image URL to detect whether a mask is being worn.")
+    f"""
+    ### Prediction result:
+    """
+    img0 = cv2.cvtColor(np.asarray(img0), cv2.COLOR_BGR2RGB)
+    st.image(img0, caption="Prediction Result", use_column_width=True)
 
-option = st.radio("Choose an input method:", ["Upload Image", "Image URL"])
+# Set parameters
 
-img = None
+# Directory containing this file (streamlit_app.py)
+current_dir = os.path.dirname(os.path.abspath(__file__))
 
-if option == "Upload Image":
-    uploaded_file = st.file_uploader("Please upload an image (e.g. JPG or PNG).", type=["jpg", "jpeg", "png"])
-    if uploaded_file:
-        try:
-            img = Image.open(uploaded_file).convert("RGB")
-        except:
-            st.error("⚠️ Could not read the image; please check the file format.")
+# Build the weight-file path from that directory
+weight_path = os.path.join(current_dir, 'best.pt')
+
+imgsz = 640
+conf = 0.4
+conf_thres = 0.25
+iou_thres = 0.45
+device = torch.device("cpu")
+path = "./"
 
-elif option == "Image URL":
-    url = st.text_input("Please paste an image URL.")
-    if url:
+# Load model
+# model = attempt_load(weight_path, map_location=torch.device('cpu'))  # load FP32 model
+ckpt = torch.load(weight_path, map_location=torch.device('cpu'), weights_only=False)
+model = ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()
+
+"""
+# YOLOv7
+Detect whether a person is wearing a face mask or not, using image or URL input.
+"""
+option = st.radio("", ["Upload Image", "Image URL"])
+
+if option == "Upload Image":
+    uploaded_file = st.file_uploader("Please upload an image.")
+
+    if uploaded_file is not None:
+        img = PILImage.create(uploaded_file)
+        detect_modify(img, model, conf=conf, imgsz=imgsz, conf_thres=conf_thres, iou_thres=iou_thres)
+else:
+    url = st.text_input("Please input a URL.")
+    if url != "":
        try:
            response = requests.get(url)
-            img = Image.open(BytesIO(response.content)).convert("RGB")
+            pil_img = PILImage.create(BytesIO(response.content))
+            detect_modify(pil_img, model, conf=conf, imgsz=imgsz, conf_thres=conf_thres, iou_thres=iou_thres)
        except:
-            st.error("⚠️ Could not read an image from that URL; please check the link.")
-
-if img:
-    st.image(img, caption="Original image", use_column_width=True)
-    with st.spinner("Running inference, please wait..."):
-        result_img = detect(img)
-    st.image(result_img, caption="Detection result", use_column_width=True)
+            st.text(f"Problem reading image from {url}")
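
A quick sanity check of the new letterbox helper (note auto=True is now the default, so padding is reduced modulo the stride). This is a sketch rather than part of the commit; it assumes the letterbox above is in scope and uses a made-up input shape:

import numpy as np

img = np.zeros((480, 640, 3), dtype=np.uint8)   # dummy frame, h=480, w=640
out, ratio, (dw, dh) = letterbox(img, new_shape=640, auto=True, stride=32)
# r = min(640/480, 640/640) = 1.0, so new_unpad = (640, 480); with auto=True
# the 160 px of height padding collapses to 160 mod 32 = 0, so the frame
# passes through unchanged instead of being padded up to 640x640
print(out.shape, ratio, (dw, dh))               # (480, 640, 3) (1.0, 1.0) (0.0, 0.0)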
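
The preprocessing chain in detect_modify turns an HWC BGR frame into the NCHW float batch the model expects. A minimal sketch of just that conversion, with an assumed dummy input:

import numpy as np
import torch

bgr = np.zeros((480, 640, 3), dtype=np.uint8)         # letterboxed BGR frame
rgb_chw = bgr[:, :, ::-1].transpose(2, 0, 1)          # BGR -> RGB, HWC -> CHW
t = torch.from_numpy(np.ascontiguousarray(rgb_chw))   # the [::-1] view needs a copy
t = (t / 255.0).unsqueeze(0)                          # scale to [0, 1], add batch dim
print(t.shape, t.dtype)                               # torch.Size([1, 3, 480, 640]) torch.float32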
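
For readers unfamiliar with the yolov7 utilities: non_max_suppression returns one tensor per input image, of shape (n, 6) with columns [x1, y1, x2, y2, conf, cls] in letterboxed pixel coordinates, which is why detect_modify takes pred[0] and then rescales det[:, :4] with scale_coords. Roughly, reusing model and img from the code above:

with torch.no_grad():
    pred = model(img)[0]                            # raw detection head output
pred = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45)
det = pred[0]                                       # detections for the one image
for *xyxy, conf, cls in det:
    print([int(v) for v in xyxy], float(conf), int(cls))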
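
The bare f-string inside detect_modify is not dead code: Streamlit's "magic" feature renders any standalone string literal in a script as Markdown. A short sketch of the equivalence:

import streamlit as st

"### Prediction result:"                   # rendered by Streamlit magic
st.markdown("### Prediction result:")      # the explicit equivalent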
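
A note on the new weights_only=False argument: since PyTorch 2.6, torch.load defaults to weights_only=True and refuses to unpickle arbitrary objects, but yolov7 checkpoints contain a full nn.Module (plus an optional EMA copy), so full unpickling is required; only do this with checkpoints you trust. The selection logic written out, assuming the standard yolov7 checkpoint layout:

import torch

ckpt = torch.load("best.pt", map_location="cpu", weights_only=False)
model = ckpt.get('ema') or ckpt['model']   # prefer the EMA weights when present
model = model.float().fuse().eval()        # FP32, fuse conv+bn, inference mode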
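
The URL branch's bare except hides the actual failure (bad URL, timeout, non-image payload). A slightly more defensive sketch, not part of the commit; the helper name and the 10-second timeout are arbitrary choices:

import requests
from io import BytesIO
from PIL import Image

def load_image_from_url(url: str, timeout: float = 10.0) -> Image.Image:
    resp = requests.get(url, timeout=timeout)
    resp.raise_for_status()                # surface HTTP errors instead of swallowing them
    return Image.open(BytesIO(resp.content)).convert("RGB")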