from fastai.vision.all import *
from io import BytesIO
import requests
import streamlit as st
import numpy as np
import torch
import time
import cv2
from numpy import random
import os
import sys
# Add the parent directory to the module search path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from models.experimental import attempt_load
from utils.general import check_img_size, non_max_suppression, scale_coords
from utils.plots import plot_one_box
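# Note: the three imports above assume this script sits one level below a YOLOv7
# repository root (i.e. ../models/experimental.py and ../utils/ exist); the
# sys.path tweak above is what makes those package imports resolvable.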
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
    """Resize an image to new_shape while keeping its aspect ratio, padding the remainder with a constant color."""
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)
    # Scale ratio (new / old); only shrink when scaleup is disabled
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:
        r = min(r, 1.0)
    ratio = r, r
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # padding in width / height
    if auto:
        # Minimum rectangle: pad only up to the next multiple of the model stride
        dw, dh = np.mod(dw, stride), np.mod(dh, stride)
    elif scaleFill:
        # Stretch to fill new_shape exactly (aspect ratio not preserved)
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]
    dw /= 2  # split the padding evenly between both sides
    dh /= 2
    if shape[::-1] != new_unpad:
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
    return img, ratio, (dw, dh)
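# For reference, a rough example of what letterbox produces (the frame size is
# illustrative, not part of this app): a 1280x720 BGR image with new_shape=640 and
# stride=32 is scaled by r=0.5 to 640x360, then padded up to the next multiple of
# 32 in height, giving a 640x384 output:
#   padded, ratio, (dw, dh) = letterbox(frame, new_shape=640, stride=32)
#   # padded.shape == (384, 640, 3), ratio == (0.5, 0.5), (dw, dh) == (0.0, 12.0)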
def detect_modify(img0, model, device, conf=0.4, imgsz=640, conf_thres=0.25, iou_thres=0.45):
    """Run YOLOv7 inference on a PIL image and show the input and annotated result in Streamlit.

    The conf argument is accepted but unused; conf_thres and iou_thres control filtering.
    """
    st.image(img0, caption="Your image", use_column_width=True)

    # Pre-process: PIL (RGB) -> OpenCV (BGR) -> letterboxed, CHW, contiguous float tensor in [0, 1]
    stride = int(model.stride.max())
    imgsz = check_img_size(imgsz, s=stride)
    img0 = cv2.cvtColor(np.asarray(img0), cv2.COLOR_RGB2BGR)
    img = letterbox(img0, imgsz, stride=stride)[0]
    img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR -> RGB, HWC -> CHW
    img = np.ascontiguousarray(img)

    names = model.module.names if hasattr(model, 'module') else model.names
    colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]

    img = torch.from_numpy(img).to(device).float() / 255.0
    if img.ndimension() == 3:
        img = img.unsqueeze(0)  # add batch dimension

    # Inference + NMS; each surviving row of det is [x1, y1, x2, y2, confidence, class]
    with torch.no_grad():
        pred = model(img)[0]
    pred = non_max_suppression(pred, conf_thres, iou_thres)

    gn = torch.tensor(img0.shape)[[1, 0, 1, 0]]  # whwh normalization gain (not used below)
    det = pred[0]
    if len(det):
        # Rescale boxes from the letterboxed size back to the original image size
        det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()
        for *xyxy, conf, cls in reversed(det):
            label = f'{names[int(cls)]} {conf:.2f}'
            plot_one_box(xyxy, img0, label=label, color=colors[int(cls)], line_thickness=1)

    img0 = cv2.cvtColor(img0, cv2.COLOR_BGR2RGB)
    st.image(img0, caption="Prediction Result", use_column_width=True)
# Directory containing this file
current_dir = os.path.dirname(os.path.abspath(__file__))
# Path to the model weights
weight_path = os.path.join(current_dir, 'best.pt')

# Inference settings
imgsz = 640
conf = 0.4
conf_thres = 0.25
iou_thres = 0.45
device = torch.device("cpu")

# Load the model checkpoint
ckpt = torch.load(weight_path, map_location=device, weights_only=False)
model = ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()
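# YOLOv7 training checkpoints store the EMA weights under 'ema' and the raw model
# under 'model'; the line above prefers EMA when present. fuse() folds BatchNorm
# layers into the preceding convolutions to speed up CPU inference, and eval()
# switches the network to inference mode.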
# Streamlit UI
st.title("YOLOv7 Mask Detection")
st.write("Detect whether a person is wearing a face mask or not.")
option = st.radio("Select Input Method", ["Upload Image", "Image URL"])
if option == "Upload Image":
uploaded_file = st.file_uploader("Please upload an image.", type=["jpg", "jpeg", "png"])
if uploaded_file is not None:
img = PILImage.create(uploaded_file)
detect_modify(img, model, device, conf=conf, imgsz=imgsz, conf_thres=conf_thres, iou_thres=iou_thres)
elif option == "Image URL":
url = st.text_input("Please input an image URL.")
if url:
try:
response = requests.get(url)
response.raise_for_status() # 檢查 http status
pil_img = PILImage.create(BytesIO(response.content))
detect_modify(pil_img, model, device, conf=conf, imgsz=imgsz, conf_thres=conf_thres, iou_thres=iou_thres)
except Exception as e:
st.error(f"Problem reading image from URL: {url}")
st.error(str(e))
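# To try the app locally (assuming Streamlit is installed and best.pt sits next to
# this script), run it with the Streamlit CLI, substituting the actual filename:
#   streamlit run <this_file>.py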