import torch
from ultralytics import YOLO
from fastapi import FastAPI, UploadFile, File
from fastapi.responses import JSONResponse  # we switch to JSONResponse for the combined response
from PIL import Image
import io
import os
import numpy as np
import cv2
import base64  # base64 module, used to encode the annotated image

# Initialize the FastAPI application
app = FastAPI()

# Load the YOLOv8 model
try:
    model = YOLO("best.onnx")
    print("YOLOv8 model loaded successfully in the Hugging Face Space!")
except Exception as e:
    # If the model fails to load, raise an error at startup
    raise RuntimeError(f"Failed to load the YOLOv8 model in the Space: {e}")
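
# Note (sketch, not executed here): "best.onnx" is assumed to have been exported
# offline from a trained Ultralytics checkpoint (e.g. a hypothetical "best.pt"),
# roughly via:
#     YOLO("best.pt").export(format="onnx")
# and then uploaded alongside this app in the Space.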

@app.get("/")
async def read_root():
    """
    Root path, used to check that the API is running.
    """
    return {"message": "Hugging Face Space API for Taiwan Black Bear Detection is running!"}

@app.post("/predict")  # route path assumed for this endpoint
async def predict_image(file: UploadFile = File(...)):
    """
    Receive an image file, run Taiwan black bear detection,
    and return a JSON payload with the detections and the processed image (processed_image).
    """
    try:
        # --- Step 1: read and prepare the image (same as the original code) ---
        contents = await file.read()
        image = Image.open(io.BytesIO(contents)).convert("RGB")
        image_np = np.array(image)
        image_cv2 = cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR)

        # --- Step 2: run inference (same as the original code) ---
        results = model.predict(source=image_cv2, conf=0.25)

        # --- Step 3: collect detections and draw on a copy of the image (core change) ---
        detections = []  # empty list to hold the detected objects
        output_image_np = image_cv2.copy()  # image copy used for drawing
        for r in results:
            for box in r.boxes:
                x1, y1, x2, y2 = map(int, box.xyxy[0])
                conf = round(float(box.conf[0]), 2)
                cls = int(box.cls[0])
                name = model.names[cls]

                # Record every detected object, whatever its class
                detections.append({
                    "label": name,
                    "confidence": conf,
                    "box": [x1, y1, x2, y2]
                })

                # Draw the bounding box and label on the image copy
                # so the frontend can display the annotated image
                label_text = f'{name} {conf}'
                cv2.rectangle(output_image_np, (x1, y1), (x2, y2), (0, 255, 0), 2)
                cv2.putText(output_image_np, label_text, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

        # --- Step 4: encode the annotated image as a Base64 string (core change) ---
        # Convert the OpenCV image (BGR) back to PIL format (RGB)
        output_image_pil = Image.fromarray(cv2.cvtColor(output_image_np, cv2.COLOR_BGR2RGB))

        # Save it into an in-memory BytesIO buffer
        byte_arr = io.BytesIO()
        output_image_pil.save(byte_arr, format='JPEG')

        # Base64-encode the bytes and convert to a string
        processed_image_base64 = base64.b64encode(byte_arr.getvalue()).decode('utf-8')

        # --- Step 5: return the final JSON object (core change) ---
        return JSONResponse(content={
            "detections": detections,
            "processed_image": processed_image_base64
        })
    except Exception as e:
        # Return a detailed error message with an HTTP 500 status code
        return JSONResponse(content={"error": str(e)}, status_code=500)
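
# --- Usage sketch: a minimal client for the endpoint above. ---
# Not called by the app itself; shown only as an illustration. The Space URL,
# the local file names, and the "/predict" path (assumed above) are placeholders.
def _example_client(space_url: str = "https://<your-space>.hf.space"):
    import requests  # assumed to be installed in the client environment

    # Send an image to the detection endpoint; the multipart field name must be "file"
    with open("bear.jpg", "rb") as f:  # hypothetical test image
        resp = requests.post(f"{space_url}/predict", files={"file": f})
    data = resp.json()

    # "detections" is a list of {label, confidence, box}
    print(data["detections"])

    # "processed_image" is the Base64-encoded annotated JPEG
    with open("annotated.jpg", "wb") as out:
        out.write(base64.b64decode(data["processed_image"]))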