import cv2 as cv
import numpy as np
import gradio as gr
from yunet import YuNet
from huggingface_hub import hf_hub_download
# Download ONNX model from Hugging Face
model_path = hf_hub_download(repo_id="opencv/face_detection_yunet", filename="face_detection_yunet_2023mar.onnx")
# Initialize YuNet model
model = YuNet(
    modelPath=model_path,
    inputSize=[320, 320],
    confThreshold=0.9,
    nmsThreshold=0.3,
    topK=5000,
    backendId=cv.dnn.DNN_BACKEND_OPENCV,
    targetId=cv.dnn.DNN_TARGET_CPU
)
def visualize(image, results, box_color=(0, 255, 0), text_color=(0, 0, 255)):
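    """Draw detection boxes, confidence scores, and the five facial landmarks on a copy of the image."""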
    output = image.copy()
    landmark_color = [
        (255,   0,   0),  # right eye
        (  0,   0, 255),  # left eye
        (  0, 255,   0),  # nose tip
        (255,   0, 255),  # right mouth corner
        (  0, 255, 255)   # left mouth corner
    ]
    for det in results:
        # Bounding box: [x, y, w, h]
        bbox = det[0:4].astype(np.int32)
        cv.rectangle(output, (bbox[0], bbox[1]), (bbox[0] + bbox[2], bbox[1] + bbox[3]), box_color, 2)
        # Confidence score is the last element of each detection
        conf = det[-1]
        cv.putText(output, '{:.2f}'.format(conf), (bbox[0], bbox[1] + 12), cv.FONT_HERSHEY_SIMPLEX, 0.5, text_color)
        # Five landmarks are stored as (x, y) pairs in elements 4..13
        landmarks = det[4:14].astype(np.int32).reshape((5, 2))
        for idx, landmark in enumerate(landmarks):
            cv.circle(output, tuple(landmark), 2, landmark_color[idx], 2)
    return output
def detect_faces(input_image):
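    """Run YuNet on an RGB image from Gradio and return the annotated image in RGB."""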
    # Gradio provides RGB images; OpenCV and YuNet expect BGR
    input_image = cv.cvtColor(input_image, cv.COLOR_RGB2BGR)
    h, w, _ = input_image.shape
    # YuNet requires the input size to match the image dimensions
    model.setInputSize([w, h])
    results = model.infer(input_image)
    if results is None or len(results) == 0:
        # No faces detected: return the unmodified image converted back to RGB
        return cv.cvtColor(input_image, cv.COLOR_BGR2RGB)
    output = visualize(input_image, results)
    # Convert back to RGB for display in Gradio
    return cv.cvtColor(output, cv.COLOR_BGR2RGB)
# Gradio Interface
demo = gr.Interface(
    fn=detect_faces,
    inputs=gr.Image(type="numpy", label="Upload Image"),
    outputs=gr.Image(type="numpy", label="Detected Faces"),
    title="Face Detection YuNet (OpenCV DNN)",
    allow_flagging="never",
    description="Upload an image to detect faces using OpenCV's ONNX-based YuNet face detector."
)
if __name__ == "__main__":
    demo.launch()