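"""Gradio Space: face detection with OpenCV's YuNet (ONNX) detector.

Takes an uploaded image, runs YuNet through the OpenCV DNN module, and draws
the detected bounding boxes, confidence scores, and five facial landmarks.
"""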
import cv2 as cv
import numpy as np
import gradio as gr
from yunet import YuNet
from huggingface_hub import hf_hub_download
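# NOTE: YuNet here is assumed to be the small wrapper class (yunet.py) shipped with
# the OpenCV Zoo face_detection_yunet demo, sitting next to this script; it wraps
# OpenCV's FaceDetectorYN API.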
# Download ONNX model from Hugging Face
model_path = hf_hub_download(repo_id="opencv/face_detection_yunet", filename="face_detection_yunet_2023mar.onnx")
# Initialize YuNet model
model = YuNet(
    modelPath=model_path,
    inputSize=[320, 320],
    confThreshold=0.9,
    nmsThreshold=0.3,
    topK=5000,
    backendId=cv.dnn.DNN_BACKEND_OPENCV,
    targetId=cv.dnn.DNN_TARGET_CPU
)
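# Each YuNet detection is a 15-element row:
# [x, y, w, h, 5 x (landmark x, landmark y), confidence score],
# which is how visualize() and detect_faces() below slice it.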
def visualize(image, results, box_color=(0, 255, 0), text_color=(0, 0, 255)):
    """Draw bounding boxes, confidence scores and landmarks on a BGR image."""
    output = image.copy()
    landmark_color = [
        (255,   0,   0),  # right eye
        (  0,   0, 255),  # left eye
        (  0, 255,   0),  # nose tip
        (255,   0, 255),  # right mouth corner
        (  0, 255, 255)   # left mouth corner
    ]
    for det in results:
        bbox = det[0:4].astype(np.int32)
        cv.rectangle(output, (bbox[0], bbox[1]), (bbox[0] + bbox[2], bbox[1] + bbox[3]), box_color, 2)
        conf = det[-1]
        cv.putText(output, '{:.2f}'.format(conf), (bbox[0], bbox[1] + 12), cv.FONT_HERSHEY_SIMPLEX, 0.5, text_color)
        landmarks = det[4:14].astype(np.int32).reshape((5, 2))
        for idx, landmark in enumerate(landmarks):
            cv.circle(output, tuple(landmark), 2, landmark_color[idx], 2)
    return output
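# Gradio hands images to the callback as RGB numpy arrays, while OpenCV works in
# BGR, so detect_faces() converts on the way in and back out again.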
def detect_faces(input_image):
    # Gradio provides RGB; convert to BGR for OpenCV
    input_image = cv.cvtColor(input_image, cv.COLOR_RGB2BGR)
    h, w, _ = input_image.shape
    # The detector's input size must match the current image before inference
    model.setInputSize([w, h])
    results = model.infer(input_image)
    if results is None or len(results) == 0:
        # No faces detected: return the image unchanged (converted back to RGB)
        input_image = cv.cvtColor(input_image, cv.COLOR_BGR2RGB)
        return input_image
    output = visualize(input_image, results)
    output = cv.cvtColor(output, cv.COLOR_BGR2RGB)
    return output
# Alternative: the same app as a single gr.Interface (kept for reference)
# demo = gr.Interface(
#     fn=detect_faces,
#     inputs=gr.Image(type="numpy", label="Upload Image"),
#     outputs=gr.Image(type="numpy", label="Detected Faces"),
#     title="Face Detection YuNet (OpenCV DNN)",
#     allow_flagging="never",
#     description="Upload an image to detect faces using OpenCV's ONNX-based YuNet face detector."
# )
# Gradio Interface
with gr.Blocks(css='''.example * {
    font-style: italic;
    font-size: 18px !important;
    color: #0ea5e9 !important;
}''') as demo:
    gr.Markdown("### Face Detection YuNet (OpenCV DNN)")
    gr.Markdown("Upload an image to detect faces using OpenCV's ONNX-based YuNet face detector.")

    with gr.Row():
        input_image = gr.Image(type="numpy", label="Upload Image")
        output_image = gr.Image(type="numpy", label="Detected Faces")

    # Clear the output when a new image is uploaded
    input_image.change(fn=lambda: None, outputs=output_image)

    with gr.Row():
        submit_btn = gr.Button("Submit", variant="primary")
        clear_btn = gr.Button("Clear")

    submit_btn.click(fn=detect_faces, inputs=input_image, outputs=output_image)
    clear_btn.click(fn=lambda: (None, None), outputs=[input_image, output_image])

    gr.Markdown("Click on any example to try it.", elem_classes=["example"])
    gr.Examples(
        examples=[
            ["examples/selfie.jpg"],
            ["examples/lena.jpg"],
            ["examples/messi5.jpg"]
        ],
        inputs=input_image
    )
if __name__ == "__main__":
    demo.launch()
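# To try this locally (assumes gradio, opencv-python, numpy and huggingface_hub are
# installed, plus the yunet.py wrapper and the examples/ images), run the script with
# Python; Gradio prints a local URL (http://127.0.0.1:7860 by default) to open in a browser.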