from huggingface_hub import hf_hub_download
from ultralytics import YOLO
from supervision import Detections
import cv2
import gradio as gr
from PIL import Image
import numpy as np
from transformers import pipeline

# Gender classifier: Hugging Face image-classification pipeline (downloads the
# NTQAI pedestrian gender model on first use).
pipe = pipeline("image-classification", model="NTQAI/pedestrian_gender_recognition")
# Fetch the YOLOv8 face-detection weights from the Hub once at import time,
# then load them into an ultralytics YOLO model used by detect_faces().
model_path = hf_hub_download(repo_id="arnabdhar/YOLOv8-Face-Detection", filename="model.pt")
model = YOLO(model_path)

def detect_faces(image):
    """Detect faces with two methods and classify the image's pedestrian gender.

    Args:
        image: Input image (PIL.Image or array accepted by the YOLO model,
            the HF pipeline, and ``np.array``).

    Returns:
        tuple: ``(haar_im, yolo_im, label)`` where ``haar_im`` is the image
        annotated with green Haar-cascade boxes, ``yolo_im`` the image
        annotated with YOLO boxes (color tuple ``(0, 0, 255)``), and
        ``label`` the top-1 label string from the gender classifier.
    """
    # Convert once; each annotator draws on its own independent copy.
    base = np.array(image)

    # --- YOLOv8 face detection ---
    output = model(image)
    results = Detections.from_ultralytics(output[0])
    yolo_im = base.copy()
    # results.xyxy is an (N, 4) array of [x1, y1, x2, y2] boxes — clearer
    # than unpacking the per-detection tuples with i[0][0]..i[0][3].
    for x1, y1, x2, y2 in results.xyxy:
        cv2.rectangle(yolo_im, (int(x1), int(y1)), (int(x2), int(y2)), (0, 0, 255), 2)

    # --- Gender classification: keep only the top-ranked label ---
    label_out = pipe(image)

    # --- Classical Haar-cascade face detection on a grayscale copy ---
    haar_im = base.copy()
    gray_image = cv2.cvtColor(haar_im, cv2.COLOR_RGB2GRAY)
    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
    faces = face_cascade.detectMultiScale(gray_image, scaleFactor=1.1, minNeighbors=5, minSize=(5, 5))
    for (x, y, w, h) in faces:
        cv2.rectangle(haar_im, (x, y), (x+w, y+h), (0, 255, 0), 2)

    # Same return order as before: (Haar-annotated, YOLO-annotated, label).
    return (haar_im, yolo_im, label_out[0]['label'])

# Gradio UI: one image input; three outputs — Haar-annotated image,
# YOLO-annotated image, and the predicted gender label as text.
upload_widget = gr.Image(label='Upload Image')
result_widgets = [gr.Image(label='Original'), gr.Image(label='Deep learning'), 'text']

interface = gr.Interface(
    fn=detect_faces,
    inputs=upload_widget,
    outputs=result_widgets,
    title="Face Detection Deep Learning",
    description="Upload an image, and the model will detect faces and draw bounding boxes around them.",
)
interface.launch()