Update app.py
app.py
CHANGED
@@ -2,12 +2,13 @@ import cv2 as cv
 import numpy as np
 import gradio as gr
 from pathlib import Path
+from collections import Counter
 from huggingface_hub import hf_hub_download
 
 from facial_fer_model import FacialExpressionRecog
 from yunet import YuNet
 
-# Download ONNX
+# Download ONNX models from Hugging Face
 FD_MODEL_PATH = hf_hub_download(repo_id="opencv/face_detection_yunet", filename="face_detection_yunet_2023mar.onnx")
 FER_MODEL_PATH = hf_hub_download(repo_id="opencv/facial_expression_recognition", filename="facial_expression_recognition_mobilefacenet_2022july.onnx")
 
@@ -17,15 +18,40 @@ target_id = cv.dnn.DNN_TARGET_CPU
 fer_model = FacialExpressionRecog(modelPath=FER_MODEL_PATH, backendId=backend_id, targetId=target_id)
 detect_model = YuNet(modelPath=FD_MODEL_PATH)
 
+# === EN -> NL mapping of emotion labels ===
+EN_TO_NL = {
+    "neutral": "Neutraal",
+    "happy": "Blij",
+    "sad": "Verdrietig",
+    "surprise": "Verrast",
+    "angry": "Boos",
+    "anger": "Boos",  # some models return 'anger'
+    "disgust": "Walging",
+    "fear": "Bang",
+    "contempt": "Minachting",
+    "unknown": "Onbekend",
+}
+
+def to_dutch(label: str) -> str:
+    if not label:
+        return "Onbekend"
+    key = label.strip().lower()
+    return EN_TO_NL.get(key, label)  # fall back to the original label if unknown
+
 def visualize(image, det_res, fer_res):
     output = image.copy()
     landmark_color = [(255, 0, 0), (0, 0, 255), (0, 255, 0), (255, 0, 255), (0, 255, 255)]
 
     for det, fer_type in zip(det_res, fer_res):
         bbox = det[0:4].astype(np.int32)
+        # Model description -> NL label
+        fer_type_str_en = FacialExpressionRecog.getDesc(fer_type)
+        fer_type_str_nl = to_dutch(fer_type_str_en)
+
         cv.rectangle(output, (bbox[0], bbox[1]), (bbox[0]+bbox[2], bbox[1]+bbox[3]), (0, 255, 0), 2)
+        # Slightly larger and bolder text
+        cv.putText(output, fer_type_str_nl, (bbox[0], max(0, bbox[1] - 10)),
+                   cv.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2, cv.LINE_AA)
 
         landmarks = det[4:14].astype(np.int32).reshape((5, 2))
         for idx, landmark in enumerate(landmarks):
@@ -33,6 +59,26 @@ def visualize(image, det_res, fer_res):
 
     return output
 
+def summarize_emotions(fer_res):
+    """
+    Builds a Markdown string with a large, bold summary (NL).
+    With multiple faces, the most common emotion is highlighted.
+    """
+    if not fer_res:
+        return "## **Geen gezicht gedetecteerd**"
+
+    # Pull the English labels from the model and map them to NL
+    names_en = [FacialExpressionRecog.getDesc(x) for x in fer_res]
+    names_nl = [to_dutch(n) for n in names_en]
+    counts = Counter(names_nl).most_common()
+    top, top_n = counts[0]
+
+    # Compact list: "Blij (2), Neutraal (1)"
+    details = ", ".join([f"{name} ({n})" for name, n in counts])
+
+    # Big headline plus the breakdown
+    return f"# **{top}**\n\n_Gedetecteerde emoties: {details}_"
+
 def detect_expression(input_image):
     image = cv.cvtColor(input_image, cv.COLOR_RGB2BGR)
     h, w, _ = image.shape
@@ -40,7 +86,9 @@ def detect_expression(input_image):
 
     dets = detect_model.infer(image)
     if dets is None:
+        # No faces detected
+        emotion_md = summarize_emotions([])
+        return cv.cvtColor(image, cv.COLOR_BGR2RGB), emotion_md
 
     fer_res = []
     for face_points in dets:
@@ -48,7 +96,8 @@
         fer_res.append(result[0])
 
     output = visualize(image, dets, fer_res)
+    emotion_md = summarize_emotions(fer_res)
+    return cv.cvtColor(output, cv.COLOR_BGR2RGB), emotion_md
 
 # === Automatically load examples from the "examples/" folder ===
 IMAGE_EXTS = {".jpg", ".jpeg", ".png", ".bmp", ".webp"}
@@ -66,35 +115,46 @@ example_list = [[p] for p in example_paths]
 CACHE_EXAMPLES = bool(example_list)  # cache only if there really are examples
 # =============================================================
 
-# Gradio Interface
+# Gradio Interface (NL)
+custom_css = """
+.example * { font-style: italic; font-size: 18px !important; color: #0ea5e9 !important; }
+#emotie-uitslag h1, #emotie-uitslag h2, #emotie-uitslag h3 { margin: 0.25rem 0; }
+"""
 
+with gr.Blocks(css=custom_css, theme=gr.themes.Default()) as demo:
+    gr.Markdown("## Herkenning van gezichtsuitdrukkingen (FER) met OpenCV DNN")
+    gr.Markdown("Detecteert gezichten en herkent gezichtsuitdrukkingen met YuNet + MobileFaceNet (ONNX).")
 
     with gr.Row():
+        with gr.Column(scale=1):
+            input_image = gr.Image(type="numpy", label="Afbeelding uploaden")
+            with gr.Row():
+                submit_btn = gr.Button("Verstuur", variant="primary")
+                clear_btn = gr.Button("Wissen")
+        with gr.Column(scale=1):
+            # Big emotion verdict at the top
+            emotion_md = gr.Markdown(value="## **Nog geen resultaat**", elem_id="emotie-uitslag")
+            output_image = gr.Image(type="numpy", label="Resultaat gezichtsuitdrukking")
+
+    # Clear the output(s) when a new image is uploaded
+    def _clear_output_on_new_image():
+        return None, "## **Nog geen resultaat**"
+    input_image.change(fn=_clear_output_on_new_image, outputs=[output_image, emotion_md])
+
+    # Button action(s)
+    submit_btn.click(fn=detect_expression, inputs=input_image, outputs=[output_image, emotion_md])
+    clear_btn.click(fn=lambda: (None, None, "## **Nog geen resultaat**"),
+                    outputs=[input_image, output_image, emotion_md])
+
+    gr.Markdown("Klik op een voorbeeld om te testen.", elem_classes=["example"])
 
     gr.Examples(
-        examples=example_list,
+        examples=example_list,  # automatically loaded paths
         inputs=input_image,
-        outputs=output_image,
-        fn=detect_expression,
+        outputs=[output_image, emotion_md],  # needed for caching
+        fn=detect_expression,                # needed for caching
         examples_per_page=20,
-        cache_examples=CACHE_EXAMPLES
+        cache_examples=CACHE_EXAMPLES
    )
 
 if __name__ == "__main__":
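A note on the new summarize_emotions helper: it ranks the Dutch labels with collections.Counter.most_common(), which returns (label, count) pairs sorted by descending count, so counts[0] is the headline emotion. A standalone sanity check of the Markdown it produces (the three sample labels below are made up):

from collections import Counter

# Pretend three faces were detected: two happy, one neutral (made-up sample).
names_nl = ["Blij", "Blij", "Neutraal"]

counts = Counter(names_nl).most_common()  # [('Blij', 2), ('Neutraal', 1)]
top, top_n = counts[0]                    # headline emotion and its count
details = ", ".join(f"{name} ({n})" for name, n in counts)

print(f"# **{top}**\n\n_Gedetecteerde emoties: {details}_")
# # **Blij**
#
# _Gedetecteerde emoties: Blij (2), Neutraal (1)_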
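The hunks skip the body of the examples block (old lines 55-65, new lines 104-114); only IMAGE_EXTS, CACHE_EXAMPLES, and the example_list = [[p] for p in example_paths] line quoted in the last hunk header are visible. A minimal sketch of what the elided discovery code could look like; the EXAMPLES_DIR name and the sorted glob are assumptions, not the committed code:

from pathlib import Path

IMAGE_EXTS = {".jpg", ".jpeg", ".png", ".bmp", ".webp"}

# Hypothetical reconstruction: collect image paths from examples/ (assumed layout).
EXAMPLES_DIR = Path("examples")
example_paths = sorted(
    str(p) for p in EXAMPLES_DIR.glob("*")
    if p.suffix.lower() in IMAGE_EXTS
) if EXAMPLES_DIR.is_dir() else []

example_list = [[p] for p in example_paths]  # gr.Examples expects one row per example
CACHE_EXAMPLES = bool(example_list)          # cache only if there really are examples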
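The last hunk stops at the if __name__ == "__main__": guard, so its body is not part of the diff; for a Gradio Blocks app it is conventionally a bare launch call (assumed here, not shown in the commit):

if __name__ == "__main__":
    demo.launch()  # assumed body; the commit does not show this line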