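"""Gradio demo for facial expression recognition.

Detects faces with YuNet, classifies expressions with the OpenCV facial expression
recognition model (MobileFaceNet), translates the labels to Dutch, and keeps simple
in-memory emotion statistics that are rendered as a bar chart with OpenCV.
"""
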
import cv2 as cv
import numpy as np
import gradio as gr
from pathlib import Path
from collections import Counter, defaultdict
from huggingface_hub import hf_hub_download

from facial_fer_model import FacialExpressionRecog
from yunet import YuNet

# Download the ONNX models from the Hugging Face Hub
FD_MODEL_PATH = hf_hub_download(repo_id="opencv/face_detection_yunet", filename="face_detection_yunet_2023mar.onnx")
FER_MODEL_PATH = hf_hub_download(repo_id="opencv/facial_expression_recognition", filename="facial_expression_recognition_mobilefacenet_2022july.onnx")

backend_id = cv.dnn.DNN_BACKEND_OPENCV
target_id = cv.dnn.DNN_TARGET_CPU

fer_model = FacialExpressionRecog(modelPath=FER_MODEL_PATH, backendId=backend_id, targetId=target_id)
detect_model = YuNet(modelPath=FD_MODEL_PATH)

# English -> Dutch mapping for the model's expression labels
EN_TO_NL = {
    "neutral": "Neutraal",
    "happy": "Blij",
    "sad": "Verdrietig",
    "surprise": "Verrast",
    "angry": "Boos",
    "anger": "Boos",
    "disgust": "Walging",
    "fear": "Bang",
    "contempt": "Minachting",
    "unknown": "Onbekend",
}

def to_dutch(label: str) -> str:
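    """Map an English expression label to its Dutch display name; unknown labels pass through unchanged."""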
    if not label:
        return "Onbekend"
    key = label.strip().lower()
    return EN_TO_NL.get(key, label)

# In-memory emotion statistics
emotion_stats = defaultdict(int)

def visualize(image, det_res, fer_res):
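    """Draw bounding boxes, Dutch expression labels and the five facial landmarks on a copy of the image."""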
    output = image.copy()
    landmark_color = [(255, 0, 0), (0, 0, 255), (0, 255, 0), (255, 0, 255), (0, 255, 255)]
    for det, fer_type in zip(det_res, fer_res):
        bbox = det[0:4].astype(np.int32)
        fer_type_str_nl = to_dutch(FacialExpressionRecog.getDesc(fer_type))
        cv.rectangle(output, (bbox[0], bbox[1]), (bbox[0]+bbox[2], bbox[1]+bbox[3]), (0, 255, 0), 2)
        cv.putText(output, fer_type_str_nl, (bbox[0], max(0, bbox[1] - 10)),
                   cv.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2, cv.LINE_AA)
        landmarks = det[4:14].astype(np.int32).reshape((5, 2))
        for idx, landmark in enumerate(landmarks):
            cv.circle(output, landmark, 2, landmark_color[idx], 2)
    return output

def summarize_emotions(fer_res):
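    """Build a Markdown summary: the most frequent emotion as heading, plus per-emotion counts."""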
    if not fer_res:
        return "## **Geen gezicht gedetecteerd**"
    names_nl = [to_dutch(FacialExpressionRecog.getDesc(x)) for x in fer_res]
    counts = Counter(names_nl).most_common()
    top = counts[0][0]
    details = ", ".join([f"{name} ({n})" for name, n in counts])
    return f"# **{top}**\n\n_Gedetecteerde emoties: {details}_"

# --- Draw the bar chart with OpenCV (no matplotlib needed) ---
def draw_bar_chart_cv(stats: dict, width=640, height=320):
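    """Render the emotion counters as a bar chart using OpenCV drawing primitives and return an RGB image."""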
    img = np.full((height, width, 3), 255, dtype=np.uint8)
    cv.putText(img, "Live emotie-statistieken", (12, 28), cv.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 0), 2, cv.LINE_AA)
    if not stats:
        cv.putText(img, "Nog geen statistieken", (12, height//2), cv.FONT_HERSHEY_SIMPLEX, 0.9, (128, 128, 128), 2, cv.LINE_AA)
        return cv.cvtColor(img, cv.COLOR_BGR2RGB)

    left, right, top, bottom = 60, 20, 50, 40
    plot_w = width - left - right
    plot_h = height - top - bottom
    origin = (left, height - bottom)

    cv.line(img, origin, (left + plot_w, height - bottom), (0, 0, 0), 2)
    cv.line(img, origin, (left, height - bottom - plot_h), (0, 0, 0), 2)

    labels = list(stats.keys())
    values = [stats[k] for k in labels]
    max_val = max(values) if max(values) > 0 else 1

    n = len(labels)
    gap = 12
    bar_w = max(10, int((plot_w - gap * (n + 1)) / max(1, n)))

    for i, (lab, val) in enumerate(zip(labels, values)):
        x1 = left + gap + i * (bar_w + gap)
        x2 = x1 + bar_w
        h_px = int((val / max_val) * (plot_h - 10))
        y1 = height - bottom - h_px
        y2 = height - bottom - 1
        cv.rectangle(img, (x1, y1), (x2, y2), (0, 170, 60), -1)
        cv.putText(img, str(val), (x1 + 2, y1 - 6), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 90, 30), 1, cv.LINE_AA)

        show_lab = lab if len(lab) <= 10 else lab[:9] + "…"
        (tw, th), _ = cv.getTextSize(show_lab, cv.FONT_HERSHEY_SIMPLEX, 0.5, 1)
        tx = x1 + (bar_w - tw) // 2
        ty = height - bottom + th + 12
        cv.putText(img, show_lab, (tx, ty), cv.FONT_HERSHEY_SIMPLEX, 0.5, (40, 40, 40), 1, cv.LINE_AA)

    return cv.cvtColor(img, cv.COLOR_BGR2RGB)

def process_image(input_image):
    """Helper: run detectie en retourneer (output_img, fer_res as list of ints)."""
    image = cv.cvtColor(input_image, cv.COLOR_RGB2BGR)
    h, w, _ = image.shape
    detect_model.setInputSize([w, h])
    dets = detect_model.infer(image)
    if dets is None:
        return cv.cvtColor(image, cv.COLOR_BGR2RGB), []
    fer_res = [fer_model.infer(image, face_points[:-1])[0] for face_points in dets]
    output = visualize(image, dets, fer_res)
    return cv.cvtColor(output, cv.COLOR_BGR2RGB), fer_res

def detect_expression(input_image):
    """Versie die WÉL statistieken bijwerkt (gebruik voor 'Verstuur')."""
    output_img, fer_res = process_image(input_image)
    emotion_md = summarize_emotions(fer_res)
    # update the statistics only here:
    names_nl = [to_dutch(FacialExpressionRecog.getDesc(x)) for x in fer_res]
    for name in names_nl:
        emotion_stats[name] += 1
    stats_plot = draw_bar_chart_cv(emotion_stats)
    return output_img, emotion_md, stats_plot

def detect_expression_no_stats(input_image):
    """Versie die GEEN statistieken bijwerkt (gebruik voor gr.Examples & caching)."""
    output_img, fer_res = process_image(input_image)
    emotion_md = summarize_emotions(fer_res)
    # no update of emotion_stats here
    stats_plot = draw_bar_chart_cv(emotion_stats)  # show the current totals (may be empty)
    return output_img, emotion_md, stats_plot

# Automatically load example images from the examples/ directory
IMAGE_EXTS = {".jpg", ".jpeg", ".png", ".bmp", ".webp"}
EXAMPLES_DIR = Path("examples")
if EXAMPLES_DIR.exists() and EXAMPLES_DIR.is_dir():
    example_paths = [str(p) for p in sorted(EXAMPLES_DIR.iterdir()) if p.suffix.lower() in IMAGE_EXTS]
else:
    example_paths = []
example_list = [[p] for p in example_paths]
CACHE_EXAMPLES = bool(example_list)

# CSS (green emotion result text)
custom_css = """
#emotie-uitslag { color: #16a34a; }
#emotie-uitslag h1, #emotie-uitslag h2, #emotie-uitslag h3 { margin: 0.25rem 0; }
"""

with gr.Blocks(css=custom_css) as demo:
    gr.Markdown("## Herkenning van gezichtsuitdrukkingen ")
    gr.Markdown("Detecteert gezichten en herkent gezichtsuitdrukkingen ")

    with gr.Row():
        with gr.Column():
            input_image = gr.Image(type="numpy", label="Afbeelding uploaden")
            with gr.Row():
                submit_btn = gr.Button("Verstuur", variant="primary")
                clear_btn = gr.Button("Wissen")
        with gr.Column():
            output_image = gr.Image(type="numpy", label="Resultaat gezichtsuitdrukking")
            emotion_md = gr.Markdown("## **Nog geen resultaat**", elem_id="emotie-uitslag")
            stats_image = gr.Image(label="Statistieken", type="numpy")

    # Helpers that clear the output panes (the accumulated statistics are preserved)
    def clear_all_on_new():
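        """Reset the result image and text when a new image is uploaded; keep the accumulated statistics."""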
        return None, "## **Nog geen resultaat**", draw_bar_chart_cv(emotion_stats)

    def clear_all_button():
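        """Clear the input and result panes via the 'Wissen' button; keep the accumulated statistics."""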
        return None, None, "## **Nog geen resultaat**", draw_bar_chart_cv(emotion_stats)

    input_image.change(fn=clear_all_on_new, outputs=[output_image, emotion_md, stats_image])
    submit_btn.click(fn=detect_expression, inputs=input_image, outputs=[output_image, emotion_md, stats_image])
    clear_btn.click(fn=clear_all_button, outputs=[input_image, output_image, emotion_md, stats_image])

    gr.Markdown("Klik op een voorbeeld om te testen.")
    # IMPORTANT: use the 'no_stats' function for Examples so they do NOT count toward the statistics
    gr.Examples(
        examples=example_list,
        inputs=input_image,
        outputs=[output_image, emotion_md, stats_image],
        fn=detect_expression_no_stats,   # <- does NOT update the statistics
        examples_per_page=20,
        cache_examples=CACHE_EXAMPLES
    )

if __name__ == "__main__":
    demo.launch()