Marcel0123 committed
Commit 152fa4b · verified · 1 Parent(s): d0c4bb8

Update app.py

Files changed (1):
  app.py +25 -15
app.py CHANGED
@@ -2,12 +2,13 @@ import cv2 as cv
 import numpy as np
 import gradio as gr
 import datetime
+from pathlib import Path
 from huggingface_hub import hf_hub_download
 
 from facial_fer_model import FacialExpressionRecog
 from yunet import YuNet
 
-# Download ONNX model from Hugging Face
+# Download ONNX models from Hugging Face
 FD_MODEL_PATH = hf_hub_download(repo_id="opencv/face_detection_yunet", filename="face_detection_yunet_2023mar.onnx")
 FER_MODEL_PATH = hf_hub_download(repo_id="opencv/facial_expression_recognition", filename="facial_expression_recognition_mobilefacenet_2022july.onnx")
 
@@ -50,12 +51,23 @@ def detect_expression(input_image):
     output = visualize(image, dets, fer_res)
     return cv.cvtColor(output, cv.COLOR_BGR2RGB)
 
+# === Automatically load examples from the "examples/" folder ===
+IMAGE_EXTS = {".jpg", ".jpeg", ".png", ".bmp", ".webp"}
+EXAMPLES_DIR = Path("examples")
+
+if EXAMPLES_DIR.exists() and EXAMPLES_DIR.is_dir():
+    example_paths = [
+        str(p) for p in sorted(EXAMPLES_DIR.iterdir())
+        if p.is_file() and p.suffix.lower() in IMAGE_EXTS
+    ]
+else:
+    example_paths = []
+
+example_list = [[p] for p in example_paths]
+# =============================================================
+
 # Gradio Interface
-with gr.Blocks(css='''.example * {
-    font-style: italic;
-    font-size: 18px !important;
-    color: #0ea5e9 !important;
-}''') as demo:
+with gr.Blocks(css='''.example * { font-style: italic; font-size: 18px !important; color: #0ea5e9 !important; }''') as demo:
 
     gr.Markdown("### Facial Expression Recognition (FER) with OpenCV DNN")
     gr.Markdown("Detects faces and recognizes facial expressions using YuNet + MobileFaceNet ONNX models.")
@@ -64,7 +76,7 @@ with gr.Blocks(css='''.example * {
     input_image = gr.Image(type="numpy", label="Upload Image")
     output_image = gr.Image(type="numpy", label="Facial Expression Result")
 
-    # Clear output when new image is uploaded
+    # Clear the output when a new image is uploaded
     input_image.change(fn=lambda: (None), outputs=output_image)
 
     with gr.Row():
@@ -72,18 +84,16 @@ with gr.Blocks(css='''.example * {
         clear_btn = gr.Button("Clear")
 
     submit_btn.click(fn=detect_expression, inputs=input_image, outputs=output_image)
-    clear_btn.click(fn=lambda:(None, None), outputs=[input_image, output_image])
+    clear_btn.click(fn=lambda: (None, None), outputs=[input_image, output_image])
 
-    gr.Markdown("Click on any example to try it.", elem_classes=["example"])
+    gr.Markdown("Click on an example to try it.", elem_classes=["example"])
 
     gr.Examples(
-        examples=[
-            ["examples/lena.jpg"],
-            ["examples/gray_face.png"]
-        ],
-        inputs=input_image
+        examples=example_list,      # loaded automatically
+        inputs=input_image,
+        examples_per_page=20,       # useful when there are many images
+        cache_examples=True         # faster on Hugging Face Spaces
     )
 
-
 if __name__ == "__main__":
     demo.launch()
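
Note on the new gr.Examples(...) call: in Gradio, cache_examples=True precomputes and stores the output for every example when the app starts, and that caching step generally needs the prediction function and output component to be passed to gr.Examples as well. A minimal sketch of that variant, meant as a drop-in for the call inside the with gr.Blocks(...) block; it reuses detect_expression, input_image, output_image, and example_list from app.py, and the fn/outputs pairing is an assumption about how caching would be wired up, not something this commit adds:

    gr.Examples(
        examples=example_list,        # image paths collected from "examples/"
        inputs=input_image,
        fn=detect_expression,         # assumption: lets Gradio precompute the cached outputs
        outputs=output_image,
        examples_per_page=20,
        cache_examples=True,          # outputs are computed once at startup and reused
    )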