awacke1 commited on
Commit
b4f38f2
·
1 Parent(s): a0b24fa

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +119 -0
app.py ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
# Gradio demo of MediaPipe Face Mesh: detect faces in an uploaded image and
# overlay the face-mesh tesselation, contours, and iris landmarks.
from __future__ import annotations

import os
import pathlib
import shlex
import subprocess
import tarfile

# NOTE(review): on Hugging Face Spaces (SYSTEM == 'spaces') this replaces any
# installed OpenCV with a pinned headless build BEFORE mediapipe is imported —
# presumably to avoid the GUI-linked opencv-python wheel on a server; confirm
# the pin (4.5.5.64) is still the intended version.
if os.environ.get('SYSTEM') == 'spaces':
    subprocess.call(shlex.split('pip uninstall -y opencv-python'))
    subprocess.call(shlex.split('pip uninstall -y opencv-python-headless'))
    subprocess.call(
        shlex.split('pip install opencv-python-headless==4.5.5.64'))

import gradio as gr
import huggingface_hub
import mediapipe as mp
import numpy as np

# MediaPipe drawing helpers and the face-mesh solution module, aliased for
# use by run() below.
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_face_mesh = mp.solutions.face_mesh

TITLE = 'MediaPipe Face Mesh'
DESCRIPTION = 'https://google.github.io/mediapipe/'

# Hub token used when downloading the sample-image dataset; may be None.
HF_TOKEN = os.getenv('HF_TOKEN')
29
+
30
+
31
def load_sample_images() -> list[pathlib.Path]:
    """Download and extract sample images, returning sorted JPEG paths.

    On first run (no ``images/`` directory yet), fetches sample tarballs
    from the ``hysts/input-images`` dataset on the Hugging Face Hub and
    extracts them into ``images/``. Later runs reuse the existing directory.

    Returns:
        Sorted list of every ``*.jpg`` found (recursively) under ``images/``.
    """
    image_dir = pathlib.Path('images')
    if not image_dir.exists():
        image_dir.mkdir()
        dataset_repo = 'hysts/input-images'
        filenames = ['001.tar', '005.tar']
        for name in filenames:
            path = huggingface_hub.hf_hub_download(dataset_repo,
                                                   name,
                                                   repo_type='dataset',
                                                   use_auth_token=HF_TOKEN)
            with tarfile.open(path) as f:
                # Fix: never extractall() a downloaded archive unchecked —
                # a member named e.g. '../../x' would escape image_dir
                # (tarfile path-traversal). Prefer the stdlib 'data' filter
                # (Python 3.12+); otherwise validate members manually.
                try:
                    f.extractall(image_dir.as_posix(), filter='data')
                except TypeError:
                    base = str(image_dir.resolve())
                    for member in f.getmembers():
                        target = (image_dir / member.name).resolve()
                        if os.path.commonpath([base, str(target)]) != base:
                            raise RuntimeError(
                                f'Unsafe path in archive: {member.name}')
                    f.extractall(image_dir.as_posix())
    return sorted(image_dir.rglob('*.jpg'))
45
+
46
+
47
def run(
    image: np.ndarray,
    max_num_faces: int,
    min_detection_confidence: float,
    show_tesselation: bool,
    show_contours: bool,
    show_irises: bool,
) -> np.ndarray:
    """Detect face landmarks in *image* and draw the selected overlays.

    Args:
        image: Input image as an H x W x 3 array (as delivered by the
            Gradio numpy image component).
        max_num_faces: Upper bound on faces to detect.
        min_detection_confidence: Detection threshold in [0, 1].
        show_tesselation: Draw the full mesh tesselation.
        show_contours: Draw the face contours.
        show_irises: Draw the iris connections.

    Returns:
        A copy of *image* with the requested landmark overlays drawn.
    """
    with mp_face_mesh.FaceMesh(
            static_image_mode=True,
            max_num_faces=max_num_faces,
            refine_landmarks=True,
            min_detection_confidence=min_detection_confidence) as face_mesh:
        detection = face_mesh.process(image)

    # Draw on a channel-reversed copy, then reverse back before returning
    # (the default drawing styles expect the other channel order).
    canvas = image[:, :, ::-1].copy()

    # One (enabled-flag, connection set, style factory) triple per overlay;
    # style factories are only invoked for overlays that are switched on.
    overlays = (
        (show_tesselation, mp_face_mesh.FACEMESH_TESSELATION,
         mp_drawing_styles.get_default_face_mesh_tesselation_style),
        (show_contours, mp_face_mesh.FACEMESH_CONTOURS,
         mp_drawing_styles.get_default_face_mesh_contours_style),
        (show_irises, mp_face_mesh.FACEMESH_IRISES,
         mp_drawing_styles.get_default_face_mesh_iris_connections_style),
    )

    if detection.multi_face_landmarks is not None:
        for landmarks in detection.multi_face_landmarks:
            for enabled, connections, make_style in overlays:
                if not enabled:
                    continue
                mp_drawing.draw_landmarks(
                    image=canvas,
                    landmark_list=landmarks,
                    connections=connections,
                    landmark_drawing_spec=None,
                    connection_drawing_spec=make_style())

    return canvas[:, :, ::-1]
91
+
92
+
93
# Build the demo UI. Each example row mirrors run()'s parameter order:
# (image path, max faces, min confidence, tesselation, contours, irises).
image_paths = load_sample_images()
examples = [[path.as_posix(), 5, 0.5, True, True, True]
            for path in image_paths]

# Input widgets, one per run() parameter, in signature order.
demo_inputs = [
    gr.Image(label='Input', type='numpy'),
    gr.Slider(label='Max Number of Faces',
              minimum=0,
              maximum=10,
              step=1,
              value=5),
    gr.Slider(label='Minimum Detection Confidence',
              minimum=0,
              maximum=1,
              step=0.05,
              value=0.5),
    gr.Checkbox(label='Show Tesselation', value=True),
    gr.Checkbox(label='Show Contours', value=True),
    gr.Checkbox(label='Show Irises', value=True),
]

gr.Interface(
    fn=run,
    inputs=demo_inputs,
    outputs=gr.Image(label='Output', type='numpy'),
    examples=examples,
    title=TITLE,
    description=DESCRIPTION,
).launch(show_api=False)