Update app.py
app.py
CHANGED
@@ -1,97 +1,478 @@
- [removed: the previous 97-line implementation — its imports, PIL image handling,
-  st.success("Analysis complete! Here's what we found:"), and result rendering —
-  replaced wholesale by the new Gradio app below]
import os
import gradio as gr
import requests
import json
from PIL import Image

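# Note for running this Space locally (an assumption, not enforced anywhere in
# this file): the RapidAPI calls below read the key via
# os.environ.get("API_KEY"), so set it before launching, e.g.
#   export API_KEY=<your RapidAPI key>
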
css = """
.example-image img{
    display: flex; /* Use flexbox to align items */
    justify-content: center; /* Center the image horizontally */
    align-items: center; /* Center the image vertically */
    height: 300px; /* Set the height of the container */
    object-fit: contain; /* Preserve aspect ratio while fitting the image within the container */
}
.example-image{
    display: flex; /* Use flexbox to align items */
    justify-content: center; /* Center the image horizontally */
    align-items: center; /* Center the image vertically */
    height: 350px; /* Set the height of the container */
    object-fit: contain; /* Preserve aspect ratio while fitting the image within the container */
}
.face-row {
    display: flex;
    justify-content: space-around; /* Distribute space evenly between elements */
    align-items: center; /* Align items vertically */
    width: 100%; /* Set the width of the row to 100% */
}
.face-image{
    justify-content: center; /* Center the image horizontally */
    align-items: center; /* Center the image vertically */
    height: 160px; /* Set the height of the container */
    width: 160px;
    object-fit: contain; /* Preserve aspect ratio while fitting the image within the container */
}
.face-image img{
    justify-content: center; /* Center the image horizontally */
    align-items: center; /* Center the image vertically */
    height: 160px; /* Set the height of the container */
    object-fit: contain; /* Preserve aspect ratio while fitting the image within the container */
}
.markdown-success-container {
    background-color: #F6FFED;
    padding: 20px;
    margin: 20px;
    border-radius: 1px;
    border: 2px solid green;
    text-align: center;
}
.markdown-fail-container {
    background-color: #FFF1F0;
    padding: 20px;
    margin: 20px;
    border-radius: 1px;
    border: 2px solid red;
    text-align: center;
}
.markdown-attribute-container {
    display: flex;
    justify-content: space-around; /* Distribute space evenly between elements */
    align-items: center; /* Align items vertically */
    padding: 10px;
    margin: 10px;
}
.block-background {
    /* background-color: #202020; */ /* Set your desired background color */
    border-radius: 5px;
}
"""

def convert_fun(input_str):
    # Remove line breaks and extra whitespace
    return ' '.join(input_str.split())

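# Illustrative check of convert_fun's behavior (not part of the app flow):
#   convert_fun("<td>\n   Age\n</td>")  ->  "<td> Age </td>"
# This flattens the multi-line HTML templates below into a single line so
# Gradio's Markdown component renders them as one block.
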
def get_attributes(frame):
    url = "https://recognito.p.rapidapi.com/api/analyze_face"
    try:
        files = {'image': open(frame, 'rb')}
        headers = {"X-RapidAPI-Key": os.environ.get("API_KEY")}

        r = requests.post(url=url, files=files, headers=headers)
    except Exception:
        raise gr.Error("Please select an image file!")

    face_crop, one_line_attribute = None, ""
    try:
        image = Image.open(frame)

        res = r.json().get('image')
        if res:
            face = res.get('detection')
            x1 = face.get('x')
            y1 = face.get('y')
            x2 = x1 + face.get('w')
            y2 = y1 + face.get('h')

            # Clamp the detection rectangle to the image bounds.
            if x1 < 0:
                x1 = 0
            if y1 < 0:
                y1 = 0
            if x2 >= image.width:
                x2 = image.width - 1
            if y2 >= image.height:
                y2 = image.height - 1

            # Crop the face and scale it to 150 px high, preserving aspect ratio.
            face_crop = image.crop((x1, y1, x2, y2))
            face_image_ratio = face_crop.width / float(face_crop.height)
            resized_w = int(face_image_ratio * 150)
            resized_h = 150

            face_crop = face_crop.resize((resized_w, resized_h))

            attr = res.get('attribute')

            age = attr.get('age')
            gender = attr.get('gender')
            emotion = attr.get('emotion')
            ethnicity = attr.get('ethnicity')  # fetched but not shown in the table below

            mask = attr.get('face_mask')
            glass = 'No Glasses'
            if attr.get('glasses') == 'USUAL':
                glass = 'Glasses'
            if attr.get('glasses') == 'DARK':
                glass = 'Sunglasses'

            open_eye_thr = 0.3
            left_eye = 'Closed'
            if attr.get('eye_left') >= open_eye_thr:
                left_eye = 'Open'

            right_eye = 'Closed'
            if attr.get('eye_right') >= open_eye_thr:
                right_eye = 'Open'

            facehair = attr.get('facial_hair')
            haircolor = attr.get('hair_color')
            hairtype = attr.get('hair_type')
            headwear = attr.get('headwear')

            pitch = attr.get('pitch')
            roll = attr.get('roll')
            yaw = attr.get('yaw')
            quality = attr.get('quality')

            attribute = f"""
            <br/>
            <div class="markdown-attribute-container">
            <table>
                <tr>
                    <th style="text-align: center;">Attribute</th>
                    <th style="text-align: center;">Result</th>
                    <th style="text-align: center;">Score</th>
                    <th style="text-align: center;">Threshold</th>
                </tr>
                <tr>
                    <td>Gender</td>
                    <td>{gender}</td>
                    <td></td><td></td>
                </tr>
                <tr>
                    <td>Age</td>
                    <td>{int(age)}</td>
                    <td></td><td></td>
                </tr>
                <tr>
                    <td>Pitch</td>
                    <td>{"{:.4f}".format(pitch)}</td>
                    <td></td><td></td>
                </tr>
                <tr>
                    <td>Yaw</td>
                    <td>{"{:.4f}".format(yaw)}</td>
                    <td></td><td></td>
                </tr>
                <tr>
                    <td>Roll</td>
                    <td>{"{:.4f}".format(roll)}</td>
                    <td></td><td></td>
                </tr>
                <tr>
                    <td>Emotion</td>
                    <td>{emotion}</td>
                    <td></td><td></td>
                </tr>
                <tr>
                    <td>Left Eye</td>
                    <td>{left_eye}</td>
                    <td>{"{:.4f}".format(attr.get('eye_left'))}</td>
                    <td>{open_eye_thr}</td>
                </tr>
                <tr>
                    <td>Right Eye</td>
                    <td>{right_eye}</td>
                    <td>{"{:.4f}".format(attr.get('eye_right'))}</td>
                    <td>{open_eye_thr}</td>
                </tr>
                <tr>
                    <td>Mask</td>
                    <td>{mask}</td>
                    <td></td><td></td>
                </tr>
                <tr>
                    <td>Glasses</td>
                    <td>{glass}</td>
                    <td></td><td></td>
                </tr>
                <tr>
                    <td>FaceHair</td>
                    <td>{facehair}</td>
                    <td></td><td></td>
                </tr>
                <tr>
                    <td>HairColor</td>
                    <td>{haircolor}</td>
                    <td></td><td></td>
                </tr>
                <tr>
                    <td>HairType</td>
                    <td>{hairtype}</td>
                    <td></td><td></td>
                </tr>
                <tr>
                    <td>HeadWear</td>
                    <td>{headwear}</td>
                    <td></td><td></td>
                </tr>
                <tr>
                    <td>Image Quality</td>
                    <td>{"{:.4f}".format(quality)}</td>
                    <td></td><td></td>
                </tr>
            </table>
            </div>
            """
            one_line_attribute = convert_fun(attribute)
    except Exception:
        # If the response could not be parsed, fall through with the defaults.
        pass

    return face_crop, one_line_attribute

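# Standalone usage sketch (assumptions: API_KEY is set in the environment and
# the example path exists; neither is checked here):
#   crop, attr_html = get_attributes("examples/1.jpg")
#   if crop is not None:
#       crop.save("face_crop.png")
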
def check_liveness(frame):
    url = "https://recognito-faceliveness.p.rapidapi.com/api/check_liveness"
    try:
        files = {'image': open(frame, 'rb')}
        headers = {"X-RapidAPI-Key": os.environ.get("API_KEY")}

        r = requests.post(url=url, files=files, headers=headers)
    except Exception:
        raise gr.Error("Please select an image file!")

    face_crop, liveness_result, liveness_score = None, "", -200
    try:
        image = Image.open(frame)

        res = r.json().get('data')
        if res:
            face = res.get('face_rect')
            x1 = face.get('x')
            y1 = face.get('y')
            x2 = x1 + face.get('w')
            y2 = y1 + face.get('h')

            # Clamp the face rectangle to the image bounds.
            if x1 < 0:
                x1 = 0
            if y1 < 0:
                y1 = 0
            if x2 >= image.width:
                x2 = image.width - 1
            if y2 >= image.height:
                y2 = image.height - 1

            face_crop = image.crop((x1, y1, x2, y2))
            face_image_ratio = face_crop.width / float(face_crop.height)
            resized_w = int(face_image_ratio * 150)
            resized_h = 150

            face_crop = face_crop.resize((resized_w, resized_h))
            liveness_score = res.get('liveness_score')
            liveness = res.get('result')

            if liveness == 'REAL':
                liveness_result = f"""<br/><div class="markdown-success-container"><p style="text-align: center; font-size: 20px; color: green;">Liveness Check: REAL<br/>Score: {liveness_score}</p></div>"""
            else:
                liveness_result = f"""<br/><div class="markdown-fail-container"><p style="text-align: center; font-size: 20px; color: red;">Liveness Check: {liveness}<br/>Score: {liveness_score}</p></div>"""

    except Exception:
        # If the response could not be parsed, fall through with the defaults.
        pass

    return face_crop, liveness_result, liveness_score

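# Standalone usage sketch (same assumptions as above; note that -200 is this
# function's "no result parsed" sentinel for the score):
#   crop, verdict_html, score = check_liveness("examples/att_1.jpg")
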
def analyze_face(frame):
    # Run liveness and attribute analysis on the same image; either call may
    # return None for the crop, so prefer whichever detection succeeded.
    face_crop_1, liveness_result, liveness_score = check_liveness(frame)
    face_crop_2, attribute = get_attributes(frame)

    face_crop = face_crop_1 if (face_crop_1 is not None) else face_crop_2
    return [face_crop, liveness_result, attribute]


def compare_face(frame1, frame2):
    url = "https://recognito.p.rapidapi.com/api/compare_face"
    try:
        files = {'image1': open(frame1, 'rb'), 'image2': open(frame2, 'rb')}
        headers = {"X-RapidAPI-Key": os.environ.get("API_KEY")}

        r = requests.post(url=url, files=files, headers=headers)
    except Exception:
        raise gr.Error("Please select two image files!")

    # Initialize before the try block so the checks below never hit an
    # undefined name if Image.open fails.
    face1, face2 = None, None
    try:
        image1 = Image.open(frame1)
        image2 = Image.open(frame2)

        # Gray placeholders used when no face is detected in an image.
        face1 = Image.new('RGBA', (150, 150), (80, 80, 80, 0))
        face2 = Image.new('RGBA', (150, 150), (80, 80, 80, 0))

        res1 = r.json().get('image1')

        if res1:
            face = res1.get('detection')
            x1 = face.get('x')
            y1 = face.get('y')
            x2 = x1 + face.get('w')
            y2 = y1 + face.get('h')
            if x1 < 0:
                x1 = 0
            if y1 < 0:
                y1 = 0
            if x2 >= image1.width:
                x2 = image1.width - 1
            if y2 >= image1.height:
                y2 = image1.height - 1

            face1 = image1.crop((x1, y1, x2, y2))
            face_image_ratio = face1.width / float(face1.height)
            resized_w = int(face_image_ratio * 150)
            resized_h = 150

            face1 = face1.resize((resized_w, resized_h))

        res2 = r.json().get('image2')
        if res2:
            face = res2.get('detection')
            x1 = face.get('x')
            y1 = face.get('y')
            x2 = x1 + face.get('w')
            y2 = y1 + face.get('h')

            if x1 < 0:
                x1 = 0
            if y1 < 0:
                y1 = 0
            if x2 >= image2.width:
                x2 = image2.width - 1
            if y2 >= image2.height:
                y2 = image2.height - 1

            face2 = image2.crop((x1, y1, x2, y2))
            face_image_ratio = face2.width / float(face2.height)
            resized_w = int(face_image_ratio * 150)
            resized_h = 150

            face2 = face2.resize((resized_w, resized_h))
    except Exception:
        pass

    matching_result = Image.open("icons/blank.png")
    similarity_score = ""
    if face1 is not None and face2 is not None:
        matching_score = r.json().get('matching_score')
        if matching_score is not None:
            str_score = str("{:.4f}".format(matching_score))
            if matching_score >= 0.7:
                matching_result = Image.open("icons/same.png")
                similarity_score = f"""<br/><div class="markdown-success-container"><p style="text-align: center; font-size: 20px; color: green;">Similarity score: {str_score}</p></div>"""
            else:
                matching_result = Image.open("icons/different.png")
                similarity_score = f"""<br/><div class="markdown-fail-container"><p style="text-align: center; font-size: 20px; color: red;">Similarity score: {str_score}</p></div>"""

    return [face1, face2, matching_result, similarity_score]

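# The clamp-crop-resize block above appears four times across these functions.
# A shared helper along these lines (a refactoring sketch, not part of the
# original app; the name and signature are assumptions) would remove the
# duplication:
def crop_face(image, rect, target_h=150):
    # rect is the API's detection dict with 'x', 'y', 'w', 'h' keys.
    x1, y1 = rect.get('x'), rect.get('y')
    x2, y2 = x1 + rect.get('w'), y1 + rect.get('h')
    # Clamp to the image bounds, then scale to target_h px high,
    # preserving aspect ratio.
    x1, y1 = max(x1, 0), max(y1, 0)
    x2, y2 = min(x2, image.width - 1), min(y2, image.height - 1)
    face = image.crop((x1, y1, x2, y2))
    ratio = face.width / float(face.height)
    return face.resize((int(ratio * target_h), target_h))
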

def image_change_callback(image_data):
    # This function would be called whenever a new image is set for a gr.Image
    # component; it is defined here but not wired to any component below.
    print("New image set:", image_data)

with gr.Blocks(css=css) as demo:
    gr.Markdown(
        """
        <a href="https://recognito.vision" style="display: flex; align-items: center;">
            <img src="https://recognito.vision/wp-content/uploads/2024/03/Recognito-modified.png" style="width: 8%; margin-right: 15px;"/>
            <div>
                <p style="font-size: 32px; font-weight: bold; margin: 0;">Recognito</p>
                <p style="font-size: 18px; margin: 0;">www.recognito.vision</p>
            </div>
        </a>
        <p style="font-size: 20px; font-weight: bold;">✨ NIST FRVT Top #1 Face Recognition Algorithm Developer</p>
        <div style="display: flex; align-items: center;">
            <a href="https://pages.nist.gov/frvt/html/frvt11.html"><p style="font-size: 14px;">👇🏻 Latest NIST FRVT Report</p></a>
        </div>
        <p style="font-size: 20px; font-weight: bold;">📚 Product Documentation</p>
        <div style="display: flex; align-items: center;">
            <a href="https://docs.recognito.vision" style="display: flex; align-items: center;"><img src="https://recognito.vision/wp-content/uploads/2024/05/book.png" style="width: 48px; margin-right: 5px;"/></a>
        </div>
        <p style="font-size: 20px; font-weight: bold;">🌐 Visit Recognito</p>
        <div style="display: flex; align-items: center;">
            <a href="https://recognito.vision" style="display: flex; align-items: center;"><img src="https://recognito.vision/wp-content/uploads/2024/03/recognito_64_cl.png" style="width: 32px; margin-right: 5px;"/></a>
            <a href="https://www.linkedin.com/company/recognito-vision" style="display: flex; align-items: center;"><img src="https://recognito.vision/wp-content/uploads/2024/03/linkedin_64_cl.png" style="width: 32px; margin-right: 5px;"/></a>
            <a href="https://huggingface.co/recognito" style="display: flex; align-items: center;"><img src="https://recognito.vision/wp-content/uploads/2024/03/hf_64_cl.png" style="width: 32px; margin-right: 5px;"/></a>
            <a href="https://github.com/recognito-vision" style="display: flex; align-items: center;"><img src="https://recognito.vision/wp-content/uploads/2024/03/github_64_cl.png" style="width: 32px; margin-right: 5px;"/></a>
            <a href="https://hub.docker.com/u/recognito" style="display: flex; align-items: center;"><img src="https://recognito.vision/wp-content/uploads/2024/03/docker_64_cl.png" style="width: 32px; margin-right: 5px;"/></a>
            <a href="https://www.youtube.com/@recognito-vision" style="display: flex; align-items: center;"><img src="https://recognito.vision/wp-content/uploads/2024/04/youtube_64_cl.png" style="width: 32px; margin-right: 5px;"/></a>
        </div>
        <p style="font-size: 20px; font-weight: bold;">🤝 Contact us to deploy our on-premise Face Recognition and Liveness Detection SDKs</p>
        <div style="display: flex; align-items: center;">
            <a target="_blank" href="mailto:[email protected]"><img src="https://img.shields.io/badge/[email protected]?logo=gmail" alt="www.recognito.vision"></a>
            <a target="_blank" href="https://wa.me/+14158003112"><img src="https://img.shields.io/badge/whatsapp-+14158003112-blue.svg?logo=whatsapp" alt="www.recognito.vision"></a>
            <a target="_blank" href="https://t.me/recognito_vision"><img src="https://img.shields.io/badge/telegram-@recognito__vision-blue.svg?logo=telegram" alt="www.recognito.vision"></a>
            <a target="_blank" href="https://join.slack.com/t/recognito-workspace/shared_invite/zt-2d4kscqgn-"><img src="https://img.shields.io/badge/slack-recognito__workspace-blue.svg?logo=slack" alt="www.recognito.vision"></a>
        </div>
        <br/><br/><br/>
        """
    )

    with gr.Tabs():
        with gr.Tab("Face Recognition"):
            with gr.Row():
                with gr.Column(scale=2):
                    with gr.Row():
                        with gr.Column(scale=1):
                            compare_face_input1 = gr.Image(label="Image1", type='filepath', elem_classes="example-image")
                            gr.Examples(['examples/1.jpg', 'examples/2.jpg', 'examples/3.jpg', 'examples/4.jpg'],
                                        inputs=compare_face_input1)
                        with gr.Column(scale=1):
                            compare_face_input2 = gr.Image(label="Image2", type='filepath', elem_classes="example-image")
                            gr.Examples(['examples/5.jpg', 'examples/6.jpg', 'examples/7.jpg', 'examples/8.jpg'],
                                        inputs=compare_face_input2)

                with gr.Blocks():
                    with gr.Column(scale=1, min_width=400, elem_classes="block-background"):
                        compare_face_button = gr.Button("Compare Face", variant="primary", size="lg")
                        with gr.Row(elem_classes="face-row"):
                            face_output1 = gr.Image(value="icons/face.jpg", label="Face 1", scale=0, elem_classes="face-image", show_share_button=False, show_download_button=False, show_fullscreen_button=False)
                            compare_result = gr.Image(value="icons/blank.png", min_width=30, scale=0, show_download_button=False, show_label=False, show_share_button=False, show_fullscreen_button=False)
                            face_output2 = gr.Image(value="icons/face.jpg", label="Face 2", scale=0, elem_classes="face-image", show_share_button=False, show_download_button=False, show_fullscreen_button=False)
                        similarity_markdown = gr.Markdown("")

                        compare_face_button.click(compare_face, inputs=[compare_face_input1, compare_face_input2], outputs=[face_output1, face_output2, compare_result, similarity_markdown])

        with gr.Tab("Face Liveness, Analysis"):
            with gr.Row():
                with gr.Column(scale=1):
                    face_input = gr.Image(label="Image", type='filepath', elem_classes="example-image")
                    gr.Examples(['examples/att_1.jpg', 'examples/att_2.jpg', 'examples/att_3.jpg', 'examples/att_4.jpg', 'examples/att_5.jpg', 'examples/att_6.jpg', 'examples/att_7.jpg'],
                                inputs=face_input)

                with gr.Blocks():
                    with gr.Column(scale=1, elem_classes="block-background"):
                        analyze_face_button = gr.Button("Analyze Face", variant="primary", size="lg")
                        with gr.Row(elem_classes="face-row"):
                            face_output = gr.Image(value="icons/face.jpg", label="Face", scale=0, elem_classes="face-image", show_share_button=False, show_download_button=False, show_fullscreen_button=False)

                        liveness_result = gr.Markdown("")
                        attribute_result = gr.Markdown("")

                        analyze_face_button.click(analyze_face, inputs=face_input, outputs=[face_output, liveness_result, attribute_result])

    gr.HTML('<a href="https://visitorbadge.io/status?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2FRecognito%2FFaceRecognition-LivenessDetection-FaceAnalysis"><img src="https://api.visitorbadge.io/api/combined?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2FRecognito%2FFaceRecognition-LivenessDetection-FaceAnalysis&countColor=%2337d67a&style=flat&labelStyle=upper" /></a>')

demo.launch(server_name="0.0.0.0", server_port=7860, show_api=False)
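# Runtime dependencies implied by the imports above (a requirements.txt sketch;
# version pins are deliberately omitted, and `os`/`json` are standard library):
#   gradio
#   requests
#   Pillow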