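"""FaceID demo: verifies whether two webcam captures show the same person.

Both images are embedded with a CLIP model (via sentence-transformers) and
compared with cosine similarity; a second tab first crops the faces with
OpenCV's Caffe SSD face detector before comparing them.
"""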
import gradio as gr
from sklearn.metrics.pairwise import cosine_similarity
from sentence_transformers import SentenceTransformer
from PIL import Image
import cv2
import os
import numpy as np
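
# The face detector expects OpenCV's Caffe SSD assets ('deploy.prototxt'
# and 'weights.caffemodel') to be present in the working directory.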
def extract_face(im):
    prototxt_path = 'deploy.prototxt'
    caffemodel_path = 'weights.caffemodel'

    # Read the model
    cv2_model = cv2.dnn.readNetFromCaffe(prototxt_path, caffemodel_path)

    # Gradio supplies a PIL image in RGB; OpenCV works in BGR
    image = cv2.cvtColor(np.array(im), cv2.COLOR_RGB2BGR)
    (h, w) = image.shape[:2]

    # Resize to the detector's 300x300 input and subtract its per-channel means
    blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 1.0, (300, 300), (104.0, 177.0, 123.0))
    cv2_model.setInput(blob)
    detections = cv2_model.forward()

    os.makedirs('faces', exist_ok=True)
    file_name = None

    # Identify each face
    for i in range(0, detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        # If confidence > 0.5, crop the face and save it as a separate file
        if confidence > 0.5:
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")
            frame = image[startY:endY, startX:endX]
            file_name = 'faces/' + str(np.random.randint(1, 10)) + '_face.png'
            cv2.imwrite(file_name, frame)

    return file_name

def predict(im1, im2, thresh, model_name):
    # Inputs are PIL images from the webcam tab, or file paths of the
    # cropped faces from the face-detector tab
    if not isinstance(im1, str):
        im1_face = im1
        im2_face = im2
    else:
        im1_face = Image.open(im1)
        im2_face = Image.open(im2)

    model = load_model(model_name)

    # Embed both faces with CLIP and score them with cosine similarity
    sim = cosine_similarity(model.encode([im1_face, im2_face]))[0][1]

    if sim > thresh:
        return round(sim, 2), "SAME PERSON, UNLOCK PHONE"
    else:
        return round(sim, 2), "DIFFERENT PEOPLE, DON'T UNLOCK"

def load_model(model_name):
    model = SentenceTransformer(model_name)
    return model

title = """<h1 id="title">FaceID for Facial Recognition with Face Detector</h1>"""
models = ['clip-ViT-B-16','clip-ViT-B-32','clip-ViT-L-14']
twitter_link = """
[](https://twitter.com/nickmuchi)
"""
css = '''
h1#title {
    text-align: center;
}
'''
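
# UI: a model dropdown and a similarity threshold slider shared by two tabs,
# one comparing raw webcam shots and one comparing detector-cropped faces.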
demo = gr.Blocks(css=css)

with demo:
    gr.Markdown(title)
    gr.Markdown(twitter_link)

    model_options = gr.Dropdown(choices=models, label='Embedding Models', value=models[-1], show_label=True)
    thresh = gr.Slider(minimum=0.5, maximum=1, value=0.85, step=0.1, label='Confidence')

    with gr.Tabs():
        with gr.TabItem("Face ID with No Face Detection"):
            with gr.Row():
                with gr.Column():
                    nd_image_input_1 = gr.Image(label='Image 1', type='pil', source='webcam')
                    nd_image_input_2 = gr.Image(label='Image 2', type='pil', source='webcam')
                with gr.Column():
                    sim = gr.Number(label="Similarity")
                    msg = gr.Textbox(label="Message")
            nd_but = gr.Button('Verify')

        with gr.TabItem("Face ID with Face Detector"):
            with gr.Row():
                with gr.Column():
                    fd_image_1 = gr.Image(label='Image 1', type='pil', source='webcam')
                    fd_image_2 = gr.Image(label='Image 2', type='pil', source='webcam')
                with gr.Column():
                    face_1 = gr.Image(label='Face Detected 1', type='filepath')
                    face_2 = gr.Image(label='Face Detected 2', type='filepath')

            # Crop faces as soon as a new image is captured
            fd_image_1.change(extract_face, fd_image_1, face_1)
            fd_image_2.change(extract_face, fd_image_2, face_2)

            with gr.Row():
                with gr.Column():
                    sim_1 = gr.Number(label="Similarity")
                    msg_1 = gr.Textbox(label="Message")
            fd_but = gr.Button('Verify')

    nd_but.click(predict, inputs=[nd_image_input_1, nd_image_input_2, thresh, model_options], outputs=[sim, msg], queue=True)
    fd_but.click(predict, inputs=[face_1, face_2, thresh, model_options], outputs=[sim_1, msg_1], queue=True)

demo.launch(debug=True, enable_queue=True)