File size: 3,395 Bytes
86bd6ec
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
import gradio as gr
from sklearn.metrics.pairwise import cosine_similarity
from sentence_transformers import SentenceTransformer
from PIL import Image
import cv2

def predict(im1, im2, thresh, model_name):
  """Compare two face images and decide whether they show the same person.

  Args:
    im1, im2: file paths (str) or already-loaded PIL images. The detector tab
      passes filepaths (gr.Image type='filepath'); the plain tab passes PIL
      images (type='pil').
    thresh: cosine-similarity cutoff above which the two faces are a match.
    model_name: SentenceTransformer checkpoint id used to embed the images.

  Returns:
    (similarity, message): the cosine similarity and an unlock/deny message.
  """
  # Bug fix: the original called Image.open() unconditionally, which fails
  # when the input is already a PIL image (type='pil' tab). Open only paths.
  im1_face = Image.open(im1) if isinstance(im1, str) else im1
  im2_face = Image.open(im2) if isinstance(im2, str) else im2

  model = load_model(model_name)

  # Encode both faces in one batch; entry [0][1] of the similarity matrix is
  # the cross-image cosine similarity.
  sim = cosine_similarity(model.encode([im1_face, im2_face]))[0][1]

  if sim > thresh:
    return sim, "SAME PERSON, UNLOCK PHONE"
  else:
    return sim, "DIFFERENT PEOPLE, DON'T UNLOCK"

def load_model(model_name):
  """Load and return a SentenceTransformer embedding model.

  Bug fix: the original built the model but had no return statement, so
  callers (predict) received None and crashed on model.encode().
  """
  return SentenceTransformer(model_name)

# Page heading, rendered as markdown at the top of the Blocks UI; the id is
# targeted by the css block below.
title = """<h1 id="title">FaceID for Facial Recognition with Face Detector</h1>"""

# CLIP-based SentenceTransformer checkpoints offered in the model dropdown.
models = ['clip-ViT-B-16','clip-ViT-B-32','clip-ViT-L-14']

# Twitter follow badge shown under the title.
twitter_link = """
[![](https://img.shields.io/twitter/follow/nickmuchi?label=@nickmuchi&style=social)](https://twitter.com/nickmuchi)
"""

# Center the #title heading defined above.
css = '''
h1#title {
  text-align: center;
}
'''
# Build the two-tab demo: plain FaceID vs. FaceID with a face-detector stage.
demo = gr.Blocks(css=css)

with demo:
    gr.Markdown(title)
    gr.Markdown(twitter_link)
    # Controls shared by both tabs: embedding model and similarity cutoff.
    model_options = gr.Dropdown(choices=models, label='Embedding Models', value=models[-1], show_label=True)
    thresh = gr.Slider(minimum=0.5, maximum=1, value=0.85, step=0.1, label='Confidence')

    with gr.Tabs():
        with gr.TabItem("Face ID with No Face Detection"):

            with gr.Row():
                with gr.Column():
                    nd_image_input_1 = gr.Image(label='Image 1', type='pil', source='webcam')
                    nd_image_input_2 = gr.Image(label='Image 2', type='pil', source='webcam')

                with gr.Column():
                    sim = gr.Number(label="Similarity")
                    msg = gr.Textbox(label="Message")

            nd_but = gr.Button('Verify')

        with gr.TabItem("Face ID with Face Detector"):

            with gr.Row():
                with gr.Column():
                    fd_image_1 = gr.Image(label='Image 1', type='pil', source='webcam')
                    fd_image_2 = gr.Image(label='Image 2', type='pil', source='webcam')

                with gr.Column():
                    face_1 = gr.Image(label='Face Detected 1', type='filepath')
                    face_2 = gr.Image(label='Face Detected 2', type='filepath')
                    # Re-run the detector whenever either webcam image changes.
                    # Bug fix: the original wired fd_image_1 -> face_1 twice,
                    # so face_2 was never populated.
                    # NOTE(review): extract_face is not defined in this file —
                    # presumably defined/imported elsewhere; confirm.
                    fd_image_1.change(extract_face, fd_image_1, face_1)
                    fd_image_2.change(extract_face, fd_image_2, face_2)

            with gr.Row():
                with gr.Column():
                    sim_1 = gr.Number(label="Similarity")
                    msg_1 = gr.Textbox(label="Message")

            fd_but = gr.Button('Verify')

    # Both tabs share predict(); the detector tab feeds in the cropped faces.
    nd_but.click(predict, inputs=[nd_image_input_1, nd_image_input_2, thresh, model_options], outputs=[sim, msg], queue=True)
    fd_but.click(predict, inputs=[face_1, face_2, thresh, model_options], outputs=[sim_1, msg_1], queue=True)

demo.launch(debug=True, enable_queue=True)