import os
import subprocess
import time

import gradio as gr
import numpy as np
import timm
import torch
import torch.nn.functional as F
from torchvision import transforms
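# Gradio demo: 1:1 face verification where reference and probe embeddings are
# compared under fully homomorphic encryption (FHE). The FHE key generation,
# encryption, matching, and decryption steps are delegated to precompiled
# binaries under ./bin/.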
def resizeImage(image):
    """Resize an uploaded image to the 112x112 input size the face models expect."""
    return image.resize((112, 112))
# Standard FHE security levels in bits.
SECURITYLEVELS = ["128", "192", "256"]
# Face-recognition backbones hosted on the Hugging Face Hub, loaded via timm.
FRMODELS = ["gaunernst/vit_tiny_patch8_112.arcface_ms1mv3",
            "gaunernst/vit_tiny_patch8_112.cosface_ms1mv3",
            "gaunernst/vit_tiny_patch8_112.adaface_ms1mv3",
            "gaunernst/vit_small_patch8_gap_112.cosface_ms1mv3",
            "gaunernst/convnext_nano.cosface_ms1mv3",
            "gaunernst/convnext_atto.cosface_ms1mv3"]
def runBinFile(*args):
    """Run one of the precompiled FHE binaries and normalise its result for the UI."""
    binary_path = args[0]
    if not os.path.isfile(binary_path):
        return False, "<b>Error:</b> compiled binary not found."
    try:
        os.chmod(binary_path, 0o755)  # make sure the binary is executable
        start = time.time()
        result = subprocess.run(
            list(args),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True
        )
        duration = (time.time() - start) * 1000  # wall-clock time in ms
        if 'print' in args:
            # Display modes feed a single output component, so return one value.
            return result.stdout
        elif 'styledPrint' in args:
            return styled_output(result.stdout)
        elif result.returncode == 0:
            return True, f"<b>⏱️ Processing Time:</b> {duration:.0f} ms"
        else:
            return False, f"<b>Error:</b> {result.stderr or 'binary exited with a non-zero status.'}"
    except Exception as e:
        return False, f"<b>Execution failed:</b> {e}"
example_images = ['./VGGFace2/n000001/0002_01.jpg',
'./VGGFace2/n000149/0002_01.jpg',
'./VGGFace2/n000082/0001_02.jpg',
'./VGGFace2/n000148/0014_01.jpg',
'./VGGFace2/n000129/0001_01.jpg',
'./VGGFace2/n000394/0007_01.jpg',
]
example_images_auth = ['./VGGFace2/n000001/0013_01.jpg',
'./VGGFace2/n000149/0019_01.jpg',
'./VGGFace2/n000082/0003_03.jpg',
'./VGGFace2/n000148/0043_01.jpg',
'./VGGFace2/n000129/0006_01.jpg',
'./VGGFace2/n000394/0018_01.jpg',
]
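# The galleries are paired by VGGFace2 identity: index i in example_images
# (enrollment) and index i in example_images_auth (probe) show the same person.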
def display_image(image):
    """Pass-through used to mirror an uploaded image into another component."""
    return image
def load_rec_image():
    # Precomputed reconstruction used by the inversion example below.
    return 'static/reconstructed.png'
def extract_emb(image, modelName=FRMODELS[0], mode=None):
    # Deterministic inference transform: a random flip here would make the
    # embedding vary between calls, so only convert and normalise to [-1, 1].
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    ])
    image = transform(image).unsqueeze(0)  # add a batch dimension
    model = timm.create_model(f"hf_hub:{modelName}", pretrained=True).eval()
    with torch.no_grad():
        embs = model(image)
    embs = F.normalize(embs, dim=1)  # L2-normalise: dot product becomes cosine similarity
    embs = embs.squeeze(0).numpy()
    if mode is not None:
        # Persist the embedding so the FHE binaries can read it from disk.
        np.savetxt(f'{mode}-emb.txt', embs.reshape(1, -1), fmt="%.6f", delimiter=',')
    return embs
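# For intuition only: a plaintext sketch of the comparison the encrypted
# pipeline is assumed to perform (the real computation happens inside
# ./bin/recDecision.bin on encrypted data). The integer scale is an
# assumption chosen to line up with the threshold slider range below.
def plaintext_decision(ref_emb, probe_emb, threshold, scale=512):
    # Both embeddings are L2-normalised, so their dot product is the cosine
    # similarity in [-1, 1]; scale it to an integer score before thresholding.
    score = float(np.dot(ref_emb, probe_emb)) * scale
    return "Match" if score >= threshold else "No Match"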
def get_selected_image(evt: gr.SelectData):
    """Map a gallery click to the corresponding enrollment example path."""
    return example_images[evt.index]
def get_selected_image_auth(evt: gr.SelectData):
    """Map a gallery click to the corresponding authentication example path."""
    return example_images_auth[evt.index]
def styled_output(result):
    """Render the decrypted decision as a coloured HTML badge."""
    decision = result.strip().lower()
    if decision == "match":
        return "<span style='color: green; font-weight: bold;'>✔️ Match</span>"
    elif decision == "no match":
        return "<span style='color: red; font-weight: bold;'>❌ No Match</span>"
    else:
        return "<span style='color: red; font-weight: bold;'>Error</span>"
with gr.Blocks() as demo:
gr.Markdown("# Biometric Recognition (1:1 matching) Using Fully Homomorphic Encryption (FHE)")
with gr.Row():
gr.Markdown("## Phase 1: Enrollment")
with gr.Row():
gr.Markdown("### Step 1: Upload or select a reference facial image for enrollment.")
with gr.Row():
with gr.Column():
image_input_enroll = gr.Image(label="Upload a reference facial image.", type="pil", sources="upload")
image_input_enroll.change(fn=resizeImage, inputs=image_input_enroll, outputs=image_input_enroll)
with gr.Column():
example_gallery = gr.Gallery(value=example_images, columns=3)
with gr.Column():
image_output_enroll = gr.Image(label="Reference facial image", sources="upload")
image_input_enroll.change(fn=display_image, inputs=image_input_enroll, outputs=image_output_enroll)
with gr.Row():
gr.Markdown("### Step 2: Generate reference embedding.")
with gr.Row():
with gr.Column():
modelName = gr.Dropdown(
    choices=FRMODELS,
    value=FRMODELS[0],  # default so embedding extraction works without an explicit selection
    label="Choose a face recognition model"
)
with gr.Column():
example_gallery.select(fn=get_selected_image, inputs=None, outputs=image_input_enroll)
key_button = gr.Button("Generate embedding")
enroll_emb_text = gr.JSON(label="Reference embedding")
mode = gr.State("enroll")
key_button.click(fn=extract_emb, inputs=[image_input_enroll, modelName, mode], outputs=enroll_emb_text)
with gr.Row():
gr.HTML("<h3>Facial embeddings are <span style='color:red; font-weight:bold'>INVERTIBLE</span> and lead to the <span style='color:red; font-weight:bold'>RECONSTRUCTION</span> of their raw facial images.</h3>")
with gr.Row():
gr.Markdown("### Example:")
with gr.Row():
original_image = gr.Image(value="static/original.jpg", label="Original", sources="upload")
key_button = gr.Button("Generate embedding")
output_text = gr.JSON(label="Target embedding")
key_button.click(fn=extract_emb, inputs=[original_image, modelName], outputs=output_text)
btn = gr.Button("Reconstruct facial image")
Reconstructed_image = gr.Image(label="Reconstructed")
btn.click(fn=load_rec_image, outputs=Reconstructed_image)
with gr.Row():
gr.HTML("<h3>Facial embeddings protection is a <span style='color:red; font-weight:bold'>MUST!</span> At Suraksh.AI, we protect facial embeddings using FHE.</h3>")
with gr.Row():
gr.Markdown("### Step 3: πŸ” Generate the FHE public and secret keys.")
with gr.Row():
with gr.Column():
securityLevel = gr.Dropdown(
    choices=SECURITYLEVELS,
    value=SECURITYLEVELS[0],  # default so the binaries always receive a level
    label="Choose a security level (bits)"
)
with gr.Column():
key_button = gr.Button("Generate the FHE public and secret keys")
key_status = gr.Checkbox(label="FHE Public and Secret keys generated.", value=False)
time_output = gr.HTML()
key_button.click(fn=runBinFile, inputs=[gr.State("./bin/genKeys.bin"), securityLevel, gr.State("genkeys")], outputs=[key_status,time_output])
with gr.Row():
gr.Markdown("### Step 4: πŸ”’ Encrypt reference embedding using FHE.")
with gr.Row():
with gr.Column():
key_button = gr.Button("Encrypt")
key_status = gr.Checkbox(label="Reference embedding encrypted.", value=False)
time_output = gr.HTML()
key_button.click(fn=runBinFile, inputs=[gr.State("./bin/encReference.bin"), securityLevel, gr.State("encrypt")], outputs=[key_status,time_output])
with gr.Column():
key_button = gr.Button("Display")
output_text = gr.Text(label="Encrypted embedding", lines=3, interactive=False)
key_button.click(fn=runBinFile, inputs=[gr.State("./bin/encReference.bin"), securityLevel, gr.State("print")], outputs=output_text)
with gr.Row():
gr.Markdown("## Phase 2: Authentication")
with gr.Row():
gr.Markdown("### Step 1: Upload or select a probe facial image for authentication.")
with gr.Row():
with gr.Column():
image_input_auth = gr.Image(label="Upload a facial image.", type="pil", sources="upload")
image_input_auth.change(fn=resizeImage, inputs=image_input_auth, outputs=image_input_auth)
with gr.Column():
example_gallery = gr.Gallery(value=example_images_auth, columns=3)
with gr.Column():
image_output_auth = gr.Image(label="Probe facial image", sources="upload")
image_input_auth.change(fn=display_image, inputs=image_input_auth, outputs=image_output_auth)
with gr.Row():
gr.Markdown("### Step 2: Generate probe facial embedding.")
with gr.Row():
with gr.Column():
example_gallery.select(fn=get_selected_image_auth, inputs=None, outputs=image_input_auth)
key_button = gr.Button("Generate embedding")
enroll_emb_text = gr.JSON(label="Probe embedding")
mode = gr.State("auth")
key_button.click(fn=extract_emb, inputs=[image_input_auth, modelName, mode], outputs=enroll_emb_text)
with gr.Row():
gr.Markdown("### Step 3: πŸ”€ Generate protected probe embedding.")
with gr.Row():
with gr.Column():
key_button = gr.Button("Protect")
key_status = gr.Checkbox(label="Probe embedding protected.", value=False)
time_output = gr.HTML()
key_button.click(fn=runBinFile, inputs=[gr.State("./bin/encProbe.bin"), securityLevel, gr.State("encrypt")], outputs=[key_status,time_output])
with gr.Column():
key_button = gr.Button("Display")
output_text = gr.Text(label="Protected embedding", lines=3, interactive=False)
key_button.click(fn=runBinFile, inputs=[gr.State("./bin/encProbe.bin"), securityLevel, gr.State("print")], outputs=output_text)
with gr.Row():
gr.Markdown("### Step 4: πŸ”’ Compute biometric recognition decision using the threshold under FHE.")
with gr.Row():
gr.Markdown("### Set the recognition threshold.")
with gr.Row():
slider_threshold = gr.Slider(0, 512*5, step=1, value=133, label="Decision threshold", info="Higher values are stricter.", interactive=True)
number_threshold = gr.Textbox(visible=False, value='133')
slider_threshold.change(fn=lambda x: x, inputs=slider_threshold, outputs=number_threshold)
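# The integer threshold is forwarded to ./bin/recDecision.bin as a string; the
# 0..512*5 range presumably corresponds to the scaled similarity score computed
# inside the binary (an assumption; see plaintext_decision above).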
with gr.Row():
with gr.Column():
key_button = gr.Button("Biometric recognition under FHE")
key_status = gr.Checkbox(label="Recognition decision encrypted.", value=False)
time_output = gr.HTML()
key_button.click(fn=runBinFile, inputs=[gr.State("./bin/recDecision.bin"), securityLevel, gr.State("decision"), number_threshold], outputs=[key_status,time_output])
with gr.Column():
key_button = gr.Button("Display")
output_text = gr.Text(label="Encrypted decision", lines=3, interactive=False)
key_button.click(fn=runBinFile, inputs=[gr.State("./bin/recDecision.bin"), securityLevel, gr.State("print")], outputs=output_text)
with gr.Row():
gr.Markdown("### Step 5: πŸ”‘ Decrypt biometric recognition decision.")
with gr.Row():
with gr.Column(scale=1):
decision_button = gr.Button("Decrypt")
decision_status = gr.Checkbox(label="Recognition decision decrypted.", value=False)
time_output = gr.HTML()
decision_button.click(fn=runBinFile, inputs=[gr.State("./bin/decDecision.bin"), securityLevel, gr.State("decision")], outputs=[decision_status, time_output])
with gr.Column(scale=3):
with gr.Row():
check_button = gr.Button("Check")
with gr.Row():
with gr.Column(scale=1):
final_output = gr.HTML()
check_button.click(fn=runBinFile, inputs=[gr.State("./bin/decDecision.bin"), securityLevel, gr.State("styledPrint")], outputs=final_output)
with gr.Column(scale=1):
image_output_enroll = gr.Image(label="Reference", sources="upload")
image_input_enroll.change(fn=display_image, inputs=image_input_enroll, outputs=image_output_enroll)
with gr.Column(scale=1):
image_output_auth = gr.Image(label="Probe", sources="upload")
image_input_auth.change(fn=display_image, inputs=image_input_auth, outputs=image_output_auth)
demo.launch()