# Hugging Face Space: deepfake image detector (Xception + EfficientNetB4 ensemble).
import cv2
import numpy as np
import gradio as gr
from mtcnn import MTCNN
from tensorflow.keras.models import load_model
from tensorflow.keras.applications.xception import preprocess_input as xcp_pre
from tensorflow.keras.applications.efficientnet import preprocess_input as eff_pre
from huggingface_hub import hf_hub_download
# Load models
# Download both ensemble classifier weights from the Hugging Face Hub
# (network access on first call; subsequent runs hit the local cache).
xcp_path = hf_hub_download(repo_id="Zeyadd-Mostaffa/deepfake-image-detector_final", filename="xception_model.h5")
eff_path = hf_hub_download(repo_id="Zeyadd-Mostaffa/deepfake-image-detector_final", filename="efficientnet_model.h5")
xcp_model = load_model(xcp_path)
eff_model = load_model(eff_path)
# Load face detector
# MTCNN finds all face bounding boxes; predict() classifies each crop.
detector = MTCNN()
def expand_box(x, y, w, h, scale=1.5, img_shape=None):
    """Expand a face bounding box around its center by `scale`.

    Args:
        x, y: Top-left corner of the original box.
        w, h: Width and height of the original box.
        scale: Multiplicative factor applied to both dimensions.
        img_shape: Optional (height, width, ...) tuple used to clamp the
            expanded box inside the image. If None, only the lower bound
            (0) is enforced. The original body indexed `img_shape`
            unconditionally and raised TypeError with the default value.

    Returns:
        (x1, y1, x2, y2) corner coordinates of the expanded box.
    """
    cx, cy = x + w // 2, y + h // 2
    new_w, new_h = int(w * scale), int(h * scale)
    x1 = max(0, cx - new_w // 2)
    y1 = max(0, cy - new_h // 2)
    x2 = cx + new_w // 2
    y2 = cy + new_h // 2
    if img_shape is not None:
        # Keep the box inside the frame: shape is (rows=h, cols=w).
        x2 = min(img_shape[1], x2)
        y2 = min(img_shape[0], y2)
    return x1, y1, x2, y2
def predict(image):
    """Detect every face in an image and label each Real/Fake.

    Args:
        image: RGB numpy array, or a path to an image file. (The Gradio
            input is currently configured with type="filepath", which the
            original body crashed on — it indexed `image.shape` directly.)

    Returns:
        Tuple of (summary text, annotated image) for the Gradio outputs.
    """
    # Accept both a file path and an in-memory array so either Gradio
    # Image `type` setting works.
    if isinstance(image, str):
        bgr = cv2.imread(image)
        if bgr is None:
            return "Could not read image", None
        # OpenCV loads BGR; MTCNN and the classifiers expect RGB.
        image = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    faces = detector.detect_faces(image)
    if not faces:
        return "No face detected", image
    output_image = image.copy()
    results = []
    img_h, img_w = image.shape[:2]
    margin = 0.2  # expand each detected box 20% per side
    for idx, face in enumerate(faces):
        x, y, w, h = face['box']
        # Add the margin while staying inside image bounds.
        x = max(0, int(x - w * margin))
        y = max(0, int(y - h * margin))
        w = int(w * (1 + 2 * margin))
        h = int(h * (1 + 2 * margin))
        x2 = min(img_w, x + w)
        y2 = min(img_h, y + h)
        face_img = image[y:y2, x:x2]
        # Skip degenerate crops (MTCNN can emit boxes at the frame edge);
        # cv2.resize raises on zero-size input.
        if face_img.size == 0:
            continue
        # Each backbone has its own input size and preprocessing function.
        face_xcp = cv2.resize(face_img, (299, 299))
        face_eff = cv2.resize(face_img, (224, 224))
        xcp_tensor = xcp_pre(face_xcp.astype(np.float32))[np.newaxis, ...]
        eff_tensor = eff_pre(face_eff.astype(np.float32))[np.newaxis, ...]
        # Average the two sigmoid outputs into one ensemble score.
        pred_xcp = xcp_model.predict(xcp_tensor, verbose=0).flatten()[0]
        pred_eff = eff_model.predict(eff_tensor, verbose=0).flatten()[0]
        avg = (pred_xcp + pred_eff) / 2
        # NOTE(review): 0.41 looks like a tuned decision threshold —
        # confirm against the validation metrics before changing.
        label = "Real" if avg > 0.41 else "Fake"
        color = (0, 255, 0) if label == "Real" else (0, 0, 255)
        # Annotate the output copy with the box and score.
        cv2.rectangle(output_image, (x, y), (x2, y2), color, 2)
        cv2.putText(output_image, f"{label} ({avg:.2f})", (x, y - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
        results.append(f"Face {idx+1}: {label} (Avg: {avg:.3f}, XCP: {pred_xcp:.3f}, EFF: {pred_eff:.3f})")
    if not results:
        return "No face detected", output_image
    return "\n".join(results), output_image
# Gradio Interface
interface = gr.Interface(
    fn=predict,
    # Deliver the image as a numpy array: predict() reads image.shape,
    # copies it, and slices it, so type="filepath" would hand it a string
    # and crash on the first attribute access.
    inputs=gr.Image(type="numpy", label="Upload Image"),
    outputs=[
        gr.Textbox(label="Predictions"),
        gr.Image(type="numpy", label="Annotated Image"),
    ],
    title="Deepfake Detector (Multi-Face Ensemble)",
    description="Detects all faces in an image and classifies each one as real or fake using Xception and EfficientNetB4 ensemble.",
    api_name="/predict",  # explicit API route for programmatic clients
)
interface.launch()