import gradio as gr
import tensorflow as tf
import numpy as np
import tempfile
import cv2
import os

# Load the trained deepfake detection model (expects a batch of 9 frames, each 224x224x3)
model = tf.keras.models.load_model("deepfake_model.h5")

# Frame extractor: samples num_frames (default 9) evenly spaced frames, resized to 224x224
def extract_frames(video_path, num_frames=9):
    cap = cv2.VideoCapture(video_path)
    frames = []
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
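    # Sample every `interval`-th frame so the selected frames span the whole clip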
    interval = max(total_frames // num_frames, 1)

    count = 0
    while len(frames) < num_frames and cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        if count % interval == 0:
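            # Note: OpenCV decodes frames in BGR channel order; if the model was
            # trained on RGB inputs, convert with cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)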
            frame = cv2.resize(frame, (224, 224))
            frames.append(frame)
        count += 1

    cap.release()

    # Pad with black frames if the clip yielded fewer than num_frames
    while len(frames) < num_frames:
        frames.append(np.zeros((224, 224, 3), dtype=np.uint8))

    return np.array(frames)

# Prediction function
def predict_video(video):
    if video is None:
        return "Please upload a video first."

    # Copy the uploaded video to a temporary file so OpenCV can read it from disk
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as temp_video_file:
        with open(video, "rb") as f:
            temp_video_file.write(f.read())
        temp_video_path = temp_video_file.name

    # Extract frames
    frames = extract_frames(temp_video_path)

    # Scale pixel values to [0, 1] and add a batch dimension -> shape (1, 9, 224, 224, 3)
    frames = frames.astype(np.float32) / 255.0
    input_data = np.expand_dims(frames, axis=0)

    # Run inference; the single output is interpreted as the probability the video is fake
    prediction = model.predict(input_data)[0][0]

    # Clean up
    os.remove(temp_video_path)

    # Map the score to a label and report confidence relative to the predicted class
    result = "FAKE" if prediction > 0.5 else "REAL"
    confidence = f"{prediction:.2f}" if result == "FAKE" else f"{1 - prediction:.2f}"
    return f"Prediction: {result} (Confidence: {confidence})"

# Gradio UI
iface = gr.Interface(
    fn=predict_video,
    inputs=gr.Video(),  # Gradio passes the uploaded video to predict_video as a file path
    outputs="text",
    title="Deepfake Detection",
    description="Upload a video to detect whether it's real or fake."
)

iface.launch()
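
# Usage: run this script directly (e.g. `python app.py`, assuming it is saved as app.py);
# Gradio serves the demo locally at http://127.0.0.1:7860 by default.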