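"""Gradio app: detect and blur all faces in an uploaded video.

Pipeline: extract the frames and audio track (OpenCV/MoviePy), detect faces in
each frame with InsightFace (buffalo_l, CPU) and Gaussian-blur them, then
rebuild the video and re-attach the original audio. Written against the
MoviePy 1.x API (`moviepy.editor`, `set_audio`).
"""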
import os
import cv2
import glob
import shutil
import numpy as np
import gradio as gr
from moviepy.editor import VideoFileClip, AudioFileClip
from insightface.app import FaceAnalysis
def video_to_images(video_path, images_path):
    """Dump every frame of the video to PNGs and save its audio track. Returns the FPS."""
    cam = cv2.VideoCapture(video_path)
    fps = cam.get(cv2.CAP_PROP_FPS)

    # Save the audio track separately so it can be re-attached after blurring.
    clip = VideoFileClip(video_path)
    if clip.audio is not None:
        clip.audio.write_audiofile("audio.mp3")
    clip.close()

    # Start from a clean frames directory.
    if os.path.exists(images_path):
        shutil.rmtree(images_path)
    os.makedirs(images_path)

    frame_num = 0
    while True:
        ret, frame = cam.read()
        if not ret:
            break
        # Zero-padded names keep the frames in order for the rebuild step.
        filename = os.path.join(images_path, f"{frame_num:05}.png")
        cv2.imwrite(filename, frame, [int(cv2.IMWRITE_PNG_COMPRESSION), 0])
        frame_num += 1
    cam.release()
    return fps
def blur_faces_in_images(images_path):
    """Detect faces in every extracted frame with InsightFace and blur them in place."""
    # InsightFace downloads the buffalo_l model pack on first run.
    faceapp = FaceAnalysis(name="buffalo_l", providers=["CPUExecutionProvider"])
    faceapp.prepare(ctx_id=0)
    for img_path in sorted(glob.glob(os.path.join(images_path, "*.png"))):
        img = cv2.imread(img_path)
        faces = faceapp.get(img)
        for face in faces:
            # Clamp the detected box to the image bounds before blurring.
            x1, y1, x2, y2 = map(int, face.bbox)
            h, w = img.shape[:2]
            x1, y1 = max(x1, 0), max(y1, 0)
            x2, y2 = min(x2, w), min(y2, h)
            if x2 <= x1 or y2 <= y1:
                continue
            face_crop = img[y1:y2, x1:x2]
            img[y1:y2, x1:x2] = cv2.GaussianBlur(face_crop, (99, 99), 30)
        cv2.imwrite(img_path, img)
def images_to_video(images_path, output_path, fps):
    """Re-encode the blurred frames into a video and re-attach the saved audio track."""
    images = sorted(glob.glob(os.path.join(images_path, "*.png")))
    frame = cv2.imread(images[0])
    height, width, _ = frame.shape

    # Write a temporary silent video with OpenCV first.
    temp_path = "temp_video.avi"
    fourcc = cv2.VideoWriter_fourcc(*"XVID")
    out = cv2.VideoWriter(temp_path, fourcc, fps, (width, height))
    for img_path in images:
        out.write(cv2.imread(img_path))
    out.release()

    # Mux the original audio back in (if the source had any) and encode to H.264/AAC.
    video = VideoFileClip(temp_path)
    if os.path.exists("audio.mp3"):
        audio = AudioFileClip("audio.mp3")
        video = video.set_audio(audio)
    video.write_videofile(output_path, codec="libx264", audio_codec="aac")

    os.remove(temp_path)
    if os.path.exists("audio.mp3"):
        os.remove("audio.mp3")
def process(video):
    """Gradio callback: take the uploaded video path and return the path of the blurred result."""
    input_path = "input.mp4"
    output_path = "output.mp4"
    frames_dir = "frames"
    shutil.copy(video, input_path)  # Gradio passes a temp file path; work on a local copy.
    fps = video_to_images(input_path, frames_dir)
    blur_faces_in_images(frames_dir)
    images_to_video(frames_dir, output_path, fps)
    shutil.rmtree(frames_dir)
    return output_path
demo = gr.Interface(
    fn=process,
    inputs=gr.Video(label="Upload Video"),
    outputs=gr.Video(label="Faces Blurred"),
    title="Face Blur App",
    description="Detects and blurs all faces in your video using InsightFace.",
)

if __name__ == "__main__":
    demo.launch()
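# The blurring pipeline can also be driven without the Gradio UI, e.g.
# (the input file name below is only an illustration):
#   result = process("my_clip.mp4")
#   print(result)  # -> "output.mp4"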