import os
import gradio as gr
import cv2
import numpy as np
from insightface.app import FaceAnalysis
import tempfile
from moviepy.editor import VideoFileClip
# Silence the Albumentations update warnings
os.environ["NO_ALBUMENTATIONS_UPDATE"] = "1"

# Redirect cache/config paths that may not be writable by default
os.environ['MPLCONFIGDIR'] = '/tmp/matplotlib'
os.environ['FONTCONFIG_PATH'] = '/tmp/fontconfig'
os.makedirs('/tmp/matplotlib', exist_ok=True)
os.makedirs('/tmp/fontconfig', exist_ok=True)

# Force InsightFace to download its models into a writable directory
os.environ['INSIGHTFACE_ROOT'] = '/tmp/.insightface'
os.environ["ORT_DISABLE_CUDA"] = "1"  # Disable CUDA when no GPU is available
def swap_face(source_crop, target_face, frame):
    """Resize the cropped source face to the target bounding box and blend it in."""
    tgt_bbox = target_face.bbox.astype(int)
    w, h = tgt_bbox[2] - tgt_bbox[0], tgt_bbox[3] - tgt_bbox[1]
    if w <= 0 or h <= 0:
        return frame
    resized_face = cv2.resize(source_crop, (w, h))
    # Soft circular mask so seamlessClone blends the edges of the pasted face
    mask = np.zeros(resized_face.shape[:2], dtype=np.uint8)
    radius = int(min(w, h) * 0.45)
    cv2.circle(mask, (w // 2, h // 2), radius, 255, -1)
    mask = cv2.GaussianBlur(mask, (15, 15), 5)
    center = ((tgt_bbox[0] + tgt_bbox[2]) // 2, (tgt_bbox[1] + tgt_bbox[3]) // 2)
    return cv2.seamlessClone(resized_face, frame, mask, center, cv2.NORMAL_CLONE)
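# Note: this resize-and-blend swap is only a rough approximation. InsightFace also
# ships a dedicated swapper model (e.g. inswapper_128); a minimal sketch, assuming
# `import insightface` and that the ONNX weights are available locally:
#   swapper = insightface.model_zoo.get_model("/tmp/.insightface/inswapper_128.onnx")
#   frame = swapper.get(frame, target_face, source_face, paste_back=True)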
def process_video(source_img, target_video):
    try:
        face_app = FaceAnalysis(name="buffalo_l", root="/tmp/.insightface")
        face_app.prepare(ctx_id=0, det_size=(640, 640))

        # Gradio delivers RGB arrays; OpenCV and InsightFace work in BGR
        source_img = cv2.cvtColor(source_img, cv2.COLOR_RGB2BGR)
        source_faces = face_app.get(source_img)
        if not source_faces:
            raise ValueError("No face found in the source image.")
        source_face = source_faces[0]
        # Crop the detected source face once; it is blended onto every frame below
        x1, y1, x2, y2 = source_face.bbox.astype(int)
        source_crop = source_img[max(y1, 0):y2, max(x1, 0):x2]

        temp_output = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
        cap = cv2.VideoCapture(target_video)
        fps = cap.get(cv2.CAP_PROP_FPS)
        frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        # mp4v is widely supported by OpenCV builds; the final H.264 encode happens below via moviepy
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        out = cv2.VideoWriter(temp_output.name, fourcc, fps, (frame_width, frame_height))
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            target_faces = face_app.get(frame)
            for face in target_faces:
                frame = swap_face(source_crop, face, frame)
            out.write(frame)

        cap.release()
        out.release()
print(f"Taille de la vidéo temporaire : {os.path.getsize(temp_output.name)} octets") | |
# Réencodage final pour compatibilité Gradio | |
clip = VideoFileClip(temp_output.name) | |
final_path = tempfile.mktemp(suffix=".mp4") | |
clip.write_videofile(final_path, codec="libx264", audio_codec="aac", verbose=False, logger=None) | |
return final_path | |
except Exception as e: | |
print(f"Erreur lors du traitement : {str(e)}") | |
return None | |
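# Note: OpenCV's VideoCapture/VideoWriter ignore audio, so the re-encoded result is
# silent; the original soundtrack would have to be re-attached (e.g. with moviepy)
# if it is needed.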
# Gradio interface
demo = gr.Interface(
    fn=process_video,
    inputs=[
        gr.Image(label="Source Face", type="numpy"),
        gr.Video(label="Target Video"),
    ],
    outputs=gr.Video(label="Result Video"),
    title="🎬 FaceSwap Pro",
    description="Swap faces in a video.",
    allow_flagging="never"
)
if __name__ == "__main__":
    demo.launch(share=True)
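# Approximate dependencies (versions not pinned here): gradio, opencv-python-headless,
# numpy, insightface, onnxruntime, moviepy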