import gradio as gr
import numpy as np
from moviepy.editor import VideoFileClip, ImageSequenceClip
from PIL import Image
from diffusers import AutoPipelineForImage2Image
# Load the anime-style model
pipe = AutoPipelineForImage2Image.from_pretrained(
    "nitrosocke/Arcane-Diffusion",
    safety_checker=None,
)
pipe.to("cuda")
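# Optional tweaks (not in the original Space, shown here as a hedged sketch):
# loading the weights in half precision and enabling attention slicing both
# reduce VRAM usage at a small cost in speed/precision:
#
#     pipe = AutoPipelineForImage2Image.from_pretrained(
#         "nitrosocke/Arcane-Diffusion",
#         torch_dtype=torch.float16,  # requires `import torch`
#         safety_checker=None,
#     )
#     pipe.enable_attention_slicing()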
# Function to stylize a single frame with the diffusion pipeline
def process_frame(frame, prompt):
    # moviepy yields RGB numpy arrays, so convert directly to a PIL image
    # (no BGR-to-RGB conversion is needed)
    image = Image.fromarray(frame)
    # Apply the anime-style transformation; strength controls how far the
    # output may drift from the source frame
    result = pipe(prompt=prompt, image=image, strength=0.75).images[0]
    return np.array(result)
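# Optional sketch (not in the original file): Stable Diffusion pipelines work
# best on dimensions that are multiples of 8, so oddly sized frames can be
# snapped down before stylizing, e.g.:
#
#     def snap_to_multiple_of_8(image):
#         w, h = image.size
#         return image.resize((w - w % 8, h - h % 8))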
# Function to convert the entire video, frame by frame
def video_to_anime(video_path, prompt="Arcane style"):
    # Load the video and extract its frames as RGB arrays
    clip = VideoFileClip(video_path)
    frames = list(clip.iter_frames())
    # Process each frame with the anime-style model
    processed_frames = [process_frame(frame, prompt) for frame in frames]
    # Reassemble the processed frames into a video at the original frame rate
    new_clip = ImageSequenceClip(processed_frames, fps=clip.fps)
    output_path = "output.mp4"
    new_clip.write_videofile(output_path, codec="libx264")
    return output_path
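# Optional sketch (not in the original file): a full diffusion pass per frame
# is slow, so for long clips you could stylize every Nth frame and reduce the
# output frame rate to match, e.g.:
#
#     step = 4
#     frames = frames[::step]
#     new_clip = ImageSequenceClip(processed_frames, fps=clip.fps / step)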
# Create the Gradio interface
iface = gr.Interface(
    fn=video_to_anime,
    inputs=[
        gr.Video(label="Input Video"),
        # Gradio 3+ uses `value` (not `default`) for the initial text
        gr.Textbox(label="Style Prompt", value="Arcane style"),
    ],
    outputs=gr.Video(label="Output Video"),
    title="Video to Anime Converter",
    description="Upload a video and convert it to anime style!",
)

# Launch the interface
iface.launch()
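# Optional (not in the original file): per-frame diffusion can take minutes,
# so enabling Gradio's request queue helps avoid timeouts on Spaces:
#
#     iface.queue().launch()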