# SkyReels_B / app.py
import spaces
import gradio as gr
import sys
import time
import os
import random
from PIL import Image
# os.environ["CUDA_VISIBLE_DEVICES"] = ""
os.environ["SAFETENSORS_FAST_GPU"] = "1"
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"  # os.putenv() does not update os.environ, so set it directly
import torch
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
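# Note: `device` is selected here but not referenced later in this script; the SkyReels
# pipeline appears to manage its own placement via the offload settings below.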
# The predictor is kept as a module-level global (not a gr.State component),
# so it is created once when the Space starts and shared across requests.
predictor = None
def init_predictor(task_type: str):
    from skyreelsinfer import TaskType
    from skyreelsinfer.offload import OffloadConfig
    from skyreelsinfer.skyreels_video_infer import SkyReelsVideoInfer
    from huggingface_hub.utils import RepositoryNotFoundError, RevisionNotFoundError, EntryNotFoundError
    try:
        predictor = SkyReelsVideoInfer(
            task_type=TaskType.I2V if task_type == "i2v" else TaskType.T2V,
            model_id="Skywork/skyreels-v1-Hunyuan-i2v",
            quant_model=True,
            is_offload=True,
            offload_config=OffloadConfig(
                high_cpu_memory=True,
                parameters_level=True,
            ),
            use_multiprocessing=False,
        )
        return predictor
    except (RepositoryNotFoundError, RevisionNotFoundError, EntryNotFoundError) as e:
        print(f"Error: model not found. Details: {e}")
        return None
    except Exception as e:
        print(f"Error loading model: {e}")
        return None
predictor = init_predictor('i2v')
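# On ZeroGPU Spaces, @spaces.GPU requests a GPU allocation of up to `duration` seconds
# for each call to the decorated function.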
@spaces.GPU(duration=80)
def generate_video(prompt, image):
    from diffusers.utils import export_to_video
    from diffusers.utils import load_image
    if image is None:
        raise gr.Error("For i2v, an image prompt is required.")
    if not isinstance(prompt, str) or not prompt.strip():
        raise gr.Error("No prompt provided.")
    # Pick a fresh random seed for every run.
    random.seed(time.time())
    seed = int(random.randrange(4294967294))
    kwargs = {
        "prompt": prompt,
        "height": 256,
        "width": 256,
        "num_frames": 24,
        "num_inference_steps": 30,
        "seed": seed,
        "guidance_scale": 7.0,
        "embedded_guidance_scale": 1.0,
        "negative_prompt": "bad quality, blur",
        "cfg_for": False,
    }
    # gr.File may hand back either a path string or a tempfile-like object with .name.
    image_path = image.name if hasattr(image, "name") else image
    kwargs["image"] = load_image(image=image_path)
    frames = predictor.inference(kwargs)
    save_dir = "./result/i2v"
    os.makedirs(save_dir, exist_ok=True)
    # Keep the filename filesystem-safe: strip path separators and trim the prompt.
    safe_prompt = prompt[:100].replace("/", "_").replace("\\", "_")
    video_out_file = f"{save_dir}/{safe_prompt}_{seed}.mp4"
    print(f"Generating video: {video_out_file}")
    export_to_video(frames, video_out_file, fps=24)
    return video_out_file
def display_image(file):
    # Show a preview of the uploaded image; handle both path strings and file objects.
    if file is None:
        return None
    path = file.name if hasattr(file, "name") else file
    return Image.open(path)
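# Minimal Gradio UI: image upload with live preview, a text prompt, and a video output.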
with gr.Blocks() as demo:
    image_file = gr.File(label="Image Prompt (Required)", file_types=["image"])
    image_file_preview = gr.Image(label="Image Prompt Preview", interactive=False)
    prompt_textbox = gr.Text(label="Prompt")
    generate_button = gr.Button("Generate")
    output_video = gr.Video(label="Output Video")
    image_file.change(
        display_image,
        inputs=[image_file],
        outputs=[image_file_preview],
    )
    generate_button.click(
        fn=generate_video,
        inputs=[prompt_textbox, image_file],
        outputs=[output_video],
    )

demo.launch()