import os

import gradio as gr
import torch
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
from PIL import Image

from utils import write_video, dummy, preprocess_image, preprocess_mask_image

# Pin the app to the first GPU.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
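
# NOTE: the helpers imported from utils are not shown in this file. Below is a
# minimal sketch of what they are assumed to provide, with signatures inferred
# from the call sites in this script (the actual utils.py may differ):
#
#   def dummy(images, **kwargs):
#       # Stand-in safety checker: return images unchanged, report no NSFW content.
#       return images, False
#
#   def preprocess_image(image, step_size, size):
#       # Zoom-out step: shrink the frame by step_size pixels per side and center
#       # it on a transparent canvas, leaving a border for the model to inpaint.
#       canvas = Image.new("RGBA", (size, size))
#       shrunk = image.resize((size - 2 * step_size, size - 2 * step_size))
#       canvas.paste(shrunk, (step_size, step_size))
#       return canvas
#
#   def preprocess_mask_image(image):
#       # Assumed to derive an inpainting mask from the alpha channel (transparent
#       # pixels get inpainted, opaque pixels are kept) and return (image, mask).
#       ...
#
#   def write_video(path, frames, fps):
#       # Assumed to encode the list of PIL frames to an MP4 at the given rate.
#       ...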

orig_prompt = "Ancient underground architectural ruins of Hong Kong in a flooded apocalypse landscape of dead skyscrapers"
orig_negative_prompt = "blurry, bad art, blurred, text, watermark"

model_list = ["stabilityai/stable-diffusion-2-inpainting", "runwayml/stable-diffusion-inpainting"]

def stable_diffusion_zoom_out(
    repo_id,
    original_prompt,
    negative_prompt,
    step_size,
    num_frames,
    fps,
    num_inference_steps,
):
    # Load the inpainting pipeline in half precision, enable xformers
    # memory-efficient attention, and swap in the DPM-Solver++ multistep scheduler.
    pipe = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch.float16)
    pipe.set_use_memory_efficient_attention_xformers(True)
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
    pipe = pipe.to("cuda")
    pipe.safety_checker = dummy  # disable the safety checker (see sketch above)

    # Start from a fully transparent 512x512 canvas; the mask covers the whole
    # image, so the first pass generates the initial scene from the prompt alone.
    new_image = Image.new(mode="RGBA", size=(512, 512))
    current_image, mask_image = preprocess_mask_image(new_image)

    current_image = pipe(
        prompt=[original_prompt],
        negative_prompt=[negative_prompt],
        image=current_image,
        mask_image=mask_image,
        num_inference_steps=num_inference_steps,
    ).images[0]

    all_frames = [current_image]

    for _ in range(num_frames):
        # Shrink the previous frame and center it on a transparent canvas, then
        # inpaint the transparent border to extend the scene outward.
        prev_image = preprocess_image(current_image, step_size, 512)
        current_image, mask_image = preprocess_mask_image(prev_image)
        current_image = pipe(
            prompt=[original_prompt],
            negative_prompt=[negative_prompt],
            image=current_image,
            mask_image=mask_image,
            num_inference_steps=num_inference_steps,
        ).images[0]

        # Paste the previous frame back over the center, using its alpha channel
        # as the mask, so only the newly inpainted border is kept.
        current_image.paste(prev_image, mask=prev_image)
        all_frames.append(current_image)

    save_path = "output.mp4"
    write_video(save_path, all_frames, fps=fps)
    return save_path
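
# A direct invocation for quick testing (bypassing the UI) might look like this;
# the values mirror the example row registered with the interface below:
#
#   stable_diffusion_zoom_out(
#       repo_id="stabilityai/stable-diffusion-2-inpainting",
#       original_prompt=orig_prompt,
#       negative_prompt=orig_negative_prompt,
#       step_size=25,
#       num_frames=10,
#       fps=16,
#       num_inference_steps=15,
#   )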

inputs = [
    gr.Dropdown(model_list, value=model_list[0], label="Model"),
    gr.Textbox(lines=5, value=orig_prompt, label="Prompt"),
    gr.Textbox(lines=1, value=orig_negative_prompt, label="Negative Prompt"),
    gr.Slider(minimum=1, maximum=120, value=25, step=5, label="Step Size"),
    gr.Slider(minimum=1, maximum=100, value=10, step=5, label="Frames"),
    gr.Slider(minimum=1, maximum=100, value=16, step=1, label="FPS"),
    gr.Slider(minimum=1, maximum=100, value=15, step=1, label="Inference Steps"),
]

output = gr.Video()
examples = [
    ["stabilityai/stable-diffusion-2-inpainting", orig_prompt, orig_negative_prompt, 25, 10, 16, 15],
]

title = "Stable Diffusion Infinite Zoom Out"
description = """<p>For faster inference without waiting in the queue, you may duplicate this Space and upgrade to a GPU in the settings.
<br/>
<a href="https://huggingface.co/spaces/kadirnar/stable-diffusion-2-infinite-zoom-out?duplicate=true">
<img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
</p>"""

demo_app = gr.Interface(
    fn=stable_diffusion_zoom_out,
    inputs=inputs,
    outputs=output,
    title=title,
    description=description,
    theme="huggingface",
    examples=examples,
    cache_examples=True,
)
demo_app.queue().launch(debug=True)