import gradio as gr
import torch
from diffusers import AutoPipelineForText2Image
import time
# Set to True to compile the UNet with torch.compile before benchmarking.
USE_TORCH_COMPILE = False
dtype = torch.float16
device = torch.device("cuda:0")

# Load the SDXL base pipeline with fp16 weights and move it to the GPU.
pipeline = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", variant="fp16", torch_dtype=dtype
)
pipeline.to(device)
if USE_TORCH_COMPILE:
    # "reduce-overhead" mode uses CUDA graphs to cut kernel-launch overhead.
    pipeline.unet = torch.compile(pipeline.unet, mode="reduce-overhead", fullgraph=True)
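
# With torch.compile, the first pipeline call pays the one-time compilation
# cost, so a short warmup pass (the step count here is illustrative) keeps
# that cost out of the timed runs.
if USE_TORCH_COMPILE:
    pipeline(prompt="warmup", num_inference_steps=2)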
def generate(num_images_per_prompt: int = 1):
    # Dummy prompt (a string of 77 "a" characters) used purely for benchmarking.
    prompt = 77 * "a"
    num_inference_steps = 40
    start_time = time.time()
    pipeline(
        prompt,
        num_images_per_prompt=num_images_per_prompt,
        num_inference_steps=num_inference_steps,
    ).images
    end_time = time.time()
    print(f"For {num_inference_steps} steps:", end_time - start_time, "seconds")
    print("Avg per step:", (end_time - start_time) / num_inference_steps, "seconds")
with gr.Blocks(css="style.css") as demo:
    batch_size = gr.Slider(
        label="Batch size",
        minimum=1,
        maximum=16,
        step=1,
        value=1,
    )
    # Button.style() is the Gradio 3.x styling API (removed in Gradio 4).
    btn = gr.Button("Benchmark!").style(
        margin=False,
        rounded=(False, True, True, False),
        full_width=False,
    )
    btn.click(fn=generate, inputs=[batch_size])

demo.launch()