import gradio as gr
import torch
from torch import autocast
from diffusers import StableDiffusionPipeline

# Load the fine-tuned Stable Diffusion checkpoint in half precision on the GPU.
#model_id = "hakurei/waifu-diffusion"
pipe = StableDiffusionPipeline.from_pretrained("nan2/lcbanner", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
#torch.backends.cudnn.benchmark = True

num_samples = 1


def infer(prompt):
    # Recent diffusers releases return a pipeline output object whose generated
    # images live under `.images` (very old releases used a dict with a "sample" key).
    result = pipe([prompt] * num_samples, guidance_scale=7.5)
    return result.images


block = gr.Blocks()

examples = [
    ['Goku'],
    ['Mikasa Ackerman'],
    ['Saber'],
]

# Layout below uses the Gradio 3.x `.style()` API.
with block as demo:
    with gr.Group():
        with gr.Box():
            with gr.Row().style(mobile_collapse=False, equal_height=True):
                text = gr.Textbox(
                    label="Enter your prompt",
                    show_label=False,
                    max_lines=1,
                ).style(
                    border=(True, False, True, True),
                    rounded=(True, False, False, True),
                    container=False,
                )
                btn = gr.Button("Run").style(
                    margin=False,
                    rounded=(False, True, True, False),
                )
        gallery = gr.Gallery(
            label="Generated images", show_label=False, elem_id="generated_id"
        ).style(grid=[1], width="2048px", height="512px")

        ex = gr.Examples(
            examples=examples,
            fn=infer,
            inputs=[text],
            outputs=gallery,
            cache_examples=True,
        )
        ex.dataset.headers = [""]

        # Run inference when the user presses Enter in the textbox or clicks Run.
        text.submit(infer, inputs=[text], outputs=gallery)
        btn.click(infer, inputs=[text], outputs=gallery)

demo.queue(max_size=25).launch()
#gr.Interface.load("models/nan2/lcbanner").launch()
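
# Optional sketch: the `autocast` import above is not used by `infer`. A
# mixed-precision variant of the handler (illustrative only; the name
# `infer_autocast` is an assumption, not part of the original app) could wrap
# the pipeline call in an autocast context:
#
# def infer_autocast(prompt):
#     with autocast("cuda"):
#         result = pipe([prompt] * num_samples, guidance_scale=7.5)
#     return result.images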