"""Gradio demo serving an 8-bit OpenVINO LCM Stable Diffusion pipeline on CPU.

Loads `hsuwill000/LCM-absolutereality-openvino-8bit`, reshapes/compiles the
pipeline for a fixed 512x1024 output, and exposes a one-prompt text-to-image
UI via Gradio Blocks.
"""
import gradio as gr
import numpy as np
import random
from diffusers import DiffusionPipeline
from optimum.intel.openvino.modeling_diffusion import OVModelVaeDecoder, OVBaseModel, OVStableDiffusionPipeline
import torch
from huggingface_hub import snapshot_download
import openvino.runtime as ov
from typing import Optional, Dict
from diffusers import EulerAncestralDiscreteScheduler, LCMScheduler, DDIMScheduler
# EulerDiscreteScheduler is acceptable; EulerAncestralDiscreteScheduler also works.

model_id = "hsuwill000/LCM-absolutereality-openvino-8bit"
# Alternative model: "spamsoms/LCM-anything-v5-openvino2"
# LCM LoRA adapter (unused): "latent-consistency/lcm-lora-sdv1-5"

# Fixed output geometry; the pipeline is reshaped for these exact dimensions.
HIGH = 1024
WIDTH = 512
batch_size = -1  # -1 = dynamic batch dimension for pipe.reshape()

pipe = OVStableDiffusionPipeline.from_pretrained(
    model_id,
    compile=False,               # defer compilation until after reshape()
    ov_config={"CACHE_DIR": ""},
    torch_dtype=torch.uint8,     # 8-bit weights; torch.IntTensor was slower
    safety_checker=None,
    use_safetensors=False,
)
print(pipe.scheduler.compatibles)

# Reshape to static spatial dims (dynamic batch) so OpenVINO can optimize,
# then compile once up front instead of at first inference.
pipe.reshape(batch_size=-1, height=HIGH, width=WIDTH, num_images_per_prompt=1)
pipe.compile()

prompt = ""
negative_prompt = "EasyNegative, (animal, pet), close up,"


def infer(prompt, negative_prompt=negative_prompt):
    """Generate one image for *prompt*.

    *negative_prompt* defaults to the module-level negative prompt so the
    Gradio click handler (which wires only the prompt textbox) can call this
    with a single argument.  Returns a PIL.Image.
    """
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        width=WIDTH,
        height=HIGH,
        guidance_scale=1.0,       # LCM models run with CFG disabled (scale 1)
        num_inference_steps=6,    # LCM needs only a few steps
        num_images_per_prompt=1,
    ).images[0]
    return image


css = """
#col-container {
    margin: 0 auto;
    max-width: 520px;
}
"""

power_device = "CPU"

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(f"""
        # {model_id.split('/')[1]} {WIDTH}x{HIGH}
        Currently running on {power_device}.
        """)
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run", scale=0)
        result = gr.Image(label="Result", show_label=False)

    # BUG FIX: infer() previously required two positional arguments, but only
    # the prompt textbox is wired as an input, so every click raised
    # TypeError.  negative_prompt now has a default (see infer above).
    run_button.click(
        fn=infer,
        inputs=[prompt],
        outputs=[result],
    )

demo.queue().launch()