import gradio as gr
import numpy as np
import random
from diffusers import DiffusionPipeline
from optimum.intel.openvino.modeling_diffusion import OVModelVaeDecoder, OVBaseModel, OVStableDiffusionPipeline
import torch
from huggingface_hub import snapshot_download
import openvino.runtime as ov
from typing import Optional, Dict
from diffusers import EulerAncestralDiscreteScheduler, LCMScheduler, DDIMScheduler
# EulerDiscreteScheduler: acceptable results
# EulerAncestralDiscreteScheduler
model_id = "hsuwill000/LCM-absolutereality-openvino-8bit"
#model_id = "spamsoms/LCM-anything-v5-openvino2"
#adapter_id = "latent-consistency/lcm-lora-sdv1-5"
HIGH = 1024
WIDTH = 512
batch_size = -1  # -1 keeps the batch dimension dynamic when the pipeline is reshaped
# Load the 8-bit OpenVINO Stable Diffusion pipeline; compilation is deferred until after reshape().
pipe = OVStableDiffusionPipeline.from_pretrained(
    model_id,
    compile=False,
    ov_config={"CACHE_DIR": ""},
    torch_dtype=torch.uint8,
    #variant="fp16",
    #torch_dtype=torch.IntTensor,  # slow
    safety_checker=None,
    use_safetensors=False,
)
print(pipe.scheduler.compatibles)
# Fix the input shapes, then compile the OpenVINO models for the target device.
pipe.reshape(batch_size=batch_size, height=HIGH, width=WIDTH, num_images_per_prompt=1)
pipe.compile()
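# The schedulers imported above (EulerAncestralDiscreteScheduler, LCMScheduler, DDIMScheduler)
# are not used yet. A minimal sketch of swapping one in, assuming the checkpoint's scheduler
# config is compatible with LCMScheduler (the scheduler runs on the Python side, so this does
# not require recompiling the OpenVINO models):
#
#     pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)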
prompt=""
negative_prompt="EasyNegative, cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly,"
def infer(prompt, negative_prompt=negative_prompt):
    # negative_prompt defaults to the module-level string above, since the UI only exposes a prompt box.
    image = pipe(
        prompt=prompt + f", hyper-realistic 2K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
        negative_prompt=negative_prompt,
        width=WIDTH,
        height=HIGH,
        guidance_scale=1.0,
        num_inference_steps=6,
        num_images_per_prompt=1,
    ).images[0]
    return image
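# Quick local sanity check, assuming the file is run directly and the Gradio launch below is
# skipped; the pipeline returns PIL images, so .save() is available:
#
#     img = infer("a red fox in the snow")
#     img.save("sample.png")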
css="""
#col-container {
margin: 0 auto;
max-width: 520px;
}
"""
power_device = "CPU"
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(f"""
        # {model_id.split('/')[1]} {WIDTH}x{HIGH}
        Currently running on {power_device}.
        """)
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run", scale=0)
        result = gr.Image(label="Result", show_label=False)
        run_button.click(
            fn=infer,
            inputs=[prompt],  # negative_prompt falls back to its default in infer()
            outputs=[result],
        )
demo.queue().launch()