Update app_demo.py
app_demo.py CHANGED (+59 -59)
@@ -12,8 +12,16 @@ import PIL.Image
 import torch
 from diffusers import StableDiffusionPipeline
 import uuid
+from diffusers import DiffusionPipeline
+from tqdm import tqdm
+from safetensors.torch import load_file
+import gradio_user_history as gr_user_history
+import cv2
+
+
+
+
 
-model_id = "Lykon/dreamshaper-xl-v2-turbo"
 #DESCRIPTION = '''# Fast Stable Diffusion CPU with Latent Consistency Model
 #Distilled from [Dreamshaper v7](https://huggingface.co/Lykon/dreamshaper-7) fine‑tune of SD v1-5.
 #'''
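Note: of the five new imports, only DiffusionPipeline and gradio_user_history are exercised by the hunks below (the LCM pipeline constructors and the "Past generations" gallery, respectively); tqdm, load_file, and cv2 are not referenced in any changed line shown here.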
@@ -22,33 +30,23 @@ model_id = "Lykon/dreamshaper-xl-v2-turbo"
 MAX_SEED = np.iinfo(np.int32).max
 CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES") == "1"
 MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "768"))
-
+USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE") == "1"
+DTYPE = torch.float32  # torch.float16 works as well, but pictures seem to be a bit worse
 api = HfApi()
 executor = ThreadPoolExecutor()
 model_cache = {}
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+#custom
+model_id = "Lykon/dreamshaper-xl-v2-turbo"
+custom_pipe = DiffusionPipeline.from_pretrained(model_id, custom_pipeline="latent_consistency_txt2img", custom_revision="main")
+#1st
+pipe = DiffusionPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7", custom_pipeline="latent_consistency_txt2img", custom_revision="main")
+pipe.to(torch_device="cpu", torch_dtype=DTYPE)
+pipe.safety_checker = None
 
 # Load pipeline once, disabling NSFW filter at construction time
 pipe = StableDiffusionPipeline.from_pretrained(
-    model_id,
-    safety_checker=None,
-    torch_dtype=DTYPE,
-    use_safetensors=True
-).to("cpu")
+    model_id, safety_checker=None, torch_dtype=DTYPE, use_safetensors=True).to("cpu")
 
 def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
     if randomize_seed:
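Note: custom_pipe is created but never referenced in the hunks shown, and pipe is immediately rebound by the StableDiffusionPipeline constructor below it, so as committed the LCM pipeline is not the one generate() ends up calling. For reference, a minimal standalone sketch of the LCM community pipeline this hunk loads; the prompt and file name are illustrative, and the call kwargs mirror the ones this diff uses elsewhere:

# Standalone sketch (assumptions: CPU-only host, a diffusers version that
# still ships the "latent_consistency_txt2img" community pipeline; the
# prompt and output path are illustrative, not taken from app_demo.py).
import torch
from diffusers import DiffusionPipeline

lcm_pipe = DiffusionPipeline.from_pretrained(
    "SimianLuo/LCM_Dreamshaper_v7",
    custom_pipeline="latent_consistency_txt2img",
    custom_revision="main",
)
lcm_pipe.to(torch_device="cpu", torch_dtype=torch.float32)
lcm_pipe.safety_checker = None

# LCM yields usable images in ~4 steps; lcm_origin_steps matches the
# value this diff adds to the pipe() call inside generate().
image = lcm_pipe(
    "a watercolor fox in a snowy forest",
    width=512,
    height=512,
    guidance_scale=8.0,
    num_inference_steps=4,
    lcm_origin_steps=50,
    output_type="pil",
).images[0]
image.save("sample.png")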
@@ -58,14 +56,21 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
 def save_image(img, profile: gr.OAuthProfile | None, metadata: dict):
     unique_name = str(uuid.uuid4()) + '.png'
     img.save(unique_name)
+    gr_user_history.save_image(label=metadata["prompt"], image=img, profile=profile, metadata=metadata)
+
     return unique_name
 
+#def save_images(image_array, profile: gr.OAuthProfile | None, metadata: dict):
+#    with ThreadPoolExecutor() as executor:
+#        return list(executor.map(
+#            lambda args: save_image(*args),
+#            zip(image_array, [profile]*len(image_array), [metadata]*len(image_array))
+#        ))
 def save_images(image_array, profile: gr.OAuthProfile | None, metadata: dict):
+    paths = []
     with ThreadPoolExecutor() as executor:
-        return list(executor.map(
-            lambda args: save_image(*args),
-            zip(image_array, [profile]*len(image_array), [metadata]*len(image_array))
-        ))
+        paths = list(executor.map(save_image, image_array, [profile]*len(image_array), [metadata]*len(image_array)))
+    return paths
 
 def generate(
     prompt: str,
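The rewrite also drops the lambda/zip indirection: ThreadPoolExecutor.map accepts one iterable per positional parameter of the mapped function, so save_image can be passed directly. A tiny self-contained illustration (the tag function here is hypothetical):

from concurrent.futures import ThreadPoolExecutor

def tag(item, label):
    return f"{label}:{item}"

with ThreadPoolExecutor() as ex:
    # Old shape: ex.map(lambda args: tag(*args), zip([1, 2], ["a", "b"]))
    # New shape: hand map() the parallel iterables directly.
    print(list(ex.map(tag, [1, 2], ["a", "b"])))  # ['a:1', 'b:2']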
@@ -82,7 +87,6 @@ def generate(
     # prepare seed
     seed = randomize_seed_fn(seed, randomize_seed)
     torch.manual_seed(seed)
-
     start_time = time.time()
     # **Call the pipeline with only supported kwargs:**
     outputs = pipe(
@@ -94,6 +98,7 @@
         num_inference_steps=num_inference_steps,
         num_images_per_prompt=num_images,
         output_type="pil",
+        lcm_origin_steps=50,
     ).images
 
     latency = time.time() - start_time
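Note: lcm_origin_steps is a kwarg of the latent_consistency_txt2img community pipeline (50 is the value the LCM examples typically show); a stock StableDiffusionPipeline does not document it, so this call presumably only works when pipe is the LCM pipeline rather than the StableDiffusionPipeline that rebinds the name in the earlier hunk.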
@@ -114,20 +119,6 @@
 
     return paths, seed
 
-examples = [
-    "A futuristic cityscape at sunset",
-    "Steampunk airship over mountains",
-    "Portrait of a cyborg queen, hyper‑detailed",
-]
-
-
-
-
-
-
-
-
-
 
 
 
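Note: deleting the module-level examples list pairs with dropping the gr.Examples(...) block in the final hunk; the rebuilt UI ships no prompt examples.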
@@ -233,9 +224,7 @@ with gr.Blocks() as demo:
     with gr.Group():
         with gr.Row():
             prompt = gr.Text(
-                placeholder="Enter your prompt",
-                show_label=False,
-                container=False,
+                placeholder="Enter your prompt", show_label=False, container=False,
             )
             run_button = gr.Button("Run", scale=0)
             gallery = gr.Gallery(
@@ -256,23 +245,34 @@ with gr.Blocks() as demo:
     num_inference_steps = gr.Slider(1, 8, value=4, step=1, label="Inference Steps")
     num_images = gr.Slider(1, 8, value=1, step=1, label="Number of Images")
 
-    gr.Examples(
-        examples=examples,
-        inputs=prompt,
-        outputs=gallery,
-        fn=generate,
-        cache_examples=CACHE_EXAMPLES,
-    )
-
-
-
-demo.launch()
-
-
-'''#!/usr/bin/env python
 
+    with gr.Group():
+        with gr.Row():
+            prompt = gr.Text(label="Prompt", show_label=False, max_lines=1, placeholder="Enter your prompt", container=False)
+            run_button = gr.Button("Run", scale=0)
+        result = gr.Gallery(label="Generated images", show_label=False, elem_id="gallery", grid=[2])
+    with gr.Accordion("Advanced options", open=False):
+        seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
+        randomize_seed = gr.Checkbox(label="Randomize seed across runs", value=True)
+        with gr.Row():
+            width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=512)
+            height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=512)
+        with gr.Row():
+            guidance_scale = gr.Slider(label="Guidance scale for base", minimum=2, maximum=14, step=0.1, value=8.0)
+            num_inference_steps = gr.Slider(label="Number of inference steps for base", minimum=1, maximum=8, step=1, value=4)
+        with gr.Row():
+            num_images = gr.Slider(label="Number of images", minimum=1, maximum=8, step=1, value=1, visible=True)
+    with gr.Accordion("Past generations", open=False):
+        gr_user_history.render()
 
+    gr.on(triggers=[prompt.submit, run_button.click],
+        fn=generate,
+        inputs=[prompt, seed, width, height, guidance_scale, num_inference_steps, num_images, randomize_seed],
+        outputs=[result, seed],
+        api_name="run",
+    )
 
-
+if __name__ == "__main__":
+    demo.queue(api_open=False)
 demo.launch()
-
+
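The rebuilt UI wires one handler to both the textbox submit event and the button click via gr.on (Gradio 4.x API). A minimal runnable sketch of that wiring pattern, using a stand-in echo function in place of generate:

import gradio as gr

def echo(text):
    return text

with gr.Blocks() as demo:
    prompt = gr.Text(placeholder="Enter your prompt")
    run_button = gr.Button("Run", scale=0)
    result = gr.Text(label="Result")

    # One handler for both Enter-in-textbox and button click.
    gr.on(
        triggers=[prompt.submit, run_button.click],
        fn=echo,
        inputs=[prompt],
        outputs=[result],
        api_name="run",
    )

if __name__ == "__main__":
    demo.queue(api_open=False)  # queue before launch, as in the diff
    demo.launch()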