charliebaby2023 committed on
Commit aa274d7 · verified · 1 Parent(s): 8af7126

Update app_demo.py

Files changed (1)
  1. app_demo.py +56 -148
app_demo.py CHANGED
@@ -7,16 +7,13 @@ import gradio as gr
 import numpy as np
 import PIL.Image
 import torch
-#from diffusers import DiffusionPipeline
 from diffusers import StableDiffusionPipeline
-from tqdm import tqdm
-from safetensors.torch import load_file
 from concurrent.futures import ThreadPoolExecutor
 import uuid
-#import cv2
-model_id = "Lykon/dreamshaper-xl-v2-turbo" #"Lykon/dreamshaper-7" #"openskyml/lcm-lora-sdxl-turbo" #"SimianLuo/LCM_Dreamshaper_v7"
+
+model_id = "Lykon/dreamshaper-xl-v2-turbo"
 DESCRIPTION = '''# Fast Stable Diffusion CPU with Latent Consistency Model
-Distilled from [Dreamshaper v7](https://huggingface.co/Lykon/dreamshaper-7) fine-tune of [Stable Diffusion v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5) with only 4,000 training iterations (~32 A100 GPU Hours). [Project page](https://latent-consistency-models.github.io)
+Distilled from [Dreamshaper v7](https://huggingface.co/Lykon/dreamshaper-7) finetune of SD v1-5.
 '''
 if not torch.cuda.is_available():
     DESCRIPTION += "\n<p>running on CPU.</p>"
@@ -24,44 +21,15 @@ if not torch.cuda.is_available():
 MAX_SEED = np.iinfo(np.int32).max
 CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES") == "1"
 MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "768"))
-USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE") == "1"
-DTYPE = torch.float32 # torch.float16 works as well, but pictures seem to be a bit worse
-
-#pipe = DiffusionPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7", custom_pipeline="latent_consistency_txt2img", custom_revision="main")
+DTYPE = torch.float32
 
-
-#"SimianLuo/LCM_Dreamshaper_v7"
-'''
-pipe = DiffusionPipeline.from_pretrained( model_id , custom_pipeline=model_id,
-    custom_revision="main",
-    low_cpu_mem_usage=True,
-    safety_checker=None, # Disable NSFW filter
-    requires_safety_checker=False, # Skip warning
+# Load pipeline once, disabling NSFW filter at construction time
+pipe = StableDiffusionPipeline.from_pretrained(
+    model_id,
+    safety_checker=None,
+    torch_dtype=DTYPE,
     use_safetensors=True
-)
-#pipe.to(torch_device="cpu",torch_dtype="float16", torch_dtype=DTYPE)
-pipe.to(torch_dtype="float32" )
-pipe.to("cpu")
-'''
-
-#from diffusers import StableDiffusionPipeline
-
-
-
-pipe = StableDiffusionPipeline.from_pretrained(model_id, safety_checker= None)
-prompt = "A futuristic cityscape at sunset"
-output = pipe(
-    prompt=prompt,
-    negative_prompt="", # ← prevents added_cond_kwargs from being None
-    #num_inference_steps=50, # adjust as you like
-    #guidance_scale=7.5 # ditto
-    requires_safety_checker=False
-
-)
-image = output.images[0]
-#image = pipe(prompt).images[0]
-image.show()
-
+).to("cpu")
 
 def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
     if randomize_seed:
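For reviewers who want to sanity-check this hunk in isolation, here is a minimal standalone sketch of the same pipeline construction. The prompt, sizes, and output filename are illustrative; note also that `model_id` names an SDXL-derived checkpoint, which `diffusers` would ordinarily load with `StableDiffusionXLPipeline` rather than the class this commit keeps.

```python
# Standalone smoke test for the pipeline construction above.
# Assumes `torch` and `diffusers` are installed; prompt and sizes are illustrative.
import torch
from diffusers import StableDiffusionPipeline

model_id = "Lykon/dreamshaper-xl-v2-turbo"
pipe = StableDiffusionPipeline.from_pretrained(
    model_id,
    safety_checker=None,       # NSFW filter disabled at construction time
    torch_dtype=torch.float32,
    use_safetensors=True,
).to("cpu")

# A small, few-step render keeps the CPU-only run tolerable.
image = pipe(
    "a lighthouse at dawn",    # illustrative prompt
    negative_prompt="",
    num_inference_steps=4,
    guidance_scale=2.0,
    height=512,
    width=512,
).images[0]
image.save("smoke_test.png")
```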
@@ -71,14 +39,14 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
 def save_image(img, profile: gr.OAuthProfile | None, metadata: dict):
     unique_name = str(uuid.uuid4()) + '.png'
     img.save(unique_name)
-    #gr_user_history.save_image(label=metadata["prompt"], image=img, profile=profile, metadata=metadata)
     return unique_name
 
 def save_images(image_array, profile: gr.OAuthProfile | None, metadata: dict):
-    paths = []
     with ThreadPoolExecutor() as executor:
-        paths = list(executor.map(save_image, image_array, [profile]*len(image_array), [metadata]*len(image_array)))
-    return paths
+        return list(executor.map(
+            lambda args: save_image(*args),
+            zip(image_array, [profile]*len(image_array), [metadata]*len(image_array))
+        ))
 
 def generate(
     prompt: str,
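The `save_images` rewrite preserves the old behavior: `Executor.map` accepts either several parallel iterables or one zipped iterable unpacked in the worker. A self-contained sketch with placeholder names (`save_stub` and `items` are illustrative, not from the file):

```python
# Self-contained illustration of the two executor.map call styles in this hunk.
from concurrent.futures import ThreadPoolExecutor

def save_stub(item: str, profile: object, metadata: dict) -> str:
    # Placeholder worker standing in for save_image.
    return f"{item}:{metadata['prompt']}"

items = ["a", "b", "c"]
profile, metadata = None, {"prompt": "demo"}

with ThreadPoolExecutor() as executor:
    # Old style: map over three parallel iterables.
    old = list(executor.map(save_stub, items,
                            [profile] * len(items), [metadata] * len(items)))
    # New style: zip the arguments and unpack them inside a lambda.
    new = list(executor.map(lambda args: save_stub(*args),
                            zip(items, [profile] * len(items), [metadata] * len(items))))

assert old == new  # both produce the same results in the same order
```

The multi-iterable form avoids the lambda entirely; the zipped form keeps all per-image arguments in one place.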
@@ -91,143 +59,83 @@ def generate(
     randomize_seed: bool = False,
     progress = gr.Progress(track_tqdm=True),
     profile: gr.OAuthProfile | None = None,
-) -> PIL.Image.Image:
+) -> tuple[list[str], int]:
+    # prepare seed
     seed = randomize_seed_fn(seed, randomize_seed)
     torch.manual_seed(seed)
+
     start_time = time.time()
-    result = pipe(
+    # **Call the pipeline with only supported kwargs:**
+    outputs = pipe(
         prompt=prompt,
-        width=width,
+        negative_prompt="", # required to avoid NoneType in UNet
         height=height,
+        width=width,
         guidance_scale=guidance_scale,
-        negative_prompt="",
-        safety_checker= None,
-        requires_safety_checker=False,
         num_inference_steps=num_inference_steps,
         num_images_per_prompt=num_images,
-        lcm_origin_steps=50,
         output_type="pil",
     ).images
-    paths = save_images(result, profile, metadata={"prompt": prompt, "seed": seed, "width": width, "height": height, "guidance_scale": guidance_scale, "num_inference_steps": num_inference_steps})
-    print(time.time() - start_time)
+
+    latency = time.time() - start_time
+    print(f"Generation took {latency:.2f} seconds")
+
+    paths = save_images(
+        outputs,
+        profile,
+        metadata={
+            "prompt": prompt,
+            "seed": seed,
+            "width": width,
+            "height": height,
+            "guidance_scale": guidance_scale,
+            "num_inference_steps": num_inference_steps,
+        }
+    )
+
     return paths, seed
 
 examples = [
-    "portrait photo of a girl, photograph, highly detailed face, depth of field, moody light, golden hour, style by Dan Winters, Russell James, Steve McCurry, centered, extremely detailed, Nikon D850, award winning photography",
-    "Self-portrait oil painting, a beautiful cyborg with golden hair, 8k",
-    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
-    "A photo of beautiful mountain with realistic sunset and blue lake, highly detailed, masterpiece",
+    "A futuristic cityscape at sunset",
+    "Steampunk airship over mountains",
+    "Portrait of a cyborg queen, hyper‑detailed",
 ]
 
 with gr.Blocks(css="style.css") as demo:
     gr.Markdown(DESCRIPTION)
-    gr.DuplicateButton(
-        value="Duplicate Space for private use",
-        elem_id="duplicate-button",
-        visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1",
-    )
-    gr.HTML(
-        f"""
-        <p id="project-links" align="center">
-        <a href='https://huggingface.co/spaces/charliebaby2023/Fast_Stable_diffusion_CPU/edit/main/app_demo.py'>Edit this py file</a>
-        </p>
-        """
-    )
     with gr.Group():
         with gr.Row():
             prompt = gr.Text(
-                label="Prompt",
-                show_label=False,
-                max_lines=1,
                 placeholder="Enter your prompt",
+                show_label=False,
                 container=False,
             )
            run_button = gr.Button("Run", scale=0)
-        result = gr.Gallery(
-            label="Generated images", show_label=False, elem_id="gallery", grid=[2]
+        gallery = gr.Gallery(
+            label="Generated images",
+            show_label=False,
+            elem_id="gallery",
+            grid=[2]
         )
+
     with gr.Accordion("Advanced options", open=False):
-        seed = gr.Slider(
-            label="Seed",
-            minimum=0,
-            maximum=MAX_SEED,
-            step=1,
-            value=0,
-            randomize=True
-        )
+        seed = gr.Slider(0, MAX_SEED, value=0, step=1, randomize=True, label="Seed")
         randomize_seed = gr.Checkbox(label="Randomize seed across runs", value=True)
         with gr.Row():
-            width = gr.Slider(
-                label="Width",
-                minimum=256,
-                maximum=MAX_IMAGE_SIZE,
-                step=32,
-                value=512,
-            )
-            height = gr.Slider(
-                label="Height",
-                minimum=256,
-                maximum=MAX_IMAGE_SIZE,
-                step=32,
-                value=512,
-            )
+            width = gr.Slider(256, MAX_IMAGE_SIZE, value=512, step=32, label="Width")
+            height = gr.Slider(256, MAX_IMAGE_SIZE, value=512, step=32, label="Height")
        with gr.Row():
-            guidance_scale = gr.Slider(
-                label="Guidance scale for base",
-                minimum=2,
-                maximum=14,
-                step=0.1,
-                value=8.0,
-            )
-            num_inference_steps = gr.Slider(
-                label="Number of inference steps for base",
-                minimum=1,
-                maximum=8,
-                step=1,
-                value=4,
-            )
-        with gr.Row():
-            num_images = gr.Slider(
-                label="Number of images",
-                minimum=1,
-                maximum=8,
-                step=1,
-                value=1,
-                visible=True,
-            )
+            guidance_scale = gr.Slider(2.0, 14.0, value=8.0, step=0.1, label="Guidance Scale")
+            num_inference_steps = gr.Slider(1, 8, value=4, step=1, label="Inference Steps")
+            num_images = gr.Slider(1, 8, value=1, step=1, label="Number of Images")
 
-    with gr.Accordion("Past generations", open=False):
-        tr = gr.Textbox(value="ol")
-
     gr.Examples(
         examples=examples,
         inputs=prompt,
-        outputs=result,
+        outputs=gallery,
         fn=generate,
         cache_examples=CACHE_EXAMPLES,
     )
 
-    gr.on(
-        triggers=[
-            prompt.submit,
-            run_button.click,
-        ],
-        fn=generate,
-        inputs=[
-            prompt,
-            seed,
-            width,
-            height,
-            guidance_scale,
-            num_inference_steps,
-            num_images,
-            randomize_seed
-        ],
-        outputs=[result, seed],
-        api_name="run",
-    )
-
-    if __name__ == "__main__":
-        demo.queue(api_open=False)
-        # demo.queue(max_size=20).launch()
+    demo.queue()
     demo.launch()
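This hunk seeds the global RNG via `torch.manual_seed(seed)`. `diffusers` pipelines also accept an explicit `generator` argument, which scopes determinism to a single call; a sketch assuming the `pipe` object and the `generate` parameters defined in this file:

```python
# Alternative to the global torch.manual_seed(seed) used above:
# a per-call torch.Generator keeps seeding local to this invocation.
generator = torch.Generator(device="cpu").manual_seed(seed)
images = pipe(
    prompt=prompt,
    negative_prompt="",
    generator=generator,       # scoped RNG instead of global state
    num_inference_steps=num_inference_steps,
    guidance_scale=guidance_scale,
    height=height,
    width=width,
    num_images_per_prompt=num_images,
    output_type="pil",
).images
```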
 
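After this commit the file queues and launches the demo but no longer connects `generate` to the prompt box or the Run button: the old `gr.on` block appears only on the deletion side. If that wiring is still wanted, here is a sketch adapted from the deleted block, with the old `result` output renamed to the new `gallery` component; it would sit inside the `with gr.Blocks(...) as demo:` scope, before `demo.launch()`.

```python
# Event wiring adapted from the removed gr.on(...) block; `gallery`
# replaces the old `result` component. Untested sketch, assumed to be
# placed inside the Blocks context alongside the widgets it references.
gr.on(
    triggers=[prompt.submit, run_button.click],
    fn=generate,
    inputs=[prompt, seed, width, height,
            guidance_scale, num_inference_steps, num_images, randomize_seed],
    outputs=[gallery, seed],
    api_name="run",
)
```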