Lifeinhockey committed
Commit ef9ea85 · verified · 1 Parent(s): 6ee4c09

Update app.py

Files changed (1):
  1. app.py +44 -78
app.py CHANGED
@@ -1,14 +1,12 @@
- import os
  import gradio as gr
  import numpy as np
- import random
  import torch
  from diffusers import StableDiffusionPipeline
  from peft import PeftModel, LoraConfig
-
+ import os

  def get_lora_sd_pipeline(
- ckpt_dir='./lora_man_animestyle',
+ ckpt_dir='./lora_logos',
  base_model_name_or_path=None,
  dtype=torch.float16,
  adapter_name="default"
@@ -55,10 +53,10 @@ def align_embeddings(prompt_embeds, negative_prompt_embeds):
  torch.nn.functional.pad(negative_prompt_embeds, (0, 0, 0, max_length - negative_prompt_embeds.shape[1]))

  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
- model_id_default = "stable-diffusion-v1-5/stable-diffusion-v1-5"
+ model_id_default = "CompVis/stable-diffusion-v1-4"
  torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

- pipe_default = get_lora_sd_pipeline(ckpt_dir='./lora_man_animestyle', base_model_name_or_path=model_id_default, dtype=torch_dtype).to(device)
+ pipe_default = get_lora_sd_pipeline(ckpt_dir='./lora_logos', base_model_name_or_path=model_id_default, dtype=torch_dtype).to(device)

  MAX_SEED = np.iinfo(np.int32).max
  MAX_IMAGE_SIZE = 1024
@@ -69,9 +67,9 @@ def infer(
  width=512,
  height=512,
  num_inference_steps=20,
- model_id="stable-diffusion-v1-5/stable-diffusion-v1-5",
+ model_id='CompVis/stable-diffusion-v1-4',
  seed=42,
- guidance_scale=7.5,
+ guidance_scale=7.0,
  lora_scale=0.5,
  progress=gr.Progress(track_tqdm=True)
  ):
@@ -104,21 +102,6 @@ def infer(

  return pipe(**params).images[0]

-
- examples = [
- "Young man in anime style. The image is of high sharpness and resolution. A handsome, thoughtful man. The man is depicted in the foreground, close-up or middle plan. The background is blurry, not sharp. The play of light and shadow is visible on the face and clothes."
- "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k.",
- "An astronaut riding a green horse.",
- "A delicious ceviche cheesecake slice.",
- "A futuristic sports car is located on the surface of Mars. Stars, planets, mountains and craters are visible.",
- ]
-
- examples_negative = [
- "blurred details, low resolution, poor image of a man's face, poor quality, artifacts, black and white image"
- "blurry details, low resolution, poorly defined edges",
- "bad face, bad quality, artifacts, low-res, black and white",
- ]
-
  css = """
  #col-container {
  margin: 0 auto;
@@ -126,55 +109,30 @@ css = """
  }
  """

- available_models = [
- "stable-diffusion-v1-5/stable-diffusion-v1-5",
- "SG161222/Realistic_Vision_V3.0_VAE",
- "CompVis/stable-diffusion-v1-4",
- "stabilityai/sdxl-turbo",
- "runwayml/stable-diffusion-v1-5",
- "sd-legacy/stable-diffusion-v1-5",
- "prompthero/openjourney",
- "stabilityai/stable-diffusion-3-medium-diffusers",
- "stabilityai/stable-diffusion-3.5-large",
- "stabilityai/stable-diffusion-3.5-large-turbo",
- ]
-
  with gr.Blocks(css=css) as demo:
-
  with gr.Column(elem_id="col-container"):
- gr.Markdown(" # Text-to-Image Gradio Template from V. Gorsky")
-
+ gr.Markdown(" # DEMO Text-to-Image")
+
  with gr.Row():
- model_id = gr.Dropdown(
- label="Model Selection",
- choices=available_models,
- value="stable-diffusion-v1-5/stable-diffusion-v1-5",
- interactive=True
+ model_id = gr.Textbox(
+ label="Model ID",
+ max_lines=1,
+ placeholder="Enter model id like 'CompVis/stable-diffusion-v1-4'",
+ value=model_id_default
  )

- prompt = gr.Text(
+ prompt = gr.Textbox(
  label="Prompt",
- show_label=False,
  max_lines=1,
  placeholder="Enter your prompt",
- container=False,
  )
- negative_prompt = gr.Text(
+
+ negative_prompt = gr.Textbox(
  label="Negative prompt",
  max_lines=1,
  placeholder="Enter a negative prompt",
- visible=True,
  )

- with gr.Row():
- lora_scale = gr.Slider(
- label="LoRA scale",
- minimum=0.0,
- maximum=1.0,
- step=0.1,
- value=0.5,
- )
-
  with gr.Row():
  seed = gr.Number(
  label="Seed",
@@ -184,60 +142,67 @@ with gr.Blocks(css=css) as demo:
  value=42,
  )

- with gr.Row():
+ with gr.Row():
  guidance_scale = gr.Slider(
  label="Guidance scale",
  minimum=0.0,
  maximum=10.0,
  step=0.1,
- value=7.5, # Replace with defaults that work for your model
+ value=7.0,
+ )
+
+ with gr.Row():
+ lora_scale = gr.Slider(
+ label="LoRA scale",
+ minimum=0.0,
+ maximum=1.0,
+ step=0.1,
+ value=0.5,
  )

- with gr.Row():
+ with gr.Row():
  num_inference_steps = gr.Slider(
- label="Number of inference steps",
- minimum=1,
- maximum=100,
- step=1,
- value=30, # Replace with defaults that work for your model
+ label="Number of inference steps",
+ minimum=1,
+ maximum=50,
+ step=1,
+ value=20,
  )

- with gr.Accordion("Advanced Settings", open=False):
+ with gr.Accordion("Optional Settings", open=False):
  with gr.Row():
  width = gr.Slider(
  label="Width",
  minimum=256,
  maximum=MAX_IMAGE_SIZE,
  step=32,
- value=512, # Replace with defaults that work for your model
+ value=512,
  )
-
+
+ with gr.Row():
  height = gr.Slider(
  label="Height",
  minimum=256,
  maximum=MAX_IMAGE_SIZE,
  step=32,
- value=512, # Replace with defaults that work for your model
+ value=512,
  )

- gr.Examples(examples=examples, inputs=[prompt])
- gr.Examples(examples=examples_negative, inputs=[negative_prompt])
-
- run_button = gr.Button("Run", scale=0, variant="primary")
+ run_button = gr.Button("Run", scale=1, variant="primary")
  result = gr.Image(label="Result", show_label=False)
-
+
  gr.on(
  triggers=[run_button.click, prompt.submit],
  fn=infer,
  inputs=[
- model_id,
  prompt,
  negative_prompt,
- seed,
  width,
  height,
- guidance_scale,
  num_inference_steps,
+ model_id,
+ seed,
+ guidance_scale,
  lora_scale,
  ],
  outputs=[result],
@@ -245,3 +210,4 @@ with gr.Blocks(css=css) as demo:

  if __name__ == "__main__":
  demo.launch()
+
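Note on get_lora_sd_pipeline: this commit only changes the helper's default ckpt_dir and its call site; the function body is not part of the diff. For orientation, a minimal sketch of what such a helper commonly looks like, assuming the checkpoint directory contains PEFT LoRA adapters for the UNet and, optionally, the text encoder (the sub-folder names and loading logic below are assumptions, not taken from this commit):

import os
import torch
from diffusers import StableDiffusionPipeline
from peft import PeftModel

def get_lora_sd_pipeline(
    ckpt_dir='./lora_logos',           # default LoRA checkpoint dir (the value set by this commit)
    base_model_name_or_path=None,      # e.g. "CompVis/stable-diffusion-v1-4"
    dtype=torch.float16,
    adapter_name="default",
):
    # Load the base Stable Diffusion pipeline.
    pipe = StableDiffusionPipeline.from_pretrained(base_model_name_or_path, torch_dtype=dtype)

    # Attach a LoRA adapter to the UNet; the "unet" sub-folder is assumed here.
    unet_dir = os.path.join(ckpt_dir, "unet")
    if os.path.exists(unet_dir):
        pipe.unet = PeftModel.from_pretrained(pipe.unet, unet_dir, adapter_name=adapter_name)

    # Attach a text-encoder adapter as well, if one was saved.
    text_encoder_dir = os.path.join(ckpt_dir, "text_encoder")
    if os.path.exists(text_encoder_dir):
        pipe.text_encoder = PeftModel.from_pretrained(pipe.text_encoder, text_encoder_dir, adapter_name=adapter_name)

    return pipe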
 
 
 
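Note on align_embeddings: the second hunk header references this helper, but only its final pad(...) call is visible as context. A plausible reconstruction consistent with that line, padding both embedding tensors to a common sequence length so the prompt and negative-prompt embeddings can be combined for classifier-free guidance (an assumption; the actual body is not shown in this commit):

import torch

def align_embeddings(prompt_embeds, negative_prompt_embeds):
    # Pad both tensors along the token (sequence) dimension to the longer of the
    # two lengths, so the pipeline can use them together for guidance.
    max_length = max(prompt_embeds.shape[1], negative_prompt_embeds.shape[1])
    return (
        torch.nn.functional.pad(prompt_embeds, (0, 0, 0, max_length - prompt_embeds.shape[1])),
        torch.nn.functional.pad(negative_prompt_embeds, (0, 0, 0, max_length - negative_prompt_embeds.shape[1])),
    )

Related design note: the reordered inputs=[...] list in the last hunk follows the order of infer's parameters visible in the earlier hunk (num_inference_steps, model_id, seed, guidance_scale, lora_scale), which matters because Gradio passes the input component values to fn positionally.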