Manjushri committed on
Commit
30917e2
·
verified ·
1 Parent(s): bc862a1

Update app.py

Browse files

Add upscaling to all

Files changed (1) hide show
  1. app.py +152 -28
app.py CHANGED
@@ -17,7 +17,6 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
17
  pipe.enable_xformers_memory_efficient_attention()
18
  pipe = pipe.to(device)
19
  torch.cuda.empty_cache()
20
-
21
  if refine == "Yes":
22
  refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16") if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
23
  refiner.enable_xformers_memory_efficient_attention()
@@ -26,7 +25,6 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
26
  int_image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images
27
  image = refiner(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
28
  torch.cuda.empty_cache()
29
-
30
  if upscale == "Yes":
31
  refiner = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
32
  refiner.enable_xformers_memory_efficient_attention()
@@ -40,7 +38,6 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
40
  else:
41
  if upscale == "Yes":
42
  image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
43
-
44
  upscaler = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
45
  upscaler.enable_xformers_memory_efficient_attention()
46
  upscaler = upscaler.to(device)
@@ -49,7 +46,6 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
49
  torch.cuda.empty_cache()
50
  return upscaled
51
  else:
52
-
53
  image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
54
  torch.cuda.empty_cache()
55
  return image
@@ -67,12 +63,31 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
67
  int_image = anime(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images
68
  image = refiner(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
69
  torch.cuda.empty_cache()
70
- return image
 
 
 
 
 
 
 
 
 
71
  else:
72
- image = anime(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
73
- torch.cuda.empty_cache()
74
- return image
75
-
 
 
 
 
 
 
 
 
 
 
76
  if Model == "Disney":
77
  disney = DiffusionPipeline.from_pretrained("circulus/canvers-disney-v3.8.1", torch_dtype=torch.float16, safety_checker=None) if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("circulus/canvers-disney-v3.8.1")
78
  disney.enable_xformers_memory_efficient_attention()
@@ -86,11 +101,31 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
86
  int_image = disney(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images
87
  image = refiner(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
88
  torch.cuda.empty_cache()
89
- return image
 
 
 
 
 
 
 
 
 
 
90
  else:
91
- image = disney(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
92
- torch.cuda.empty_cache()
93
- return image
 
 
 
 
 
 
 
 
 
 
94
 
95
  if Model == "StoryBook":
96
  story = DiffusionPipeline.from_pretrained("circulus/canvers-story-v3.8.1", torch_dtype=torch.float16, safety_checker=None) if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("circulus/canvers-story-v3.8.1")
@@ -105,11 +140,33 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
105
  int_image = story(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images
106
  image = refiner(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
107
  torch.cuda.empty_cache()
108
- return image
 
 
 
 
 
 
 
 
 
 
109
  else:
110
- image = story(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
111
- torch.cuda.empty_cache()
112
- return image
 
 
 
 
 
 
 
 
 
 
 
 
113
 
114
  if Model == "SemiReal":
115
  semi = DiffusionPipeline.from_pretrained("circulus/canvers-semi-v3.8.1", torch_dtype=torch.float16, safety_checker=None) if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("circulus/canvers-semi-v3.8.1")
@@ -124,11 +181,33 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
124
  image = semi(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images
125
  image = refiner(Prompt, negative_prompt=negative_prompt, image=image, denoising_start=high_noise_frac).images[0]
126
  torch.cuda.empty_cache()
127
- return image
 
 
 
 
 
 
 
 
 
 
128
  else:
129
- image = semi(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
130
- torch.cuda.empty_cache()
131
- return image
 
 
 
 
 
 
 
 
 
 
 
 
132
 
133
  if Model == "Animagine XL 3.0":
134
  animagine = DiffusionPipeline.from_pretrained("cagliostrolab/animagine-xl-3.0", torch_dtype=torch.float16, safety_checker=None) if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("cagliostrolab/animagine-xl-3.0")
@@ -146,11 +225,33 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
146
  torch.cuda.empty_cache()
147
  image = animagine(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
148
  torch.cuda.empty_cache()
149
- return image
 
 
 
 
 
 
 
 
 
 
150
  else:
151
- image = animagine(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
152
- torch.cuda.empty_cache()
153
- return image
 
 
 
 
 
 
 
 
 
 
 
 
154
 
155
  if Model == "SDXL 1.0":
156
  torch.cuda.empty_cache()
@@ -171,10 +272,33 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
171
  torch.cuda.empty_cache()
172
  refined = sdxl(Prompt, negative_prompt=negative_prompt, image=image, denoising_start=high_noise_frac).images[0]
173
  torch.cuda.empty_cache()
174
- return refined
 
 
 
 
 
 
 
 
 
 
175
  else:
176
- image = sdxl(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
177
- torch.cuda.empty_cache()
 
 
 
 
 
 
 
 
 
 
 
 
 
178
 
179
  return image
180
 
 
17
  pipe.enable_xformers_memory_efficient_attention()
18
  pipe = pipe.to(device)
19
  torch.cuda.empty_cache()
 
20
  if refine == "Yes":
21
  refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16") if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
22
  refiner.enable_xformers_memory_efficient_attention()
 
25
  int_image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images
26
  image = refiner(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
27
  torch.cuda.empty_cache()
 
28
  if upscale == "Yes":
29
  refiner = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
30
  refiner.enable_xformers_memory_efficient_attention()
 
38
  else:
39
  if upscale == "Yes":
40
  image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
 
41
  upscaler = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
42
  upscaler.enable_xformers_memory_efficient_attention()
43
  upscaler = upscaler.to(device)
 
46
  torch.cuda.empty_cache()
47
  return upscaled
48
  else:
 
49
  image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
50
  torch.cuda.empty_cache()
51
  return image
 
63
  int_image = anime(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images
64
  image = refiner(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
65
  torch.cuda.empty_cache()
66
+ if upscale == "Yes":
67
+ refiner = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
68
+ refiner.enable_xformers_memory_efficient_attention()
69
+ refiner = refiner.to(device)
70
+ torch.cuda.empty_cache()
71
+ upscaled = refiner(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=15, guidance_scale=0).images[0]
72
+ torch.cuda.empty_cache()
73
+ return upscaled
74
+ else:
75
+ return image
76
  else:
77
+ if upscale == "Yes":
78
+ image = anime(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
79
+ upscaler = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
80
+ upscaler.enable_xformers_memory_efficient_attention()
81
+ upscaler = upscaler.to(device)
82
+ torch.cuda.empty_cache()
83
+ upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=15, guidance_scale=0).images[0]
84
+ torch.cuda.empty_cache()
85
+ return upscaled
86
+ else:
87
+ image = anime(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
88
+ torch.cuda.empty_cache()
89
+ return image
90
+
91
  if Model == "Disney":
92
  disney = DiffusionPipeline.from_pretrained("circulus/canvers-disney-v3.8.1", torch_dtype=torch.float16, safety_checker=None) if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("circulus/canvers-disney-v3.8.1")
93
  disney.enable_xformers_memory_efficient_attention()
 
101
  int_image = disney(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images
102
  image = refiner(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
103
  torch.cuda.empty_cache()
104
+
105
+ if upscale == "Yes":
106
+ refiner = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
107
+ refiner.enable_xformers_memory_efficient_attention()
108
+ refiner = refiner.to(device)
109
+ torch.cuda.empty_cache()
110
+ upscaled = refiner(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=15, guidance_scale=0).images[0]
111
+ torch.cuda.empty_cache()
112
+ return upscaled
113
+ else:
114
+ return image
115
  else:
116
+ if upscale == "Yes":
117
+ image = disney(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
118
+ upscaler = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
119
+ upscaler.enable_xformers_memory_efficient_attention()
120
+ upscaler = upscaler.to(device)
121
+ torch.cuda.empty_cache()
122
+ upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=15, guidance_scale=0).images[0]
123
+ torch.cuda.empty_cache()
124
+ return upscaled
125
+ else:
126
+ image = disney(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
127
+ torch.cuda.empty_cache()
128
+ return image
129
 
130
  if Model == "StoryBook":
131
  story = DiffusionPipeline.from_pretrained("circulus/canvers-story-v3.8.1", torch_dtype=torch.float16, safety_checker=None) if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("circulus/canvers-story-v3.8.1")
 
140
  int_image = story(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images
141
  image = refiner(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
142
  torch.cuda.empty_cache()
143
+
144
+ if upscale == "Yes":
145
+ refiner = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
146
+ refiner.enable_xformers_memory_efficient_attention()
147
+ refiner = refiner.to(device)
148
+ torch.cuda.empty_cache()
149
+ upscaled = refiner(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=15, guidance_scale=0).images[0]
150
+ torch.cuda.empty_cache()
151
+ return upscaled
152
+ else:
153
+ return image
154
  else:
155
+ if upscale == "Yes":
156
+ image = story(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
157
+
158
+ upscaler = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
159
+ upscaler.enable_xformers_memory_efficient_attention()
160
+ upscaler = upscaler.to(device)
161
+ torch.cuda.empty_cache()
162
+ upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=15, guidance_scale=0).images[0]
163
+ torch.cuda.empty_cache()
164
+ return upscaled
165
+ else:
166
+
167
+ image = story(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
168
+ torch.cuda.empty_cache()
169
+ return image
170
 
171
  if Model == "SemiReal":
172
  semi = DiffusionPipeline.from_pretrained("circulus/canvers-semi-v3.8.1", torch_dtype=torch.float16, safety_checker=None) if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("circulus/canvers-semi-v3.8.1")
 
181
  image = semi(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images
182
  image = refiner(Prompt, negative_prompt=negative_prompt, image=image, denoising_start=high_noise_frac).images[0]
183
  torch.cuda.empty_cache()
184
+
185
+ if upscale == "Yes":
186
+ refiner = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
187
+ refiner.enable_xformers_memory_efficient_attention()
188
+ refiner = refiner.to(device)
189
+ torch.cuda.empty_cache()
190
+ upscaled = refiner(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=15, guidance_scale=0).images[0]
191
+ torch.cuda.empty_cache()
192
+ return upscaled
193
+ else:
194
+ return image
195
  else:
196
+ if upscale == "Yes":
197
+ image = semi(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
198
+
199
+ upscaler = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
200
+ upscaler.enable_xformers_memory_efficient_attention()
201
+ upscaler = upscaler.to(device)
202
+ torch.cuda.empty_cache()
203
+ upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=15, guidance_scale=0).images[0]
204
+ torch.cuda.empty_cache()
205
+ return upscaled
206
+ else:
207
+
208
+ image = semi(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
209
+ torch.cuda.empty_cache()
210
+ return image
211
 
212
  if Model == "Animagine XL 3.0":
213
  animagine = DiffusionPipeline.from_pretrained("cagliostrolab/animagine-xl-3.0", torch_dtype=torch.float16, safety_checker=None) if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("cagliostrolab/animagine-xl-3.0")
 
225
  torch.cuda.empty_cache()
226
  image = animagine(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
227
  torch.cuda.empty_cache()
228
+
229
+ if upscale == "Yes":
230
+ animagine = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
231
+ animagine.enable_xformers_memory_efficient_attention()
232
+ animagine = animagine.to(device)
233
+ torch.cuda.empty_cache()
234
+ upscaled = refiner(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=15, guidance_scale=0).images[0]
235
+ torch.cuda.empty_cache()
236
+ return upscaled
237
+ else:
238
+ return image
239
  else:
240
+ if upscale == "Yes":
241
+ image = animagine(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
242
+
243
+ upscaler = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
244
+ upscaler.enable_xformers_memory_efficient_attention()
245
+ upscaler = upscaler.to(device)
246
+ torch.cuda.empty_cache()
247
+ upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=15, guidance_scale=0).images[0]
248
+ torch.cuda.empty_cache()
249
+ return upscaled
250
+ else:
251
+
252
+ image = animagine(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
253
+ torch.cuda.empty_cache()
254
+ return image
255
 
256
  if Model == "SDXL 1.0":
257
  torch.cuda.empty_cache()
 
272
  torch.cuda.empty_cache()
273
  refined = sdxl(Prompt, negative_prompt=negative_prompt, image=image, denoising_start=high_noise_frac).images[0]
274
  torch.cuda.empty_cache()
275
+
276
+ if upscale == "Yes":
277
+ sdxl = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
278
+ sdxl.enable_xformers_memory_efficient_attention()
279
+ sdxl = sdxl.to(device)
280
+ torch.cuda.empty_cache()
281
+ upscaled = sdxl(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=15, guidance_scale=0).images[0]
282
+ torch.cuda.empty_cache()
283
+ return upscaled
284
+ else:
285
+ return refined
286
  else:
287
+ if upscale == "Yes":
288
+ image = sdxl(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
289
+
290
+ upscaler = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
291
+ upscaler.enable_xformers_memory_efficient_attention()
292
+ upscaler = upscaler.to(device)
293
+ torch.cuda.empty_cache()
294
+ upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=15, guidance_scale=0).images[0]
295
+ torch.cuda.empty_cache()
296
+ return upscaled
297
+ else:
298
+
299
+ image = sdxl(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
300
+ torch.cuda.empty_cache()
301
+
302
 
303
  return image
304