Create app.py
app.py
ADDED
@@ -0,0 +1,37 @@
+from diffusers import DiffusionPipeline
+import torch
+
+# load both base & refiner
+base = DiffusionPipeline.from_pretrained(
+    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
+)
+base.to("cuda")
+refiner = DiffusionPipeline.from_pretrained(
+    "stabilityai/stable-diffusion-xl-refiner-1.0",
+    text_encoder_2=base.text_encoder_2,
+    vae=base.vae,
+    torch_dtype=torch.float16,
+    use_safetensors=True,
+    variant="fp16",
+)
+refiner.to("cuda")
+
+# Define how many steps and what fraction of steps to run on each expert (80/20) here
+n_steps = 40
+high_noise_frac = 0.8
+
+prompt = "A majestic lion jumping from a big stone at night"
+
+# run both experts
+image = base(
+    prompt=prompt,
+    num_inference_steps=n_steps,
+    denoising_end=high_noise_frac,
+    output_type="latent",
+).images
+image = refiner(
+    prompt=prompt,
+    num_inference_steps=n_steps,
+    denoising_start=high_noise_frac,
+    image=image,
+).images[0]
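The base pipeline runs the first 80% of the 40 denoising steps (`denoising_end=0.8`) and returns its latents, which the refiner then finishes over the remaining 20% (`denoising_start=0.8`). As committed, the script only keeps the final image in memory; a minimal follow-up sketch, not part of this commit and with an illustrative filename, for persisting the result:

# the refiner returns a PIL image, so it can be saved directly (filename is hypothetical)
image.save("lion.png")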