Spaces: Running on A10G
add good
app.py CHANGED
@@ -14,6 +14,7 @@ from diffusers import (
     T2IAdapter,
 )
 import time
+import utils
 
 
 dtype = torch.float16
@@ -138,12 +139,28 @@ def generate(
     for _ in range(3):
         prompt = 77 * "a"
         num_inference_steps = 20
-        start_time = time.time()
-        _ = pipeline(
-            prompt,
+        call_args = dict(
+            prompt=prompt,
             num_images_per_prompt=num_images_per_prompt,
             num_inference_steps=num_inference_steps,
-        )
+        )
+
+        if pipeline_to_benchmark in ["SD I2I", "SDXL I2I"]:
+            image = utils.get_image_for_img_to_img(pipeline_to_benchmark)
+            call_args.update({"image": image})
+        elif "Inpainting" in pipeline_to_benchmark:
+            image, mask_image = utils.get_image_from_inpainting(pipeline_to_benchmark)
+            call_args.update({"image": image, "mask_image": mask_image})
+        elif "ControlNet" in pipeline_to_benchmark:
+            image = utils.get_image_for_controlnet(pipeline_to_benchmark)
+            call_args.update({"image": image})
+
+        elif "Adapters" in pipeline_to_benchmark:
+            image = utils.get_image_for_adapters(pipeline_to_benchmark)
+            call_args.update({"image": image})
+
+        start_time = time.time()
+        _ = pipeline(**call_args).images
         end_time = time.time()
 
         print(f"For {num_inference_steps} steps", end_time - start_time)
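To see the refactor in isolation: the loop now collects every pipeline argument in a call_args dict, attaches whatever conditioning image(s) the selected pipeline family needs, and only then starts the clock. Below is a minimal standalone sketch of the same pattern. AutoPipelineForImage2Image and load_image are real diffusers APIs, but the checkpoint choice and the hard-wired img2img branch are assumptions for illustration; the Space constructs its pipeline elsewhere in app.py.

import time

import torch
from diffusers import AutoPipelineForImage2Image
from diffusers.utils import load_image

# Assumed checkpoint for illustration; the Space picks its pipeline from a dropdown.
pipeline = AutoPipelineForImage2Image.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Base arguments shared by every pipeline family, mirroring the diff above.
call_args = dict(
    prompt=77 * "a",
    num_images_per_prompt=1,
    num_inference_steps=20,
)

# The img2img branch of the dispatch: fetch a conditioning image and attach it.
init_image = (
    load_image(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
    )
    .convert("RGB")
    .resize((768, 512))
)
call_args.update({"image": init_image})

# Time only the call itself; reading .images forces the GPU work to complete.
start_time = time.time()
_ = pipeline(**call_args).images
end_time = time.time()
print(f"For {call_args['num_inference_steps']} steps", end_time - start_time)

Starting the timer only after the inputs are prepared keeps image downloads and preprocessing out of the measured interval, which is the point of moving start_time below the dispatch in this commit.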
utils.py ADDED
@@ -0,0 +1,52 @@
+from diffusers.utils import load_image
+
+
+def get_image_for_img_to_img(pipeline_to_benchmark):
+    if pipeline_to_benchmark == "SD I2I":
+        url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
+        init_image = load_image(url).convert("RGB")
+        size = (768, 512)
+        init_image = init_image.resize(size)
+    elif pipeline_to_benchmark == "SDXL I2I":
+        url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdxl-img2img.png"
+        init_image = load_image(url).convert("RGB")
+    return init_image
+
+
+def get_image_from_inpainting(pipeline_to_benchmark):
+    if pipeline_to_benchmark == "SD Inpainting":
+        image_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
+        mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
+        init_image = load_image(image_url).convert("RGB").resize((512, 512))
+        mask_image = load_image(mask_url).convert("RGB").resize((512, 512))
+    elif pipeline_to_benchmark == "SDXL Inpainting":
+        image_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdxl-text2img.png"
+        mask_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdxl-inpaint-mask.png"
+        init_image = load_image(image_url).convert("RGB")
+        mask_image = load_image(mask_url).convert("RGB")
+    return init_image, mask_image
+
+
+def get_image_for_controlnet(pipeline_to_benchmark):
+    if pipeline_to_benchmark == "SD ControlNet":
+        image = load_image(
+            "https://huggingface.co/lllyasviel/sd-controlnet-hed/resolve/main/images/bird.png"
+        )
+    elif pipeline_to_benchmark == "SDXL ControlNet":
+        image = load_image(
+            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png"
+        )
+
+    return image
+
+
+def get_image_for_adapters(pipeline_to_benchmark):
+    if pipeline_to_benchmark == "SD T2I Adapters":
+        image = load_image(
+            "https://huggingface.co/TencentARC/t2iadapter_canny_sd14v1/resolve/main/images/canny.png"
+        ).convert("L")
+    elif pipeline_to_benchmark == "SDXL T2I Adapters":
+        image = load_image(
+            "https://huggingface.co/Adapter/t2iadapter/resolve/main/figs_SDXLV1.0/cond_canny.png"
+        ).convert("L")
+    return image
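The four helpers share one convention: each takes the pipeline_to_benchmark label, downloads the matching conditioning input(s), and returns PIL images ready to drop into call_args. A quick sketch of exercising them directly; the label strings follow the ones visible in the snippets above, and any dropdown wording not shown in this diff is an assumption.

import utils

# Each call re-downloads its image(s) from the hard-coded URL; nothing is cached.
init_image = utils.get_image_for_img_to_img("SD I2I")
print(init_image.size)  # (768, 512) after the resize

image, mask_image = utils.get_image_from_inpainting("SD Inpainting")
print(image.size, mask_image.size)  # both resized to (512, 512)

canny = utils.get_image_for_adapters("SD T2I Adapters")
print(canny.mode)  # "L": a single-channel canny edge map

Note that each helper raises UnboundLocalError at its return statement when handed a label it does not recognize, so the dispatch branches in app.py and the strings checked here have to stay in sync.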