Commit
·
819a296
1
Parent(s):
2acb9f2
up
Browse files
Changed files:
- run_local_img2img_xl.py +12 -7
- run_local_xl.py +9 -3
run_local_img2img_xl.py
CHANGED
|
@@ -1,5 +1,5 @@
|
|
| 1 |
#!/usr/bin/env python3
|
| 2 |
-
from diffusers import DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionPipeline, KDPM2DiscreteScheduler, StableDiffusionImg2ImgPipeline, HeunDiscreteScheduler, KDPM2AncestralDiscreteScheduler, DDIMScheduler
|
| 3 |
import time
|
| 4 |
import os
|
| 5 |
from huggingface_hub import HfApi
|
|
@@ -15,23 +15,28 @@ path = sys.argv[1]
|
|
| 15 |
|
| 16 |
api = HfApi()
|
| 17 |
start_time = time.time()
|
| 18 |
-
pipe =
|
| 19 |
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
|
| 20 |
# pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
|
| 21 |
-
# pipe =
|
| 22 |
|
| 23 |
# compel = Compel(tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder)
|
| 24 |
|
| 25 |
|
| 26 |
pipe = pipe.to("cuda")
|
| 27 |
|
| 28 |
-
prompt = "
|
|
|
|
| 29 |
|
| 30 |
# pipe.unet.to(memory_format=torch.channels_last)
|
| 31 |
-
pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
|
| 32 |
-
pipe(prompt=prompt, num_inference_steps=2).images[0]
|
|
|
|
|
|
|
|
|
|
|
|
|
| 33 |
|
| 34 |
-
image = pipe(prompt=prompt).images[0]
|
| 35 |
|
| 36 |
file_name = f"aaa"
|
| 37 |
path = os.path.join(Path.home(), "images", f"{file_name}.png")
|
|
|
|
| 1 |
#!/usr/bin/env python3
|
| 2 |
+
from diffusers import DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionPipeline, KDPM2DiscreteScheduler, StableDiffusionImg2ImgPipeline, HeunDiscreteScheduler, KDPM2AncestralDiscreteScheduler, DDIMScheduler, StableDiffusionXLImg2ImgPipeline
|
| 3 |
import time
|
| 4 |
import os
|
| 5 |
from huggingface_hub import HfApi
|
|
|
|
| 15 |
|
| 16 |
api = HfApi()
|
| 17 |
start_time = time.time()
|
| 18 |
+
pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(path, torch_dtype=torch.float16)
|
| 19 |
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
|
| 20 |
# pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
|
| 21 |
+
# pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(path, torch_dtype=torch.float16, safety_checker=None)
|
| 22 |
|
| 23 |
# compel = Compel(tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder)
|
| 24 |
|
| 25 |
|
| 26 |
pipe = pipe.to("cuda")
|
| 27 |
|
| 28 |
+
prompt = "A red castle on a beautiful landscape with a nice sunset"
|
| 29 |
+
|
| 30 |
|
| 31 |
# pipe.unet.to(memory_format=torch.channels_last)
|
| 32 |
+
# pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
|
| 33 |
+
# pipe(prompt=prompt, num_inference_steps=2).images[0]
|
| 34 |
+
url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
|
| 35 |
+
|
| 36 |
+
response = requests.get(url)
|
| 37 |
+
init_image = Image.open(BytesIO(response.content)).convert("RGB").resize((1024, 1024))
|
| 38 |
|
| 39 |
+
image = pipe(prompt=prompt, image=init_image, strength=0.9).images[0]
|
| 40 |
|
| 41 |
file_name = f"aaa"
|
| 42 |
path = os.path.join(Path.home(), "images", f"{file_name}.png")
|
run_local_xl.py
CHANGED
|
@@ -28,10 +28,16 @@ pipe = pipe.to("cuda")
|
|
| 28 |
prompt = "Elon Musk riding a green horse on Mars"
|
| 29 |
|
| 30 |
# pipe.unet.to(memory_format=torch.channels_last)
|
| 31 |
-
pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
|
| 32 |
-
pipe(prompt=prompt, num_inference_steps=2).images[0]
|
| 33 |
|
| 34 |
-
image = pipe(prompt=prompt).images
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 35 |
|
| 36 |
file_name = f"aaa"
|
| 37 |
path = os.path.join(Path.home(), "images", f"{file_name}.png")
|
|
|
|
| 28 |
prompt = "Elon Musk riding a green horse on Mars"
|
| 29 |
|
| 30 |
# pipe.unet.to(memory_format=torch.channels_last)
|
| 31 |
+
# pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
|
| 32 |
+
# pipe(prompt=prompt, num_inference_steps=2).images[0]
|
| 33 |
|
| 34 |
+
image = pipe(prompt=prompt, num_images_per_prompt=1, num_inference_steps=40, output_type="latent").images
|
| 35 |
+
pipe.to("cpu")
|
| 36 |
+
|
| 37 |
+
pipe = DiffusionPipeline.from_pretrained("/home/patrick/diffusers-sd-xl/stable-diffusion-xl-refiner-0.9", torch_dtype=torch.float16)
|
| 38 |
+
pipe.to("cuda")
|
| 39 |
+
|
| 40 |
+
image = pipe(prompt=prompt, image=image, strength=0.5).images[0]
|
| 41 |
|
| 42 |
file_name = f"aaa"
|
| 43 |
path = os.path.join(Path.home(), "images", f"{file_name}.png")
|