pipeline.py
# Copyright 2024 PixArt-Sigma Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import html
import inspect
import re
import urllib.parse as ul
from typing import Callable, List, Optional, Tuple, Union

import numpy as np
import torch

from diffusers.image_processor import PixArtImageProcessor
from diffusers.models import AutoencoderKL
from diffusers.schedulers import DPMSolverMultistepScheduler
from diffusers.utils import (
    BACKENDS_MAPPING,
    deprecate,
    logging,
    replace_example_docstring,
)
from diffusers.utils.torch_utils import randn_tensor
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput

from pixcell_controlnet import PixCellControlNet
from pixcell_controlnet_transformer import PixCellTransformer2DModelControlNet


# TODO:
# Clean up the conditioning code
# Need to fix how the conditioning is provided
# Maybe add UNI to the pipeline


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch

        >>> # The checkpoint path below is a placeholder for a trained PixCell ControlNet.
        >>> pipe = PixCellControlNetPipeline.from_pretrained("path/to/pixcell-controlnet")
        >>> pipe.to("cuda")

        >>> # `uni_embeds` is a (B, N, D) tensor of UNI embeddings extracted beforehand.
        >>> negative_uni_embeds = pipe.get_unconditional_embedding(batch_size=uni_embeds.shape[0])
        >>> image = pipe(uni_embeds=uni_embeds, negative_uni_embeds=negative_uni_embeds).images[0]
        ```
"""


# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
def retrieve_timesteps(
    scheduler,
    num_inference_steps: Optional[int] = None,
    device: Optional[Union[str, torch.device]] = None,
    timesteps: Optional[List[int]] = None,
    sigmas: Optional[List[float]] = None,
    **kwargs,
):
    r"""
    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`):
            The scheduler to get timesteps from.
        num_inference_steps (`int`):
            The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
            must be `None`.
        device (`str` or `torch.device`, *optional*):
            The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
        timesteps (`List[int]`, *optional*):
            Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
            `num_inference_steps` and `sigmas` must be `None`.
        sigmas (`List[float]`, *optional*):
            Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
            `num_inference_steps` and `timesteps` must be `None`.

    Returns:
        `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
        second element is the number of inference steps.
    """
    if timesteps is not None and sigmas is not None:
        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
    if timesteps is not None:
        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accepts_timesteps:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" timestep schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    elif sigmas is not None:
        accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accept_sigmas:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" sigmas schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    else:
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        timesteps = scheduler.timesteps
    return timesteps, num_inference_steps


class PixCellControlNetPipeline(DiffusionPipeline):
    r"""
    Pipeline for SSL-to-image generation using PixCell, with ControlNet conditioning.

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
        transformer ([`PixCellTransformer2DModelControlNet`]):
            The PixCell diffusion transformer that denoises the image latents.
        controlnet ([`PixCellControlNet`]):
            Provides additional conditioning to the transformer during denoising.
        scheduler ([`DPMSolverMultistepScheduler`]):
            A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
    """

    model_cpu_offload_seq = "transformer->vae"

    def __init__(
        self,
        vae: AutoencoderKL,
        transformer: PixCellTransformer2DModelControlNet,
        controlnet: PixCellControlNet,
        scheduler: DPMSolverMultistepScheduler,
    ):
        super().__init__()

        self.register_modules(
            vae=vae, transformer=transformer, controlnet=controlnet, scheduler=scheduler
        )

        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
        self.image_processor = PixArtImageProcessor(vae_scale_factor=self.vae_scale_factor)

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    def get_unconditional_embedding(self, batch_size=1):
        # Unconditional embedding is learned
        uncond = self.transformer.caption_projection.uncond_embedding.clone().tile(batch_size, 1, 1)
        return uncond

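    # NOTE: the tensor returned by `get_unconditional_embedding` can be passed as
    # `negative_uni_embeds` to `__call__` to enable classifier-free guidance with
    # the learned null embedding.
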
    # Adapted from diffusers.pipelines.pixart_alpha.pipeline_pixart_alpha.PixArtAlphaPipeline.check_inputs
    def check_inputs(
        self,
        height,
        width,
        callback_steps,
        uni_embeds=None,
        negative_uni_embeds=None,
        guidance_scale=None,
    ):
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        if uni_embeds is None:
            raise ValueError("Provide a UNI embedding `uni_embeds`.")
        elif len(uni_embeds.shape) != 3:
            raise ValueError("UNI embedding must have shape (B, N, D).")
        elif uni_embeds.shape[1] != self.transformer.config.caption_num_tokens:
            raise ValueError(
                f"Number of UNI embeddings must match the ones used in training ({self.transformer.config.caption_num_tokens})."
            )
        elif uni_embeds.shape[2] != self.transformer.config.caption_channels:
            raise ValueError("UNI embedding given has incorrect dimensions.")

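        # Negative embeddings are only required (and validated) when
        # classifier-free guidance is enabled.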
        if guidance_scale > 1.0:
            if negative_uni_embeds is None:
                raise ValueError("Provide a negative UNI embedding `negative_uni_embeds`.")
            elif len(negative_uni_embeds.shape) != 3:
                raise ValueError("Negative UNI embedding must have shape (B, N, D).")
            elif negative_uni_embeds.shape[1] != self.transformer.config.caption_num_tokens:
                raise ValueError(
                    f"Number of negative UNI embeddings must match the ones used in training ({self.transformer.config.caption_num_tokens})."
                )
            elif negative_uni_embeds.shape[2] != self.transformer.config.caption_channels:
                raise ValueError("Negative UNI embedding given has incorrect dimensions.")

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
    def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
        shape = (
            batch_size,
            num_channels_latents,
            int(height) // self.vae_scale_factor,
            int(width) // self.vae_scale_factor,
        )
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        num_inference_steps: int = 20,
        timesteps: List[int] = None,
        sigmas: List[float] = None,
        guidance_scale: float = 1.5,
        controlnet_input: Optional[np.ndarray] = None,
        num_images_per_prompt: Optional[int] = 1,
        height: Optional[int] = None,
        width: Optional[int] = None,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.Tensor] = None,
        uni_embeds: Optional[torch.Tensor] = None,
        negative_uni_embeds: Optional[torch.Tensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """
        Function invoked when calling the pipeline for generation.

        Args:
            num_inference_steps (`int`, *optional*, defaults to 20):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            timesteps (`List[int]`, *optional*):
                Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
                in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used. Must be in descending order.
            sigmas (`List[float]`, *optional*):
                Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
                their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
                will be used.
            guidance_scale (`float`, *optional*, defaults to 1.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages images that are closely linked to the conditioning embedding,
                usually at the expense of lower image quality.
            controlnet_input (`np.ndarray`, *optional*, defaults to `None`):
                The conditioning image for the ControlNet, as an RGB array of shape `(H, W, 3)` with values in
                `[0, 255]`. If none is provided, the ControlNet is not used.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            height (`int`, *optional*, defaults to `self.transformer.config.sample_size * self.vae_scale_factor`):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to `self.transformer.config.sample_size * self.vae_scale_factor`):
                The width in pixels of the generated image.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different conditionings. If not provided, a
                latents tensor will be generated by sampling using the supplied random `generator`.
            uni_embeds (`torch.Tensor`, *optional*):
                Pre-generated UNI embeddings of shape `(B, N, D)` that condition the generation.
            negative_uni_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative UNI embeddings used for classifier-free guidance.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.

        Examples:

        Returns:
            [`~pipelines.ImagePipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
                returned where the first element is a list with the generated images.
        """
        # 1. Check inputs. Raise error if not correct
        height = height or self.transformer.config.sample_size * self.vae_scale_factor
        width = width or self.transformer.config.sample_size * self.vae_scale_factor

        self.check_inputs(
            height,
            width,
            callback_steps,
            uni_embeds,
            negative_uni_embeds,
            guidance_scale,
        )

        # 2. Determine the batch size from the UNI embeddings
        batch_size = uni_embeds.shape[0]

        device = self._execution_device

        # 3. Handle conditioning

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # UNI
        uni_embeds = uni_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        # Do not batch for CFG when using ControlNet
        # TODO: Change to batched inputs?
        if do_classifier_free_guidance:
            negative_uni_embeds = negative_uni_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            # uni_embeds = torch.cat([negative_uni_embeds, uni_embeds], dim=0)

        # ControlNet -- the (H, W, 3) conditioning image, e.g. (256, 256, 3), is
        # converted to a (1, 3, H, W) tensor in [-1, 1]
        if controlnet_input is not None:
            controlnet_input_torch = (
                torch.from_numpy(controlnet_input.copy() / 255.0)
                .permute([2, 0, 1])
                .unsqueeze(0)
                .to(device=device, dtype=self.vae.dtype)
            )
            controlnet_input_torch = 2 * (controlnet_input_torch - 0.5)

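            # Encoding uses the posterior mean rather than a sample, so the
            # conditioning latent is deterministic across calls.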
            vae_scale = self.vae.config.scaling_factor
            vae_shift = getattr(self.vae.config, "shift_factor", 0)
            controlnet_input_latent = self.vae.encode(controlnet_input_torch).latent_dist.mean
            controlnet_input_latent = (controlnet_input_latent - vae_shift) * vae_scale

        # 4. Prepare timesteps
        timesteps, num_inference_steps = retrieve_timesteps(
            self.scheduler, num_inference_steps, device, timesteps, sigmas
        )

        # 5. Prepare latents.
        latent_channels = self.transformer.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            latent_channels,
            height,
            width,
            uni_embeds.dtype,
            device,
            generator,
            latents,
        )

        # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        added_cond_kwargs = {}

        # 7. Denoising loop
        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)

        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # Do not batch for CFG when using ControlNet
                # latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                # latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
                latent_model_input = self.scheduler.scale_model_input(latents, t)

                current_timestep = t
                if not torch.is_tensor(current_timestep):
                    # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                    # This would be a good case for the `match` statement (Python 3.10+)
                    is_mps = latent_model_input.device.type == "mps"
                    if isinstance(current_timestep, float):
                        dtype = torch.float32 if is_mps else torch.float64
                    else:
                        dtype = torch.int32 if is_mps else torch.int64
                    current_timestep = torch.tensor([current_timestep], dtype=dtype, device=latent_model_input.device)
                elif len(current_timestep.shape) == 0:
                    current_timestep = current_timestep[None].to(latent_model_input.device)
                # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
                current_timestep = current_timestep.expand(latent_model_input.shape[0])

                # predict controlnet conditioning
                if controlnet_input is not None:
                    controlnet_outputs = self.controlnet(
                        hidden_states=latent_model_input,
                        conditioning=controlnet_input_latent,
                        encoder_hidden_states=uni_embeds,
                        timestep=current_timestep,
                        # added_cond_kwargs=added_cond_kwargs,
                        return_dict=False,
                    )[0]
                else:
                    controlnet_outputs = None

                # predict noise model_output
                noise_pred_cond = self.transformer(
                    latent_model_input,
                    encoder_hidden_states=uni_embeds,
                    controlnet_outputs=controlnet_outputs,
                    timestep=current_timestep,
                    added_cond_kwargs=added_cond_kwargs,
                    return_dict=False,
                )[0]

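                # The unconditional pass below runs without ControlNet residuals
                # (`controlnet_outputs=None`), so the conditional and unconditional
                # predictions cannot share one batched forward pass.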
                # perform guidance
                if do_classifier_free_guidance:
                    # Do not batch for CFG when using ControlNet
                    # noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    # noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                    noise_pred_uncond = self.transformer(
                        latent_model_input,
                        encoder_hidden_states=negative_uni_embeds,
                        controlnet_outputs=None,
                        timestep=current_timestep,
                        added_cond_kwargs=added_cond_kwargs,
                        return_dict=False,
                    )[0]
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)
                else:
                    noise_pred = noise_pred_cond

                # learned sigma
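                # (the transformer predicts noise and variance concatenated along
                # the channel dimension; keep only the noise half)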
                if self.transformer.config.out_channels // 2 == latent_channels:
                    noise_pred = noise_pred.chunk(2, dim=1)[0]

                # compute previous image: x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        step_idx = i // getattr(self.scheduler, "order", 1)
                        callback(step_idx, t, latents)

        if not output_type == "latent":
            vae_scale = self.vae.config.scaling_factor
            vae_shift = getattr(self.vae.config, "shift_factor", 0)

            image = self.vae.decode((latents / vae_scale) + vae_shift, return_dict=False)[0]
        else:
            image = latents

        if not output_type == "latent":
            image = self.image_processor.postprocess(image, output_type=output_type)

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
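

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the pipeline). The
# checkpoint path is a placeholder and random tensors stand in for real UNI
# features; a real run needs a trained PixCell ControlNet checkpoint and UNI
# embeddings extracted from a reference image.
#
#   import numpy as np
#   import torch
#
#   pipe = PixCellControlNetPipeline.from_pretrained("path/to/pixcell-controlnet")
#   pipe.to("cuda")
#
#   # (B, N, D) UNI embeddings; random values used here only as stand-ins.
#   uni = torch.randn(
#       1,
#       pipe.transformer.config.caption_num_tokens,
#       pipe.transformer.config.caption_channels,
#       device="cuda",
#   )
#   neg = pipe.get_unconditional_embedding(batch_size=1)
#
#   # RGB uint8 conditioning image of shape (H, W, 3) for the ControlNet.
#   cond = np.zeros((256, 256, 3), dtype=np.uint8)
#
#   image = pipe(
#       uni_embeds=uni,
#       negative_uni_embeds=neg,
#       controlnet_input=cond,
#       guidance_scale=1.5,
#   ).images[0]
# ---------------------------------------------------------------------------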