from typing import Any, Dict, List, Optional, Union

import torch

from diffusers.models.modeling_outputs import Transformer2DModelOutput
from diffusers.utils import (
    USE_PEFT_BACKEND,
    is_torch_version,
    scale_lora_layers,
    unscale_lora_layers,
)
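
# ---------------------------------------------------------------------------
# The TensorRT fast path in `sd3_forward` below expects one compiled engine
# per transformer block, registered on the module as `self.engines`, plus a
# `self.cuda_stream` handle. Neither attribute is created in this file; the
# Protocol below is only an illustrative sketch of the assumed call contract
# (named input tensors in, named output tensors out), not a diffusers or
# TensorRT API.
# ---------------------------------------------------------------------------
from typing import Protocol


class BlockEngine(Protocol):
    def __call__(
        self, feed_dict: Dict[str, torch.Tensor], stream: Any
    ) -> Dict[str, torch.Tensor]:
        """Run one transformer block and return its named output tensors."""
        ...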
|
def sd3_forward(
    self,
    hidden_states: torch.FloatTensor,
    encoder_hidden_states: Optional[torch.FloatTensor] = None,
    pooled_projections: Optional[torch.FloatTensor] = None,
    timestep: Optional[torch.LongTensor] = None,
    block_controlnet_hidden_states: Optional[List[torch.Tensor]] = None,
    joint_attention_kwargs: Optional[Dict[str, Any]] = None,
    return_dict: bool = True,
) -> Union[torch.FloatTensor, Transformer2DModelOutput]:
    """
    The [`SD3Transformer2DModel`] forward method.

    Args:
        hidden_states (`torch.FloatTensor` of shape `(batch_size, channel, height, width)`):
            Input `hidden_states`.
        encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_len, embed_dims)`):
            Conditional embeddings (embeddings computed from the input conditions such as prompts) to use.
        pooled_projections (`torch.FloatTensor` of shape `(batch_size, projection_dim)`):
            Embeddings projected from the embeddings of input conditions.
        timestep (`torch.LongTensor`):
            Used to indicate the denoising step.
        block_controlnet_hidden_states (`list` of `torch.Tensor`):
            A list of tensors that, if specified, are added to the residuals of the transformer blocks.
        joint_attention_kwargs (`dict`, *optional*):
            A kwargs dictionary that, if specified, is passed along to the `AttentionProcessor` as defined under
            `self.processor` in
            [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
        return_dict (`bool`, *optional*, defaults to `True`):
            Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain
            tuple.

    Returns:
        If `return_dict` is `True`, a [`~models.transformer_2d.Transformer2DModelOutput`] is returned; otherwise a
        `tuple` where the first element is the sample tensor.
    """
|
    if joint_attention_kwargs is not None:
        # Work on a copy so the caller's kwargs dict is not mutated.
        joint_attention_kwargs = joint_attention_kwargs.copy()
        lora_scale = joint_attention_kwargs.pop("scale", 1.0)
    else:
        lora_scale = 1.0

    if USE_PEFT_BACKEND:
        # Weight the LoRA layers by `lora_scale`; undone at the end of forward.
        scale_lora_layers(self, lora_scale)
|
    height, width = hidden_states.shape[-2:]

    # Patchify the latent into a token sequence and build the conditioning
    # embeddings (timestep + pooled text, and the per-token text sequence).
    hidden_states = self.pos_embed(hidden_states)
    temb = self.time_text_embed(timestep, pooled_projections)
    encoder_hidden_states = self.context_embedder(encoder_hidden_states)
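    # Streams entering the transformer loop (shapes for illustration; N is the
    # number of latent patches, S the text sequence length, D the inner dim):
    #   hidden_states:         (B, N, D)
    #   encoder_hidden_states: (B, S, D)
    #   temb:                  (B, D)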
    for index_block, block in enumerate(self.transformer_blocks):
        if self.training and self.gradient_checkpointing:
            # Trade compute for memory: recompute the block's activations
            # during the backward pass instead of storing them.
            def create_custom_forward(module, return_dict=None):
                def custom_forward(*inputs):
                    if return_dict is not None:
                        return module(*inputs, return_dict=return_dict)
                    else:
                        return module(*inputs)

                return custom_forward

            ckpt_kwargs: Dict[str, Any] = (
                {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
            )
            encoder_hidden_states, hidden_states = torch.utils.checkpoint.checkpoint(
                create_custom_forward(block),
                hidden_states,
                encoder_hidden_states,
                temb,
                **ckpt_kwargs,
            )
        elif hasattr(self, "use_trt_infer") and self.use_trt_infer:
            # TensorRT fast path: run the compiled per-block engine instead of
            # the PyTorch module (see the `BlockEngine` sketch above).
            feed_dict = {
                "hidden_states": hidden_states,
                "encoder_hidden_states": encoder_hidden_states,
                "temb": temb,
            }
            _results = self.engines[f"transformer_blocks.{index_block}"](
                feed_dict, self.cuda_stream
            )
            # The final (context_pre_only) block does not emit an updated text
            # stream, so only fetch it for the earlier blocks.
            if index_block != len(self.transformer_blocks) - 1:
                encoder_hidden_states = _results["encoder_hidden_states_out"]
            hidden_states = _results["hidden_states_out"]
        else:
            encoder_hidden_states, hidden_states = block(
                hidden_states=hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                temb=temb,
            )

        # Spread the supplied ControlNet residuals evenly across the
        # transformer depth and add the matching one to the image stream.
        if block_controlnet_hidden_states is not None and block.context_pre_only is False:
            interval_control = len(self.transformer_blocks) // len(block_controlnet_hidden_states)
            hidden_states = hidden_states + block_controlnet_hidden_states[index_block // interval_control]
|
    # Final modulated norm + linear projection back to patch pixels.
    hidden_states = self.norm_out(hidden_states, temb)
    hidden_states = self.proj_out(hidden_states)
|
    # Un-patchify: fold the token sequence back into a (B, C, H, W) image.
    patch_size = self.config.patch_size
    height = height // patch_size
    width = width // patch_size

    hidden_states = hidden_states.reshape(
        shape=(hidden_states.shape[0], height, width, patch_size, patch_size, self.out_channels)
    )
    hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states)
    output = hidden_states.reshape(
        shape=(hidden_states.shape[0], self.out_channels, height * patch_size, width * patch_size)
    )
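    # Shape walk-through of the un-patchify above, with illustrative numbers
    # (assuming a 128x128 latent and patch_size 2; not tied to any checkpoint):
    #   proj_out tokens: (B, 64 * 64, 2 * 2 * C)
    #   first reshape:   (B, 64, 64, 2, 2, C)
    #   einsum:          (B, C, 64, 2, 64, 2)
    #   final reshape:   (B, C, 128, 128)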
|
|
|
    if USE_PEFT_BACKEND:
        # Remove the LoRA scaling applied at the top of the forward.
        unscale_lora_layers(self, lora_scale)

    if not return_dict:
        return (output,)

    return Transformer2DModelOutput(sample=output)
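
# ---------------------------------------------------------------------------
# Usage sketch: binding the patched forward onto a loaded SD3 transformer.
# The checkpoint id is only an example; without `use_trt_infer` and
# `self.engines` attached, the block loop above falls back to the stock
# PyTorch path.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import types

    from diffusers import SD3Transformer2DModel

    transformer = SD3Transformer2DModel.from_pretrained(
        "stabilityai/stable-diffusion-3-medium-diffusers",
        subfolder="transformer",
        torch_dtype=torch.float16,
    ).to("cuda")
    # Rebind so subsequent calls go through `sd3_forward` defined above.
    transformer.forward = types.MethodType(sd3_forward, transformer)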
|
|