# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple

import torch
from einops import rearrange
from torch import nn
from torchvision import transforms

from cosmos_predict1.diffusion.conditioner import DataType
from cosmos_predict1.diffusion.module.blocks import GeneralDITTransformerBlock, PatchEmbed
from cosmos_predict1.diffusion.module.parallel import split_inputs_cp
from cosmos_predict1.diffusion.module.position_embedding import (
    MultiviewSinCosPosEmbAxis,
    MultiviewVideoRopePosition3DEmb,
)
from cosmos_predict1.diffusion.networks.general_dit import GeneralDIT
from cosmos_predict1.utils import log


class MultiviewGeneralDIT(GeneralDIT):
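    """Multiview variant of GeneralDIT.

    Extends the base DiT video network to inputs that stack n_views camera views along
    the temporal axis. Each view receives a learnable view embedding (and, optionally,
    a trajectory embedding and a repeat-frame embedding) that is concatenated to the
    input channels before patchification, and the transformer blocks and positional
    embeddings are replaced with multiview-aware variants.
    """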
    def __init__(
        self,
        max_img_h: int,
        max_img_w: int,
        max_frames: int,
        in_channels: int,
        out_channels: int,
        patch_spatial: tuple,
        patch_temporal: int,
        concat_padding_mask: bool = True,
        # attention settings
        block_config: str = "FA-CA-MLP",
        model_channels: int = 768,
        num_blocks: int = 10,
        num_heads: int = 16,
        mlp_ratio: float = 4.0,
        block_x_format: str = "BTHWD",
        # cross attention settings
        crossattn_emb_channels: int = 1024,
        use_cross_attn_mask: bool = False,
        # positional embedding settings
        pos_emb_cls: str = "sincos",
        pos_emb_learnable: bool = False,
        pos_emb_interpolation: str = "crop",
        affline_emb_norm: bool = False,  # whether or not to normalize the affine embedding
        use_adaln_lora: bool = False,
        adaln_lora_dim: int = 256,
        rope_h_extrapolation_ratio: float = 1.0,
        rope_w_extrapolation_ratio: float = 1.0,
        rope_t_extrapolation_ratio: float = 1.0,
        extra_per_block_abs_pos_emb: bool = True,
        extra_per_block_abs_pos_emb_type: str = "sincos",
        extra_h_extrapolation_ratio: float = 1.0,
        extra_w_extrapolation_ratio: float = 1.0,
        extra_t_extrapolation_ratio: float = 1.0,
        n_views: int = 3,
        view_condition_dim: int = 3,
        traj_condition_dim: int = 0,
        concat_view_embedding: bool = True,
        concat_traj_embedding: bool = False,
        add_repeat_frame_embedding: bool = False,
    ):
        self.n_views = n_views
        self.view_condition_dim = view_condition_dim
        self.concat_view_embedding = concat_view_embedding
        self.traj_condition_dim = traj_condition_dim
        self.concat_traj_embedding = concat_traj_embedding
        self.add_repeat_frame_embedding = add_repeat_frame_embedding
        super().__init__(
            max_img_h,
            max_img_w,
            max_frames,
            in_channels,
            out_channels,
            patch_spatial,
            patch_temporal,
            concat_padding_mask,
            block_config,
            model_channels,
            num_blocks,
            num_heads,
            mlp_ratio,
            block_x_format,
            crossattn_emb_channels,
            use_cross_attn_mask,
            pos_emb_cls,
            pos_emb_learnable,
            pos_emb_interpolation,
            affline_emb_norm,  # whether or not to normalize the affine embedding
            use_adaln_lora,
            adaln_lora_dim,
            rope_h_extrapolation_ratio,
            rope_w_extrapolation_ratio,
            rope_t_extrapolation_ratio,
            extra_per_block_abs_pos_emb,
            extra_per_block_abs_pos_emb_type,
            extra_h_extrapolation_ratio,
            extra_w_extrapolation_ratio,
            extra_t_extrapolation_ratio,
        )
        # reinit self.blocks
        del self.blocks
        self.blocks = nn.ModuleDict()
        for idx in range(self.num_blocks):
            self.blocks[f"block{idx}"] = GeneralDITTransformerBlock(
                x_dim=model_channels,
                context_dim=crossattn_emb_channels,
                num_heads=num_heads,
                block_config=block_config,
                mlp_ratio=mlp_ratio,
                x_format=self.block_x_format,
                use_adaln_lora=use_adaln_lora,
                adaln_lora_dim=adaln_lora_dim,
                n_views=self.n_views,
            )
        self.view_embeddings = nn.Embedding(n_views, view_condition_dim)  # Learnable embedding layer
        if self.concat_traj_embedding:
            self.traj_embeddings = nn.Linear(192, self.traj_condition_dim)  # Learnable embedding layer
        if self.add_repeat_frame_embedding:
            self.repeat_frame_embedding = nn.Linear(1, view_condition_dim)  # Learnable embedding layer

        self.initialize_weights()

    def build_patch_embed(self):
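        """Build the patch embedder, widening its input channels to make room for the
        optional view / trajectory embeddings and the padding-mask channel that are
        concatenated to the latent before patchification."""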
        (
            concat_padding_mask,
            in_channels,
            patch_spatial,
            patch_temporal,
            model_channels,
            view_condition_dim,
            traj_condition_dim,
        ) = (
            self.concat_padding_mask,
            self.in_channels,
            self.patch_spatial,
            self.patch_temporal,
            self.model_channels,
            self.view_condition_dim,
            self.traj_condition_dim,
        )
        if self.concat_view_embedding:
            in_channels = in_channels + view_condition_dim if view_condition_dim > 0 else in_channels
        if self.concat_traj_embedding:
            in_channels = in_channels + traj_condition_dim if traj_condition_dim > 0 else in_channels

        in_channels = in_channels + 1 if concat_padding_mask else in_channels
        self.x_embedder = PatchEmbed(
            spatial_patch_size=patch_spatial,
            temporal_patch_size=patch_temporal,
            in_channels=in_channels,
            out_channels=model_channels,
            bias=False,
        )

    def build_pos_embed(self):
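        """Build the multiview RoPE positional embedder and, if enabled, the extra
        per-block absolute sin/cos embedder. Only pos_emb_cls == "rope3d" is supported
        by this network."""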
        if self.pos_emb_cls == "rope3d":
            cls_type = MultiviewVideoRopePosition3DEmb
        else:
            raise ValueError(f"Unknown pos_emb_cls {self.pos_emb_cls}")
        log.critical(f"Building positional embedding with {self.pos_emb_cls} class, impl {cls_type}")
        kwargs = dict(
            model_channels=self.model_channels,
            len_h=self.max_img_h // self.patch_spatial,
            len_w=self.max_img_w // self.patch_spatial,
            len_t=self.max_frames // self.patch_temporal,
            max_fps=30,
            min_fps=1,
            is_learnable=self.pos_emb_learnable,
            interpolation=self.pos_emb_interpolation,
            head_dim=self.model_channels // self.num_heads,
            h_extrapolation_ratio=self.rope_h_extrapolation_ratio,
            w_extrapolation_ratio=self.rope_w_extrapolation_ratio,
            t_extrapolation_ratio=self.rope_t_extrapolation_ratio,
            n_views=self.n_views,
        )
        self.pos_embedder = cls_type(
            **kwargs,
        )

        if self.extra_per_block_abs_pos_emb:
            assert self.extra_per_block_abs_pos_emb_type in [
                "sincos",
            ], f"Unknown extra_per_block_abs_pos_emb_type {self.extra_per_block_abs_pos_emb_type}"
            kwargs["h_extrapolation_ratio"] = self.extra_h_extrapolation_ratio
            kwargs["w_extrapolation_ratio"] = self.extra_w_extrapolation_ratio
            kwargs["t_extrapolation_ratio"] = self.extra_t_extrapolation_ratio
            self.extra_pos_embedder = MultiviewSinCosPosEmbAxis(**kwargs)

    def forward_before_blocks(
        self,
        x: torch.Tensor,
        timesteps: torch.Tensor,
        crossattn_emb: torch.Tensor,
        crossattn_mask: Optional[torch.Tensor] = None,
        fps: Optional[torch.Tensor] = None,
        image_size: Optional[torch.Tensor] = None,
        padding_mask: Optional[torch.Tensor] = None,
        scalar_feature: Optional[torch.Tensor] = None,
        data_type: Optional[DataType] = DataType.VIDEO,
        latent_condition: Optional[torch.Tensor] = None,
        latent_condition_sigma: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> dict:
        """
        Args:
            x: (B, C, T, H, W) tensor of spatial-temp inputs
            timesteps: (B, ) tensor of timesteps
            crossattn_emb: (B, N, D) tensor of cross-attention embeddings
            crossattn_mask: (B, N) tensor of cross-attention masks

        Returns:
            A dict with the embedded input, affine embedding, cross-attention tensors,
            RoPE / extra positional embeddings, and the original input shape, ready to
            be consumed by the transformer blocks.
        """
        trajectory = kwargs.get("trajectory", None)
        frame_repeat = kwargs.get("frame_repeat", None)
        del kwargs
        assert isinstance(
            data_type, DataType
        ), f"Expected DataType, got {type(data_type)}. We need to discuss this flag later."
        original_shape = x.shape
        x_B_T_H_W_D, rope_emb_L_1_1_D, extra_pos_emb_B_T_H_W_D_or_T_H_W_B_D = self.prepare_embedded_sequence(
            x,
            fps=fps,
            padding_mask=padding_mask,
            latent_condition=latent_condition,
            latent_condition_sigma=latent_condition_sigma,
            trajectory=trajectory,
            frame_repeat=frame_repeat,
        )
        # logging affline scale information
        affline_scale_log_info = {}

        timesteps_B_D, adaln_lora_B_3D = self.t_embedder(timesteps.flatten())
        affline_emb_B_D = timesteps_B_D
        affline_scale_log_info["timesteps_B_D"] = timesteps_B_D.detach()

        if scalar_feature is not None:
            raise NotImplementedError("Scalar feature is not implemented yet.")
            timesteps_B_D = timesteps_B_D + scalar_feature.mean(dim=1)

        affline_scale_log_info["affline_emb_B_D"] = affline_emb_B_D.detach()
        affline_emb_B_D = self.affline_norm(affline_emb_B_D)

        # for logging purpose
        self.affline_scale_log_info = affline_scale_log_info
        self.affline_emb = affline_emb_B_D
        self.crossattn_emb = crossattn_emb
        self.crossattn_mask = crossattn_mask

        if self.use_cross_attn_mask:
            crossattn_mask = crossattn_mask[:, None, None, :].to(dtype=torch.bool)  # [B, 1, 1, length]
        else:
            crossattn_mask = None
if self.blocks["block0"].x_format == "THWBD": | |
x = rearrange(x_B_T_H_W_D, "B T H W D -> T H W B D") | |
if extra_pos_emb_B_T_H_W_D_or_T_H_W_B_D is not None: | |
extra_pos_emb_B_T_H_W_D_or_T_H_W_B_D = rearrange( | |
extra_pos_emb_B_T_H_W_D_or_T_H_W_B_D, "B T H W D -> T H W B D" | |
) | |
crossattn_emb = rearrange(crossattn_emb, "B M D -> M B D") | |
if crossattn_mask: | |
crossattn_mask = rearrange(crossattn_mask, "B M -> M B") | |
elif self.blocks["block0"].x_format == "BTHWD": | |
x = x_B_T_H_W_D | |
else: | |
raise ValueError(f"Unknown x_format {self.blocks[0].x_format}") | |
        output = {
            "x": x,
            "affline_emb_B_D": affline_emb_B_D,
            "crossattn_emb": crossattn_emb,
            "crossattn_mask": crossattn_mask,
            "rope_emb_L_1_1_D": rope_emb_L_1_1_D,
            "adaln_lora_B_3D": adaln_lora_B_3D,
            "original_shape": original_shape,
            "extra_pos_emb_B_T_H_W_D_or_T_H_W_B_D": extra_pos_emb_B_T_H_W_D_or_T_H_W_B_D,
        }
        return output

    def prepare_embedded_sequence(
        self,
        x_B_C_T_H_W: torch.Tensor,
        fps: Optional[torch.Tensor] = None,
        padding_mask: Optional[torch.Tensor] = None,
        latent_condition: Optional[torch.Tensor] = None,
        latent_condition_sigma: Optional[torch.Tensor] = None,
        trajectory: Optional[torch.Tensor] = None,
        frame_repeat: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[torch.Tensor]]:
""" | |
Prepares an embedded sequence tensor by applying positional embeddings and handling padding masks. | |
Args: | |
x_B_C_T_H_W (torch.Tensor): video | |
fps (Optional[torch.Tensor]): Frames per second tensor to be used for positional embedding when required. | |
If None, a default value (`self.base_fps`) will be used. | |
padding_mask (Optional[torch.Tensor]): current it is not used | |
Returns: | |
Tuple[torch.Tensor, Optional[torch.Tensor]]: | |
- A tensor of shape (B, T, H, W, D) with the embedded sequence. | |
- An optional positional embedding tensor, returned only if the positional embedding class | |
(`self.pos_emb_cls`) includes 'rope'. Otherwise, None. | |
Notes: | |
- If `self.concat_padding_mask` is True, a padding mask channel is concatenated to the input tensor. | |
- The method of applying positional embeddings depends on the value of `self.pos_emb_cls`. | |
- If 'rope' is in `self.pos_emb_cls` (case insensitive), the positional embeddings are generated using | |
the `self.pos_embedder` with the shape [T, H, W]. | |
- If "fps_aware" is in `self.pos_emb_cls`, the positional embeddings are generated using the `self.pos_embedder` | |
with the fps tensor. | |
- Otherwise, the positional embeddings are generated without considering fps. | |
""" | |
        if self.concat_padding_mask:
            padding_mask = transforms.functional.resize(
                padding_mask, list(x_B_C_T_H_W.shape[-2:]), interpolation=transforms.InterpolationMode.NEAREST
            )
            x_B_C_T_H_W = torch.cat(
                [x_B_C_T_H_W, padding_mask.unsqueeze(1).repeat(1, 1, x_B_C_T_H_W.shape[2], 1, 1)], dim=1
            )
        view_indices = torch.arange(self.n_views).to(x_B_C_T_H_W.device)  # View indices [0, 1, ..., V-1]
        view_embedding = self.view_embeddings(view_indices)  # Shape: [V, embedding_dim]
        view_embedding = rearrange(view_embedding, "V D -> D V")
        view_embedding = view_embedding.unsqueeze(0).unsqueeze(3).unsqueeze(4).unsqueeze(5)  # Shape: [1, D, V, 1, 1, 1]

        if self.add_repeat_frame_embedding:
            if frame_repeat is None:
                frame_repeat = (
                    torch.zeros([x_B_C_T_H_W.shape[0], view_embedding.shape[1]])
                    .to(view_embedding.device)
                    .to(view_embedding.dtype)
                )
            frame_repeat_embedding = self.repeat_frame_embedding(frame_repeat.unsqueeze(-1))
            frame_repeat_embedding = rearrange(frame_repeat_embedding, "B V D -> B D V")
            view_embedding = view_embedding + frame_repeat_embedding.unsqueeze(3).unsqueeze(4).unsqueeze(5)
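        # Split the flattened (V * T) temporal axis into explicit view and time axes so the
        # per-view embedding (and optional trajectory embedding) can be broadcast over time
        # and space and concatenated along the channel dimension.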
        x_B_C_V_T_H_W = rearrange(x_B_C_T_H_W, "B C (V T) H W -> B C V T H W", V=self.n_views)
        view_embedding = view_embedding.expand(
            x_B_C_V_T_H_W.shape[0],
            view_embedding.shape[1],
            view_embedding.shape[2],
            x_B_C_V_T_H_W.shape[3],
            x_B_C_V_T_H_W.shape[4],
            x_B_C_V_T_H_W.shape[5],
        )  # Shape: [B, D, V, T, H, W]

        if self.concat_traj_embedding:
            traj_emb = self.traj_embeddings(trajectory)
            traj_emb = traj_emb.unsqueeze(2).unsqueeze(3).unsqueeze(4).unsqueeze(5)
            traj_emb = traj_emb.expand(
                x_B_C_V_T_H_W.shape[0],
                traj_emb.shape[1],
                view_embedding.shape[2],
                x_B_C_V_T_H_W.shape[3],
                x_B_C_V_T_H_W.shape[4],
                x_B_C_V_T_H_W.shape[5],
            )  # Shape: [B, D_traj, V, T, H, W]
            x_B_C_V_T_H_W = torch.cat([x_B_C_V_T_H_W, view_embedding, traj_emb], dim=1)
        else:
            x_B_C_V_T_H_W = torch.cat([x_B_C_V_T_H_W, view_embedding], dim=1)

        x_B_C_T_H_W = rearrange(x_B_C_V_T_H_W, "B C V T H W -> B C (V T) H W", V=self.n_views)
        x_B_T_H_W_D = self.x_embedder(x_B_C_T_H_W)

        if self.extra_per_block_abs_pos_emb:
            extra_pos_emb = self.extra_pos_embedder(x_B_T_H_W_D, fps=fps)
        else:
            extra_pos_emb = None

        if "rope" in self.pos_emb_cls.lower():
            return x_B_T_H_W_D, self.pos_embedder(x_B_T_H_W_D, fps=fps), extra_pos_emb

        if "fps_aware" in self.pos_emb_cls:
            x_B_T_H_W_D = x_B_T_H_W_D + self.pos_embedder(x_B_T_H_W_D, fps=fps)  # [B, T, H, W, D]
        else:
            x_B_T_H_W_D = x_B_T_H_W_D + self.pos_embedder(x_B_T_H_W_D)  # [B, T, H, W, D]

        return x_B_T_H_W_D, None, extra_pos_emb
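

if __name__ == "__main__":
    # Illustrative sketch only (not part of the library): it reproduces, with plain
    # torch/einops, how prepare_embedded_sequence broadcasts the learnable per-view
    # embedding over the stacked (V * T) temporal axis before patchification.
    # All sizes below are hypothetical.
    B, C, V, T, H, W = 2, 16, 3, 8, 44, 80
    view_condition_dim = 3

    x_B_C_VT_H_W = torch.randn(B, C, V * T, H, W)
    view_embeddings = nn.Embedding(V, view_condition_dim)

    view_emb = view_embeddings(torch.arange(V))  # [V, D]
    view_emb = rearrange(view_emb, "V D -> D V")[None, :, :, None, None, None]  # [1, D, V, 1, 1, 1]

    x_B_C_V_T_H_W = rearrange(x_B_C_VT_H_W, "B C (V T) H W -> B C V T H W", V=V)
    view_emb = view_emb.expand(B, view_condition_dim, V, T, H, W)
    x_B_C_V_T_H_W = torch.cat([x_B_C_V_T_H_W, view_emb], dim=1)  # channels: C + view_condition_dim
    x_B_C_VT_H_W = rearrange(x_B_C_V_T_H_W, "B C V T H W -> B C (V T) H W", V=V)

    print(x_B_C_VT_H_W.shape)  # torch.Size([2, 19, 24, 44, 80])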