Sm0kyWu committed
Commit a03e4b0 · verified · 1 Parent(s): a0d04b2

Upload 69 files

This view is limited to 50 files because it contains too many changes. See raw diff.

Files changed (50)
  1. Amodal3R/__init__.py +6 -3
  2. Amodal3R/models/__init__.py +74 -3
  3. Amodal3R/models/sparse_structure_flow.py +200 -3
  4. Amodal3R/models/sparse_structure_flow_doubleattn_weighted.py +207 -3
  5. Amodal3R/models/sparse_structure_vae.py +306 -3
  6. Amodal3R/models/structured_latent_flow.py +265 -3
  7. Amodal3R/models/structured_latent_flow_doubleattn_weighted.py +273 -3
  8. Amodal3R/models/structured_latent_vae/__init__.py +3 -3
  9. Amodal3R/models/structured_latent_vae/base.py +125 -3
  10. Amodal3R/models/structured_latent_vae/decoder_gs.py +129 -3
  11. Amodal3R/models/structured_latent_vae/decoder_mesh.py +167 -3
  12. Amodal3R/models/structured_latent_vae/encoder.py +76 -3
  13. Amodal3R/modules/attention/__init__.py +36 -3
  14. Amodal3R/modules/attention/full_attn.py +140 -3
  15. Amodal3R/modules/attention/modules.py +265 -3
  16. Amodal3R/modules/norm.py +25 -3
  17. Amodal3R/modules/sparse/__init__.py +102 -3
  18. Amodal3R/modules/sparse/attention/__init__.py +4 -3
  19. Amodal3R/modules/sparse/attention/full_attn.py +215 -3
  20. Amodal3R/modules/sparse/attention/modules.py +305 -3
  21. Amodal3R/modules/sparse/attention/serialized_attn.py +193 -3
  22. Amodal3R/modules/sparse/attention/windowed_attn.py +135 -3
  23. Amodal3R/modules/sparse/basic.py +471 -3
  24. Amodal3R/modules/sparse/conv/__init__.py +21 -3
  25. Amodal3R/modules/sparse/conv/conv_spconv.py +80 -3
  26. Amodal3R/modules/sparse/conv/conv_torchsparse.py +38 -3
  27. Amodal3R/modules/sparse/linear.py +15 -3
  28. Amodal3R/modules/sparse/nonlinearity.py +35 -3
  29. Amodal3R/modules/sparse/norm.py +58 -3
  30. Amodal3R/modules/sparse/spatial.py +110 -3
  31. Amodal3R/modules/sparse/transformer/__init__.py +2 -3
  32. Amodal3R/modules/sparse/transformer/blocks.py +151 -3
  33. Amodal3R/modules/sparse/transformer/modulated.py +459 -3
  34. Amodal3R/modules/spatial.py +48 -3
  35. Amodal3R/modules/transformer/__init__.py +2 -3
  36. Amodal3R/modules/transformer/blocks.py +182 -3
  37. Amodal3R/modules/transformer/modulated.py +434 -3
  38. Amodal3R/modules/utils.py +54 -3
  39. Amodal3R/pipelines/Amodal3R_image_to_3d.py +435 -3
  40. Amodal3R/pipelines/__init__.py +24 -3
  41. Amodal3R/pipelines/base.py +66 -3
  42. Amodal3R/pipelines/samplers/__init__.py +2 -3
  43. Amodal3R/pipelines/samplers/base.py +20 -3
  44. Amodal3R/pipelines/samplers/classifier_free_guidance_mixin.py +12 -3
  45. Amodal3R/pipelines/samplers/flow_euler.py +199 -3
  46. Amodal3R/pipelines/samplers/guidance_interval_mixin.py +15 -3
  47. Amodal3R/renderers/__init__.py +29 -3
  48. Amodal3R/renderers/gaussian_render.py +231 -3
  49. Amodal3R/renderers/mesh_renderer.py +137 -3
  50. Amodal3R/renderers/sh_utils.py +118 -3
Amodal3R/__init__.py CHANGED
@@ -1,3 +1,6 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:6eb340add1370ec95a8c19c36d5e7deb96e5950f1ffd2464e988fd77b813c130
- size 141
+ from . import models
+ from . import modules
+ from . import pipelines
+ from . import renderers
+ from . import representations
+ from . import utils
Amodal3R/models/__init__.py CHANGED
@@ -1,3 +1,74 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:ab53a0e6e4744aebbfb0032872ade7ec46d8712724c621ec08f3e8a134f5cb23
- size 2957
+ import importlib
+
+ __attributes = {
+     'SparseStructureEncoder': 'sparse_structure_vae',
+     'SparseStructureDecoder': 'sparse_structure_vae',
+     'SparseStructureFlowModel': 'sparse_structure_flow',
+     'SLatEncoder': 'structured_latent_vae',
+     'SLatGaussianDecoder': 'structured_latent_vae',
+     'SLatGaussianRefineDecoder': 'structured_latent_vae',
+     'SLatMeshDecoder': 'structured_latent_vae',
+     'SLatFlowModel': 'structured_latent_flow',
+     'SparseStructureFlowModelMaskAsCondWeighted': 'sparse_structure_flow_doubleattn_weighted',
+     'SLatFlowModelMaskAsCondWeighted': 'structured_latent_flow_doubleattn_weighted',
+ }
+
+ __submodules = []
+
+ __all__ = list(__attributes.keys()) + __submodules
+
+ def __getattr__(name):
+     if name not in globals():
+         if name in __attributes:
+             module_name = __attributes[name]
+             module = importlib.import_module(f".{module_name}", __name__)
+             globals()[name] = getattr(module, name)
+         elif name in __submodules:
+             module = importlib.import_module(f".{name}", __name__)
+             globals()[name] = module
+         else:
+             raise AttributeError(f"module {__name__} has no attribute {name}")
+     return globals()[name]
+
+
+ def from_pretrained(path: str, **kwargs):
+     """
+     Load a model from a pretrained checkpoint.
+
+     Args:
+         path: The path to the checkpoint. Can be either local path or a Hugging Face model name.
+             NOTE: config file and model file should take the name f'{path}.json' and f'{path}.safetensors' respectively.
+         **kwargs: Additional arguments for the model constructor.
+     """
+     import os
+     import json
+     from safetensors.torch import load_file
+     is_local = os.path.exists(f"{path}.json") and os.path.exists(f"{path}.safetensors")
+
+     if is_local:
+         config_file = f"{path}.json"
+         model_file = f"{path}.safetensors"
+     else:
+         from huggingface_hub import hf_hub_download
+         path_parts = path.split('/')
+         repo_id = f'{path_parts[0]}/{path_parts[1]}'
+         model_name = '/'.join(path_parts[2:])
+         config_file = hf_hub_download(repo_id, f"{model_name}.json")
+         model_file = hf_hub_download(repo_id, f"{model_name}.safetensors")
+
+     with open(config_file, 'r') as f:
+         config = json.load(f)
+     model = __getattr__(config['name'])(**config['args'], **kwargs)
+     model.load_state_dict(load_file(model_file))
+
+     return model
+
+
+ # For Pylance
+ if __name__ == '__main__':
+     from .sparse_structure_vae import SparseStructureEncoder, SparseStructureDecoder
+     from .sparse_structure_flow import SparseStructureFlowModel
+     from .structured_latent_vae import SLatEncoder, SLatGaussianDecoder, SLatMeshDecoder
+     from .structured_latent_flow import SLatFlowModel
+     from .sparse_structure_flow_doubleattn_weighted import SparseStructureFlowModelMaskAsCondWeighted
+     from .structured_latent_flow_doubleattn_weighted import SLatFlowModelMaskAsCondWeighted
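
The `__init__.py` above resolves model classes lazily through module-level `__getattr__` and expects each checkpoint to ship as a `<name>.json` config next to a `<name>.safetensors` weight file. A minimal usage sketch of that contract; the repository id and checkpoint name below are illustrative placeholders, not paths confirmed by this commit:

from Amodal3R import models

# First access resolves the class lazily via models.__getattr__ and caches it in globals().
FlowModel = models.SparseStructureFlowModel

# from_pretrained() accepts either a local prefix or a "<user>/<repo>/<subpath>" Hub name;
# the checkpoint name here is a placeholder.
model = models.from_pretrained("Sm0kyWu/Amodal3R/ckpts/example_model").eval()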
Amodal3R/models/sparse_structure_flow.py CHANGED
@@ -1,3 +1,200 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:cb5c67ec40a97587b55d63809eb51ebc320a7d79d06b55f170f16567eeb24eda
- size 7348
+ from typing import *
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import numpy as np
+ from ..modules.utils import convert_module_to_f16, convert_module_to_f32
+ from ..modules.transformer import AbsolutePositionEmbedder, ModulatedTransformerCrossBlock
+ from ..modules.spatial import patchify, unpatchify
+
+
+ class TimestepEmbedder(nn.Module):
+     """
+     Embeds scalar timesteps into vector representations.
+     """
+     def __init__(self, hidden_size, frequency_embedding_size=256):
+         super().__init__()
+         self.mlp = nn.Sequential(
+             nn.Linear(frequency_embedding_size, hidden_size, bias=True),
+             nn.SiLU(),
+             nn.Linear(hidden_size, hidden_size, bias=True),
+         )
+         self.frequency_embedding_size = frequency_embedding_size
+
+     @staticmethod
+     def timestep_embedding(t, dim, max_period=10000):
+         """
+         Create sinusoidal timestep embeddings.
+
+         Args:
+             t: a 1-D Tensor of N indices, one per batch element.
+                 These may be fractional.
+             dim: the dimension of the output.
+             max_period: controls the minimum frequency of the embeddings.
+
+         Returns:
+             an (N, D) Tensor of positional embeddings.
+         """
+         # https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py
+         half = dim // 2
+         freqs = torch.exp(
+             -np.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
+         ).to(device=t.device)
+         args = t[:, None].float() * freqs[None]
+         embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
+         if dim % 2:
+             embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
+         return embedding
+
+     def forward(self, t):
+         t_freq = self.timestep_embedding(t, self.frequency_embedding_size)
+         t_emb = self.mlp(t_freq)
+         return t_emb
+
+
+ class SparseStructureFlowModel(nn.Module):
+     def __init__(
+         self,
+         resolution: int,
+         in_channels: int,
+         model_channels: int,
+         cond_channels: int,
+         out_channels: int,
+         num_blocks: int,
+         num_heads: Optional[int] = None,
+         num_head_channels: Optional[int] = 64,
+         mlp_ratio: float = 4,
+         patch_size: int = 2,
+         pe_mode: Literal["ape", "rope"] = "ape",
+         use_fp16: bool = False,
+         use_checkpoint: bool = False,
+         share_mod: bool = False,
+         qk_rms_norm: bool = False,
+         qk_rms_norm_cross: bool = False,
+     ):
+         super().__init__()
+         self.resolution = resolution
+         self.in_channels = in_channels
+         self.model_channels = model_channels
+         self.cond_channels = cond_channels
+         self.out_channels = out_channels
+         self.num_blocks = num_blocks
+         self.num_heads = num_heads or model_channels // num_head_channels
+         self.mlp_ratio = mlp_ratio
+         self.patch_size = patch_size
+         self.pe_mode = pe_mode
+         self.use_fp16 = use_fp16
+         self.use_checkpoint = use_checkpoint
+         self.share_mod = share_mod
+         self.qk_rms_norm = qk_rms_norm
+         self.qk_rms_norm_cross = qk_rms_norm_cross
+         self.dtype = torch.float16 if use_fp16 else torch.float32
+
+         self.t_embedder = TimestepEmbedder(model_channels)
+         if share_mod:
+             self.adaLN_modulation = nn.Sequential(
+                 nn.SiLU(),
+                 nn.Linear(model_channels, 6 * model_channels, bias=True)
+             )
+
+         if pe_mode == "ape":
+             pos_embedder = AbsolutePositionEmbedder(model_channels, 3)
+             coords = torch.meshgrid(*[torch.arange(res, device=self.device) for res in [resolution // patch_size] * 3], indexing='ij')
+             coords = torch.stack(coords, dim=-1).reshape(-1, 3)
+             pos_emb = pos_embedder(coords)
+             self.register_buffer("pos_emb", pos_emb)
+
+         self.input_layer = nn.Linear(in_channels * patch_size**3, model_channels)
+
+         self.blocks = nn.ModuleList([
+             ModulatedTransformerCrossBlock(
+                 model_channels,
+                 cond_channels,
+                 num_heads=self.num_heads,
+                 mlp_ratio=self.mlp_ratio,
+                 attn_mode='full',
+                 use_checkpoint=self.use_checkpoint,
+                 use_rope=(pe_mode == "rope"),
+                 share_mod=share_mod,
+                 qk_rms_norm=self.qk_rms_norm,
+                 qk_rms_norm_cross=self.qk_rms_norm_cross,
+             )
+             for _ in range(num_blocks)
+         ])
+
+         self.out_layer = nn.Linear(model_channels, out_channels * patch_size**3)
+
+         self.initialize_weights()
+         if use_fp16:
+             self.convert_to_fp16()
+
+     @property
+     def device(self) -> torch.device:
+         """
+         Return the device of the model.
+         """
+         return next(self.parameters()).device
+
+     def convert_to_fp16(self) -> None:
+         """
+         Convert the torso of the model to float16.
+         """
+         self.blocks.apply(convert_module_to_f16)
+
+     def convert_to_fp32(self) -> None:
+         """
+         Convert the torso of the model to float32.
+         """
+         self.blocks.apply(convert_module_to_f32)
+
+     def initialize_weights(self) -> None:
+         # Initialize transformer layers:
+         def _basic_init(module):
+             if isinstance(module, nn.Linear):
+                 torch.nn.init.xavier_uniform_(module.weight)
+                 if module.bias is not None:
+                     nn.init.constant_(module.bias, 0)
+         self.apply(_basic_init)
+
+         # Initialize timestep embedding MLP:
+         nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
+         nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)
+
+         # Zero-out adaLN modulation layers in DiT blocks:
+         if self.share_mod:
+             nn.init.constant_(self.adaLN_modulation[-1].weight, 0)
+             nn.init.constant_(self.adaLN_modulation[-1].bias, 0)
+         else:
+             for block in self.blocks:
+                 nn.init.constant_(block.adaLN_modulation[-1].weight, 0)
+                 nn.init.constant_(block.adaLN_modulation[-1].bias, 0)
+
+         # Zero-out output layers:
+         nn.init.constant_(self.out_layer.weight, 0)
+         nn.init.constant_(self.out_layer.bias, 0)
+
+     def forward(self, x: torch.Tensor, t: torch.Tensor, cond: torch.Tensor) -> torch.Tensor:
+         assert [*x.shape] == [x.shape[0], self.in_channels, *[self.resolution] * 3], \
+             f"Input shape mismatch, got {x.shape}, expected {[x.shape[0], self.in_channels, *[self.resolution] * 3]}"
+
+         h = patchify(x, self.patch_size)
+         h = h.view(*h.shape[:2], -1).permute(0, 2, 1).contiguous()
+
+         h = self.input_layer(h)
+         h = h + self.pos_emb[None]
+         t_emb = self.t_embedder(t)
+         if self.share_mod:
+             t_emb = self.adaLN_modulation(t_emb)
+         t_emb = t_emb.type(self.dtype)
+         h = h.type(self.dtype)
+         cond = cond.type(self.dtype)
+         for block in self.blocks:
+             h = block(h, t_emb, cond)
+         h = h.type(x.dtype)
+         h = F.layer_norm(h, h.shape[-1:])
+         h = self.out_layer(h)
+
+         h = h.permute(0, 2, 1).view(h.shape[0], h.shape[2], *[self.resolution // self.patch_size] * 3)
+         h = unpatchify(h, self.patch_size).contiguous()
+
+         return h
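
For orientation, the dense flow model above patchifies a (B, in_channels, resolution, resolution, resolution) grid into (resolution/patch_size)^3 tokens, adds an absolute position embedding, and runs modulated cross-attention against a (B, num_tokens, cond_channels) conditioning sequence. A toy-sized forward-pass sketch with hyperparameters chosen for illustration only (not the released configuration), assuming the attention backends used by Amodal3R.modules are installed:

import torch
from Amodal3R.models.sparse_structure_flow import SparseStructureFlowModel

model = SparseStructureFlowModel(
    resolution=16,
    in_channels=8,
    model_channels=128,
    cond_channels=64,
    out_channels=8,
    num_blocks=2,
    num_head_channels=32,
)

x = torch.randn(2, 8, 16, 16, 16)   # (B, in_channels, res, res, res) noisy structure latent
t = torch.rand(2)                   # one flow timestep per batch element
cond = torch.randn(2, 77, 64)       # (B, num_tokens, cond_channels) conditioning tokens
v = model(x, t, cond)               # predicted velocity, same shape as x
assert v.shape == x.shape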
Amodal3R/models/sparse_structure_flow_doubleattn_weighted.py CHANGED
@@ -1,3 +1,207 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:b50b5a0e1bf2fb4e26dfd3a6beb8c5f0ef4687970e315201096e26ba7e8b29c6
- size 7851
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ import numpy as np
6
+ from ..modules.utils import convert_module_to_f16, convert_module_to_f32
7
+ from ..modules.transformer import AbsolutePositionEmbedder, ModulatedTransformerCrossBlock, ModulatedTransformerCrossBlockMaskAsCondWeighted
8
+ from ..modules.spatial import patchify, unpatchify
9
+
10
+
11
+ class TimestepEmbedder(nn.Module):
12
+ """
13
+ Embeds scalar timesteps into vector representations.
14
+ """
15
+ def __init__(self, hidden_size, frequency_embedding_size=256):
16
+ super().__init__()
17
+ self.mlp = nn.Sequential(
18
+ nn.Linear(frequency_embedding_size, hidden_size, bias=True),
19
+ nn.SiLU(),
20
+ nn.Linear(hidden_size, hidden_size, bias=True),
21
+ )
22
+ self.frequency_embedding_size = frequency_embedding_size
23
+
24
+ @staticmethod
25
+ def timestep_embedding(t, dim, max_period=10000):
26
+ """
27
+ Create sinusoidal timestep embeddings.
28
+
29
+ Args:
30
+ t: a 1-D Tensor of N indices, one per batch element.
31
+ These may be fractional.
32
+ dim: the dimension of the output.
33
+ max_period: controls the minimum frequency of the embeddings.
34
+
35
+ Returns:
36
+ an (N, D) Tensor of positional embeddings.
37
+ """
38
+ # https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py
39
+ half = dim // 2
40
+ freqs = torch.exp(
41
+ -np.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
42
+ ).to(device=t.device)
43
+ args = t[:, None].float() * freqs[None]
44
+ embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
45
+ if dim % 2:
46
+ embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
47
+ return embedding
48
+
49
+ def forward(self, t):
50
+ t_freq = self.timestep_embedding(t, self.frequency_embedding_size)
51
+ t_emb = self.mlp(t_freq)
52
+ return t_emb
53
+
54
+
55
+ class SparseStructureFlowModelMaskAsCondWeighted(nn.Module):
56
+ def __init__(
57
+ self,
58
+ resolution: int,
59
+ in_channels: int,
60
+ model_channels: int,
61
+ cond_channels: int,
62
+ out_channels: int,
63
+ num_blocks: int,
64
+ num_heads: Optional[int] = None,
65
+ num_head_channels: Optional[int] = 64,
66
+ mlp_ratio: float = 4,
67
+ patch_size: int = 2,
68
+ pe_mode: Literal["ape", "rope"] = "ape",
69
+ use_fp16: bool = False,
70
+ use_checkpoint: bool = False,
71
+ share_mod: bool = False,
72
+ qk_rms_norm: bool = False,
73
+ qk_rms_norm_cross: bool = False,
74
+ mask_cond_type: Literal["mask_transformer", "mask_patcher"] = "mask_transformer",
75
+ ):
76
+ super().__init__()
77
+ self.resolution = resolution
78
+ self.in_channels = in_channels
79
+ self.model_channels = model_channels
80
+ self.cond_channels = cond_channels
81
+ self.out_channels = out_channels
82
+ self.num_blocks = num_blocks
83
+ self.num_heads = num_heads or model_channels // num_head_channels
84
+ self.mlp_ratio = mlp_ratio
85
+ self.patch_size = patch_size
86
+ self.pe_mode = pe_mode
87
+ self.use_fp16 = use_fp16
88
+ self.use_checkpoint = use_checkpoint
89
+ self.share_mod = share_mod
90
+ self.qk_rms_norm = qk_rms_norm
91
+ self.qk_rms_norm_cross = qk_rms_norm_cross
92
+ self.dtype = torch.float16 if use_fp16 else torch.float32
93
+ self.mask_cond_type = mask_cond_type
94
+ self.t_embedder = TimestepEmbedder(model_channels)
95
+ if share_mod:
96
+ self.adaLN_modulation = nn.Sequential(
97
+ nn.SiLU(),
98
+ nn.Linear(model_channels, 6 * model_channels, bias=True)
99
+ )
100
+
101
+ if pe_mode == "ape":
102
+ pos_embedder = AbsolutePositionEmbedder(model_channels, 3)
103
+ coords = torch.meshgrid(*[torch.arange(res, device=self.device) for res in [resolution // patch_size] * 3], indexing='ij')
104
+ coords = torch.stack(coords, dim=-1).reshape(-1, 3)
105
+ pos_emb = pos_embedder(coords)
106
+ self.register_buffer("pos_emb", pos_emb)
107
+
108
+ self.input_layer = nn.Linear(in_channels * patch_size**3, model_channels)
109
+
110
+ self.blocks = nn.ModuleList([
111
+ ModulatedTransformerCrossBlockMaskAsCondWeighted(
112
+ model_channels,
113
+ cond_channels,
114
+ num_heads=self.num_heads,
115
+ mlp_ratio=self.mlp_ratio,
116
+ attn_mode='full',
117
+ use_checkpoint=self.use_checkpoint,
118
+ use_rope=(pe_mode == "rope"),
119
+ share_mod=share_mod,
120
+ qk_rms_norm=self.qk_rms_norm,
121
+ qk_rms_norm_cross=self.qk_rms_norm_cross,
122
+ )
123
+ for _ in range(num_blocks)
124
+ ])
125
+
126
+ self.out_layer = nn.Linear(model_channels, out_channels * patch_size**3)
127
+
128
+ self.initialize_weights()
129
+ if use_fp16:
130
+ self.convert_to_fp16()
131
+
132
+ @property
133
+ def device(self) -> torch.device:
134
+ """
135
+ Return the device of the model.
136
+ """
137
+ return next(self.parameters()).device
138
+
139
+ def convert_to_fp16(self) -> None:
140
+ """
141
+ Convert the torso of the model to float16.
142
+ """
143
+ self.blocks.apply(convert_module_to_f16)
144
+
145
+ def convert_to_fp32(self) -> None:
146
+ """
147
+ Convert the torso of the model to float32.
148
+ """
149
+ self.blocks.apply(convert_module_to_f32)
150
+
151
+ def initialize_weights(self) -> None:
152
+ # Initialize transformer layers:
153
+ def _basic_init(module):
154
+ if isinstance(module, nn.Linear):
155
+ torch.nn.init.xavier_uniform_(module.weight)
156
+ if module.bias is not None:
157
+ nn.init.constant_(module.bias, 0)
158
+ self.apply(_basic_init)
159
+
160
+ # Initialize timestep embedding MLP:
161
+ nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
162
+ nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)
163
+
164
+ # Zero-out adaLN modulation layers in DiT blocks:
165
+ if self.share_mod:
166
+ nn.init.constant_(self.adaLN_modulation[-1].weight, 0)
167
+ nn.init.constant_(self.adaLN_modulation[-1].bias, 0)
168
+ else:
169
+ for block in self.blocks:
170
+ nn.init.constant_(block.adaLN_modulation[-1].weight, 0)
171
+ nn.init.constant_(block.adaLN_modulation[-1].bias, 0)
172
+
173
+ # Zero-out output layers:
174
+ nn.init.constant_(self.out_layer.weight, 0)
175
+ nn.init.constant_(self.out_layer.bias, 0)
176
+
177
+ def forward(self, x: torch.Tensor, t: torch.Tensor, cond: torch.Tensor) -> torch.Tensor:
178
+ assert [*x.shape] == [x.shape[0], self.in_channels, *[self.resolution] * 3], \
179
+ f"Input shape mismatch, got {x.shape}, expected {[x.shape[0], self.in_channels, *[self.resolution] * 3]}"
180
+
181
+ h = patchify(x, self.patch_size)
182
+ h = h.view(*h.shape[:2], -1).permute(0, 2, 1).contiguous()
183
+
184
+ h = self.input_layer(h)
185
+ h = h + self.pos_emb[None]
186
+ t_emb = self.t_embedder(t)
187
+ if self.share_mod:
188
+ t_emb = self.adaLN_modulation(t_emb)
189
+ t_emb = t_emb.type(self.dtype)
190
+ h = h.type(self.dtype)
191
+ cond_split = {}
192
+
193
+ assert cond.shape[1] == 1374+37*37+37*37, "cond shape mismatch"
194
+ cond_split['cond'] = cond[:,:1374,:].type(self.dtype)
195
+ cond_split['mask'] = cond[:,1374:1374+37*37,0:1].type(self.dtype)
196
+ cond_split['mask_occ'] = cond[:,1374+37*37:,:].type(self.dtype)
197
+
198
+ for block in self.blocks:
199
+ h = block(h, t_emb, cond_split)
200
+ h = h.type(x.dtype)
201
+ h = F.layer_norm(h, h.shape[-1:])
202
+ h = self.out_layer(h)
203
+
204
+ h = h.permute(0, 2, 1).view(h.shape[0], h.shape[2], *[self.resolution // self.patch_size] * 3)
205
+ h = unpatchify(h, self.patch_size).contiguous()
206
+
207
+ return h
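
The MaskAsCondWeighted variant above takes no separate mask arguments; its forward() expects one concatenated conditioning tensor of length 1374 + 37*37 + 37*37 along the token axis and slices it into image tokens, a flattened 37x37 visibility mask (channel 0 only), and a flattened 37x37 occlusion block. A packing sketch consistent with those slices; the channel count and tensors are illustrative:

import torch

B, C = 2, 1024                            # C must equal the model's cond_channels; 1024 is illustrative
img_tokens = torch.randn(B, 1374, C)      # image-conditioning tokens
vis_mask = torch.rand(B, 37 * 37, 1)      # flattened 37x37 visibility mask
occ_mask = torch.rand(B, 37 * 37, 1)      # flattened 37x37 occlusion block

# forward() reads channel 0 of the visibility slice and all channels of the occlusion slice,
# so broadcasting both masks to C channels keeps the layout consistent with that slicing.
cond = torch.cat([img_tokens, vis_mask.expand(-1, -1, C), occ_mask.expand(-1, -1, C)], dim=1)
assert cond.shape[1] == 1374 + 37 * 37 + 37 * 37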
Amodal3R/models/sparse_structure_vae.py CHANGED
@@ -1,3 +1,306 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:d32e4490b2f5356b72229ee7c8fb25702238b4f0877c49f1cae15ddc9ca615bc
- size 9886
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ from ..modules.norm import GroupNorm32, ChannelLayerNorm32
6
+ from ..modules.spatial import pixel_shuffle_3d
7
+ from ..modules.utils import zero_module, convert_module_to_f16, convert_module_to_f32
8
+
9
+
10
+ def norm_layer(norm_type: str, *args, **kwargs) -> nn.Module:
11
+ """
12
+ Return a normalization layer.
13
+ """
14
+ if norm_type == "group":
15
+ return GroupNorm32(32, *args, **kwargs)
16
+ elif norm_type == "layer":
17
+ return ChannelLayerNorm32(*args, **kwargs)
18
+ else:
19
+ raise ValueError(f"Invalid norm type {norm_type}")
20
+
21
+
22
+ class ResBlock3d(nn.Module):
23
+ def __init__(
24
+ self,
25
+ channels: int,
26
+ out_channels: Optional[int] = None,
27
+ norm_type: Literal["group", "layer"] = "layer",
28
+ ):
29
+ super().__init__()
30
+ self.channels = channels
31
+ self.out_channels = out_channels or channels
32
+
33
+ self.norm1 = norm_layer(norm_type, channels)
34
+ self.norm2 = norm_layer(norm_type, self.out_channels)
35
+ self.conv1 = nn.Conv3d(channels, self.out_channels, 3, padding=1)
36
+ self.conv2 = zero_module(nn.Conv3d(self.out_channels, self.out_channels, 3, padding=1))
37
+ self.skip_connection = nn.Conv3d(channels, self.out_channels, 1) if channels != self.out_channels else nn.Identity()
38
+
39
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
40
+ h = self.norm1(x)
41
+ h = F.silu(h)
42
+ h = self.conv1(h)
43
+ h = self.norm2(h)
44
+ h = F.silu(h)
45
+ h = self.conv2(h)
46
+ h = h + self.skip_connection(x)
47
+ return h
48
+
49
+
50
+ class DownsampleBlock3d(nn.Module):
51
+ def __init__(
52
+ self,
53
+ in_channels: int,
54
+ out_channels: int,
55
+ mode: Literal["conv", "avgpool"] = "conv",
56
+ ):
57
+ assert mode in ["conv", "avgpool"], f"Invalid mode {mode}"
58
+
59
+ super().__init__()
60
+ self.in_channels = in_channels
61
+ self.out_channels = out_channels
62
+
63
+ if mode == "conv":
64
+ self.conv = nn.Conv3d(in_channels, out_channels, 2, stride=2)
65
+ elif mode == "avgpool":
66
+ assert in_channels == out_channels, "Pooling mode requires in_channels to be equal to out_channels"
67
+
68
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
69
+ if hasattr(self, "conv"):
70
+ return self.conv(x)
71
+ else:
72
+ return F.avg_pool3d(x, 2)
73
+
74
+
75
+ class UpsampleBlock3d(nn.Module):
76
+ def __init__(
77
+ self,
78
+ in_channels: int,
79
+ out_channels: int,
80
+ mode: Literal["conv", "nearest"] = "conv",
81
+ ):
82
+ assert mode in ["conv", "nearest"], f"Invalid mode {mode}"
83
+
84
+ super().__init__()
85
+ self.in_channels = in_channels
86
+ self.out_channels = out_channels
87
+
88
+ if mode == "conv":
89
+ self.conv = nn.Conv3d(in_channels, out_channels*8, 3, padding=1)
90
+ elif mode == "nearest":
91
+ assert in_channels == out_channels, "Nearest mode requires in_channels to be equal to out_channels"
92
+
93
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
94
+ if hasattr(self, "conv"):
95
+ x = self.conv(x)
96
+ return pixel_shuffle_3d(x, 2)
97
+ else:
98
+ return F.interpolate(x, scale_factor=2, mode="nearest")
99
+
100
+
101
+ class SparseStructureEncoder(nn.Module):
102
+ """
103
+ Encoder for Sparse Structure (\mathcal{E}_S in the paper Sec. 3.3).
104
+
105
+ Args:
106
+ in_channels (int): Channels of the input.
107
+ latent_channels (int): Channels of the latent representation.
108
+ num_res_blocks (int): Number of residual blocks at each resolution.
109
+ channels (List[int]): Channels of the encoder blocks.
110
+ num_res_blocks_middle (int): Number of residual blocks in the middle.
111
+ norm_type (Literal["group", "layer"]): Type of normalization layer.
112
+ use_fp16 (bool): Whether to use FP16.
113
+ """
114
+ def __init__(
115
+ self,
116
+ in_channels: int,
117
+ latent_channels: int,
118
+ num_res_blocks: int,
119
+ channels: List[int],
120
+ num_res_blocks_middle: int = 2,
121
+ norm_type: Literal["group", "layer"] = "layer",
122
+ use_fp16: bool = False,
123
+ ):
124
+ super().__init__()
125
+ self.in_channels = in_channels
126
+ self.latent_channels = latent_channels
127
+ self.num_res_blocks = num_res_blocks
128
+ self.channels = channels
129
+ self.num_res_blocks_middle = num_res_blocks_middle
130
+ self.norm_type = norm_type
131
+ self.use_fp16 = use_fp16
132
+ self.dtype = torch.float16 if use_fp16 else torch.float32
133
+
134
+ self.input_layer = nn.Conv3d(in_channels, channels[0], 3, padding=1)
135
+
136
+ self.blocks = nn.ModuleList([])
137
+ for i, ch in enumerate(channels):
138
+ self.blocks.extend([
139
+ ResBlock3d(ch, ch)
140
+ for _ in range(num_res_blocks)
141
+ ])
142
+ if i < len(channels) - 1:
143
+ self.blocks.append(
144
+ DownsampleBlock3d(ch, channels[i+1])
145
+ )
146
+
147
+ self.middle_block = nn.Sequential(*[
148
+ ResBlock3d(channels[-1], channels[-1])
149
+ for _ in range(num_res_blocks_middle)
150
+ ])
151
+
152
+ self.out_layer = nn.Sequential(
153
+ norm_layer(norm_type, channels[-1]),
154
+ nn.SiLU(),
155
+ nn.Conv3d(channels[-1], latent_channels*2, 3, padding=1)
156
+ )
157
+
158
+ if use_fp16:
159
+ self.convert_to_fp16()
160
+
161
+ @property
162
+ def device(self) -> torch.device:
163
+ """
164
+ Return the device of the model.
165
+ """
166
+ return next(self.parameters()).device
167
+
168
+ def convert_to_fp16(self) -> None:
169
+ """
170
+ Convert the torso of the model to float16.
171
+ """
172
+ self.use_fp16 = True
173
+ self.dtype = torch.float16
174
+ self.blocks.apply(convert_module_to_f16)
175
+ self.middle_block.apply(convert_module_to_f16)
176
+
177
+ def convert_to_fp32(self) -> None:
178
+ """
179
+ Convert the torso of the model to float32.
180
+ """
181
+ self.use_fp16 = False
182
+ self.dtype = torch.float32
183
+ self.blocks.apply(convert_module_to_f32)
184
+ self.middle_block.apply(convert_module_to_f32)
185
+
186
+ def forward(self, x: torch.Tensor, sample_posterior: bool = False, return_raw: bool = False) -> torch.Tensor:
187
+ h = self.input_layer(x)
188
+ h = h.type(self.dtype)
189
+
190
+ for block in self.blocks:
191
+ h = block(h)
192
+ h = self.middle_block(h)
193
+
194
+ h = h.type(x.dtype)
195
+ h = self.out_layer(h)
196
+
197
+ mean, logvar = h.chunk(2, dim=1)
198
+
199
+ if sample_posterior:
200
+ std = torch.exp(0.5 * logvar)
201
+ z = mean + std * torch.randn_like(std)
202
+ else:
203
+ z = mean
204
+
205
+ if return_raw:
206
+ return z, mean, logvar
207
+ return z
208
+
209
+
210
+ class SparseStructureDecoder(nn.Module):
211
+ """
212
+ Decoder for Sparse Structure (\mathcal{D}_S in the paper Sec. 3.3).
213
+
214
+ Args:
215
+ out_channels (int): Channels of the output.
216
+ latent_channels (int): Channels of the latent representation.
217
+ num_res_blocks (int): Number of residual blocks at each resolution.
218
+ channels (List[int]): Channels of the decoder blocks.
219
+ num_res_blocks_middle (int): Number of residual blocks in the middle.
220
+ norm_type (Literal["group", "layer"]): Type of normalization layer.
221
+ use_fp16 (bool): Whether to use FP16.
222
+ """
223
+ def __init__(
224
+ self,
225
+ out_channels: int,
226
+ latent_channels: int,
227
+ num_res_blocks: int,
228
+ channels: List[int],
229
+ num_res_blocks_middle: int = 2,
230
+ norm_type: Literal["group", "layer"] = "layer",
231
+ use_fp16: bool = False,
232
+ ):
233
+ super().__init__()
234
+ self.out_channels = out_channels
235
+ self.latent_channels = latent_channels
236
+ self.num_res_blocks = num_res_blocks
237
+ self.channels = channels
238
+ self.num_res_blocks_middle = num_res_blocks_middle
239
+ self.norm_type = norm_type
240
+ self.use_fp16 = use_fp16
241
+ self.dtype = torch.float16 if use_fp16 else torch.float32
242
+
243
+ self.input_layer = nn.Conv3d(latent_channels, channels[0], 3, padding=1)
244
+
245
+ self.middle_block = nn.Sequential(*[
246
+ ResBlock3d(channels[0], channels[0])
247
+ for _ in range(num_res_blocks_middle)
248
+ ])
249
+
250
+ self.blocks = nn.ModuleList([])
251
+ for i, ch in enumerate(channels):
252
+ self.blocks.extend([
253
+ ResBlock3d(ch, ch)
254
+ for _ in range(num_res_blocks)
255
+ ])
256
+ if i < len(channels) - 1:
257
+ self.blocks.append(
258
+ UpsampleBlock3d(ch, channels[i+1])
259
+ )
260
+
261
+ self.out_layer = nn.Sequential(
262
+ norm_layer(norm_type, channels[-1]),
263
+ nn.SiLU(),
264
+ nn.Conv3d(channels[-1], out_channels, 3, padding=1)
265
+ )
266
+
267
+ if use_fp16:
268
+ self.convert_to_fp16()
269
+
270
+ @property
271
+ def device(self) -> torch.device:
272
+ """
273
+ Return the device of the model.
274
+ """
275
+ return next(self.parameters()).device
276
+
277
+ def convert_to_fp16(self) -> None:
278
+ """
279
+ Convert the torso of the model to float16.
280
+ """
281
+ self.use_fp16 = True
282
+ self.dtype = torch.float16
283
+ self.blocks.apply(convert_module_to_f16)
284
+ self.middle_block.apply(convert_module_to_f16)
285
+
286
+ def convert_to_fp32(self) -> None:
287
+ """
288
+ Convert the torso of the model to float32.
289
+ """
290
+ self.use_fp16 = False
291
+ self.dtype = torch.float32
292
+ self.blocks.apply(convert_module_to_f32)
293
+ self.middle_block.apply(convert_module_to_f32)
294
+
295
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
296
+ h = self.input_layer(x)
297
+
298
+ h = h.type(self.dtype)
299
+
300
+ h = self.middle_block(h)
301
+ for block in self.blocks:
302
+ h = block(h)
303
+
304
+ h = h.type(x.dtype)
305
+ h = self.out_layer(h)
306
+ return h
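
The VAE above samples from the posterior only when sample_posterior=True (otherwise it returns the mean), and each DownsampleBlock3d/UpsampleBlock3d changes the spatial resolution by a factor of 2. A toy round-trip sketch; channel sizes are illustrative, not the released configuration:

import torch
from Amodal3R.models.sparse_structure_vae import SparseStructureEncoder, SparseStructureDecoder

enc = SparseStructureEncoder(in_channels=1, latent_channels=8, num_res_blocks=1, channels=[16, 32])
dec = SparseStructureDecoder(out_channels=1, latent_channels=8, num_res_blocks=1, channels=[32, 16])

vox = torch.rand(1, 1, 32, 32, 32)                    # dense occupancy grid
z, mean, logvar = enc(vox, sample_posterior=True, return_raw=True)
assert z.shape == (1, 8, 16, 16, 16)                  # one downsampling stage: 32 -> 16
recon = dec(z)                                        # one upsampling stage: 16 -> 32
assert recon.shape == vox.shape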
Amodal3R/models/structured_latent_flow.py CHANGED
@@ -1,3 +1,265 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:e5f35fdb4cc9b6f7492e76903f182be06095ee6ddc39f8d55e32e9cba45cb246
- size 9832
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ import numpy as np
6
+ from ..modules.utils import zero_module, convert_module_to_f16, convert_module_to_f32
7
+ from ..modules.transformer import AbsolutePositionEmbedder
8
+ from ..modules.norm import LayerNorm32
9
+ from ..modules import sparse as sp
10
+ from ..modules.sparse.transformer import ModulatedSparseTransformerCrossBlock
11
+ from .sparse_structure_flow import TimestepEmbedder
12
+
13
+
14
+ class SparseResBlock3d(nn.Module):
15
+ def __init__(
16
+ self,
17
+ channels: int,
18
+ emb_channels: int,
19
+ out_channels: Optional[int] = None,
20
+ downsample: bool = False,
21
+ upsample: bool = False,
22
+ ):
23
+ super().__init__()
24
+ self.channels = channels
25
+ self.emb_channels = emb_channels
26
+ self.out_channels = out_channels or channels
27
+ self.downsample = downsample
28
+ self.upsample = upsample
29
+
30
+ assert not (downsample and upsample), "Cannot downsample and upsample at the same time"
31
+
32
+ self.norm1 = LayerNorm32(channels, elementwise_affine=True, eps=1e-6)
33
+ self.norm2 = LayerNorm32(self.out_channels, elementwise_affine=False, eps=1e-6)
34
+ self.conv1 = sp.SparseConv3d(channels, self.out_channels, 3)
35
+ self.conv2 = zero_module(sp.SparseConv3d(self.out_channels, self.out_channels, 3))
36
+ self.emb_layers = nn.Sequential(
37
+ nn.SiLU(),
38
+ nn.Linear(emb_channels, 2 * self.out_channels, bias=True),
39
+ )
40
+ self.skip_connection = sp.SparseLinear(channels, self.out_channels) if channels != self.out_channels else nn.Identity()
41
+ self.updown = None
42
+ if self.downsample:
43
+ self.updown = sp.SparseDownsample(2)
44
+ elif self.upsample:
45
+ self.updown = sp.SparseUpsample(2)
46
+
47
+ def _updown(self, x: sp.SparseTensor) -> sp.SparseTensor:
48
+ if self.updown is not None:
49
+ x = self.updown(x)
50
+ return x
51
+
52
+ def forward(self, x: sp.SparseTensor, emb: torch.Tensor) -> sp.SparseTensor:
53
+ emb_out = self.emb_layers(emb).type(x.dtype)
54
+ scale, shift = torch.chunk(emb_out, 2, dim=1)
55
+
56
+ x = self._updown(x)
57
+ h = x.replace(self.norm1(x.feats))
58
+ h = h.replace(F.silu(h.feats))
59
+ # print(h.feats.dtype)
60
+ # print(self.conv1.conv.weight.dtype)
61
+ # exit(0)
62
+ h = self.conv1(h)
63
+ h = h.replace(self.norm2(h.feats)) * (1 + scale) + shift
64
+ h = h.replace(F.silu(h.feats))
65
+ h = self.conv2(h)
66
+ h = h + self.skip_connection(x)
67
+
68
+ return h
69
+
70
+
71
+ class SLatFlowModel(nn.Module):
72
+ def __init__(
73
+ self,
74
+ resolution: int,
75
+ in_channels: int,
76
+ model_channels: int,
77
+ cond_channels: int,
78
+ out_channels: int,
79
+ num_blocks: int,
80
+ num_heads: Optional[int] = None,
81
+ num_head_channels: Optional[int] = 64,
82
+ mlp_ratio: float = 4,
83
+ patch_size: int = 2,
84
+ num_io_res_blocks: int = 2,
85
+ io_block_channels: List[int] = None,
86
+ pe_mode: Literal["ape", "rope"] = "ape",
87
+ use_fp16: bool = False,
88
+ use_checkpoint: bool = False,
89
+ use_skip_connection: bool = True,
90
+ share_mod: bool = False,
91
+ qk_rms_norm: bool = False,
92
+ qk_rms_norm_cross: bool = False,
93
+ ):
94
+ super().__init__()
95
+ self.resolution = resolution
96
+ self.in_channels = in_channels
97
+ self.model_channels = model_channels
98
+ self.cond_channels = cond_channels
99
+ self.out_channels = out_channels
100
+ self.num_blocks = num_blocks
101
+ self.num_heads = num_heads or model_channels // num_head_channels
102
+ self.mlp_ratio = mlp_ratio
103
+ self.patch_size = patch_size
104
+ self.num_io_res_blocks = num_io_res_blocks
105
+ self.io_block_channels = io_block_channels
106
+ self.pe_mode = pe_mode
107
+ self.use_fp16 = use_fp16
108
+ self.use_checkpoint = use_checkpoint
109
+ self.use_skip_connection = use_skip_connection
110
+ self.share_mod = share_mod
111
+ self.qk_rms_norm = qk_rms_norm
112
+ self.qk_rms_norm_cross = qk_rms_norm_cross
113
+ self.dtype = torch.float16 if use_fp16 else torch.float32
114
+
115
+ assert int(np.log2(patch_size)) == np.log2(patch_size), "Patch size must be a power of 2"
116
+ assert np.log2(patch_size) == len(io_block_channels), "Number of IO ResBlocks must match the number of stages"
117
+
118
+ self.t_embedder = TimestepEmbedder(model_channels)
119
+ if share_mod:
120
+ self.adaLN_modulation = nn.Sequential(
121
+ nn.SiLU(),
122
+ nn.Linear(model_channels, 6 * model_channels, bias=True)
123
+ )
124
+
125
+ if pe_mode == "ape":
126
+ self.pos_embedder = AbsolutePositionEmbedder(model_channels)
127
+
128
+ self.input_layer = sp.SparseLinear(in_channels, io_block_channels[0])
129
+ self.input_blocks = nn.ModuleList([])
130
+ for chs, next_chs in zip(io_block_channels, io_block_channels[1:] + [model_channels]):
131
+ self.input_blocks.extend([
132
+ SparseResBlock3d(
133
+ chs,
134
+ model_channels,
135
+ out_channels=chs,
136
+ )
137
+ for _ in range(num_io_res_blocks-1)
138
+ ])
139
+ self.input_blocks.append(
140
+ SparseResBlock3d(
141
+ chs,
142
+ model_channels,
143
+ out_channels=next_chs,
144
+ downsample=True,
145
+ )
146
+ )
147
+
148
+ self.blocks = nn.ModuleList([
149
+ ModulatedSparseTransformerCrossBlock(
150
+ model_channels,
151
+ cond_channels,
152
+ num_heads=self.num_heads,
153
+ mlp_ratio=self.mlp_ratio,
154
+ attn_mode='full',
155
+ use_checkpoint=self.use_checkpoint,
156
+ use_rope=(pe_mode == "rope"),
157
+ share_mod=self.share_mod,
158
+ qk_rms_norm=self.qk_rms_norm,
159
+ qk_rms_norm_cross=self.qk_rms_norm_cross,
160
+ )
161
+ for _ in range(num_blocks)
162
+ ])
163
+
164
+ self.out_blocks = nn.ModuleList([])
165
+ for chs, prev_chs in zip(reversed(io_block_channels), [model_channels] + list(reversed(io_block_channels[1:]))):
166
+ self.out_blocks.append(
167
+ SparseResBlock3d(
168
+ prev_chs * 2 if self.use_skip_connection else prev_chs,
169
+ model_channels,
170
+ out_channels=chs,
171
+ upsample=True,
172
+ )
173
+ )
174
+ self.out_blocks.extend([
175
+ SparseResBlock3d(
176
+ chs * 2 if self.use_skip_connection else chs,
177
+ model_channels,
178
+ out_channels=chs,
179
+ )
180
+ for _ in range(num_io_res_blocks-1)
181
+ ])
182
+ self.out_layer = sp.SparseLinear(io_block_channels[0], out_channels)
183
+
184
+ self.initialize_weights()
185
+ if use_fp16:
186
+ self.convert_to_fp16()
187
+
188
+ @property
189
+ def device(self) -> torch.device:
190
+ """
191
+ Return the device of the model.
192
+ """
193
+ return next(self.parameters()).device
194
+
195
+ def convert_to_fp16(self) -> None:
196
+ """
197
+ Convert the torso of the model to float16.
198
+ """
199
+ self.input_blocks.apply(convert_module_to_f16)
200
+ self.blocks.apply(convert_module_to_f16)
201
+ self.out_blocks.apply(convert_module_to_f16)
202
+
203
+ def convert_to_fp32(self) -> None:
204
+ """
205
+ Convert the torso of the model to float32.
206
+ """
207
+ self.input_blocks.apply(convert_module_to_f32)
208
+ self.blocks.apply(convert_module_to_f32)
209
+ self.out_blocks.apply(convert_module_to_f32)
210
+
211
+ def initialize_weights(self) -> None:
212
+ # Initialize transformer layers:
213
+ def _basic_init(module):
214
+ if isinstance(module, nn.Linear):
215
+ torch.nn.init.xavier_uniform_(module.weight)
216
+ if module.bias is not None:
217
+ nn.init.constant_(module.bias, 0)
218
+ self.apply(_basic_init)
219
+
220
+ # Initialize timestep embedding MLP:
221
+ nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
222
+ nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)
223
+
224
+ # Zero-out adaLN modulation layers in DiT blocks:
225
+ if self.share_mod:
226
+ nn.init.constant_(self.adaLN_modulation[-1].weight, 0)
227
+ nn.init.constant_(self.adaLN_modulation[-1].bias, 0)
228
+ else:
229
+ for block in self.blocks:
230
+ nn.init.constant_(block.adaLN_modulation[-1].weight, 0)
231
+ nn.init.constant_(block.adaLN_modulation[-1].bias, 0)
232
+
233
+ # Zero-out output layers:
234
+ nn.init.constant_(self.out_layer.weight, 0)
235
+ nn.init.constant_(self.out_layer.bias, 0)
236
+
237
+ def forward(self, x: sp.SparseTensor, t: torch.Tensor, cond: torch.Tensor) -> sp.SparseTensor:
238
+ h = self.input_layer(x).type(self.dtype)
239
+ t_emb = self.t_embedder(t)
240
+ if self.share_mod:
241
+ t_emb = self.adaLN_modulation(t_emb)
242
+ t_emb = t_emb.type(self.dtype)
243
+ cond = cond.type(self.dtype)
244
+
245
+ skips = []
246
+ # pack with input blocks
247
+ for block in self.input_blocks:
248
+ h = block(h, t_emb)
249
+ skips.append(h.feats)
250
+
251
+ if self.pe_mode == "ape":
252
+ h = h + self.pos_embedder(h.coords[:, 1:]).type(self.dtype)
253
+ for block in self.blocks:
254
+ h = block(h, t_emb, cond)
255
+
256
+ # unpack with output blocks
257
+ for block, skip in zip(self.out_blocks, reversed(skips)):
258
+ if self.use_skip_connection:
259
+ h = block(h.replace(torch.cat([h.feats, skip], dim=1)), t_emb)
260
+ else:
261
+ h = block(h, t_emb)
262
+
263
+ h = h.replace(F.layer_norm(h.feats, h.feats.shape[-1:]))
264
+ h = self.out_layer(h.type(x.dtype))
265
+ return h
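
A note on the two asserts in SLatFlowModel's constructor: patch_size must be a power of two, and io_block_channels needs exactly log2(patch_size) entries, one per sparse down/upsampling stage. A minimal configuration sketch under that constraint, with toy sizes; building the model assumes the sparse-convolution backend used by Amodal3R.modules.sparse is installed:

from Amodal3R.models.structured_latent_flow import SLatFlowModel

model = SLatFlowModel(
    resolution=64,
    in_channels=8,
    model_channels=256,
    cond_channels=1024,
    out_channels=8,
    num_blocks=4,
    patch_size=2,              # power of two, so the log2(patch_size) assert passes
    num_io_res_blocks=2,
    io_block_channels=[64],    # exactly log2(patch_size) = 1 entry
)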
Amodal3R/models/structured_latent_flow_doubleattn_weighted.py CHANGED
@@ -1,3 +1,273 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:e04c4d99d0b64c7fa135746b8b11b12fe73faecc4fe32b6d9d60a7f4e95232a1
- size 10500
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ import numpy as np
6
+ from ..modules.utils import zero_module, convert_module_to_f16, convert_module_to_f32
7
+ from ..modules.transformer import AbsolutePositionEmbedder
8
+ from ..modules.norm import LayerNorm32
9
+ from ..modules import sparse as sp
10
+ from ..modules.sparse.transformer import ModulatedSparseTransformerCrossBlock, ModulatedSparseTransformerCrossBlockMaskAsCondWeighted
11
+ from .sparse_structure_flow import TimestepEmbedder
12
+
13
+
14
+ class SparseResBlock3d(nn.Module):
15
+ def __init__(
16
+ self,
17
+ channels: int,
18
+ emb_channels: int,
19
+ out_channels: Optional[int] = None,
20
+ downsample: bool = False,
21
+ upsample: bool = False,
22
+ ):
23
+ super().__init__()
24
+ self.channels = channels
25
+ self.emb_channels = emb_channels
26
+ self.out_channels = out_channels or channels
27
+ self.downsample = downsample
28
+ self.upsample = upsample
29
+
30
+ assert not (downsample and upsample), "Cannot downsample and upsample at the same time"
31
+
32
+ self.norm1 = LayerNorm32(channels, elementwise_affine=True, eps=1e-6)
33
+ self.norm2 = LayerNorm32(self.out_channels, elementwise_affine=False, eps=1e-6)
34
+ self.conv1 = sp.SparseConv3d(channels, self.out_channels, 3)
35
+ self.conv2 = zero_module(sp.SparseConv3d(self.out_channels, self.out_channels, 3))
36
+ self.emb_layers = nn.Sequential(
37
+ nn.SiLU(),
38
+ nn.Linear(emb_channels, 2 * self.out_channels, bias=True),
39
+ )
40
+ self.skip_connection = sp.SparseLinear(channels, self.out_channels) if channels != self.out_channels else nn.Identity()
41
+ self.updown = None
42
+ if self.downsample:
43
+ self.updown = sp.SparseDownsample(2)
44
+ elif self.upsample:
45
+ self.updown = sp.SparseUpsample(2)
46
+
47
+ def _updown(self, x: sp.SparseTensor) -> sp.SparseTensor:
48
+ if self.updown is not None:
49
+ x = self.updown(x)
50
+ return x
51
+
52
+ def forward(self, x: sp.SparseTensor, emb: torch.Tensor) -> sp.SparseTensor:
53
+ emb_out = self.emb_layers(emb).type(x.dtype)
54
+ scale, shift = torch.chunk(emb_out, 2, dim=1)
55
+
56
+ x = self._updown(x)
57
+ h = x.replace(self.norm1(x.feats))
58
+ h = h.replace(F.silu(h.feats))
59
+ h = self.conv1(h)
60
+ h = h.replace(self.norm2(h.feats)) * (1 + scale) + shift
61
+ h = h.replace(F.silu(h.feats))
62
+ h = self.conv2(h)
63
+ h = h + self.skip_connection(x)
64
+
65
+ return h
66
+
67
+
68
+ class SLatFlowModelMaskAsCondWeighted(nn.Module):
69
+ def __init__(
70
+ self,
71
+ resolution: int,
72
+ in_channels: int,
73
+ model_channels: int,
74
+ cond_channels: int,
75
+ out_channels: int,
76
+ num_blocks: int,
77
+ num_heads: Optional[int] = None,
78
+ num_head_channels: Optional[int] = 64,
79
+ mlp_ratio: float = 4,
80
+ patch_size: int = 2,
81
+ num_io_res_blocks: int = 2,
82
+ io_block_channels: List[int] = None,
83
+ pe_mode: Literal["ape", "rope"] = "ape",
84
+ use_fp16: bool = False,
85
+ use_checkpoint: bool = False,
86
+ use_skip_connection: bool = True,
87
+ share_mod: bool = False,
88
+ qk_rms_norm: bool = False,
89
+ qk_rms_norm_cross: bool = False,
90
+ mask_cond_type: Literal["mask_transformer", "mask_patcher"] = "mask_transformer",
91
+ ):
92
+ super().__init__()
93
+ self.resolution = resolution
94
+ self.in_channels = in_channels
95
+ self.model_channels = model_channels
96
+ self.cond_channels = cond_channels
97
+ self.out_channels = out_channels
98
+ self.num_blocks = num_blocks
99
+ self.num_heads = num_heads or model_channels // num_head_channels
100
+ self.mlp_ratio = mlp_ratio
101
+ self.patch_size = patch_size
102
+ self.num_io_res_blocks = num_io_res_blocks
103
+ self.io_block_channels = io_block_channels
104
+ self.pe_mode = pe_mode
105
+ self.use_fp16 = use_fp16
106
+ self.use_checkpoint = use_checkpoint
107
+ self.use_skip_connection = use_skip_connection
108
+ self.share_mod = share_mod
109
+ self.qk_rms_norm = qk_rms_norm
110
+ self.qk_rms_norm_cross = qk_rms_norm_cross
111
+ self.dtype = torch.float16 if use_fp16 else torch.float32
112
+ self.mask_cond_type = mask_cond_type
113
+
114
+ assert int(np.log2(patch_size)) == np.log2(patch_size), "Patch size must be a power of 2"
115
+ assert np.log2(patch_size) == len(io_block_channels), "Number of IO ResBlocks must match the number of stages"
116
+
117
+ self.t_embedder = TimestepEmbedder(model_channels)
118
+ if share_mod:
119
+ self.adaLN_modulation = nn.Sequential(
120
+ nn.SiLU(),
121
+ nn.Linear(model_channels, 6 * model_channels, bias=True)
122
+ )
123
+
124
+ if pe_mode == "ape":
125
+ self.pos_embedder = AbsolutePositionEmbedder(model_channels)
126
+
127
+ self.input_layer = sp.SparseLinear(in_channels, io_block_channels[0])
128
+ self.input_blocks = nn.ModuleList([])
129
+ for chs, next_chs in zip(io_block_channels, io_block_channels[1:] + [model_channels]):
130
+ self.input_blocks.extend([
131
+ SparseResBlock3d(
132
+ chs,
133
+ model_channels,
134
+ out_channels=chs,
135
+ )
136
+ for _ in range(num_io_res_blocks-1)
137
+ ])
138
+ self.input_blocks.append(
139
+ SparseResBlock3d(
140
+ chs,
141
+ model_channels,
142
+ out_channels=next_chs,
143
+ downsample=True,
144
+ )
145
+ )
146
+
147
+ self.blocks = nn.ModuleList([
148
+ ModulatedSparseTransformerCrossBlockMaskAsCondWeighted(
149
+ model_channels,
150
+ cond_channels,
151
+ num_heads=self.num_heads,
152
+ mlp_ratio=self.mlp_ratio,
153
+ attn_mode='full',
154
+ use_checkpoint=self.use_checkpoint,
155
+ use_rope=(pe_mode == "rope"),
156
+ share_mod=self.share_mod,
157
+ qk_rms_norm=self.qk_rms_norm,
158
+ qk_rms_norm_cross=self.qk_rms_norm_cross,
159
+ )
160
+ for _ in range(num_blocks)
161
+ ])
162
+
163
+ self.out_blocks = nn.ModuleList([])
164
+ for chs, prev_chs in zip(reversed(io_block_channels), [model_channels] + list(reversed(io_block_channels[1:]))):
165
+ self.out_blocks.append(
166
+ SparseResBlock3d(
167
+ prev_chs * 2 if self.use_skip_connection else prev_chs,
168
+ model_channels,
169
+ out_channels=chs,
170
+ upsample=True,
171
+ )
172
+ )
173
+ self.out_blocks.extend([
174
+ SparseResBlock3d(
175
+ chs * 2 if self.use_skip_connection else chs,
176
+ model_channels,
177
+ out_channels=chs,
178
+ )
179
+ for _ in range(num_io_res_blocks-1)
180
+ ])
181
+ self.out_layer = sp.SparseLinear(io_block_channels[0], out_channels)
182
+
183
+ self.initialize_weights()
184
+ if use_fp16:
185
+ self.convert_to_fp16()
186
+
187
+ @property
188
+ def device(self) -> torch.device:
189
+ """
190
+ Return the device of the model.
191
+ """
192
+ return next(self.parameters()).device
193
+
194
+ def convert_to_fp16(self) -> None:
195
+ """
196
+ Convert the torso of the model to float16.
197
+ """
198
+ self.input_blocks.apply(convert_module_to_f16)
199
+ self.blocks.apply(convert_module_to_f16)
200
+ self.out_blocks.apply(convert_module_to_f16)
201
+
202
+ def convert_to_fp32(self) -> None:
203
+ """
204
+ Convert the torso of the model to float32.
205
+ """
206
+ self.input_blocks.apply(convert_module_to_f32)
207
+ self.blocks.apply(convert_module_to_f32)
208
+ self.out_blocks.apply(convert_module_to_f32)
209
+
210
+ def initialize_weights(self) -> None:
211
+ # Initialize transformer layers:
212
+ def _basic_init(module):
213
+ if isinstance(module, nn.Linear):
214
+ torch.nn.init.xavier_uniform_(module.weight)
215
+ if module.bias is not None:
216
+ nn.init.constant_(module.bias, 0)
217
+ self.apply(_basic_init)
218
+
219
+ # Initialize timestep embedding MLP:
220
+ nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
221
+ nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)
222
+
223
+ # Zero-out adaLN modulation layers in DiT blocks:
224
+ if self.share_mod:
225
+ nn.init.constant_(self.adaLN_modulation[-1].weight, 0)
226
+ nn.init.constant_(self.adaLN_modulation[-1].bias, 0)
227
+ else:
228
+ for block in self.blocks:
229
+ nn.init.constant_(block.adaLN_modulation[-1].weight, 0)
230
+ nn.init.constant_(block.adaLN_modulation[-1].bias, 0)
231
+
232
+ # Zero-out output layers:
233
+ nn.init.constant_(self.out_layer.weight, 0)
234
+ nn.init.constant_(self.out_layer.bias, 0)
235
+
236
+ def forward(self, x: sp.SparseTensor, t: torch.Tensor, cond: torch.Tensor) -> sp.SparseTensor:
237
+ h = self.input_layer(x).type(self.dtype)
238
+ t_emb = self.t_embedder(t)
239
+ if self.share_mod:
240
+ t_emb = self.adaLN_modulation(t_emb)
241
+ t_emb = t_emb.type(self.dtype)
242
+ cond_split = {}
243
+ if self.mask_cond_type == "mask_transformer":
244
+ cond_split['cond'] = cond[:,:1374,:].type(self.dtype)
245
+ cond_split['mask'] = cond[:,1374:,:].type(self.dtype)
246
+ elif self.mask_cond_type == "mask_patcher":
247
+ assert cond.shape[1] == 1374+37*37+37*37, "cond shape mismatch"
248
+ cond_split['cond'] = cond[:,:1374,:].type(self.dtype)
249
+ cond_split['mask'] = cond[:,1374:1374+37*37,0:1].type(self.dtype)
250
+ cond_split['mask_occ'] = cond[:,1374+37*37:,:].type(self.dtype)
251
+
252
+
253
+ skips = []
254
+ # pack with input blocks
255
+ for block in self.input_blocks:
256
+ h = block(h, t_emb)
257
+ skips.append(h.feats)
258
+
259
+ if self.pe_mode == "ape":
260
+ h = h + self.pos_embedder(h.coords[:, 1:]).type(self.dtype)
261
+ for block in self.blocks:
262
+ h = block(h, t_emb, cond_split)
263
+
264
+ # unpack with output blocks
265
+ for block, skip in zip(self.out_blocks, reversed(skips)):
266
+ if self.use_skip_connection:
267
+ h = block(h.replace(torch.cat([h.feats, skip], dim=1)), t_emb)
268
+ else:
269
+ h = block(h, t_emb)
270
+
271
+ h = h.replace(F.layer_norm(h.feats, h.feats.shape[-1:]))
272
+ h = self.out_layer(h.type(x.dtype))
273
+ return h
Amodal3R/models/structured_latent_vae/__init__.py CHANGED
@@ -1,3 +1,3 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:12bd521a23180894ad8f1eb439dad808e280a3efbaf6e8c0f0ec032f5323c4a8
- size 119
+ from .encoder import SLatEncoder
+ from .decoder_gs import SLatGaussianDecoder
+ from .decoder_mesh import SLatMeshDecoder
Amodal3R/models/structured_latent_vae/base.py CHANGED
@@ -1,3 +1,125 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:3cca8187d92667a4596e18c54184a6e1bf3bcfe049244555f9c78b258c4c8b0b
3
- size 4686
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ from ...modules.utils import convert_module_to_f16, convert_module_to_f32
5
+ from ...modules import sparse as sp
6
+ from ...modules.transformer import AbsolutePositionEmbedder
7
+ from ...modules.sparse.transformer import SparseTransformerBlock, SparseTransformerCrossBlock
8
+
9
+
10
+ def block_attn_config(self):
11
+ """
12
+ Return the attention configuration of the model.
13
+ """
14
+ for i in range(self.num_blocks):
15
+ if self.attn_mode == "shift_window":
16
+ yield "serialized", self.window_size, 0, (16 * (i % 2),) * 3, sp.SerializeMode.Z_ORDER
17
+ elif self.attn_mode == "shift_sequence":
18
+ yield "serialized", self.window_size, self.window_size // 2 * (i % 2), (0, 0, 0), sp.SerializeMode.Z_ORDER
19
+ elif self.attn_mode == "shift_order":
20
+ yield "serialized", self.window_size, 0, (0, 0, 0), sp.SerializeModes[i % 4]
21
+ elif self.attn_mode == "full":
22
+ yield "full", None, None, None, None
23
+ elif self.attn_mode == "swin":
24
+ yield "windowed", self.window_size, None, self.window_size // 2 * (i % 2), None
25
+
26
+
27
+ class SparseTransformerBase(nn.Module):
28
+ """
29
+ Sparse Transformer without output layers.
30
+ Serve as the base class for encoder and decoder.
31
+ """
32
+ def __init__(
33
+ self,
34
+ in_channels: int,
35
+ model_channels: int,
36
+ num_blocks: int,
37
+ num_heads: Optional[int] = None,
38
+ num_head_channels: Optional[int] = 64,
39
+ mlp_ratio: float = 4.0,
40
+ attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "full",
41
+ window_size: Optional[int] = None,
42
+ pe_mode: Literal["ape", "rope"] = "ape",
43
+ use_fp16: bool = False,
44
+ use_checkpoint: bool = False,
45
+ qk_rms_norm: bool = False,
46
+ ):
47
+ super().__init__()
48
+ self.in_channels = in_channels
49
+ self.model_channels = model_channels
50
+ self.num_blocks = num_blocks
51
+ self.window_size = window_size
52
+ self.num_heads = num_heads or model_channels // num_head_channels
53
+ self.mlp_ratio = mlp_ratio
54
+ self.attn_mode = attn_mode
55
+ self.pe_mode = pe_mode
56
+ self.use_fp16 = use_fp16
57
+ self.use_checkpoint = use_checkpoint
58
+ self.qk_rms_norm = qk_rms_norm
59
+ self.dtype = torch.float16 if use_fp16 else torch.float32
60
+
61
+ if pe_mode == "ape":
62
+ self.pos_embedder = AbsolutePositionEmbedder(model_channels)
63
+
64
+ self.input_layer = sp.SparseLinear(in_channels, model_channels)
65
+ self.blocks = nn.ModuleList([
66
+ SparseTransformerBlock(
67
+ model_channels,
68
+ num_heads=self.num_heads,
69
+ mlp_ratio=self.mlp_ratio,
70
+ attn_mode=attn_mode,
71
+ window_size=window_size,
72
+ shift_sequence=shift_sequence,
73
+ shift_window=shift_window,
74
+ serialize_mode=serialize_mode,
75
+ use_checkpoint=self.use_checkpoint,
76
+ use_rope=(pe_mode == "rope"),
77
+ qk_rms_norm=self.qk_rms_norm,
78
+ )
79
+ for attn_mode, window_size, shift_sequence, shift_window, serialize_mode in block_attn_config(self)
80
+ ])
81
+
82
+ @property
83
+ def device(self) -> torch.device:
84
+ """
85
+ Return the device of the model.
86
+ """
87
+ return next(self.parameters()).device
88
+
89
+ def convert_to_fp16(self) -> None:
90
+ """
91
+ Convert the torso of the model to float16.
92
+ """
93
+ self.blocks.apply(convert_module_to_f16)
94
+
95
+ def convert_to_fp32(self) -> None:
96
+ """
97
+ Convert the torso of the model to float32.
98
+ """
99
+ self.blocks.apply(convert_module_to_f32)
100
+
101
+ def initialize_weights(self) -> None:
102
+ # Initialize transformer layers:
103
+ def _basic_init(module):
104
+ if isinstance(module, nn.Linear):
105
+ torch.nn.init.xavier_uniform_(module.weight)
106
+ if module.bias is not None:
107
+ nn.init.constant_(module.bias, 0)
108
+ self.apply(_basic_init)
109
+
110
+ def forward(self, x: sp.SparseTensor, if_return_h: bool = False) -> Union[sp.SparseTensor, Tuple[sp.SparseTensor, sp.SparseTensor]]:
111
+ h = self.input_layer(x)
112
+ if self.pe_mode == "ape":
113
+ h = h + self.pos_embedder(x.coords[:, 1:])
114
+ h = h.type(self.dtype)
115
+ cnt = 0
116
+ for block in self.blocks:
117
+ h = block(h)
118
+ assert torch.isfinite(h.feats).all(), "h contains NaN or Inf!"
119
+ cnt += 1
120
+ if cnt * 2 == self.num_blocks:
121
+ mid_h = h
122
+ if if_return_h:
123
+ return h, mid_h
124
+ else:
125
+ return h
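Aside (not part of the uploaded files): a minimal standalone sketch of the per-block window shifting produced by the "swin" branch of block_attn_config above; the block count and window size are illustrative.

def swin_configs(num_blocks=4, window_size=8):
    # Every odd-indexed block shifts its attention window by half the window size,
    # mirroring the ("windowed", window_size, None, shift, None) tuples yielded above.
    for i in range(num_blocks):
        yield ("windowed", window_size, None, window_size // 2 * (i % 2), None)

print(list(swin_configs()))
# [('windowed', 8, None, 0, None), ('windowed', 8, None, 4, None),
#  ('windowed', 8, None, 0, None), ('windowed', 8, None, 4, None)]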
Amodal3R/models/structured_latent_vae/decoder_gs.py CHANGED
@@ -1,3 +1,129 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:bef321e849ee8968a0c14816264c2914770b36ca2e006f6ffc3c33d68a1b3d93
3
- size 5542
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ from ...modules import sparse as sp
6
+ from ...utils.random_utils import hammersley_sequence
7
+ from .base import SparseTransformerBase
8
+ from ...representations import Gaussian
9
+
10
+
11
+ class SLatGaussianDecoder(SparseTransformerBase):
12
+ def __init__(
13
+ self,
14
+ resolution: int,
15
+ model_channels: int,
16
+ latent_channels: int,
17
+ num_blocks: int,
18
+ num_heads: Optional[int] = None,
19
+ num_head_channels: Optional[int] = 64,
20
+ mlp_ratio: float = 4,
21
+ attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "swin",
22
+ window_size: int = 8,
23
+ pe_mode: Literal["ape", "rope"] = "ape",
24
+ use_fp16: bool = False,
25
+ use_checkpoint: bool = False,
26
+ qk_rms_norm: bool = False,
27
+ representation_config: dict = None,
28
+ ):
29
+ super().__init__(
30
+ in_channels=latent_channels,
31
+ model_channels=model_channels,
32
+ num_blocks=num_blocks,
33
+ num_heads=num_heads,
34
+ num_head_channels=num_head_channels,
35
+ mlp_ratio=mlp_ratio,
36
+ attn_mode=attn_mode,
37
+ window_size=window_size,
38
+ pe_mode=pe_mode,
39
+ use_fp16=use_fp16,
40
+ use_checkpoint=use_checkpoint,
41
+ qk_rms_norm=qk_rms_norm,
42
+ )
43
+ self.resolution = resolution
44
+ self.rep_config = representation_config
45
+ self._calc_layout()
46
+ self.out_layer = sp.SparseLinear(model_channels, self.out_channels)
47
+ self._build_perturbation()
48
+
49
+ self.initialize_weights()
50
+ if use_fp16:
51
+ self.convert_to_fp16()
52
+
53
+ def initialize_weights(self) -> None:
54
+ super().initialize_weights()
55
+ # Zero-out output layers:
56
+ nn.init.constant_(self.out_layer.weight, 0)
57
+ nn.init.constant_(self.out_layer.bias, 0)
58
+
59
+ def _build_perturbation(self) -> None:
60
+ perturbation = [hammersley_sequence(3, i, self.rep_config['num_gaussians']) for i in range(self.rep_config['num_gaussians'])]
61
+ perturbation = torch.tensor(perturbation).float() * 2 - 1
62
+ perturbation = perturbation / self.rep_config['voxel_size']
63
+ perturbation = torch.atanh(perturbation).to(self.device)
64
+ self.register_buffer('offset_perturbation', perturbation)
65
+
66
+ def _calc_layout(self) -> None:
67
+ self.layout = {
68
+ '_xyz' : {'shape': (self.rep_config['num_gaussians'], 3), 'size': self.rep_config['num_gaussians'] * 3},
69
+ '_features_dc' : {'shape': (self.rep_config['num_gaussians'], 1, 3), 'size': self.rep_config['num_gaussians'] * 3},
70
+ '_scaling' : {'shape': (self.rep_config['num_gaussians'], 3), 'size': self.rep_config['num_gaussians'] * 3},
71
+ '_rotation' : {'shape': (self.rep_config['num_gaussians'], 4), 'size': self.rep_config['num_gaussians'] * 4},
72
+ '_opacity' : {'shape': (self.rep_config['num_gaussians'], 1), 'size': self.rep_config['num_gaussians']},
73
+ }
74
+ start = 0
75
+ for k, v in self.layout.items():
76
+ v['range'] = (start, start + v['size'])
77
+ start += v['size']
78
+ self.out_channels = start
79
+
80
+ def to_representation(self, x: sp.SparseTensor) -> List[Gaussian]:
81
+ """
82
+ Convert a batch of network outputs to 3D representations.
83
+
84
+ Args:
85
+ x: The [N x * x C] sparse tensor output by the network.
86
+
87
+ Returns:
88
+ list of representations
89
+ """
90
+ ret = []
91
+ for i in range(x.shape[0]):
92
+ representation = Gaussian(
93
+ sh_degree=0,
94
+ aabb=[-0.5, -0.5, -0.5, 1.0, 1.0, 1.0],
95
+ mininum_kernel_size = self.rep_config['3d_filter_kernel_size'],
96
+ scaling_bias = self.rep_config['scaling_bias'],
97
+ opacity_bias = self.rep_config['opacity_bias'],
98
+ scaling_activation = self.rep_config['scaling_activation']
99
+ )
100
+ xyz = (x.coords[x.layout[i]][:, 1:].float() + 0.5) / self.resolution
101
+ for k, v in self.layout.items():
102
+ if k == '_xyz':
103
+ offset = x.feats[x.layout[i]][:, v['range'][0]:v['range'][1]].reshape(-1, *v['shape'])
104
+ offset = offset * self.rep_config['lr'][k]
105
+ if self.rep_config['perturb_offset']:
106
+ offset = offset + self.offset_perturbation
107
+ offset = torch.tanh(offset) / self.resolution * 0.5 * self.rep_config['voxel_size']
108
+ _xyz = xyz.unsqueeze(1) + offset
109
+ setattr(representation, k, _xyz.flatten(0, 1))
110
+ else:
111
+ feats = x.feats[x.layout[i]][:, v['range'][0]:v['range'][1]].reshape(-1, *v['shape']).flatten(0, 1)
112
+ feats = feats * self.rep_config['lr'][k]
113
+ setattr(representation, k, feats)
114
+ ret.append(representation)
115
+ return ret
116
+
117
+ def forward(self, x: sp.SparseTensor, if_return_h: bool = False) -> Union[List[Gaussian], Tuple[List[Gaussian], sp.SparseTensor]]:
118
+ output = super().forward(x, if_return_h)
119
+ if if_return_h:
120
+ h, mid_h = output
121
+ else:
122
+ h = output
123
+ h = h.type(x.dtype)
124
+ h = h.replace(F.layer_norm(h.feats, h.feats.shape[-1:]))
125
+ h = self.out_layer(h)
126
+ if if_return_h:
127
+ return self.to_representation(h), mid_h
128
+ else:
129
+ return self.to_representation(h)
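Aside (not part of the uploaded files): a small sketch of the sub-voxel offset mapping used in to_representation above, with hypothetical resolution and voxel_size values; it only shows that the tanh keeps every Gaussian within half a (scaled) voxel of its cell center.

import torch

resolution, voxel_size = 64, 1.5          # illustrative values, not from the config
raw_offset = torch.randn(8, 3)            # unbounded per-Gaussian network output
offset = torch.tanh(raw_offset) / resolution * 0.5 * voxel_size
assert offset.abs().max() <= 0.5 * voxel_size / resolution   # bounded displacement
cell_center = (torch.tensor([10.0, 20.0, 30.0]) + 0.5) / resolution
xyz = cell_center + offset                # final Gaussian centers around the voxel center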
Amodal3R/models/structured_latent_vae/decoder_mesh.py CHANGED
@@ -1,3 +1,167 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:07f5dcf213d8aded7ba1ad089ce70c45b0929fd1c9aef4e1cb4917b97834cc3d
3
- size 5665
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ import numpy as np
6
+ from ...modules.utils import zero_module, convert_module_to_f16, convert_module_to_f32
7
+ from ...modules import sparse as sp
8
+ from .base import SparseTransformerBase
9
+ from ...representations import MeshExtractResult
10
+ from ...representations.mesh import SparseFeatures2Mesh
11
+
12
+
13
+ class SparseSubdivideBlock3d(nn.Module):
14
+ """
15
+ A 3D subdivide block that can subdivide the sparse tensor.
16
+
17
+ Args:
18
+ channels: channels in the inputs and outputs.
19
+ out_channels: if specified, the number of output channels.
20
+ num_groups: the number of groups for the group norm.
21
+ """
22
+ def __init__(
23
+ self,
24
+ channels: int,
25
+ resolution: int,
26
+ out_channels: Optional[int] = None,
27
+ num_groups: int = 32
28
+ ):
29
+ super().__init__()
30
+ self.channels = channels
31
+ self.resolution = resolution
32
+ self.out_resolution = resolution * 2
33
+ self.out_channels = out_channels or channels
34
+
35
+ self.act_layers = nn.Sequential(
36
+ sp.SparseGroupNorm32(num_groups, channels),
37
+ sp.SparseSiLU()
38
+ )
39
+
40
+ self.sub = sp.SparseSubdivide()
41
+
42
+ self.out_layers = nn.Sequential(
43
+ sp.SparseConv3d(channels, self.out_channels, 3, indice_key=f"res_{self.out_resolution}"),
44
+ sp.SparseGroupNorm32(num_groups, self.out_channels),
45
+ sp.SparseSiLU(),
46
+ zero_module(sp.SparseConv3d(self.out_channels, self.out_channels, 3, indice_key=f"res_{self.out_resolution}")),
47
+ )
48
+
49
+ if self.out_channels == channels:
50
+ self.skip_connection = nn.Identity()
51
+ else:
52
+ self.skip_connection = sp.SparseConv3d(channels, self.out_channels, 1, indice_key=f"res_{self.out_resolution}")
53
+
54
+ def forward(self, x: sp.SparseTensor) -> sp.SparseTensor:
55
+ """
56
+ Apply the subdivide block to a sparse tensor of features.
57
+
58
+ Args:
59
+ x: an [N x C x ...] Tensor of features.
60
+ Returns:
61
+ an [N x C x ...] Tensor of outputs.
62
+ """
63
+ h = self.act_layers(x)
64
+ h = self.sub(h)
65
+ x = self.sub(x)
66
+ h = self.out_layers(h)
67
+ h = h + self.skip_connection(x)
68
+ return h
69
+
70
+
71
+ class SLatMeshDecoder(SparseTransformerBase):
72
+ def __init__(
73
+ self,
74
+ resolution: int,
75
+ model_channels: int,
76
+ latent_channels: int,
77
+ num_blocks: int,
78
+ num_heads: Optional[int] = None,
79
+ num_head_channels: Optional[int] = 64,
80
+ mlp_ratio: float = 4,
81
+ attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "swin",
82
+ window_size: int = 8,
83
+ pe_mode: Literal["ape", "rope"] = "ape",
84
+ use_fp16: bool = False,
85
+ use_checkpoint: bool = False,
86
+ qk_rms_norm: bool = False,
87
+ representation_config: dict = None,
88
+ ):
89
+ super().__init__(
90
+ in_channels=latent_channels,
91
+ model_channels=model_channels,
92
+ num_blocks=num_blocks,
93
+ num_heads=num_heads,
94
+ num_head_channels=num_head_channels,
95
+ mlp_ratio=mlp_ratio,
96
+ attn_mode=attn_mode,
97
+ window_size=window_size,
98
+ pe_mode=pe_mode,
99
+ use_fp16=use_fp16,
100
+ use_checkpoint=use_checkpoint,
101
+ qk_rms_norm=qk_rms_norm,
102
+ )
103
+ self.resolution = resolution
104
+ self.rep_config = representation_config
105
+ self.mesh_extractor = SparseFeatures2Mesh(res=self.resolution*4, use_color=self.rep_config.get('use_color', False))
106
+ self.out_channels = self.mesh_extractor.feats_channels
107
+ self.upsample = nn.ModuleList([
108
+ SparseSubdivideBlock3d(
109
+ channels=model_channels,
110
+ resolution=resolution,
111
+ out_channels=model_channels // 4
112
+ ),
113
+ SparseSubdivideBlock3d(
114
+ channels=model_channels // 4,
115
+ resolution=resolution * 2,
116
+ out_channels=model_channels // 8
117
+ )
118
+ ])
119
+ self.out_layer = sp.SparseLinear(model_channels // 8, self.out_channels)
120
+
121
+ self.initialize_weights()
122
+ if use_fp16:
123
+ self.convert_to_fp16()
124
+
125
+ def initialize_weights(self) -> None:
126
+ super().initialize_weights()
127
+ # Zero-out output layers:
128
+ nn.init.constant_(self.out_layer.weight, 0)
129
+ nn.init.constant_(self.out_layer.bias, 0)
130
+
131
+ def convert_to_fp16(self) -> None:
132
+ """
133
+ Convert the torso of the model to float16.
134
+ """
135
+ super().convert_to_fp16()
136
+ self.upsample.apply(convert_module_to_f16)
137
+
138
+ def convert_to_fp32(self) -> None:
139
+ """
140
+ Convert the torso of the model to float32.
141
+ """
142
+ super().convert_to_fp32()
143
+ self.upsample.apply(convert_module_to_f32)
144
+
145
+ def to_representation(self, x: sp.SparseTensor) -> List[MeshExtractResult]:
146
+ """
147
+ Convert a batch of network outputs to 3D representations.
148
+
149
+ Args:
150
+ x: The [N x * x C] sparse tensor output by the network.
151
+
152
+ Returns:
153
+ list of representations
154
+ """
155
+ ret = []
156
+ for i in range(x.shape[0]):
157
+ mesh = self.mesh_extractor(x[i], training=self.training)
158
+ ret.append(mesh)
159
+ return ret
160
+
161
+ def forward(self, x: sp.SparseTensor) -> List[MeshExtractResult]:
162
+ h = super().forward(x)
163
+ for block in self.upsample:
164
+ h = block(h)
165
+ h = h.type(x.dtype)
166
+ h = self.out_layer(h)
167
+ return self.to_representation(h)
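Aside (not part of the uploaded files): the resolution/channel bookkeeping of the two SparseSubdivideBlock3d stages above, with hypothetical model_channels and resolution; each stage doubles the grid resolution, which is why the mesh extractor runs at resolution * 4.

model_channels, resolution = 768, 64      # illustrative values
stages = [(model_channels, resolution)]
for div in (4, 8):                        # out_channels = model_channels // 4, then // 8
    stages.append((model_channels // div, stages[-1][1] * 2))
print(stages)                             # [(768, 64), (192, 128), (96, 256)]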
Amodal3R/models/structured_latent_vae/encoder.py CHANGED
@@ -1,3 +1,76 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:0f95ee4c9d643bfdfb5f9b169978634fff962a0cfcd556cfe8c0b0931322b414
3
- size 2623
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ from ...modules import sparse as sp
6
+ from .base import SparseTransformerBase
7
+
8
+
9
+ class SLatEncoder(SparseTransformerBase):
10
+ def __init__(
11
+ self,
12
+ resolution: int,
13
+ in_channels: int,
14
+ model_channels: int,
15
+ latent_channels: int,
16
+ num_blocks: int,
17
+ num_heads: Optional[int] = None,
18
+ num_head_channels: Optional[int] = 64,
19
+ mlp_ratio: float = 4,
20
+ attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "swin",
21
+ window_size: int = 8,
22
+ pe_mode: Literal["ape", "rope"] = "ape",
23
+ use_fp16: bool = False,
24
+ use_checkpoint: bool = False,
25
+ qk_rms_norm: bool = False,
26
+ ):
27
+ super().__init__(
28
+ in_channels=in_channels,
29
+ model_channels=model_channels,
30
+ num_blocks=num_blocks,
31
+ num_heads=num_heads,
32
+ num_head_channels=num_head_channels,
33
+ mlp_ratio=mlp_ratio,
34
+ attn_mode=attn_mode,
35
+ window_size=window_size,
36
+ pe_mode=pe_mode,
37
+ use_fp16=use_fp16,
38
+ use_checkpoint=use_checkpoint,
39
+ qk_rms_norm=qk_rms_norm,
40
+ )
41
+ self.resolution = resolution
42
+ self.out_layer = sp.SparseLinear(model_channels, 2 * latent_channels)
43
+
44
+ self.initialize_weights()
45
+ if use_fp16:
46
+ self.convert_to_fp16()
47
+
48
+ def initialize_weights(self) -> None:
49
+ super().initialize_weights()
50
+ # Zero-out output layers:
51
+ nn.init.constant_(self.out_layer.weight, 0)
52
+ nn.init.constant_(self.out_layer.bias, 0)
53
+
54
+ def forward(self, x: sp.SparseTensor, sample_posterior=True, return_raw=False):
55
+ assert torch.isfinite(x.feats).all(), "Input feats contains NaN or Inf!"
56
+ h = super().forward(x)
57
+ h = h.type(x.dtype)
58
+ assert torch.isfinite(h.feats).all(), "Step1 contains NaN or Inf!"
59
+ h = h.replace(F.layer_norm(h.feats, h.feats.shape[-1:]))
60
+ assert torch.isfinite(h.feats).all(), "Step2 contains NaN or Inf!"
61
+ h = self.out_layer(h)
62
+ assert torch.isfinite(h.feats).all(), "Step3 contains NaN or Inf!"
63
+
64
+ # Sample from the posterior distribution
65
+ mean, logvar = h.feats.chunk(2, dim=-1)
66
+ if sample_posterior:
67
+ std = torch.exp(0.5 * logvar)
68
+ z = mean + std * torch.randn_like(std)
69
+ else:
70
+ z = mean
71
+ z = h.replace(z)
72
+
73
+ if return_raw:
74
+ return z, mean, logvar
75
+ else:
76
+ return z
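Aside (not part of the uploaded files): the posterior sampling in SLatEncoder.forward above is the standard VAE reparameterization trick; a dense-tensor sketch with illustrative shapes.

import torch

mean = torch.zeros(4, 8)
logvar = torch.full((4, 8), -2.0)
std = torch.exp(0.5 * logvar)
z = mean + std * torch.randn_like(std)    # sample_posterior=True
z_mode = mean                             # sample_posterior=False: deterministic mean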
Amodal3R/modules/attention/__init__.py CHANGED
@@ -1,3 +1,36 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:163edbd14e9b602922dc83f0e3e1d1a217296ea94e7f9f5ba04a9af3fe96d760
3
- size 758
1
+ from typing import *
2
+
3
+ BACKEND = 'flash_attn'
4
+ DEBUG = False
5
+
6
+ def __from_env():
7
+ import os
8
+
9
+ global BACKEND
10
+ global DEBUG
11
+
12
+ env_attn_backend = os.environ.get('ATTN_BACKEND')
13
+ env_sttn_debug = os.environ.get('ATTN_DEBUG')
14
+
15
+ if env_attn_backend is not None and env_attn_backend in ['xformers', 'flash_attn', 'sdpa', 'naive']:
16
+ BACKEND = env_attn_backend
17
+ if env_sttn_debug is not None:
18
+ DEBUG = env_sttn_debug == '1'
19
+
20
+ print(f"[ATTENTION] Using backend: {BACKEND}")
21
+
22
+
23
+ __from_env()
24
+
25
+
26
+ def set_backend(backend: Literal['xformers', 'flash_attn']):
27
+ global BACKEND
28
+ BACKEND = backend
29
+
30
+ def set_debug(debug: bool):
31
+ global DEBUG
32
+ DEBUG = debug
33
+
34
+
35
+ from .full_attn import *
36
+ from .modules import *
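Aside (not part of the uploaded files): the backend is chosen once at import time from environment variables, so they must be set before the package is imported; the values below are only examples.

import os

os.environ["ATTN_BACKEND"] = "sdpa"   # one of: xformers, flash_attn, sdpa, naive
os.environ["ATTN_DEBUG"] = "0"
# import Amodal3R.modules.attention   # __from_env() reads the variables during this import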
Amodal3R/modules/attention/full_attn.py CHANGED
@@ -1,3 +1,140 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:18f6fe3168efff666ad7ef5321ff493280eafc95d69bc24a47938b9febbb8551
3
- size 4932
1
+ from typing import *
2
+ import torch
3
+ import math
4
+ from . import DEBUG, BACKEND
5
+
6
+ if BACKEND == 'xformers':
7
+ import xformers.ops as xops
8
+ elif BACKEND == 'flash_attn':
9
+ import flash_attn
10
+ elif BACKEND == 'sdpa':
11
+ from torch.nn.functional import scaled_dot_product_attention as sdpa
12
+ elif BACKEND == 'naive':
13
+ pass
14
+ else:
15
+ raise ValueError(f"Unknown attention backend: {BACKEND}")
16
+
17
+
18
+ __all__ = [
19
+ 'scaled_dot_product_attention',
20
+ ]
21
+
22
+
23
+ def _naive_sdpa(q, k, v):
24
+ """
25
+ Naive implementation of scaled dot product attention.
26
+ """
27
+ q = q.permute(0, 2, 1, 3) # [N, H, L, C]
28
+ k = k.permute(0, 2, 1, 3) # [N, H, L, C]
29
+ v = v.permute(0, 2, 1, 3) # [N, H, L, C]
30
+ scale_factor = 1 / math.sqrt(q.size(-1))
31
+ attn_weight = q @ k.transpose(-2, -1) * scale_factor
32
+ attn_weight = torch.softmax(attn_weight, dim=-1)
33
+ out = attn_weight @ v
34
+ out = out.permute(0, 2, 1, 3) # [N, L, H, C]
35
+ return out
36
+
37
+
38
+ @overload
39
+ def scaled_dot_product_attention(qkv: torch.Tensor) -> torch.Tensor:
40
+ """
41
+ Apply scaled dot product attention.
42
+
43
+ Args:
44
+ qkv (torch.Tensor): A [N, L, 3, H, C] tensor containing Qs, Ks, and Vs.
45
+ """
46
+ ...
47
+
48
+ @overload
49
+ def scaled_dot_product_attention(q: torch.Tensor, kv: torch.Tensor) -> torch.Tensor:
50
+ """
51
+ Apply scaled dot product attention.
52
+
53
+ Args:
54
+ q (torch.Tensor): A [N, L, H, C] tensor containing Qs.
55
+ kv (torch.Tensor): A [N, L, 2, H, C] tensor containing Ks and Vs.
56
+ """
57
+ ...
58
+
59
+ @overload
60
+ def scaled_dot_product_attention(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor) -> torch.Tensor:
61
+ """
62
+ Apply scaled dot product attention.
63
+
64
+ Args:
65
+ q (torch.Tensor): A [N, L, H, Ci] tensor containing Qs.
66
+ k (torch.Tensor): A [N, L, H, Ci] tensor containing Ks.
67
+ v (torch.Tensor): A [N, L, H, Co] tensor containing Vs.
68
+
69
+ Note:
70
+ k and v are assumed to have the same coordinate map.
71
+ """
72
+ ...
73
+
74
+ def scaled_dot_product_attention(*args, **kwargs):
75
+ arg_names_dict = {
76
+ 1: ['qkv'],
77
+ 2: ['q', 'kv'],
78
+ 3: ['q', 'k', 'v']
79
+ }
80
+ num_all_args = len(args) + len(kwargs)
81
+ assert num_all_args in arg_names_dict, f"Invalid number of arguments, got {num_all_args}, expected 1, 2, or 3"
82
+ for key in arg_names_dict[num_all_args][len(args):]:
83
+ assert key in kwargs, f"Missing argument {key}"
84
+
85
+ if num_all_args == 1:
86
+ qkv = args[0] if len(args) > 0 else kwargs['qkv']
87
+ assert len(qkv.shape) == 5 and qkv.shape[2] == 3, f"Invalid shape for qkv, got {qkv.shape}, expected [N, L, 3, H, C]"
88
+ device = qkv.device
89
+
90
+ elif num_all_args == 2:
91
+ q = args[0] if len(args) > 0 else kwargs['q']
92
+ kv = args[1] if len(args) > 1 else kwargs['kv']
93
+ assert q.shape[0] == kv.shape[0], f"Batch size mismatch, got {q.shape[0]} and {kv.shape[0]}"
94
+ assert len(q.shape) == 4, f"Invalid shape for q, got {q.shape}, expected [N, L, H, C]"
95
+ assert len(kv.shape) == 5, f"Invalid shape for kv, got {kv.shape}, expected [N, L, 2, H, C]"
96
+ device = q.device
97
+
98
+ elif num_all_args == 3:
99
+ q = args[0] if len(args) > 0 else kwargs['q']
100
+ k = args[1] if len(args) > 1 else kwargs['k']
101
+ v = args[2] if len(args) > 2 else kwargs['v']
102
+ assert q.shape[0] == k.shape[0] == v.shape[0], f"Batch size mismatch, got {q.shape[0]}, {k.shape[0]}, and {v.shape[0]}"
103
+ assert len(q.shape) == 4, f"Invalid shape for q, got {q.shape}, expected [N, L, H, Ci]"
104
+ assert len(k.shape) == 4, f"Invalid shape for k, got {k.shape}, expected [N, L, H, Ci]"
105
+ assert len(v.shape) == 4, f"Invalid shape for v, got {v.shape}, expected [N, L, H, Co]"
106
+ device = q.device
107
+
108
+ if BACKEND == 'xformers':
109
+ if num_all_args == 1:
110
+ q, k, v = qkv.unbind(dim=2)
111
+ elif num_all_args == 2:
112
+ k, v = kv.unbind(dim=2)
113
+ out = xops.memory_efficient_attention(q, k, v)
114
+ elif BACKEND == 'flash_attn':
115
+ if num_all_args == 1:
116
+ out = flash_attn.flash_attn_qkvpacked_func(qkv)
117
+ elif num_all_args == 2:
118
+ out = flash_attn.flash_attn_kvpacked_func(q, kv)
119
+ elif num_all_args == 3:
120
+ out = flash_attn.flash_attn_func(q, k, v)
121
+ elif BACKEND == 'sdpa':
122
+ if num_all_args == 1:
123
+ q, k, v = qkv.unbind(dim=2)
124
+ elif num_all_args == 2:
125
+ k, v = kv.unbind(dim=2)
126
+ q = q.permute(0, 2, 1, 3) # [N, H, L, C]
127
+ k = k.permute(0, 2, 1, 3) # [N, H, L, C]
128
+ v = v.permute(0, 2, 1, 3) # [N, H, L, C]
129
+ out = sdpa(q, k, v) # [N, H, L, C]
130
+ out = out.permute(0, 2, 1, 3) # [N, L, H, C]
131
+ elif BACKEND == 'naive':
132
+ if num_all_args == 1:
133
+ q, k, v = qkv.unbind(dim=2)
134
+ elif num_all_args == 2:
135
+ k, v = kv.unbind(dim=2)
136
+ out = _naive_sdpa(q, k, v)
137
+ else:
138
+ raise ValueError(f"Unknown attention module: {BACKEND}")
139
+
140
+ return out
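Aside (not part of the uploaded files): the three accepted call signatures only differ in how Q/K/V are packed; a sketch of the expected tensor layouts with illustrative sizes (the calls themselves are left commented out since they require one of the backends to be installed).

import torch

N, L, H, C = 2, 16, 4, 32
qkv = torch.randn(N, L, 3, H, C)          # packed Q, K, V
q, k, v = qkv.unbind(dim=2)               # three [N, L, H, C] tensors
kv = torch.stack([k, v], dim=2)           # packed K, V: [N, L, 2, H, C]
# scaled_dot_product_attention(qkv)
# scaled_dot_product_attention(q, kv)
# scaled_dot_product_attention(q, k, v)   # each returns an [N, L, H, C] tensor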
Amodal3R/modules/attention/modules.py CHANGED
@@ -1,3 +1,265 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:71298bae8cac47f56545a22f21e4c48ccb9f687a6339add24c9303a718bad139
3
- size 11311
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ from .full_attn import scaled_dot_product_attention
6
+ import math
7
+
8
+
9
+ class MultiHeadRMSNorm(nn.Module):
10
+ def __init__(self, dim: int, heads: int):
11
+ super().__init__()
12
+ self.scale = dim ** 0.5
13
+ self.gamma = nn.Parameter(torch.ones(heads, dim))
14
+
15
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
16
+ return (F.normalize(x.float(), dim = -1) * self.gamma * self.scale).to(x.dtype)
17
+
18
+
19
+ class RotaryPositionEmbedder(nn.Module):
20
+ def __init__(self, hidden_size: int, in_channels: int = 3):
21
+ super().__init__()
22
+ assert hidden_size % 2 == 0, "Hidden size must be divisible by 2"
23
+ self.hidden_size = hidden_size
24
+ self.in_channels = in_channels
25
+ self.freq_dim = hidden_size // in_channels // 2
26
+ self.freqs = torch.arange(self.freq_dim, dtype=torch.float32) / self.freq_dim
27
+ self.freqs = 1.0 / (10000 ** self.freqs)
28
+
29
+ def _get_phases(self, indices: torch.Tensor) -> torch.Tensor:
30
+ self.freqs = self.freqs.to(indices.device)
31
+ phases = torch.outer(indices, self.freqs)
32
+ phases = torch.polar(torch.ones_like(phases), phases)
33
+ return phases
34
+
35
+ def _rotary_embedding(self, x: torch.Tensor, phases: torch.Tensor) -> torch.Tensor:
36
+ x_complex = torch.view_as_complex(x.float().reshape(*x.shape[:-1], -1, 2))
37
+ x_rotated = x_complex * phases
38
+ x_embed = torch.view_as_real(x_rotated).reshape(*x_rotated.shape[:-1], -1).to(x.dtype)
39
+ return x_embed
40
+
41
+ def forward(self, q: torch.Tensor, k: torch.Tensor, indices: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, torch.Tensor]:
42
+ """
43
+ Args:
44
+ q (torch.Tensor): [..., N, D] tensor of queries
45
+ k (torch.Tensor): [..., N, D] tensor of keys
46
+ indices (torch.Tensor): [..., N, C] tensor of spatial positions
47
+ """
48
+ if indices is None:
49
+ indices = torch.arange(q.shape[-2], device=q.device)
50
+ if len(q.shape) > 2:
51
+ indices = indices.unsqueeze(0).expand(q.shape[:-2] + (-1,))
52
+
53
+ phases = self._get_phases(indices.reshape(-1)).reshape(*indices.shape[:-1], -1)
54
+ if phases.shape[1] < self.hidden_size // 2:
55
+ phases = torch.cat([phases, torch.polar(
56
+ torch.ones(*phases.shape[:-1], self.hidden_size // 2 - phases.shape[1], device=phases.device),
57
+ torch.zeros(*phases.shape[:-1], self.hidden_size // 2 - phases.shape[1], device=phases.device)
58
+ )], dim=-1)
59
+ q_embed = self._rotary_embedding(q, phases)
60
+ k_embed = self._rotary_embedding(k, phases)
61
+ return q_embed, k_embed
62
+
63
+
64
+ class MultiHeadAttention(nn.Module):
65
+ def __init__(
66
+ self,
67
+ channels: int,
68
+ num_heads: int,
69
+ ctx_channels: Optional[int]=None,
70
+ type: Literal["self", "cross"] = "self",
71
+ attn_mode: Literal["full", "windowed"] = "full",
72
+ window_size: Optional[int] = None,
73
+ shift_window: Optional[Tuple[int, int, int]] = None,
74
+ qkv_bias: bool = True,
75
+ use_rope: bool = False,
76
+ qk_rms_norm: bool = False,
77
+ ):
78
+ super().__init__()
79
+ assert channels % num_heads == 0
80
+ assert type in ["self", "cross"], f"Invalid attention type: {type}"
81
+ assert attn_mode in ["full", "windowed"], f"Invalid attention mode: {attn_mode}"
82
+ assert type == "self" or attn_mode == "full", "Cross-attention only supports full attention"
83
+
84
+ if attn_mode == "windowed":
85
+ raise NotImplementedError("Windowed attention is not yet implemented")
86
+
87
+ self.channels = channels
88
+ self.head_dim = channels // num_heads
89
+ self.ctx_channels = ctx_channels if ctx_channels is not None else channels
90
+ self.num_heads = num_heads
91
+ self._type = type
92
+ self.attn_mode = attn_mode
93
+ self.window_size = window_size
94
+ self.shift_window = shift_window
95
+ self.use_rope = use_rope
96
+ self.qk_rms_norm = qk_rms_norm
97
+
98
+ if self._type == "self":
99
+ self.to_qkv = nn.Linear(channels, channels * 3, bias=qkv_bias)
100
+ else:
101
+ self.to_q = nn.Linear(channels, channels, bias=qkv_bias)
102
+ self.to_kv = nn.Linear(self.ctx_channels, channels * 2, bias=qkv_bias)
103
+
104
+ if self.qk_rms_norm:
105
+ self.q_rms_norm = MultiHeadRMSNorm(self.head_dim, num_heads)
106
+ self.k_rms_norm = MultiHeadRMSNorm(self.head_dim, num_heads)
107
+
108
+ self.to_out = nn.Linear(channels, channels)
109
+
110
+ if use_rope:
111
+ self.rope = RotaryPositionEmbedder(channels)
112
+
113
+ def forward(self, x: torch.Tensor, context: Optional[torch.Tensor] = None, indices: Optional[torch.Tensor] = None) -> torch.Tensor:
114
+ B, L, C = x.shape
115
+ if self._type == "self":
116
+ qkv = self.to_qkv(x)
117
+ qkv = qkv.reshape(B, L, 3, self.num_heads, -1)
118
+ if self.use_rope:
119
+ q, k, v = qkv.unbind(dim=2)
120
+ q, k = self.rope(q, k, indices)
121
+ qkv = torch.stack([q, k, v], dim=2)
122
+ if self.attn_mode == "full":
123
+ if self.qk_rms_norm:
124
+ q, k, v = qkv.unbind(dim=2)
125
+ q = self.q_rms_norm(q)
126
+ k = self.k_rms_norm(k)
127
+ h = scaled_dot_product_attention(q, k, v)
128
+ else:
129
+ h = scaled_dot_product_attention(qkv)
130
+ elif self.attn_mode == "windowed":
131
+ raise NotImplementedError("Windowed attention is not yet implemented")
132
+ else:
133
+ Lkv = context.shape[1]
134
+ q = self.to_q(x)
135
+ kv = self.to_kv(context)
136
+ q = q.reshape(B, L, self.num_heads, -1)
137
+ kv = kv.reshape(B, Lkv, 2, self.num_heads, -1)
138
+ if self.qk_rms_norm:
139
+ q = self.q_rms_norm(q)
140
+ k, v = kv.unbind(dim=2)
141
+ k = self.k_rms_norm(k)
142
+ h = scaled_dot_product_attention(q, k, v)
143
+ else:
144
+ h = scaled_dot_product_attention(q, kv)
145
+ h = h.reshape(B, L, -1)
146
+ h = self.to_out(h)
147
+ return h
148
+
149
+
150
+ class MultiHeadAttentionWeighted(nn.Module):
151
+ def __init__(
152
+ self,
153
+ channels: int,
154
+ num_heads: int,
155
+ ctx_channels: Optional[int]=None,
156
+ type: Literal["self", "cross"] = "self",
157
+ attn_mode: Literal["full", "windowed"] = "full",
158
+ window_size: Optional[int] = None,
159
+ shift_window: Optional[Tuple[int, int, int]] = None,
160
+ qkv_bias: bool = True,
161
+ use_rope: bool = False,
162
+ qk_rms_norm: bool = False,
163
+ ):
164
+ super().__init__()
165
+ assert channels % num_heads == 0
166
+ assert type in ["self", "cross"], f"Invalid attention type: {type}"
167
+ assert attn_mode in ["full", "windowed"], f"Invalid attention mode: {attn_mode}"
168
+ assert type == "self" or attn_mode == "full", "Cross-attention only supports full attention"
169
+
170
+ if attn_mode == "windowed":
171
+ raise NotImplementedError("Windowed attention is not yet implemented")
172
+
173
+ self.channels = channels
174
+ self.head_dim = channels // num_heads
175
+ self.ctx_channels = ctx_channels if ctx_channels is not None else channels
176
+ self.num_heads = num_heads
177
+ self._type = type
178
+ self.attn_mode = attn_mode
179
+ self.window_size = window_size
180
+ self.shift_window = shift_window
181
+ self.use_rope = use_rope
182
+ self.qk_rms_norm = qk_rms_norm
183
+
184
+ if self._type == "self":
185
+ self.to_qkv = nn.Linear(channels, channels * 3, bias=qkv_bias)
186
+ else:
187
+ self.to_q = nn.Linear(channels, channels, bias=qkv_bias)
188
+ self.to_kv = nn.Linear(self.ctx_channels, channels * 2, bias=qkv_bias)
189
+
190
+ if self.qk_rms_norm:
191
+ self.q_rms_norm = MultiHeadRMSNorm(self.head_dim, num_heads)
192
+ self.k_rms_norm = MultiHeadRMSNorm(self.head_dim, num_heads)
193
+
194
+ self.to_out = nn.Linear(channels, channels)
195
+
196
+ if use_rope:
197
+ self.rope = RotaryPositionEmbedder(channels)
198
+
199
+ def forward(self, x: torch.Tensor, context: Optional[torch.Tensor] = None, mask_weight: Optional[torch.Tensor] = None, indices: Optional[torch.Tensor] = None) -> torch.Tensor:
200
+ B, L, C = x.shape
201
+ if self._type == "self":
202
+ qkv = self.to_qkv(x)
203
+ qkv = qkv.reshape(B, L, 3, self.num_heads, -1)
204
+ if self.use_rope:
205
+ q, k, v = qkv.unbind(dim=2)
206
+ q, k = self.rope(q, k, indices)
207
+ qkv = torch.stack([q, k, v], dim=2)
208
+ if self.attn_mode == "full":
209
+ if self.qk_rms_norm:
210
+ q, k, v = qkv.unbind(dim=2)
211
+ q = self.q_rms_norm(q)
212
+ k = self.k_rms_norm(k)
213
+ h = scaled_dot_product_attention(q, k, v)
214
+ else:
215
+ h = scaled_dot_product_attention(qkv)
216
+ elif self.attn_mode == "windowed":
217
+ raise NotImplementedError("Windowed attention is not yet implemented")
218
+ else:
219
+ Lkv = context.shape[1]
220
+ q = self.to_q(x)
221
+ kv = self.to_kv(context)
222
+ q = q.reshape(B, L, self.num_heads, -1)
223
+ kv = kv.reshape(B, Lkv, 2, self.num_heads, -1)
224
+ if self.qk_rms_norm:
225
+ if mask_weight is not None:
226
+ q = self.q_rms_norm(q)
227
+ k, v = kv.unbind(dim=2)
228
+ k = self.k_rms_norm(k)
229
+ h = self.weighted_scaled_dot_product_attention(q, k, v, mask_weight)
230
+ else:
231
+ q = self.q_rms_norm(q)
232
+ k, v = kv.unbind(dim=2)
233
+ k = self.k_rms_norm(k)
234
+ h = scaled_dot_product_attention(q, k, v)
235
+ else:
236
+ if mask_weight is not None:
237
+ k, v = kv.unbind(dim=2)
238
+ h = self.weighted_scaled_dot_product_attention(q, k, v, mask_weight)
239
+ else:
240
+ h = scaled_dot_product_attention(q, kv)
241
+ h = h.reshape(B, L, -1)
242
+ h = self.to_out(h)
243
+ return h
244
+
245
+ def weighted_scaled_dot_product_attention(self, q, k, v, mask_weight, eps=1e-6):
246
+ # q: (B, L_q, num_heads, head_dim)
247
+ # k: (B, L_k, num_heads, head_dim)
248
+ # v: (B, L_k, num_heads, head_dim)
249
+ # mask_weight: (B, L_k - 5, 1); ones for the 5 cls tokens are prepended below
250
+ assert mask_weight.shape[1] == k.shape[1] - 5
251
+ d = k.size(-1)
252
+ q = q.permute(0, 2, 1, 3)
253
+ k = k.permute(0, 2, 1, 3)
254
+ v = v.permute(0, 2, 1, 3)
255
+ attn_logits = q @ k.transpose(-2, -1) / math.sqrt(d) # (B, num_heads, L_q, L_k)
256
+
257
+ B, L_k, _ = mask_weight.shape
258
+ cls_weight = torch.ones(B, 5, device=mask_weight.device, dtype=mask_weight.dtype)
259
+ mask_weight = torch.cat([cls_weight, mask_weight.squeeze(2)], dim=1) # (B, L_k + 5)
260
+ mask_bias = torch.log(mask_weight.unsqueeze(1).unsqueeze(1) + eps)
261
+ attn_logits = attn_logits + mask_bias
262
+
263
+ attn = torch.softmax(attn_logits, dim=-1)
264
+ output = attn @ v
265
+ return output.permute(0, 2, 1, 3) # (B, L_q, num_heads, head_dim)
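Aside (not part of the uploaded files): adding log(mask_weight) to the attention logits, as weighted_scaled_dot_product_attention does above, is equivalent to rescaling each key's softmax weight by mask_weight and renormalizing; a numerical check with made-up weights.

import torch

logits = torch.randn(1, 1, 4, 6)                   # (B, heads, L_q, L_k)
w = torch.tensor([1.0, 1.0, 0.5, 0.5, 0.0, 1.0])   # per-key weights in [0, 1]
eps = 1e-6
biased = torch.softmax(logits + torch.log(w + eps), dim=-1)
manual = torch.softmax(logits, dim=-1) * (w + eps)
manual = manual / manual.sum(dim=-1, keepdim=True)
assert torch.allclose(biased, manual, atol=1e-5)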
Amodal3R/modules/norm.py CHANGED
@@ -1,3 +1,25 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:dffc3a789ddcab606186326ce581345bfe848912f3e590753b967f9451e5a9d7
3
- size 720
1
+ import torch
2
+ import torch.nn as nn
3
+
4
+
5
+ class LayerNorm32(nn.LayerNorm):
6
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
7
+ return super().forward(x.float()).type(x.dtype)
8
+
9
+
10
+ class GroupNorm32(nn.GroupNorm):
11
+ """
12
+ A GroupNorm layer that converts to float32 before the forward pass.
13
+ """
14
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
15
+ return super().forward(x.float()).type(x.dtype)
16
+
17
+
18
+ class ChannelLayerNorm32(LayerNorm32):
19
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
20
+ DIM = x.dim()
21
+ x = x.permute(0, *range(2, DIM), 1).contiguous()
22
+ x = super().forward(x)
23
+ x = x.permute(0, DIM-1, *range(1, DIM-1)).contiguous()
24
+ return x
25
+
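Aside (not part of the uploaded files): ChannelLayerNorm32 normalizes the channel dimension of an [N, C, ...] tensor by moving C last and back again; a functional sketch that omits the learnable affine parameters.

import torch
import torch.nn.functional as F

x = torch.randn(2, 8, 4, 4, 4)               # [N, C, D, H, W]
y = F.layer_norm(x.permute(0, 2, 3, 4, 1).float(), (8,)).type(x.dtype)
y = y.permute(0, 4, 1, 2, 3)                 # back to [N, C, D, H, W]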
Amodal3R/modules/sparse/__init__.py CHANGED
@@ -1,3 +1,102 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:f01025f71781973767aa7c8c55ec61f7c44a045b1a61592e143e5666190dbdf1
3
- size 2907
1
+ from typing import *
2
+
3
+ BACKEND = 'spconv'
4
+ DEBUG = False
5
+ ATTN = 'flash_attn'
6
+
7
+ def __from_env():
8
+ import os
9
+
10
+ global BACKEND
11
+ global DEBUG
12
+ global ATTN
13
+
14
+ env_sparse_backend = os.environ.get('SPARSE_BACKEND')
15
+ env_sparse_debug = os.environ.get('SPARSE_DEBUG')
16
+ env_sparse_attn = os.environ.get('SPARSE_ATTN_BACKEND')
17
+ if env_sparse_attn is None:
18
+ env_sparse_attn = os.environ.get('ATTN_BACKEND')
19
+
20
+ if env_sparse_backend is not None and env_sparse_backend in ['spconv', 'torchsparse']:
21
+ BACKEND = env_sparse_backend
22
+ if env_sparse_debug is not None:
23
+ DEBUG = env_sparse_debug == '1'
24
+ if env_sparse_attn is not None and env_sparse_attn in ['xformers', 'flash_attn']:
25
+ ATTN = env_sparse_attn
26
+
27
+ print(f"[SPARSE] Backend: {BACKEND}, Attention: {ATTN}")
28
+
29
+
30
+ __from_env()
31
+
32
+
33
+ def set_backend(backend: Literal['spconv', 'torchsparse']):
34
+ global BACKEND
35
+ BACKEND = backend
36
+
37
+ def set_debug(debug: bool):
38
+ global DEBUG
39
+ DEBUG = debug
40
+
41
+ def set_attn(attn: Literal['xformers', 'flash_attn']):
42
+ global ATTN
43
+ ATTN = attn
44
+
45
+
46
+ import importlib
47
+
48
+ __attributes = {
49
+ 'SparseTensor': 'basic',
50
+ 'sparse_batch_broadcast': 'basic',
51
+ 'sparse_batch_op': 'basic',
52
+ 'sparse_cat': 'basic',
53
+ 'sparse_unbind': 'basic',
54
+ 'SparseGroupNorm': 'norm',
55
+ 'SparseLayerNorm': 'norm',
56
+ 'SparseGroupNorm32': 'norm',
57
+ 'SparseLayerNorm32': 'norm',
58
+ 'SparseReLU': 'nonlinearity',
59
+ 'SparseSiLU': 'nonlinearity',
60
+ 'SparseGELU': 'nonlinearity',
61
+ 'SparseActivation': 'nonlinearity',
62
+ 'SparseLinear': 'linear',
63
+ 'sparse_scaled_dot_product_attention': 'attention',
64
+ 'SerializeMode': 'attention',
65
+ 'sparse_serialized_scaled_dot_product_self_attention': 'attention',
66
+ 'sparse_windowed_scaled_dot_product_self_attention': 'attention',
67
+ 'SparseMultiHeadAttention': 'attention',
68
+ 'SparseConv3d': 'conv',
69
+ 'SparseInverseConv3d': 'conv',
70
+ 'SparseDownsample': 'spatial',
71
+ 'SparseUpsample': 'spatial',
72
+ 'SparseSubdivide' : 'spatial'
73
+ }
74
+
75
+ __submodules = ['transformer']
76
+
77
+ __all__ = list(__attributes.keys()) + __submodules
78
+
79
+ def __getattr__(name):
80
+ if name not in globals():
81
+ if name in __attributes:
82
+ module_name = __attributes[name]
83
+ module = importlib.import_module(f".{module_name}", __name__)
84
+ globals()[name] = getattr(module, name)
85
+ elif name in __submodules:
86
+ module = importlib.import_module(f".{name}", __name__)
87
+ globals()[name] = module
88
+ else:
89
+ raise AttributeError(f"module {__name__} has no attribute {name}")
90
+ return globals()[name]
91
+
92
+
93
+ # For Pylance
94
+ if __name__ == '__main__':
95
+ from .basic import *
96
+ from .norm import *
97
+ from .nonlinearity import *
98
+ from .linear import *
99
+ from .attention import *
100
+ from .conv import *
101
+ from .spatial import *
102
+ from . import transformer
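Aside (not part of the uploaded files): the module-level __getattr__ above is the PEP 562 lazy-import pattern; a reduced sketch meant to live in some package's __init__.py, where 'basic' stands in for any submodule in the mapping.

import importlib

_attributes = {'SparseTensor': 'basic'}          # attribute name -> submodule that defines it

def __getattr__(name):
    if name in _attributes:
        module = importlib.import_module(f".{_attributes[name]}", __name__)
        globals()[name] = getattr(module, name)  # cache so the import runs only once
        return globals()[name]
    raise AttributeError(f"module {__name__} has no attribute {name}")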
Amodal3R/modules/sparse/attention/__init__.py CHANGED
@@ -1,3 +1,4 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:78a2d717c82a6b63f39e82272e94c309befc355e4fbf38dbcfc044b7578e46f4
3
- size 108
1
+ from .full_attn import *
2
+ from .serialized_attn import *
3
+ from .windowed_attn import *
4
+ from .modules import *
Amodal3R/modules/sparse/attention/full_attn.py CHANGED
@@ -1,3 +1,215 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:cfed8f6d72223a44433f9a38f3a0eb89d114ac77f710306b3e6ec1acfc12b432
3
- size 9206
1
+ from typing import *
2
+ import torch
3
+ from .. import SparseTensor
4
+ from .. import DEBUG, ATTN
5
+
6
+ if ATTN == 'xformers':
7
+ import xformers.ops as xops
8
+ elif ATTN == 'flash_attn':
9
+ import flash_attn
10
+ else:
11
+ raise ValueError(f"Unknown attention module: {ATTN}")
12
+
13
+
14
+ __all__ = [
15
+ 'sparse_scaled_dot_product_attention',
16
+ ]
17
+
18
+
19
+ @overload
20
+ def sparse_scaled_dot_product_attention(qkv: SparseTensor) -> SparseTensor:
21
+ """
22
+ Apply scaled dot product attention to a sparse tensor.
23
+
24
+ Args:
25
+ qkv (SparseTensor): A [N, *, 3, H, C] sparse tensor containing Qs, Ks, and Vs.
26
+ """
27
+ ...
28
+
29
+ @overload
30
+ def sparse_scaled_dot_product_attention(q: SparseTensor, kv: Union[SparseTensor, torch.Tensor]) -> SparseTensor:
31
+ """
32
+ Apply scaled dot product attention to a sparse tensor.
33
+
34
+ Args:
35
+ q (SparseTensor): A [N, *, H, C] sparse tensor containing Qs.
36
+ kv (SparseTensor or torch.Tensor): A [N, *, 2, H, C] sparse tensor or a [N, L, 2, H, C] dense tensor containing Ks and Vs.
37
+ """
38
+ ...
39
+
40
+ @overload
41
+ def sparse_scaled_dot_product_attention(q: torch.Tensor, kv: SparseTensor) -> torch.Tensor:
42
+ """
43
+ Apply scaled dot product attention to a sparse tensor.
44
+
45
+ Args:
46
+ q (torch.Tensor): A [N, L, H, C] dense tensor containing Qs.
47
+ kv (SparseTensor): A [N, *, 2, H, C] sparse tensor containing Ks and Vs.
48
+ """
49
+ ...
50
+
51
+ @overload
52
+ def sparse_scaled_dot_product_attention(q: SparseTensor, k: SparseTensor, v: SparseTensor) -> SparseTensor:
53
+ """
54
+ Apply scaled dot product attention to a sparse tensor.
55
+
56
+ Args:
57
+ q (SparseTensor): A [N, *, H, Ci] sparse tensor containing Qs.
58
+ k (SparseTensor): A [N, *, H, Ci] sparse tensor containing Ks.
59
+ v (SparseTensor): A [N, *, H, Co] sparse tensor containing Vs.
60
+
61
+ Note:
62
+ k and v are assumed to have the same coordinate map.
63
+ """
64
+ ...
65
+
66
+ @overload
67
+ def sparse_scaled_dot_product_attention(q: SparseTensor, k: torch.Tensor, v: torch.Tensor) -> SparseTensor:
68
+ """
69
+ Apply scaled dot product attention to a sparse tensor.
70
+
71
+ Args:
72
+ q (SparseTensor): A [N, *, H, Ci] sparse tensor containing Qs.
73
+ k (torch.Tensor): A [N, L, H, Ci] dense tensor containing Ks.
74
+ v (torch.Tensor): A [N, L, H, Co] dense tensor containing Vs.
75
+ """
76
+ ...
77
+
78
+ @overload
79
+ def sparse_scaled_dot_product_attention(q: torch.Tensor, k: SparseTensor, v: SparseTensor) -> torch.Tensor:
80
+ """
81
+ Apply scaled dot product attention to a sparse tensor.
82
+
83
+ Args:
84
+ q (torch.Tensor): A [N, L, H, Ci] dense tensor containing Qs.
85
+ k (SparseTensor): A [N, *, H, Ci] sparse tensor containing Ks.
86
+ v (SparseTensor): A [N, *, H, Co] sparse tensor containing Vs.
87
+ """
88
+ ...
89
+
90
+ def sparse_scaled_dot_product_attention(*args, **kwargs):
91
+ arg_names_dict = {
92
+ 1: ['qkv'],
93
+ 2: ['q', 'kv'],
94
+ 3: ['q', 'k', 'v']
95
+ }
96
+ num_all_args = len(args) + len(kwargs)
97
+ assert num_all_args in arg_names_dict, f"Invalid number of arguments, got {num_all_args}, expected 1, 2, or 3"
98
+ for key in arg_names_dict[num_all_args][len(args):]:
99
+ assert key in kwargs, f"Missing argument {key}"
100
+
101
+ if num_all_args == 1:
102
+ qkv = args[0] if len(args) > 0 else kwargs['qkv']
103
+ assert isinstance(qkv, SparseTensor), f"qkv must be a SparseTensor, got {type(qkv)}"
104
+ assert len(qkv.shape) == 4 and qkv.shape[1] == 3, f"Invalid shape for qkv, got {qkv.shape}, expected [N, *, 3, H, C]"
105
+ device = qkv.device
106
+
107
+ s = qkv
108
+ q_seqlen = [qkv.layout[i].stop - qkv.layout[i].start for i in range(qkv.shape[0])]
109
+ kv_seqlen = q_seqlen
110
+ qkv = qkv.feats # [T, 3, H, C]
111
+
112
+ elif num_all_args == 2:
113
+ q = args[0] if len(args) > 0 else kwargs['q']
114
+ kv = args[1] if len(args) > 1 else kwargs['kv']
115
+ assert isinstance(q, SparseTensor) and isinstance(kv, (SparseTensor, torch.Tensor)) or \
116
+ isinstance(q, torch.Tensor) and isinstance(kv, SparseTensor), \
117
+ f"Invalid types, got {type(q)} and {type(kv)}"
118
+ assert q.shape[0] == kv.shape[0], f"Batch size mismatch, got {q.shape[0]} and {kv.shape[0]}"
119
+ device = q.device
120
+
121
+ if isinstance(q, SparseTensor):
122
+ assert len(q.shape) == 3, f"Invalid shape for q, got {q.shape}, expected [N, *, H, C]"
123
+ s = q
124
+ q_seqlen = [q.layout[i].stop - q.layout[i].start for i in range(q.shape[0])]
125
+ q = q.feats # [T_Q, H, C]
126
+ else:
127
+ assert len(q.shape) == 4, f"Invalid shape for q, got {q.shape}, expected [N, L, H, C]"
128
+ s = None
129
+ N, L, H, C = q.shape
130
+ q_seqlen = [L] * N
131
+ q = q.reshape(N * L, H, C) # [T_Q, H, C]
132
+
133
+ if isinstance(kv, SparseTensor):
134
+ assert len(kv.shape) == 4 and kv.shape[1] == 2, f"Invalid shape for kv, got {kv.shape}, expected [N, *, 2, H, C]"
135
+ kv_seqlen = [kv.layout[i].stop - kv.layout[i].start for i in range(kv.shape[0])]
136
+ kv = kv.feats # [T_KV, 2, H, C]
137
+ else:
138
+ assert len(kv.shape) == 5, f"Invalid shape for kv, got {kv.shape}, expected [N, L, 2, H, C]"
139
+ N, L, _, H, C = kv.shape
140
+ kv_seqlen = [L] * N
141
+ kv = kv.reshape(N * L, 2, H, C) # [T_KV, 2, H, C]
142
+
143
+ elif num_all_args == 3:
144
+ q = args[0] if len(args) > 0 else kwargs['q']
145
+ k = args[1] if len(args) > 1 else kwargs['k']
146
+ v = args[2] if len(args) > 2 else kwargs['v']
147
+ assert isinstance(q, SparseTensor) and isinstance(k, (SparseTensor, torch.Tensor)) and type(k) == type(v) or \
148
+ isinstance(q, torch.Tensor) and isinstance(k, SparseTensor) and isinstance(v, SparseTensor), \
149
+ f"Invalid types, got {type(q)}, {type(k)}, and {type(v)}"
150
+ assert q.shape[0] == k.shape[0] == v.shape[0], f"Batch size mismatch, got {q.shape[0]}, {k.shape[0]}, and {v.shape[0]}"
151
+ device = q.device
152
+
153
+ if isinstance(q, SparseTensor):
154
+ assert len(q.shape) == 3, f"Invalid shape for q, got {q.shape}, expected [N, *, H, Ci]"
155
+ s = q
156
+ q_seqlen = [q.layout[i].stop - q.layout[i].start for i in range(q.shape[0])]
157
+ q = q.feats # [T_Q, H, Ci]
158
+ else:
159
+ assert len(q.shape) == 4, f"Invalid shape for q, got {q.shape}, expected [N, L, H, Ci]"
160
+ s = None
161
+ N, L, H, CI = q.shape
162
+ q_seqlen = [L] * N
163
+ q = q.reshape(N * L, H, CI) # [T_Q, H, Ci]
164
+
165
+ if isinstance(k, SparseTensor):
166
+ assert len(k.shape) == 3, f"Invalid shape for k, got {k.shape}, expected [N, *, H, Ci]"
167
+ assert len(v.shape) == 3, f"Invalid shape for v, got {v.shape}, expected [N, *, H, Co]"
168
+ kv_seqlen = [k.layout[i].stop - k.layout[i].start for i in range(k.shape[0])]
169
+ k = k.feats # [T_KV, H, Ci]
170
+ v = v.feats # [T_KV, H, Co]
171
+ else:
172
+ assert len(k.shape) == 4, f"Invalid shape for k, got {k.shape}, expected [N, L, H, Ci]"
173
+ assert len(v.shape) == 4, f"Invalid shape for v, got {v.shape}, expected [N, L, H, Co]"
174
+ N, L, H, CI, CO = *k.shape, v.shape[-1]
175
+ kv_seqlen = [L] * N
176
+ k = k.reshape(N * L, H, CI) # [T_KV, H, Ci]
177
+ v = v.reshape(N * L, H, CO) # [T_KV, H, Co]
178
+
179
+ if DEBUG:
180
+ if s is not None:
181
+ for i in range(s.shape[0]):
182
+ assert (s.coords[s.layout[i]] == i).all(), f"SparseScaledDotProductSelfAttention: batch index mismatch"
183
+ if num_all_args in [2, 3]:
184
+ assert q.shape[:2] == [1, sum(q_seqlen)], f"SparseScaledDotProductSelfAttention: q shape mismatch"
185
+ if num_all_args == 3:
186
+ assert k.shape[:2] == [1, sum(kv_seqlen)], f"SparseScaledDotProductSelfAttention: k shape mismatch"
187
+ assert v.shape[:2] == [1, sum(kv_seqlen)], f"SparseScaledDotProductSelfAttention: v shape mismatch"
188
+
189
+ if ATTN == 'xformers':
190
+ if num_all_args == 1:
191
+ q, k, v = qkv.unbind(dim=1)
192
+ elif num_all_args == 2:
193
+ k, v = kv.unbind(dim=1)
194
+ q = q.unsqueeze(0)
195
+ k = k.unsqueeze(0)
196
+ v = v.unsqueeze(0)
197
+ mask = xops.fmha.BlockDiagonalMask.from_seqlens(q_seqlen, kv_seqlen)
198
+ out = xops.memory_efficient_attention(q, k, v, mask)[0]
199
+ elif ATTN == 'flash_attn':
200
+ cu_seqlens_q = torch.cat([torch.tensor([0]), torch.cumsum(torch.tensor(q_seqlen), dim=0)]).int().to(device)
201
+ if num_all_args in [2, 3]:
202
+ cu_seqlens_kv = torch.cat([torch.tensor([0]), torch.cumsum(torch.tensor(kv_seqlen), dim=0)]).int().to(device)
203
+ if num_all_args == 1:
204
+ out = flash_attn.flash_attn_varlen_qkvpacked_func(qkv, cu_seqlens_q, max(q_seqlen))
205
+ elif num_all_args == 2:
206
+ out = flash_attn.flash_attn_varlen_kvpacked_func(q, kv, cu_seqlens_q, cu_seqlens_kv, max(q_seqlen), max(kv_seqlen))
207
+ elif num_all_args == 3:
208
+ out = flash_attn.flash_attn_varlen_func(q, k, v, cu_seqlens_q, cu_seqlens_kv, max(q_seqlen), max(kv_seqlen))
209
+ else:
210
+ raise ValueError(f"Unknown attention module: {ATTN}")
211
+
212
+ if s is not None:
213
+ return s.replace(out)
214
+ else:
215
+ return out.reshape(N, L, H, -1)
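Aside (not part of the uploaded files): the varlen flash-attention path above concatenates all samples into one token axis and describes batch boundaries with cumulative sequence lengths; a sketch of that bookkeeping with made-up lengths.

import torch

q_seqlen = [5, 3, 7]                      # tokens per batch element
cu_seqlens_q = torch.cat([torch.tensor([0]),
                          torch.cumsum(torch.tensor(q_seqlen), dim=0)]).int()
print(cu_seqlens_q.tolist())              # [0, 5, 8, 15]
max_seqlen_q = max(q_seqlen)              # 7, also passed to the kernel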
Amodal3R/modules/sparse/attention/modules.py CHANGED
@@ -1,3 +1,305 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:0b285cf86c17874a5c2d91fcc9707a6bd2db36c15ff8efa94eba07e2bf914a60
3
- size 13055
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ from .. import SparseTensor
6
+ from .full_attn import sparse_scaled_dot_product_attention
7
+ from .serialized_attn import SerializeMode, sparse_serialized_scaled_dot_product_self_attention
8
+ from .windowed_attn import sparse_windowed_scaled_dot_product_self_attention
9
+ from ...attention import RotaryPositionEmbedder
10
+ import math
11
+
12
+
13
+ class SparseMultiHeadRMSNorm(nn.Module):
14
+ def __init__(self, dim: int, heads: int):
15
+ super().__init__()
16
+ self.scale = dim ** 0.5
17
+ self.gamma = nn.Parameter(torch.ones(heads, dim))
18
+
19
+ def forward(self, x: Union[SparseTensor, torch.Tensor]) -> Union[SparseTensor, torch.Tensor]:
20
+ x_type = x.dtype
21
+ x = x.float()
22
+ if isinstance(x, SparseTensor):
23
+ x = x.replace(F.normalize(x.feats, dim=-1))
24
+ else:
25
+ x = F.normalize(x, dim=-1)
26
+ return (x * self.gamma * self.scale).to(x_type)
27
+
28
+
29
+ class SparseMultiHeadAttention(nn.Module):
30
+ def __init__(
31
+ self,
32
+ channels: int,
33
+ num_heads: int,
34
+ ctx_channels: Optional[int] = None,
35
+ type: Literal["self", "cross"] = "self",
36
+ attn_mode: Literal["full", "serialized", "windowed"] = "full",
37
+ window_size: Optional[int] = None,
38
+ shift_sequence: Optional[int] = None,
39
+ shift_window: Optional[Tuple[int, int, int]] = None,
40
+ serialize_mode: Optional[SerializeMode] = None,
41
+ qkv_bias: bool = True,
42
+ use_rope: bool = False,
43
+ qk_rms_norm: bool = False,
44
+ ):
45
+ super().__init__()
46
+ assert channels % num_heads == 0
47
+ assert type in ["self", "cross"], f"Invalid attention type: {type}"
48
+ assert attn_mode in ["full", "serialized", "windowed"], f"Invalid attention mode: {attn_mode}"
49
+ assert type == "self" or attn_mode == "full", "Cross-attention only supports full attention"
50
+ assert type == "self" or use_rope is False, "Rotary position embeddings only supported for self-attention"
51
+ self.channels = channels
52
+ self.ctx_channels = ctx_channels if ctx_channels is not None else channels
53
+ self.num_heads = num_heads
54
+ self._type = type
55
+ self.attn_mode = attn_mode
56
+ self.window_size = window_size
57
+ self.shift_sequence = shift_sequence
58
+ self.shift_window = shift_window
59
+ self.serialize_mode = serialize_mode
60
+ self.use_rope = use_rope
61
+ self.qk_rms_norm = qk_rms_norm
62
+
63
+ if self._type == "self":
64
+ self.to_qkv = nn.Linear(channels, channels * 3, bias=qkv_bias)
65
+ else:
66
+ self.to_q = nn.Linear(channels, channels, bias=qkv_bias)
67
+ self.to_kv = nn.Linear(self.ctx_channels, channels * 2, bias=qkv_bias)
68
+
69
+ if self.qk_rms_norm:
70
+ self.q_rms_norm = SparseMultiHeadRMSNorm(channels // num_heads, num_heads)
71
+ self.k_rms_norm = SparseMultiHeadRMSNorm(channels // num_heads, num_heads)
72
+
73
+ self.to_out = nn.Linear(channels, channels)
74
+
75
+ if use_rope:
76
+ self.rope = RotaryPositionEmbedder(channels)
77
+
78
+ @staticmethod
79
+ def _linear(module: nn.Linear, x: Union[SparseTensor, torch.Tensor]) -> Union[SparseTensor, torch.Tensor]:
80
+ if isinstance(x, SparseTensor):
81
+ return x.replace(module(x.feats))
82
+ else:
83
+ return module(x)
84
+
85
+ @staticmethod
86
+ def _reshape_chs(x: Union[SparseTensor, torch.Tensor], shape: Tuple[int, ...]) -> Union[SparseTensor, torch.Tensor]:
87
+ if isinstance(x, SparseTensor):
88
+ return x.reshape(*shape)
89
+ else:
90
+ return x.reshape(*x.shape[:2], *shape)
91
+
92
+ def _fused_pre(self, x: Union[SparseTensor, torch.Tensor], num_fused: int) -> Union[SparseTensor, torch.Tensor]:
93
+ if isinstance(x, SparseTensor):
94
+ x_feats = x.feats.unsqueeze(0)
95
+ else:
96
+ x_feats = x
97
+ x_feats = x_feats.reshape(*x_feats.shape[:2], num_fused, self.num_heads, -1)
98
+ return x.replace(x_feats.squeeze(0)) if isinstance(x, SparseTensor) else x_feats
99
+
100
+ def _rope(self, qkv: SparseTensor) -> SparseTensor:
101
+ q, k, v = qkv.feats.unbind(dim=1) # [T, H, C]
102
+ q, k = self.rope(q, k, qkv.coords[:, 1:])
103
+ qkv = qkv.replace(torch.stack([q, k, v], dim=1))
104
+ return qkv
105
+
106
+ def forward(self, x: Union[SparseTensor, torch.Tensor], context: Optional[Union[SparseTensor, torch.Tensor]] = None) -> Union[SparseTensor, torch.Tensor]:
107
+ if self._type == "self":
108
+ qkv = self._linear(self.to_qkv, x)
109
+ qkv = self._fused_pre(qkv, num_fused=3)
110
+ if self.use_rope:
111
+ qkv = self._rope(qkv)
112
+ if self.qk_rms_norm:
113
+ q, k, v = qkv.unbind(dim=1)
114
+ q = self.q_rms_norm(q)
115
+ k = self.k_rms_norm(k)
116
+ qkv = qkv.replace(torch.stack([q.feats, k.feats, v.feats], dim=1))
117
+ if self.attn_mode == "full":
118
+ h = sparse_scaled_dot_product_attention(qkv)
119
+ elif self.attn_mode == "serialized":
120
+ h = sparse_serialized_scaled_dot_product_self_attention(
121
+ qkv, self.window_size, serialize_mode=self.serialize_mode, shift_sequence=self.shift_sequence, shift_window=self.shift_window
122
+ )
123
+ elif self.attn_mode == "windowed":
124
+ h = sparse_windowed_scaled_dot_product_self_attention(
125
+ qkv, self.window_size, shift_window=self.shift_window
126
+ )
127
+ else:
128
+ q = self._linear(self.to_q, x)
129
+ q = self._reshape_chs(q, (self.num_heads, -1))
130
+ kv = self._linear(self.to_kv, context)
131
+ kv = self._fused_pre(kv, num_fused=2)
132
+ if self.qk_rms_norm:
133
+ q = self.q_rms_norm(q)
134
+ k, v = kv.unbind(dim=1)
135
+ k = self.k_rms_norm(k)
136
+ kv = kv.replace(torch.stack([k.feats, v.feats], dim=1))
137
+ h = sparse_scaled_dot_product_attention(q, kv)
138
+ h = self._reshape_chs(h, (-1,))
139
+ h = self._linear(self.to_out, h)
140
+ return h
141
+
142
+
143
+ class SparseMultiHeadAttentionWeighted(nn.Module):
144
+ def __init__(
145
+ self,
146
+ channels: int,
147
+ num_heads: int,
148
+ ctx_channels: Optional[int] = None,
149
+ type: Literal["self", "cross"] = "self",
150
+ attn_mode: Literal["full", "serialized", "windowed"] = "full",
151
+ window_size: Optional[int] = None,
152
+ shift_sequence: Optional[int] = None,
153
+ shift_window: Optional[Tuple[int, int, int]] = None,
154
+ serialize_mode: Optional[SerializeMode] = None,
155
+ qkv_bias: bool = True,
156
+ use_rope: bool = False,
157
+ qk_rms_norm: bool = False,
158
+ ):
159
+ super().__init__()
160
+ assert channels % num_heads == 0
161
+ assert type in ["self", "cross"], f"Invalid attention type: {type}"
162
+ assert attn_mode in ["full", "serialized", "windowed"], f"Invalid attention mode: {attn_mode}"
163
+ assert type == "self" or attn_mode == "full", "Cross-attention only supports full attention"
164
+ assert type == "self" or use_rope is False, "Rotary position embeddings only supported for self-attention"
165
+ self.channels = channels
166
+ self.ctx_channels = ctx_channels if ctx_channels is not None else channels
167
+ self.num_heads = num_heads
168
+ self._type = type
169
+ self.attn_mode = attn_mode
170
+ self.window_size = window_size
171
+ self.shift_sequence = shift_sequence
172
+ self.shift_window = shift_window
173
+ self.serialize_mode = serialize_mode
174
+ self.use_rope = use_rope
175
+ self.qk_rms_norm = qk_rms_norm
176
+
177
+ if self._type == "self":
178
+ self.to_qkv = nn.Linear(channels, channels * 3, bias=qkv_bias)
179
+ else:
180
+ self.to_q = nn.Linear(channels, channels, bias=qkv_bias)
181
+ self.to_kv = nn.Linear(self.ctx_channels, channels * 2, bias=qkv_bias)
182
+
183
+ if self.qk_rms_norm:
184
+ self.q_rms_norm = SparseMultiHeadRMSNorm(channels // num_heads, num_heads)
185
+ self.k_rms_norm = SparseMultiHeadRMSNorm(channels // num_heads, num_heads)
186
+
187
+ self.to_out = nn.Linear(channels, channels)
188
+
189
+ if use_rope:
190
+ self.rope = RotaryPositionEmbedder(channels)
191
+
192
+ @staticmethod
193
+ def _linear(module: nn.Linear, x: Union[SparseTensor, torch.Tensor]) -> Union[SparseTensor, torch.Tensor]:
194
+ if isinstance(x, SparseTensor):
195
+ return x.replace(module(x.feats))
196
+ else:
197
+ return module(x)
198
+
199
+ @staticmethod
200
+ def _reshape_chs(x: Union[SparseTensor, torch.Tensor], shape: Tuple[int, ...]) -> Union[SparseTensor, torch.Tensor]:
201
+ if isinstance(x, SparseTensor):
202
+ return x.reshape(*shape)
203
+ else:
204
+ return x.reshape(*x.shape[:2], *shape)
205
+
206
+ def _fused_pre(self, x: Union[SparseTensor, torch.Tensor], num_fused: int) -> Union[SparseTensor, torch.Tensor]:
207
+ if isinstance(x, SparseTensor):
208
+ x_feats = x.feats.unsqueeze(0)
209
+ else:
210
+ x_feats = x
211
+ x_feats = x_feats.reshape(*x_feats.shape[:2], num_fused, self.num_heads, -1)
212
+ return x.replace(x_feats.squeeze(0)) if isinstance(x, SparseTensor) else x_feats
213
+
214
+ def _rope(self, qkv: SparseTensor) -> SparseTensor:
215
+ q, k, v = qkv.feats.unbind(dim=1) # [T, H, C]
216
+ q, k = self.rope(q, k, qkv.coords[:, 1:])
217
+ qkv = qkv.replace(torch.stack([q, k, v], dim=1))
218
+ return qkv
219
+
220
+ def forward(self, x: Union[SparseTensor, torch.Tensor], context: Optional[Union[SparseTensor, torch.Tensor]] = None, mask_weight: Optional[Union[SparseTensor, torch.Tensor]] = None) -> Union[SparseTensor, torch.Tensor]:
221
+ if self._type == "self":
222
+ qkv = self._linear(self.to_qkv, x)
223
+ qkv = self._fused_pre(qkv, num_fused=3)
224
+ if self.use_rope:
225
+ qkv = self._rope(qkv)
226
+ if self.qk_rms_norm:
227
+ q, k, v = qkv.unbind(dim=1)
228
+ q = self.q_rms_norm(q)
229
+ k = self.k_rms_norm(k)
230
+ qkv = qkv.replace(torch.stack([q.feats, k.feats, v.feats], dim=1))
231
+ if self.attn_mode == "full":
232
+ h = sparse_scaled_dot_product_attention(qkv)
233
+ elif self.attn_mode == "serialized":
234
+ h = sparse_serialized_scaled_dot_product_self_attention(
235
+ qkv, self.window_size, serialize_mode=self.serialize_mode, shift_sequence=self.shift_sequence, shift_window=self.shift_window
236
+ )
237
+ elif self.attn_mode == "windowed":
238
+ h = sparse_windowed_scaled_dot_product_self_attention(
239
+ qkv, self.window_size, shift_window=self.shift_window
240
+ )
241
+ else:
242
+ q = self._linear(self.to_q, x)
243
+ q = self._reshape_chs(q, (self.num_heads, -1))
244
+ kv = self._linear(self.to_kv, context)
245
+ kv = self._fused_pre(kv, num_fused=2)
246
+ if self.qk_rms_norm:
247
+ q = self.q_rms_norm(q)
248
+ k, v = kv.unbind(dim=1)
249
+ k = self.k_rms_norm(k)
250
+ kv = kv.replace(torch.stack([k.feats, v.feats], dim=1))
251
+ if mask_weight is not None:
252
+ h = self.sparse_scaled_dot_product_attention_weighted(q, kv, mask_weight)
253
+ else:
254
+ h = sparse_scaled_dot_product_attention(q, kv)
255
+ h = self._reshape_chs(h, (-1,))
256
+ h = self._linear(self.to_out, h)
257
+ return h
258
+
259
+ def sparse_scaled_dot_product_attention_weighted(self, q, kv, mask_weight, eps=1e-6):
260
+ s = q
261
+ q_seqlen = [q.layout[i].stop - q.layout[i].start for i in range(q.shape[0])]
262
+ q = q.feats # [T_Q, H, C]
263
+ T_q, H_q, C_q = q.shape
264
+
265
+ assert len(kv.shape) == 5, f"Invalid shape for kv, got {kv.shape}, expected [N, L, 2, H, C]"
266
+ N, L, _, H_kv, C_kv = kv.shape
267
+ kv_seqlen = [L] * N
268
+ kv = kv.reshape(N * L, 2, H_kv, C_kv) # [T_KV, 2, H, C]
269
+ k, v = kv.unbind(dim=1) # [T_KV, H, C]
270
+
271
+ q = q.unsqueeze(0)
272
+ k = k.unsqueeze(0)
273
+ v = v.unsqueeze(0)
274
+
275
+ # prepend unit weights for the 5 leading tokens from the mask patcher (cls/register tokens) so they are never down-weighted
276
+ B, T_mask, _ = mask_weight.shape
277
+ cls_weight = torch.ones(B, 5, device=mask_weight.device, dtype=mask_weight.dtype)
278
+ mask_weight = torch.cat([cls_weight, mask_weight.squeeze(2)], dim=1)
279
+ mask_weight = mask_weight.reshape(B*(T_mask+5)).unsqueeze(0).unsqueeze(0).unsqueeze(0) # [1, 1, 1, T_KV]
280
+ mask_bias = torch.log(mask_weight + eps) # [1, 1, 1, T_KV]
281
+
282
+ q = q.permute(0, 2, 1, 3) # [1, num_heads, T_Q, head_dim]
283
+ k = k.permute(0, 2, 1, 3) # [1, num_heads, T_KV, head_dim]
284
+ v = v.permute(0, 2, 1, 3) # [1, num_heads, T_KV, head_dim]
285
+
286
+ attn_logits = q @ k.transpose(-2, -1) / math.sqrt(C_kv)
287
+ attn_logits += mask_bias
288
+ attn_weights = F.softmax(attn_logits, dim=-1)
289
+ output = attn_weights @ v
290
+ output = output.permute(0, 2, 1, 3).squeeze(0)
291
+
292
+ return s.replace(output)
293
+
294
+
295
+
296
+
297
+
298
+
299
+
300
+
301
+
302
+
303
+
304
+
305
+
Amodal3R/modules/sparse/attention/serialized_attn.py CHANGED
@@ -1,3 +1,193 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:57f0a6a1a79fb6fa3ad153769226c8f7f36d122b64017a3b029c880385b5f306
3
- size 8258
1
+ from typing import *
2
+ from enum import Enum
3
+ import torch
4
+ import math
5
+ from .. import SparseTensor
6
+ from .. import DEBUG, ATTN
7
+
8
+ if ATTN == 'xformers':
9
+ import xformers.ops as xops
10
+ elif ATTN == 'flash_attn':
11
+ import flash_attn
12
+ else:
13
+ raise ValueError(f"Unknown attention module: {ATTN}")
14
+
15
+
16
+ __all__ = [
17
+ 'sparse_serialized_scaled_dot_product_self_attention',
18
+ ]
19
+
20
+
21
+ class SerializeMode(Enum):
22
+ Z_ORDER = 0
23
+ Z_ORDER_TRANSPOSED = 1
24
+ HILBERT = 2
25
+ HILBERT_TRANSPOSED = 3
26
+
27
+
28
+ SerializeModes = [
29
+ SerializeMode.Z_ORDER,
30
+ SerializeMode.Z_ORDER_TRANSPOSED,
31
+ SerializeMode.HILBERT,
32
+ SerializeMode.HILBERT_TRANSPOSED
33
+ ]
34
+
35
+
36
+ def calc_serialization(
37
+ tensor: SparseTensor,
38
+ window_size: int,
39
+ serialize_mode: SerializeMode = SerializeMode.Z_ORDER,
40
+ shift_sequence: int = 0,
41
+ shift_window: Tuple[int, int, int] = (0, 0, 0)
42
+ ) -> Tuple[torch.Tensor, torch.Tensor, List[int], List[int]]:
43
+ """
44
+ Calculate serialization and partitioning for a set of coordinates.
45
+
46
+ Args:
47
+ tensor (SparseTensor): The input tensor.
48
+ window_size (int): The window size to use.
49
+ serialize_mode (SerializeMode): The serialization mode to use.
50
+ shift_sequence (int): The shift of serialized sequence.
51
+ shift_window (Tuple[int, int, int]): The shift of serialized coordinates.
52
+
53
+ Returns:
54
+ (torch.Tensor, torch.Tensor, List[int], List[int]): Forward indices, backward indices, sequence lengths, and sequence batch indices.
55
+ """
56
+ fwd_indices = []
57
+ bwd_indices = []
58
+ seq_lens = []
59
+ seq_batch_indices = []
60
+ offsets = [0]
61
+
62
+ if 'vox2seq' not in globals():
63
+ import vox2seq
64
+
65
+ # Serialize the input
66
+ serialize_coords = tensor.coords[:, 1:].clone()
67
+ serialize_coords += torch.tensor(shift_window, dtype=torch.int32, device=tensor.device).reshape(1, 3)
68
+ if serialize_mode == SerializeMode.Z_ORDER:
69
+ code = vox2seq.encode(serialize_coords, mode='z_order', permute=[0, 1, 2])
70
+ elif serialize_mode == SerializeMode.Z_ORDER_TRANSPOSED:
71
+ code = vox2seq.encode(serialize_coords, mode='z_order', permute=[1, 0, 2])
72
+ elif serialize_mode == SerializeMode.HILBERT:
73
+ code = vox2seq.encode(serialize_coords, mode='hilbert', permute=[0, 1, 2])
74
+ elif serialize_mode == SerializeMode.HILBERT_TRANSPOSED:
75
+ code = vox2seq.encode(serialize_coords, mode='hilbert', permute=[1, 0, 2])
76
+ else:
77
+ raise ValueError(f"Unknown serialize mode: {serialize_mode}")
78
+
79
+ for bi, s in enumerate(tensor.layout):
80
+ num_points = s.stop - s.start
81
+ num_windows = (num_points + window_size - 1) // window_size
82
+ valid_window_size = num_points / num_windows
83
+ to_ordered = torch.argsort(code[s.start:s.stop])
84
+ if num_windows == 1:
85
+ fwd_indices.append(to_ordered)
86
+ bwd_indices.append(torch.zeros_like(to_ordered).scatter_(0, to_ordered, torch.arange(num_points, device=tensor.device)))
87
+ fwd_indices[-1] += s.start
88
+ bwd_indices[-1] += offsets[-1]
89
+ seq_lens.append(num_points)
90
+ seq_batch_indices.append(bi)
91
+ offsets.append(offsets[-1] + seq_lens[-1])
92
+ else:
93
+ # Partition the input
94
+ offset = 0
95
+ mids = [(i + 0.5) * valid_window_size + shift_sequence for i in range(num_windows)]
96
+ split = [math.floor(i * valid_window_size + shift_sequence) for i in range(num_windows + 1)]
97
+ bwd_index = torch.zeros((num_points,), dtype=torch.int64, device=tensor.device)
98
+ for i in range(num_windows):
99
+ mid = mids[i]
100
+ valid_start = split[i]
101
+ valid_end = split[i + 1]
102
+ padded_start = math.floor(mid - 0.5 * window_size)
103
+ padded_end = padded_start + window_size
104
+ fwd_indices.append(to_ordered[torch.arange(padded_start, padded_end, device=tensor.device) % num_points])
105
+ offset += valid_start - padded_start
106
+ bwd_index.scatter_(0, fwd_indices[-1][valid_start-padded_start:valid_end-padded_start], torch.arange(offset, offset + valid_end - valid_start, device=tensor.device))
107
+ offset += padded_end - valid_start
108
+ fwd_indices[-1] += s.start
109
+ seq_lens.extend([window_size] * num_windows)
110
+ seq_batch_indices.extend([bi] * num_windows)
111
+ bwd_indices.append(bwd_index + offsets[-1])
112
+ offsets.append(offsets[-1] + num_windows * window_size)
113
+
114
+ fwd_indices = torch.cat(fwd_indices)
115
+ bwd_indices = torch.cat(bwd_indices)
116
+
117
+ return fwd_indices, bwd_indices, seq_lens, seq_batch_indices
118
+
119
+
120
+ def sparse_serialized_scaled_dot_product_self_attention(
121
+ qkv: SparseTensor,
122
+ window_size: int,
123
+ serialize_mode: SerializeMode = SerializeMode.Z_ORDER,
124
+ shift_sequence: int = 0,
125
+ shift_window: Tuple[int, int, int] = (0, 0, 0)
126
+ ) -> SparseTensor:
127
+ """
128
+ Apply serialized scaled dot product self attention to a sparse tensor.
129
+
130
+ Args:
131
+ qkv (SparseTensor): [N, *, 3, H, C] sparse tensor containing Qs, Ks, and Vs.
132
+ window_size (int): The window size to use.
133
+ serialize_mode (SerializeMode): The serialization mode to use.
134
+ shift_sequence (int): The shift of serialized sequence.
135
+ shift_window (Tuple[int, int, int]): The shift of serialized coordinates.
136
+ shift (int): The shift to use.
137
+ """
138
+ assert len(qkv.shape) == 4 and qkv.shape[1] == 3, f"Invalid shape for qkv, got {qkv.shape}, expected [N, *, 3, H, C]"
139
+
140
+ serialization_spatial_cache_name = f'serialization_{serialize_mode}_{window_size}_{shift_sequence}_{shift_window}'
141
+ serialization_spatial_cache = qkv.get_spatial_cache(serialization_spatial_cache_name)
142
+ if serialization_spatial_cache is None:
143
+ fwd_indices, bwd_indices, seq_lens, seq_batch_indices = calc_serialization(qkv, window_size, serialize_mode, shift_sequence, shift_window)
144
+ qkv.register_spatial_cache(serialization_spatial_cache_name, (fwd_indices, bwd_indices, seq_lens, seq_batch_indices))
145
+ else:
146
+ fwd_indices, bwd_indices, seq_lens, seq_batch_indices = serialization_spatial_cache
147
+
148
+ M = fwd_indices.shape[0]
149
+ T = qkv.feats.shape[0]
150
+ H = qkv.feats.shape[2]
151
+ C = qkv.feats.shape[3]
152
+
153
+ qkv_feats = qkv.feats[fwd_indices] # [M, 3, H, C]
154
+
155
+ if DEBUG:
156
+ start = 0
157
+ qkv_coords = qkv.coords[fwd_indices]
158
+ for i in range(len(seq_lens)):
159
+ assert (qkv_coords[start:start+seq_lens[i], 0] == seq_batch_indices[i]).all(), f"SparseWindowedScaledDotProductSelfAttention: batch index mismatch"
160
+ start += seq_lens[i]
161
+
162
+ if all([seq_len == window_size for seq_len in seq_lens]):
163
+ B = len(seq_lens)
164
+ N = window_size
165
+ qkv_feats = qkv_feats.reshape(B, N, 3, H, C)
166
+ if ATTN == 'xformers':
167
+ q, k, v = qkv_feats.unbind(dim=2) # [B, N, H, C]
168
+ out = xops.memory_efficient_attention(q, k, v) # [B, N, H, C]
169
+ elif ATTN == 'flash_attn':
170
+ out = flash_attn.flash_attn_qkvpacked_func(qkv_feats) # [B, N, H, C]
171
+ else:
172
+ raise ValueError(f"Unknown attention module: {ATTN}")
173
+ out = out.reshape(B * N, H, C) # [M, H, C]
174
+ else:
175
+ if ATTN == 'xformers':
176
+ q, k, v = qkv_feats.unbind(dim=1) # [M, H, C]
177
+ q = q.unsqueeze(0) # [1, M, H, C]
178
+ k = k.unsqueeze(0) # [1, M, H, C]
179
+ v = v.unsqueeze(0) # [1, M, H, C]
180
+ mask = xops.fmha.BlockDiagonalMask.from_seqlens(seq_lens)
181
+ out = xops.memory_efficient_attention(q, k, v, mask)[0] # [M, H, C]
182
+ elif ATTN == 'flash_attn':
183
+ cu_seqlens = torch.cat([torch.tensor([0]), torch.cumsum(torch.tensor(seq_lens), dim=0)], dim=0) \
184
+ .to(qkv.device).int()
185
+ out = flash_attn.flash_attn_varlen_qkvpacked_func(qkv_feats, cu_seqlens, max(seq_lens)) # [M, H, C]
186
+
187
+ out = out[bwd_indices] # [T, H, C]
188
+
189
+ if DEBUG:
190
+ qkv_coords = qkv_coords[bwd_indices]
191
+ assert torch.equal(qkv_coords, qkv.coords), "SparseWindowedScaledDotProductSelfAttention: coordinate mismatch"
192
+
193
+ return qkv.replace(out)
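
For intuition, the partitioning above pads every window to exactly `window_size` tokens by wrapping circularly around the serialized sequence, so fixed-size attention kernels can be used. A self-contained sketch of that index math on a plain 1-D sequence (made-up sizes, no vox2seq or attention backend required):

    import math
    import torch

    num_points, window_size, shift_sequence = 10, 4, 0
    num_windows = (num_points + window_size - 1) // window_size   # 3 windows for 10 points
    valid_window_size = num_points / num_windows                  # ~3.33 "valid" tokens per window

    for i in range(num_windows):
        mid = (i + 0.5) * valid_window_size + shift_sequence
        padded_start = math.floor(mid - 0.5 * window_size)
        idx = torch.arange(padded_start, padded_start + window_size) % num_points
        print(i, idx.tolist())   # every window has exactly window_size indices, wrapping around
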
Amodal3R/modules/sparse/attention/windowed_attn.py CHANGED
@@ -1,3 +1,135 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:b6730399cbe6dff15f9f3863a06c6076758f9b45377d07a31b54c8994afdf344
3
- size 6075
1
+ from typing import *
2
+ import torch
3
+ import math
4
+ from .. import SparseTensor
5
+ from .. import DEBUG, ATTN
6
+
7
+ if ATTN == 'xformers':
8
+ import xformers.ops as xops
9
+ elif ATTN == 'flash_attn':
10
+ import flash_attn
11
+ else:
12
+ raise ValueError(f"Unknown attention module: {ATTN}")
13
+
14
+
15
+ __all__ = [
16
+ 'sparse_windowed_scaled_dot_product_self_attention',
17
+ ]
18
+
19
+
20
+ def calc_window_partition(
21
+ tensor: SparseTensor,
22
+ window_size: Union[int, Tuple[int, ...]],
23
+ shift_window: Union[int, Tuple[int, ...]] = 0
24
+ ) -> Tuple[torch.Tensor, torch.Tensor, List[int], List[int]]:
25
+ """
26
+ Calculate window partitioning for a set of coordinates.
27
+
28
+ Args:
29
+ tensor (SparseTensor): The input tensor.
30
+ window_size (int): The window size to use.
31
+ shift_window (Tuple[int, ...]): The shift of serialized coordinates.
32
+
33
+ Returns:
34
+ (torch.Tensor): Forwards indices.
35
+ (torch.Tensor): Backwards indices.
36
+ (List[int]): Sequence lengths.
37
+ (List[int]): Sequence batch indices.
38
+ """
39
+ DIM = tensor.coords.shape[1] - 1
40
+ shift_window = (shift_window,) * DIM if isinstance(shift_window, int) else shift_window
41
+ window_size = (window_size,) * DIM if isinstance(window_size, int) else window_size
42
+ shifted_coords = tensor.coords.clone().detach()
43
+ shifted_coords[:, 1:] += torch.tensor(shift_window, device=tensor.device, dtype=torch.int32).unsqueeze(0)
44
+
45
+ MAX_COORDS = shifted_coords[:, 1:].max(dim=0).values.tolist()
46
+ NUM_WINDOWS = [math.ceil((mc + 1) / ws) for mc, ws in zip(MAX_COORDS, window_size)]
47
+ OFFSET = torch.cumprod(torch.tensor([1] + NUM_WINDOWS[::-1]), dim=0).tolist()[::-1]
48
+
49
+ shifted_coords[:, 1:] //= torch.tensor(window_size, device=tensor.device, dtype=torch.int32).unsqueeze(0)
50
+ shifted_indices = (shifted_coords * torch.tensor(OFFSET, device=tensor.device, dtype=torch.int32).unsqueeze(0)).sum(dim=1)
51
+ fwd_indices = torch.argsort(shifted_indices)
52
+ bwd_indices = torch.empty_like(fwd_indices)
53
+ bwd_indices[fwd_indices] = torch.arange(fwd_indices.shape[0], device=tensor.device)
54
+ seq_lens = torch.bincount(shifted_indices)
55
+ seq_batch_indices = torch.arange(seq_lens.shape[0], device=tensor.device, dtype=torch.int32) // OFFSET[0]
56
+ mask = seq_lens != 0
57
+ seq_lens = seq_lens[mask].tolist()
58
+ seq_batch_indices = seq_batch_indices[mask].tolist()
59
+
60
+ return fwd_indices, bwd_indices, seq_lens, seq_batch_indices
61
+
62
+
63
+ def sparse_windowed_scaled_dot_product_self_attention(
64
+ qkv: SparseTensor,
65
+ window_size: int,
66
+ shift_window: Tuple[int, int, int] = (0, 0, 0)
67
+ ) -> SparseTensor:
68
+ """
69
+ Apply windowed scaled dot product self attention to a sparse tensor.
70
+
71
+ Args:
72
+ qkv (SparseTensor): [N, *, 3, H, C] sparse tensor containing Qs, Ks, and Vs.
73
+ window_size (int): The window size to use.
74
+ shift_window (Tuple[int, int, int]): The shift of serialized coordinates.
75
+ shift (int): The shift to use.
76
+ """
77
+ assert len(qkv.shape) == 4 and qkv.shape[1] == 3, f"Invalid shape for qkv, got {qkv.shape}, expected [N, *, 3, H, C]"
78
+
79
+ serialization_spatial_cache_name = f'window_partition_{window_size}_{shift_window}'
80
+ serialization_spatial_cache = qkv.get_spatial_cache(serialization_spatial_cache_name)
81
+ if serialization_spatial_cache is None:
82
+ fwd_indices, bwd_indices, seq_lens, seq_batch_indices = calc_window_partition(qkv, window_size, shift_window)
83
+ qkv.register_spatial_cache(serialization_spatial_cache_name, (fwd_indices, bwd_indices, seq_lens, seq_batch_indices))
84
+ else:
85
+ fwd_indices, bwd_indices, seq_lens, seq_batch_indices = serialization_spatial_cache
86
+
87
+ M = fwd_indices.shape[0]
88
+ T = qkv.feats.shape[0]
89
+ H = qkv.feats.shape[2]
90
+ C = qkv.feats.shape[3]
91
+
92
+ qkv_feats = qkv.feats[fwd_indices] # [M, 3, H, C]
93
+
94
+ if DEBUG:
95
+ start = 0
96
+ qkv_coords = qkv.coords[fwd_indices]
97
+ for i in range(len(seq_lens)):
98
+ seq_coords = qkv_coords[start:start+seq_lens[i]]
99
+ assert (seq_coords[:, 0] == seq_batch_indices[i]).all(), f"SparseWindowedScaledDotProductSelfAttention: batch index mismatch"
100
+ assert (seq_coords[:, 1:].max(dim=0).values - seq_coords[:, 1:].min(dim=0).values < window_size).all(), \
101
+ f"SparseWindowedScaledDotProductSelfAttention: window size exceeded"
102
+ start += seq_lens[i]
103
+
104
+ if all([seq_len == window_size for seq_len in seq_lens]):
105
+ B = len(seq_lens)
106
+ N = window_size
107
+ qkv_feats = qkv_feats.reshape(B, N, 3, H, C)
108
+ if ATTN == 'xformers':
109
+ q, k, v = qkv_feats.unbind(dim=2) # [B, N, H, C]
110
+ out = xops.memory_efficient_attention(q, k, v) # [B, N, H, C]
111
+ elif ATTN == 'flash_attn':
112
+ out = flash_attn.flash_attn_qkvpacked_func(qkv_feats) # [B, N, H, C]
113
+ else:
114
+ raise ValueError(f"Unknown attention module: {ATTN}")
115
+ out = out.reshape(B * N, H, C) # [M, H, C]
116
+ else:
117
+ if ATTN == 'xformers':
118
+ q, k, v = qkv_feats.unbind(dim=1) # [M, H, C]
119
+ q = q.unsqueeze(0) # [1, M, H, C]
120
+ k = k.unsqueeze(0) # [1, M, H, C]
121
+ v = v.unsqueeze(0) # [1, M, H, C]
122
+ mask = xops.fmha.BlockDiagonalMask.from_seqlens(seq_lens)
123
+ out = xops.memory_efficient_attention(q, k, v, mask)[0] # [M, H, C]
124
+ elif ATTN == 'flash_attn':
125
+ cu_seqlens = torch.cat([torch.tensor([0]), torch.cumsum(torch.tensor(seq_lens), dim=0)], dim=0) \
126
+ .to(qkv.device).int()
127
+ out = flash_attn.flash_attn_varlen_qkvpacked_func(qkv_feats, cu_seqlens, max(seq_lens)) # [M, H, C]
128
+
129
+ out = out[bwd_indices] # [T, H, C]
130
+
131
+ if DEBUG:
132
+ qkv_coords = qkv_coords[bwd_indices]
133
+ assert torch.equal(qkv_coords, qkv.coords), "SparseWindowedScaledDotProductSelfAttention: coordinate mismatch"
134
+
135
+ return qkv.replace(out)
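
The window partition reduces to integer-dividing each voxel coordinate by the window size and flattening the (batch, window) indices into one bucket id, which is then sorted. A runnable sketch on raw coordinates (hypothetical values):

    import math
    import torch

    window_size = (2, 2, 2)
    coords = torch.tensor([[0, 0, 0, 0],     # (batch, x, y, z)
                           [0, 1, 1, 1],
                           [0, 2, 0, 0],
                           [1, 3, 3, 3]], dtype=torch.int32)

    max_coords = coords[:, 1:].max(dim=0).values.tolist()
    num_windows = [math.ceil((m + 1) / w) for m, w in zip(max_coords, window_size)]
    offset = torch.cumprod(torch.tensor([1] + num_windows[::-1]), dim=0).tolist()[::-1]

    win = coords.clone()
    win[:, 1:] //= torch.tensor(window_size, dtype=torch.int32)
    bucket = (win * torch.tensor(offset, dtype=torch.int32)).sum(dim=1)
    fwd = torch.argsort(bucket)              # groups voxels window by window, per batch
    print(bucket.tolist(), fwd.tolist())     # e.g. [0, 0, 4, 15] and a grouping permutation
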
Amodal3R/modules/sparse/basic.py CHANGED
@@ -1,3 +1,471 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:874754fac79e0b9d5d08e44ec3c1e72873ed4d3b69816fcb52023407ef3eb738
3
- size 17776
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ from . import BACKEND, DEBUG
5
+ SparseTensorData = None # Lazy import
6
+
7
+
8
+ __all__ = [
9
+ 'SparseTensor',
10
+ 'sparse_batch_broadcast',
11
+ 'sparse_batch_op',
12
+ 'sparse_cat',
13
+ 'sparse_unbind',
14
+ ]
15
+
16
+
17
+ class SparseTensor:
18
+ """
19
+ Sparse tensor with support for both torchsparse and spconv backends.
20
+
21
+ Parameters:
22
+ - feats (torch.Tensor): Features of the sparse tensor.
23
+ - coords (torch.Tensor): Coordinates of the sparse tensor.
24
+ - shape (torch.Size): Shape of the sparse tensor.
25
+ - layout (List[slice]): Layout of the sparse tensor for each batch
26
+ - data (SparseTensorData): Sparse tensor data used for convolution
27
+
28
+ NOTE:
29
+ - Data belonging to the same batch should be contiguous.
30
+ - Coords should be in [0, 1023]
31
+ """
32
+ @overload
33
+ def __init__(self, feats: torch.Tensor, coords: torch.Tensor, shape: Optional[torch.Size] = None, layout: Optional[List[slice]] = None, **kwargs): ...
34
+
35
+ @overload
36
+ def __init__(self, data, shape: Optional[torch.Size] = None, layout: Optional[List[slice]] = None, **kwargs): ...
37
+
38
+ def __init__(self, *args, **kwargs):
39
+ # Lazy import of sparse tensor backend
40
+ global SparseTensorData
41
+ if SparseTensorData is None:
42
+ import importlib
43
+ if BACKEND == 'torchsparse':
44
+ SparseTensorData = importlib.import_module('torchsparse').SparseTensor
45
+ elif BACKEND == 'spconv':
46
+ SparseTensorData = importlib.import_module('spconv.pytorch').SparseConvTensor
47
+
48
+ method_id = 0
49
+ if len(args) != 0:
50
+ method_id = 0 if isinstance(args[0], torch.Tensor) else 1
51
+ else:
52
+ method_id = 1 if 'data' in kwargs else 0
53
+
54
+ if method_id == 0:
55
+ feats, coords, shape, layout = args + (None,) * (4 - len(args))
56
+ if 'feats' in kwargs:
57
+ feats = kwargs['feats']
58
+ del kwargs['feats']
59
+ if 'coords' in kwargs:
60
+ coords = kwargs['coords']
61
+ del kwargs['coords']
62
+ if 'shape' in kwargs:
63
+ shape = kwargs['shape']
64
+ del kwargs['shape']
65
+ if 'layout' in kwargs:
66
+ layout = kwargs['layout']
67
+ del kwargs['layout']
68
+
69
+ if shape is None:
70
+ shape = self.__cal_shape(feats, coords)
71
+ if layout is None:
72
+ layout = self.__cal_layout(coords, shape[0])
73
+ if BACKEND == 'torchsparse':
74
+ self.data = SparseTensorData(feats, coords, **kwargs)
75
+ elif BACKEND == 'spconv':
76
+ spatial_shape = list(coords.max(0)[0] + 1)[1:]
77
+ self.data = SparseTensorData(feats.reshape(feats.shape[0], -1), coords, spatial_shape, shape[0], **kwargs)
78
+ self.data._features = feats
79
+ elif method_id == 1:
80
+ data, shape, layout = args + (None,) * (3 - len(args))
81
+ if 'data' in kwargs:
82
+ data = kwargs['data']
83
+ del kwargs['data']
84
+ if 'shape' in kwargs:
85
+ shape = kwargs['shape']
86
+ del kwargs['shape']
87
+ if 'layout' in kwargs:
88
+ layout = kwargs['layout']
89
+ del kwargs['layout']
90
+
91
+ self.data = data
92
+ if shape is None:
93
+ shape = self.__cal_shape(self.feats, self.coords)
94
+ if layout is None:
95
+ layout = self.__cal_layout(self.coords, shape[0])
96
+ self._shape = shape
97
+ self._layout = layout
98
+ self._scale = kwargs.get('scale', (1, 1, 1))
99
+ self._spatial_cache = kwargs.get('spatial_cache', {})
100
+
101
+ if DEBUG:
102
+ try:
103
+ assert self.feats.shape[0] == self.coords.shape[0], f"Invalid feats shape: {self.feats.shape}, coords shape: {self.coords.shape}"
104
+ assert self.shape == self.__cal_shape(self.feats, self.coords), f"Invalid shape: {self.shape}"
105
+ assert self.layout == self.__cal_layout(self.coords, self.shape[0]), f"Invalid layout: {self.layout}"
106
+ for i in range(self.shape[0]):
107
+ assert torch.all(self.coords[self.layout[i], 0] == i), f"The data of batch {i} is not contiguous"
108
+ except Exception as e:
109
+ print('Debugging information:')
110
+ print(f"- Shape: {self.shape}")
111
+ print(f"- Layout: {self.layout}")
112
+ print(f"- Scale: {self._scale}")
113
+ print(f"- Coords: {self.coords}")
114
+ raise e
115
+
116
+ def __cal_shape(self, feats, coords):
117
+ shape = []
118
+ shape.append(coords[:, 0].max().item() + 1)
119
+ shape.extend([*feats.shape[1:]])
120
+ return torch.Size(shape)
121
+
122
+ def __cal_layout(self, coords, batch_size):
123
+ seq_len = torch.bincount(coords[:, 0], minlength=batch_size)
124
+ offset = torch.cumsum(seq_len, dim=0)
125
+ layout = [slice((offset[i] - seq_len[i]).item(), offset[i].item()) for i in range(batch_size)]
126
+ return layout
127
+
128
+ @property
129
+ def shape(self) -> torch.Size:
130
+ return self._shape
131
+
132
+ def dim(self) -> int:
133
+ return len(self.shape)
134
+
135
+ @property
136
+ def layout(self) -> List[slice]:
137
+ return self._layout
138
+
139
+ @property
140
+ def feats(self) -> torch.Tensor:
141
+ if BACKEND == 'torchsparse':
142
+ return self.data.F
143
+ elif BACKEND == 'spconv':
144
+ return self.data.features
145
+
146
+ @feats.setter
147
+ def feats(self, value: torch.Tensor):
148
+ if BACKEND == 'torchsparse':
149
+ self.data.F = value
150
+ elif BACKEND == 'spconv':
151
+ self.data.features = value
152
+
153
+ @property
154
+ def coords(self) -> torch.Tensor:
155
+ if BACKEND == 'torchsparse':
156
+ return self.data.C
157
+ elif BACKEND == 'spconv':
158
+ return self.data.indices
159
+
160
+ @coords.setter
161
+ def coords(self, value: torch.Tensor):
162
+ if BACKEND == 'torchsparse':
163
+ self.data.C = value
164
+ elif BACKEND == 'spconv':
165
+ self.data.indices = value
166
+
167
+ @property
168
+ def dtype(self):
169
+ return self.feats.dtype
170
+
171
+ @property
172
+ def device(self):
173
+ return self.feats.device
174
+
175
+ @overload
176
+ def to(self, dtype: torch.dtype) -> 'SparseTensor': ...
177
+
178
+ @overload
179
+ def to(self, device: Optional[Union[str, torch.device]] = None, dtype: Optional[torch.dtype] = None) -> 'SparseTensor': ...
180
+
181
+ def to(self, *args, **kwargs) -> 'SparseTensor':
182
+ device = None
183
+ dtype = None
184
+ if len(args) == 2:
185
+ device, dtype = args
186
+ elif len(args) == 1:
187
+ if isinstance(args[0], torch.dtype):
188
+ dtype = args[0]
189
+ else:
190
+ device = args[0]
191
+ if 'dtype' in kwargs:
192
+ assert dtype is None, "to() received multiple values for argument 'dtype'"
193
+ dtype = kwargs['dtype']
194
+ if 'device' in kwargs:
195
+ assert device is None, "to() received multiple values for argument 'device'"
196
+ device = kwargs['device']
197
+
198
+ new_feats = self.feats.to(device=device, dtype=dtype)
199
+ new_coords = self.coords.to(device=device)
200
+ return self.replace(new_feats, new_coords)
201
+
202
+ def type(self, dtype):
203
+ new_feats = self.feats.type(dtype)
204
+ return self.replace(new_feats)
205
+
206
+ def cpu(self) -> 'SparseTensor':
207
+ new_feats = self.feats.cpu()
208
+ new_coords = self.coords.cpu()
209
+ return self.replace(new_feats, new_coords)
210
+
211
+ def cuda(self) -> 'SparseTensor':
212
+ new_feats = self.feats.cuda()
213
+ new_coords = self.coords.cuda()
214
+ return self.replace(new_feats, new_coords)
215
+
216
+ def half(self) -> 'SparseTensor':
217
+ new_feats = self.feats.half()
218
+ return self.replace(new_feats)
219
+
220
+ def float(self) -> 'SparseTensor':
221
+ new_feats = self.feats.float()
222
+ return self.replace(new_feats)
223
+
224
+ def detach(self) -> 'SparseTensor':
225
+ new_coords = self.coords.detach()
226
+ new_feats = self.feats.detach()
227
+ return self.replace(new_feats, new_coords)
228
+
229
+ def dense(self) -> torch.Tensor:
230
+ if BACKEND == 'torchsparse':
231
+ return self.data.dense()
232
+ elif BACKEND == 'spconv':
233
+ return self.data.dense()
234
+
235
+ def reshape(self, *shape) -> 'SparseTensor':
236
+ new_feats = self.feats.reshape(self.feats.shape[0], *shape)
237
+ return self.replace(new_feats)
238
+
239
+ def unbind(self, dim: int) -> List['SparseTensor']:
240
+ return sparse_unbind(self, dim)
241
+
242
+ def replace(self, feats: torch.Tensor, coords: Optional[torch.Tensor] = None) -> 'SparseTensor':
243
+ new_shape = [self.shape[0]]
244
+ new_shape.extend(feats.shape[1:])
245
+ if BACKEND == 'torchsparse':
246
+ new_data = SparseTensorData(
247
+ feats=feats,
248
+ coords=self.data.coords if coords is None else coords,
249
+ stride=self.data.stride,
250
+ spatial_range=self.data.spatial_range,
251
+ )
252
+ new_data._caches = self.data._caches
253
+ elif BACKEND == 'spconv':
254
+ new_data = SparseTensorData(
255
+ self.data.features.reshape(self.data.features.shape[0], -1),
256
+ self.data.indices,
257
+ self.data.spatial_shape,
258
+ self.data.batch_size,
259
+ self.data.grid,
260
+ self.data.voxel_num,
261
+ self.data.indice_dict
262
+ )
263
+ new_data._features = feats
264
+ new_data.benchmark = self.data.benchmark
265
+ new_data.benchmark_record = self.data.benchmark_record
266
+ new_data.thrust_allocator = self.data.thrust_allocator
267
+ new_data._timer = self.data._timer
268
+ new_data.force_algo = self.data.force_algo
269
+ new_data.int8_scale = self.data.int8_scale
270
+ if coords is not None:
271
+ new_data.indices = coords
272
+ new_tensor = SparseTensor(new_data, shape=torch.Size(new_shape), layout=self.layout, scale=self._scale, spatial_cache=self._spatial_cache)
273
+ return new_tensor
274
+
275
+ @staticmethod
276
+ def full(aabb, dim, value, dtype=torch.float32, device=None) -> 'SparseTensor':
277
+ N, C = dim
278
+ x = torch.arange(aabb[0], aabb[3] + 1)
279
+ y = torch.arange(aabb[1], aabb[4] + 1)
280
+ z = torch.arange(aabb[2], aabb[5] + 1)
281
+ coords = torch.stack(torch.meshgrid(x, y, z, indexing='ij'), dim=-1).reshape(-1, 3)
282
+ coords = torch.cat([
283
+ torch.arange(N).view(-1, 1).repeat(1, coords.shape[0]).view(-1, 1),
284
+ coords.repeat(N, 1),
285
+ ], dim=1).to(dtype=torch.int32, device=device)
286
+ feats = torch.full((coords.shape[0], C), value, dtype=dtype, device=device)
287
+ return SparseTensor(feats=feats, coords=coords)
288
+
289
+ def __merge_sparse_cache(self, other: 'SparseTensor') -> dict:
290
+ new_cache = {}
291
+ for k in set(list(self._spatial_cache.keys()) + list(other._spatial_cache.keys())):
292
+ if k in self._spatial_cache:
293
+ new_cache[k] = self._spatial_cache[k]
294
+ if k in other._spatial_cache:
295
+ if k not in new_cache:
296
+ new_cache[k] = other._spatial_cache[k]
297
+ else:
298
+ new_cache[k].update(other._spatial_cache[k])
299
+ return new_cache
300
+
301
+ def __neg__(self) -> 'SparseTensor':
302
+ return self.replace(-self.feats)
303
+
304
+ def __elemwise__(self, other: Union[torch.Tensor, 'SparseTensor'], op: callable) -> 'SparseTensor':
305
+ if isinstance(other, torch.Tensor):
306
+ try:
307
+ other = torch.broadcast_to(other, self.shape)
308
+ other = sparse_batch_broadcast(self, other)
309
+ except:
310
+ pass
311
+ if isinstance(other, SparseTensor):
312
+ other = other.feats
313
+ new_feats = op(self.feats, other)
314
+ new_tensor = self.replace(new_feats)
315
+ if isinstance(other, SparseTensor):
316
+ new_tensor._spatial_cache = self.__merge_sparse_cache(other)
317
+ return new_tensor
318
+
319
+ def __add__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
320
+ return self.__elemwise__(other, torch.add)
321
+
322
+ def __radd__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
323
+ return self.__elemwise__(other, torch.add)
324
+
325
+ def __sub__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
326
+ return self.__elemwise__(other, torch.sub)
327
+
328
+ def __rsub__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
329
+ return self.__elemwise__(other, lambda x, y: torch.sub(y, x))
330
+
331
+ def __mul__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
332
+ return self.__elemwise__(other, torch.mul)
333
+
334
+ def __rmul__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
335
+ return self.__elemwise__(other, torch.mul)
336
+
337
+ def __truediv__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
338
+ return self.__elemwise__(other, torch.div)
339
+
340
+ def __rtruediv__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
341
+ return self.__elemwise__(other, lambda x, y: torch.div(y, x))
342
+
343
+ def __getitem__(self, idx):
344
+ if isinstance(idx, int):
345
+ idx = [idx]
346
+ elif isinstance(idx, slice):
347
+ idx = range(*idx.indices(self.shape[0]))
348
+ elif isinstance(idx, torch.Tensor):
349
+ if idx.dtype == torch.bool:
350
+ assert idx.shape == (self.shape[0],), f"Invalid index shape: {idx.shape}"
351
+ idx = idx.nonzero().squeeze(1)
352
+ elif idx.dtype in [torch.int32, torch.int64]:
353
+ assert len(idx.shape) == 1, f"Invalid index shape: {idx.shape}"
354
+ else:
355
+ raise ValueError(f"Unknown index type: {idx.dtype}")
356
+ else:
357
+ raise ValueError(f"Unknown index type: {type(idx)}")
358
+
359
+ coords = []
360
+ feats = []
361
+ for new_idx, old_idx in enumerate(idx):
362
+ coords.append(self.coords[self.layout[old_idx]].clone())
363
+ coords[-1][:, 0] = new_idx
364
+ feats.append(self.feats[self.layout[old_idx]])
365
+ coords = torch.cat(coords, dim=0).contiguous()
366
+ feats = torch.cat(feats, dim=0).contiguous()
367
+ return SparseTensor(feats=feats, coords=coords)
368
+
369
+ def register_spatial_cache(self, key, value) -> None:
370
+ """
371
+ Register a spatial cache.
372
+ The spatial cache can be anything you want to cache.
373
+ Registration and retrieval of the cache are based on the current scale.
374
+ """
375
+ scale_key = str(self._scale)
376
+ if scale_key not in self._spatial_cache:
377
+ self._spatial_cache[scale_key] = {}
378
+ self._spatial_cache[scale_key][key] = value
379
+
380
+ def get_spatial_cache(self, key=None):
381
+ """
382
+ Get a spatial cache.
383
+ """
384
+ scale_key = str(self._scale)
385
+ cur_scale_cache = self._spatial_cache.get(scale_key, {})
386
+ if key is None:
387
+ return cur_scale_cache
388
+ return cur_scale_cache.get(key, None)
389
+
390
+ def save(self, path: str) -> None:
391
+ data = {
392
+ 'feats': self.feats.cpu(),
393
+ 'coords': self.coords.cpu(),
394
+ }
395
+ torch.save(data, path)
396
+
397
+
398
+ def load(self, path: str) -> None:
399
+ data = torch.load(path)
400
+ self.feats = data['feats']
401
+ self.coords = data['coords']
402
+
403
+
404
+ def sparse_batch_broadcast(input: SparseTensor, other: torch.Tensor) -> torch.Tensor:
405
+ """
406
+ Broadcast a per-batch tensor to match a sparse tensor's features along the batch dimension.
407
+
408
+ Args:
409
+ input (SparseTensor): Sparse tensor to broadcast to.
410
+ other (torch.Tensor): Per-batch tensor to broadcast.
411
+ Returns a tensor shaped like input.feats, with other[k] broadcast over the voxels of batch k.
412
+ """
413
+ coords, feats = input.coords, input.feats
414
+ broadcasted = torch.zeros_like(feats)
415
+ for k in range(input.shape[0]):
416
+ broadcasted[input.layout[k]] = other[k]
417
+ return broadcasted
418
+
419
+
420
+ def sparse_batch_op(input: SparseTensor, other: torch.Tensor, op: callable = torch.add) -> SparseTensor:
421
+ """
422
+ Broadcast a 1D tensor to a sparse tensor along the batch dimension then perform an operation.
423
+
424
+ Args:
425
+ input (SparseTensor): Sparse tensor to operate on.
426
+ other (torch.Tensor): Per-batch tensor to broadcast.
427
+ op (callable): Operation to perform after broadcasting. Defaults to torch.add.
428
+ """
429
+ return input.replace(op(input.feats, sparse_batch_broadcast(input, other)))
430
+
431
+
432
+ def sparse_cat(inputs: List[SparseTensor], dim: int = 0) -> SparseTensor:
433
+ """
434
+ Concatenate a list of sparse tensors.
435
+
436
+ Args:
437
+ inputs (List[SparseTensor]): List of sparse tensors to concatenate.
438
+ """
439
+ if dim == 0:
440
+ start = 0
441
+ coords = []
442
+ for input in inputs:
443
+ coords.append(input.coords.clone())
444
+ coords[-1][:, 0] += start
445
+ start += input.shape[0]
446
+ coords = torch.cat(coords, dim=0)
447
+ feats = torch.cat([input.feats for input in inputs], dim=0)
448
+ output = SparseTensor(
449
+ coords=coords,
450
+ feats=feats,
451
+ )
452
+ else:
453
+ feats = torch.cat([input.feats for input in inputs], dim=dim)
454
+ output = inputs[0].replace(feats)
455
+
456
+ return output
457
+
458
+
459
+ def sparse_unbind(input: SparseTensor, dim: int) -> List[SparseTensor]:
460
+ """
461
+ Unbind a sparse tensor along a dimension.
462
+
463
+ Args:
464
+ input (SparseTensor): Sparse tensor to unbind.
465
+ dim (int): Dimension to unbind.
466
+ """
467
+ if dim == 0:
468
+ return [input[i] for i in range(input.shape[0])]
469
+ else:
470
+ feats = input.feats.unbind(dim)
471
+ return [input.replace(f) for f in feats]
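
A minimal usage sketch of the container above. This assumes one of the supported backends (spconv or torchsparse) is installed and selected, and that a CUDA device is available; the values are made up:

    import torch
    from Amodal3R.modules import sparse as sp

    coords = torch.tensor([[0, 1, 2, 3],     # (batch, x, y, z); batch 0 has 3 voxels
                           [0, 4, 5, 6],
                           [0, 7, 0, 1],
                           [1, 0, 0, 0],     # batch 1 has 2 voxels
                           [1, 9, 9, 9]], dtype=torch.int32).cuda()
    feats = torch.randn(5, 8).cuda()

    x = sp.SparseTensor(feats=feats, coords=coords)
    print(x.shape)        # torch.Size([2, 8])
    print(x.layout)       # [slice(0, 3), slice(3, 5)] -- per-batch contiguous rows

    y = x * 2.0 + 1.0                 # element-wise ops keep the coordinates
    xy = sp.sparse_cat([x, y])        # batch dimension becomes 4
    first = xy[0]                     # slice a single batch item back out
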
Amodal3R/modules/sparse/conv/__init__.py CHANGED
@@ -1,3 +1,21 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:65bdef30f1cb4feb77c4d00b3a27f04c2ed8748779ed09e5f4535cfa396ad102
3
- size 537
1
+ from .. import BACKEND
2
+
3
+
4
+ SPCONV_ALGO = 'auto' # 'auto', 'implicit_gemm', 'native'
5
+
6
+ def __from_env():
7
+ import os
8
+
9
+ global SPCONV_ALGO
10
+ env_spconv_algo = os.environ.get('SPCONV_ALGO')
11
+ if env_spconv_algo is not None and env_spconv_algo in ['auto', 'implicit_gemm', 'native']:
12
+ SPCONV_ALGO = env_spconv_algo
13
+ print(f"[SPARSE][CONV] spconv algo: {SPCONV_ALGO}")
14
+
15
+
16
+ __from_env()
17
+
18
+ if BACKEND == 'torchsparse':
19
+ from .conv_torchsparse import *
20
+ elif BACKEND == 'spconv':
21
+ from .conv_spconv import *
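
The algorithm can also be pinned from the environment; since __from_env() runs at import time, the variable has to be set before the first import of this package. A small sketch:

    import os
    os.environ["SPCONV_ALGO"] = "native"          # or "implicit_gemm" / "auto"

    from Amodal3R.modules.sparse import conv      # prints "[SPARSE][CONV] spconv algo: native"
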
Amodal3R/modules/sparse/conv/conv_spconv.py CHANGED
@@ -1,3 +1,80 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:69174887a0b378851ced8fd35fe52f5a37c4c7ef2067daac80ad2980bdddb1b1
3
- size 3955
1
+ import torch
2
+ import torch.nn as nn
3
+ from .. import SparseTensor
4
+ from .. import DEBUG
5
+ from . import SPCONV_ALGO
6
+
7
+ class SparseConv3d(nn.Module):
8
+ def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, padding=None, bias=True, indice_key=None):
9
+ super(SparseConv3d, self).__init__()
10
+ if 'spconv' not in globals():
11
+ import spconv.pytorch as spconv
12
+ algo = None
13
+ if SPCONV_ALGO == 'native':
14
+ algo = spconv.ConvAlgo.Native
15
+ elif SPCONV_ALGO == 'implicit_gemm':
16
+ algo = spconv.ConvAlgo.MaskImplicitGemm
17
+ if stride == 1 and (padding is None):
18
+ self.conv = spconv.SubMConv3d(in_channels, out_channels, kernel_size, dilation=dilation, bias=bias, indice_key=indice_key, algo=algo)
19
+ else:
20
+ self.conv = spconv.SparseConv3d(in_channels, out_channels, kernel_size, stride=stride, dilation=dilation, padding=padding, bias=bias, indice_key=indice_key, algo=algo)
21
+ self.stride = tuple(stride) if isinstance(stride, (list, tuple)) else (stride, stride, stride)
22
+ self.padding = padding
23
+
24
+ def forward(self, x: SparseTensor) -> SparseTensor:
25
+ spatial_changed = any(s != 1 for s in self.stride) or (self.padding is not None)
26
+ new_data = self.conv(x.data)
27
+ new_shape = [x.shape[0], self.conv.out_channels]
28
+ new_layout = None if spatial_changed else x.layout
29
+
30
+ if spatial_changed and (x.shape[0] != 1):
31
+ # spconv with non-1 stride breaks the batch-contiguity of the output tensor, so sort by coordinates
32
+ fwd = new_data.indices[:, 0].argsort()
33
+ bwd = torch.zeros_like(fwd).scatter_(0, fwd, torch.arange(fwd.shape[0], device=fwd.device))
34
+ sorted_feats = new_data.features[fwd]
35
+ sorted_coords = new_data.indices[fwd]
36
+ unsorted_data = new_data
37
+ new_data = spconv.SparseConvTensor(sorted_feats, sorted_coords, unsorted_data.spatial_shape, unsorted_data.batch_size) # type: ignore
38
+
39
+ out = SparseTensor(
40
+ new_data, shape=torch.Size(new_shape), layout=new_layout,
41
+ scale=tuple([s * stride for s, stride in zip(x._scale, self.stride)]),
42
+ spatial_cache=x._spatial_cache,
43
+ )
44
+
45
+ if spatial_changed and (x.shape[0] != 1):
46
+ out.register_spatial_cache(f'conv_{self.stride}_unsorted_data', unsorted_data)
47
+ out.register_spatial_cache(f'conv_{self.stride}_sort_bwd', bwd)
48
+
49
+ return out
50
+
51
+
52
+ class SparseInverseConv3d(nn.Module):
53
+ def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, bias=True, indice_key=None):
54
+ super(SparseInverseConv3d, self).__init__()
55
+ if 'spconv' not in globals():
56
+ import spconv.pytorch as spconv
57
+ self.conv = spconv.SparseInverseConv3d(in_channels, out_channels, kernel_size, bias=bias, indice_key=indice_key)
58
+ self.stride = tuple(stride) if isinstance(stride, (list, tuple)) else (stride, stride, stride)
59
+
60
+ def forward(self, x: SparseTensor) -> SparseTensor:
61
+ spatial_changed = any(s != 1 for s in self.stride)
62
+ if spatial_changed:
63
+ # recover the original spconv order
64
+ data = x.get_spatial_cache(f'conv_{self.stride}_unsorted_data')
65
+ bwd = x.get_spatial_cache(f'conv_{self.stride}_sort_bwd')
66
+ data = data.replace_feature(x.feats[bwd])
67
+ if DEBUG:
68
+ assert torch.equal(data.indices, x.coords[bwd]), 'Recover the original order failed'
69
+ else:
70
+ data = x.data
71
+
72
+ new_data = self.conv(data)
73
+ new_shape = [x.shape[0], self.conv.out_channels]
74
+ new_layout = None if spatial_changed else x.layout
75
+ out = SparseTensor(
76
+ new_data, shape=torch.Size(new_shape), layout=new_layout,
77
+ scale=tuple([s // stride for s, stride in zip(x._scale, self.stride)]),
78
+ spatial_cache=x._spatial_cache,
79
+ )
80
+ return out
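
A construction sketch for pairing the two wrappers above (assumes the spconv backend; the channel sizes and indice_key name are illustrative):

    import torch.nn as nn
    from Amodal3R.modules.sparse.conv import SparseConv3d, SparseInverseConv3d

    class DownUp(nn.Module):
        def __init__(self):
            super().__init__()
            # strided conv halves the resolution and caches the pre-sort order of its output
            self.down = SparseConv3d(32, 64, kernel_size=3, stride=2, padding=1, indice_key="down1")
            # inverse conv with the same stride and indice_key restores the original voxel set
            self.up = SparseInverseConv3d(64, 32, kernel_size=3, stride=2, indice_key="down1")

        def forward(self, x):          # x: SparseTensor with 32 channels
            return self.up(self.down(x))
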
Amodal3R/modules/sparse/conv/conv_torchsparse.py CHANGED
@@ -1,3 +1,38 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:2bbbce0d55f4c8057ba8e4a8d46124ccbe9f4ea9ff0b6a3b6834cd3f285acd27
3
- size 1716
1
+ import torch
2
+ import torch.nn as nn
3
+ from .. import SparseTensor
4
+
5
+
6
+ class SparseConv3d(nn.Module):
7
+ def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, bias=True, indice_key=None):
8
+ super(SparseConv3d, self).__init__()
9
+ if 'torchsparse' not in globals():
10
+ import torchsparse
11
+ self.conv = torchsparse.nn.Conv3d(in_channels, out_channels, kernel_size, stride, 0, dilation, bias)
12
+
13
+ def forward(self, x: SparseTensor) -> SparseTensor:
14
+ out = self.conv(x.data)
15
+ new_shape = [x.shape[0], self.conv.out_channels]
16
+ out = SparseTensor(out, shape=torch.Size(new_shape), layout=x.layout if all(s == 1 for s in self.conv.stride) else None)
17
+ out._spatial_cache = x._spatial_cache
18
+ out._scale = tuple([s * stride for s, stride in zip(x._scale, self.conv.stride)])
19
+ return out
20
+
21
+
22
+ class SparseInverseConv3d(nn.Module):
23
+ def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, bias=True, indice_key=None):
24
+ super(SparseInverseConv3d, self).__init__()
25
+ if 'torchsparse' not in globals():
26
+ import torchsparse
27
+ self.conv = torchsparse.nn.Conv3d(in_channels, out_channels, kernel_size, stride, 0, dilation, bias, transposed=True)
28
+
29
+ def forward(self, x: SparseTensor) -> SparseTensor:
30
+ out = self.conv(x.data)
31
+ new_shape = [x.shape[0], self.conv.out_channels]
32
+ out = SparseTensor(out, shape=torch.Size(new_shape), layout=x.layout if all(s == 1 for s in self.conv.stride) else None)
33
+ out._spatial_cache = x._spatial_cache
34
+ out._scale = tuple([s // stride for s, stride in zip(x._scale, self.conv.stride)])
35
+ return out
36
+
37
+
38
+
Amodal3R/modules/sparse/linear.py CHANGED
@@ -1,3 +1,15 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:8a3dc106ecc875e6ad13bbef2b543846c71c8b61bb7f8db889cbd94c56a46747
3
- size 387
1
+ import torch
2
+ import torch.nn as nn
3
+ from . import SparseTensor
4
+
5
+ __all__ = [
6
+ 'SparseLinear'
7
+ ]
8
+
9
+
10
+ class SparseLinear(nn.Linear):
11
+ def __init__(self, in_features, out_features, bias=True):
12
+ super(SparseLinear, self).__init__(in_features, out_features, bias)
13
+
14
+ def forward(self, input: SparseTensor) -> SparseTensor:
15
+ return input.replace(super().forward(input.feats))
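
SparseLinear is nn.Linear applied row-wise to the feature matrix while the coordinates are carried through unchanged. A tiny sketch (assumes a working sparse backend):

    import torch
    from Amodal3R.modules.sparse import SparseTensor
    from Amodal3R.modules.sparse.linear import SparseLinear

    coords = torch.zeros(4, 4, dtype=torch.int32)
    coords[:, 1] = torch.arange(4)                    # 4 voxels, all in batch 0
    x = SparseTensor(feats=torch.randn(4, 8), coords=coords)

    proj = SparseLinear(8, 16)
    y = proj(x)                                       # y.coords == x.coords, y.feats is [4, 16]
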
Amodal3R/modules/sparse/nonlinearity.py CHANGED
@@ -1,3 +1,35 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:b686ec25498fff4a1f14c53db05a33747faed6b2dafd4d2f63be0ada26e7b082
3
- size 875
1
+ import torch
2
+ import torch.nn as nn
3
+ from . import SparseTensor
4
+
5
+ __all__ = [
6
+ 'SparseReLU',
7
+ 'SparseSiLU',
8
+ 'SparseGELU',
9
+ 'SparseActivation'
10
+ ]
11
+
12
+
13
+ class SparseReLU(nn.ReLU):
14
+ def forward(self, input: SparseTensor) -> SparseTensor:
15
+ return input.replace(super().forward(input.feats))
16
+
17
+
18
+ class SparseSiLU(nn.SiLU):
19
+ def forward(self, input: SparseTensor) -> SparseTensor:
20
+ return input.replace(super().forward(input.feats))
21
+
22
+
23
+ class SparseGELU(nn.GELU):
24
+ def forward(self, input: SparseTensor) -> SparseTensor:
25
+ return input.replace(super().forward(input.feats))
26
+
27
+
28
+ class SparseActivation(nn.Module):
29
+ def __init__(self, activation: nn.Module):
30
+ super().__init__()
31
+ self.activation = activation
32
+
33
+ def forward(self, input: SparseTensor) -> SparseTensor:
34
+ return input.replace(self.activation(input.feats))
35
+
Amodal3R/modules/sparse/norm.py CHANGED
@@ -1,3 +1,58 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:04f963d39fdbbd9a7f118b6291a94abd0264a6fa947f216da46a0b502d8af226
3
- size 2151
1
+ import torch
2
+ import torch.nn as nn
3
+ from . import SparseTensor
4
+ from . import DEBUG
5
+
6
+ __all__ = [
7
+ 'SparseGroupNorm',
8
+ 'SparseLayerNorm',
9
+ 'SparseGroupNorm32',
10
+ 'SparseLayerNorm32',
11
+ ]
12
+
13
+
14
+ class SparseGroupNorm(nn.GroupNorm):
15
+ def __init__(self, num_groups, num_channels, eps=1e-5, affine=True):
16
+ super(SparseGroupNorm, self).__init__(num_groups, num_channels, eps, affine)
17
+
18
+ def forward(self, input: SparseTensor) -> SparseTensor:
19
+ nfeats = torch.zeros_like(input.feats)
20
+ for k in range(input.shape[0]):
21
+ if DEBUG:
22
+ assert (input.coords[input.layout[k], 0] == k).all(), f"SparseGroupNorm: batch index mismatch"
23
+ bfeats = input.feats[input.layout[k]]
24
+ bfeats = bfeats.permute(1, 0).reshape(1, input.shape[1], -1)
25
+ bfeats = super().forward(bfeats)
26
+ bfeats = bfeats.reshape(input.shape[1], -1).permute(1, 0)
27
+ nfeats[input.layout[k]] = bfeats
28
+ return input.replace(nfeats)
29
+
30
+
31
+ class SparseLayerNorm(nn.LayerNorm):
32
+ def __init__(self, normalized_shape, eps=1e-5, elementwise_affine=True):
33
+ super(SparseLayerNorm, self).__init__(normalized_shape, eps, elementwise_affine)
34
+
35
+ def forward(self, input: SparseTensor) -> SparseTensor:
36
+ nfeats = torch.zeros_like(input.feats)
37
+ for k in range(input.shape[0]):
38
+ bfeats = input.feats[input.layout[k]]
39
+ bfeats = bfeats.permute(1, 0).reshape(1, input.shape[1], -1)
40
+ bfeats = super().forward(bfeats)
41
+ bfeats = bfeats.reshape(input.shape[1], -1).permute(1, 0)
42
+ nfeats[input.layout[k]] = bfeats
43
+ return input.replace(nfeats)
44
+
45
+
46
+ class SparseGroupNorm32(SparseGroupNorm):
47
+ """
48
+ A GroupNorm layer that converts to float32 before the forward pass.
49
+ """
50
+ def forward(self, x: SparseTensor) -> SparseTensor:
51
+ return super().forward(x.float()).type(x.dtype)
52
+
53
+ class SparseLayerNorm32(SparseLayerNorm):
54
+ """
55
+ A LayerNorm layer that converts to float32 before the forward pass.
56
+ """
57
+ def forward(self, x: SparseTensor) -> SparseTensor:
58
+ return super().forward(x.float()).type(x.dtype)
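
The per-batch loop exists because nn.GroupNorm expects a [N, C, *] layout, so each batch item's [T, C] block of voxel features is transposed into a [1, C, T] view, normalized, and transposed back. A plain-tensor sketch of that reshaping:

    import torch
    import torch.nn as nn

    C, T = 8, 5                          # channels, voxels belonging to one batch item
    bfeats = torch.randn(T, C)           # the rows input.feats[input.layout[k]]

    gn = nn.GroupNorm(num_groups=4, num_channels=C)
    out = gn(bfeats.permute(1, 0).reshape(1, C, -1))   # [1, C, T]: statistics over this item only
    out = out.reshape(C, -1).permute(1, 0)             # back to [T, C]
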
Amodal3R/modules/sparse/spatial.py CHANGED
@@ -1,3 +1,110 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:d38197d35faaf55def3b0b0b279380c4ff2f62f0d78e620f6ed8fb1d72449440
3
- size 4567
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ from . import SparseTensor
5
+
6
+ __all__ = [
7
+ 'SparseDownsample',
8
+ 'SparseUpsample',
9
+ 'SparseSubdivide'
10
+ ]
11
+
12
+
13
+ class SparseDownsample(nn.Module):
14
+ """
15
+ Downsample a sparse tensor by a factor of `factor`.
16
+ Implemented as average pooling.
17
+ """
18
+ def __init__(self, factor: Union[int, Tuple[int, ...], List[int]]):
19
+ super(SparseDownsample, self).__init__()
20
+ self.factor = tuple(factor) if isinstance(factor, (list, tuple)) else factor
21
+
22
+ def forward(self, input: SparseTensor) -> SparseTensor:
23
+ DIM = input.coords.shape[-1] - 1
24
+ factor = self.factor if isinstance(self.factor, tuple) else (self.factor,) * DIM
25
+ assert DIM == len(factor), 'Input coordinates must have the same dimension as the downsample factor.'
26
+
27
+ coord = list(input.coords.unbind(dim=-1))
28
+ for i, f in enumerate(factor):
29
+ coord[i+1] = coord[i+1] // f
30
+
31
+ MAX = [coord[i+1].max().item() + 1 for i in range(DIM)]
32
+ OFFSET = torch.cumprod(torch.tensor(MAX[::-1]), 0).tolist()[::-1] + [1]
33
+ code = sum([c * o for c, o in zip(coord, OFFSET)])
34
+ code, idx = code.unique(return_inverse=True)
35
+
36
+ new_feats = torch.scatter_reduce(
37
+ torch.zeros(code.shape[0], input.feats.shape[1], device=input.feats.device, dtype=input.feats.dtype),
38
+ dim=0,
39
+ index=idx.unsqueeze(1).expand(-1, input.feats.shape[1]),
40
+ src=input.feats,
41
+ reduce='mean'
42
+ )
43
+ new_coords = torch.stack(
44
+ [code // OFFSET[0]] +
45
+ [(code // OFFSET[i+1]) % MAX[i] for i in range(DIM)],
46
+ dim=-1
47
+ )
48
+ out = SparseTensor(new_feats, new_coords, input.shape,)
49
+ out._scale = tuple([s // f for s, f in zip(input._scale, factor)])
50
+ out._spatial_cache = input._spatial_cache
51
+
52
+ out.register_spatial_cache(f'upsample_{factor}_coords', input.coords)
53
+ out.register_spatial_cache(f'upsample_{factor}_layout', input.layout)
54
+ out.register_spatial_cache(f'upsample_{factor}_idx', idx)
55
+
56
+ return out
57
+
58
+
59
+ class SparseUpsample(nn.Module):
60
+ """
61
+ Upsample a sparse tensor by a factor of `factor`.
62
+ Implemented as nearest neighbor interpolation.
63
+ """
64
+ def __init__(self, factor: Union[int, Tuple[int, int, int], List[int]]):
65
+ super(SparseUpsample, self).__init__()
66
+ self.factor = tuple(factor) if isinstance(factor, (list, tuple)) else factor
67
+
68
+ def forward(self, input: SparseTensor) -> SparseTensor:
69
+ DIM = input.coords.shape[-1] - 1
70
+ factor = self.factor if isinstance(self.factor, tuple) else (self.factor,) * DIM
71
+ assert DIM == len(factor), 'Input coordinates must have the same dimension as the upsample factor.'
72
+
73
+ new_coords = input.get_spatial_cache(f'upsample_{factor}_coords')
74
+ new_layout = input.get_spatial_cache(f'upsample_{factor}_layout')
75
+ idx = input.get_spatial_cache(f'upsample_{factor}_idx')
76
+ if any([x is None for x in [new_coords, new_layout, idx]]):
77
+ raise ValueError('Upsample cache not found. SparseUpsample must be paired with SparseDownsample.')
78
+ new_feats = input.feats[idx]
79
+ out = SparseTensor(new_feats, new_coords, input.shape, new_layout)
80
+ out._scale = tuple([s * f for s, f in zip(input._scale, factor)])
81
+ out._spatial_cache = input._spatial_cache
82
+ return out
83
+
84
+ class SparseSubdivide(nn.Module):
85
+ """
86
+ Subdivide a sparse tensor by a factor of 2 along each spatial dimension.
87
+ Each voxel is split into 2**DIM children that copy the parent's features.
88
+ """
89
+ def __init__(self):
90
+ super(SparseSubdivide, self).__init__()
91
+
92
+ def forward(self, input: SparseTensor) -> SparseTensor:
93
+ DIM = input.coords.shape[-1] - 1
94
+ # upsample scale=2^DIM
95
+ n_cube = torch.ones([2] * DIM, device=input.device, dtype=torch.int)
96
+ n_coords = torch.nonzero(n_cube)
97
+ n_coords = torch.cat([torch.zeros_like(n_coords[:, :1]), n_coords], dim=-1)
98
+ factor = n_coords.shape[0]
99
+ assert factor == 2 ** DIM
100
+ # print(n_coords.shape)
101
+ new_coords = input.coords.clone()
102
+ new_coords[:, 1:] *= 2
103
+ new_coords = new_coords.unsqueeze(1) + n_coords.unsqueeze(0).to(new_coords.dtype)
104
+
105
+ new_feats = input.feats.unsqueeze(1).expand(input.feats.shape[0], factor, *input.feats.shape[1:])
106
+ out = SparseTensor(new_feats.flatten(0, 1), new_coords.flatten(0, 1), input.shape)
107
+ out._scale = input._scale * 2
108
+ out._spatial_cache = input._spatial_cache
109
+ return out
110
+
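
The downsample amounts to integer-dividing coordinates by the factor, hashing each coarse cell to a unique code, and scatter-averaging the features that fall into the same cell; SparseUpsample then simply replays the cached inverse index, which is why it must follow a SparseDownsample of the same factor. A plain-torch sketch with made-up values:

    import torch

    factor = 2
    coords = torch.tensor([[0, 0, 0, 0],      # (batch, x, y, z)
                           [0, 0, 1, 1],
                           [0, 2, 2, 2],
                           [0, 2, 3, 3]], dtype=torch.int32)
    feats = torch.arange(8, dtype=torch.float32).reshape(4, 2)

    coarse = coords.clone()
    coarse[:, 1:] //= factor                                                 # rows 0/1 and 2/3 share a cell
    code = (coarse.long() * torch.tensor([1000, 100, 10, 1])).sum(dim=1)     # toy hash, unique per cell
    uniq, idx = code.unique(return_inverse=True)

    pooled = torch.zeros(uniq.shape[0], feats.shape[1]).scatter_reduce(
        0, idx.unsqueeze(1).expand(-1, feats.shape[1]), feats, reduce="mean", include_self=False)
    print(pooled)        # [[1., 2.], [5., 6.]] -- per coarse voxel feature average
    # upsampling is pooled[idx]: each fine voxel reads back its coarse cell's features
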
Amodal3R/modules/sparse/transformer/__init__.py CHANGED
@@ -1,3 +1,2 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:debb94976ce5c7fbf196012d9dbd1c6aa70acbc37c8f97cb3ffc94ef90730bfb
3
- size 46
 
1
+ from .blocks import *
2
+ from .modulated import *
 
Amodal3R/modules/sparse/transformer/blocks.py CHANGED
@@ -1,3 +1,151 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:724d0e5e7e2284b6ab86fcbe47e3551d2966bc78dc05e0cf634f19070ad7879c
3
- size 5162
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ from ..basic import SparseTensor
5
+ from ..linear import SparseLinear
6
+ from ..nonlinearity import SparseGELU
7
+ from ..attention import SparseMultiHeadAttention, SerializeMode
8
+ from ...norm import LayerNorm32
9
+
10
+
11
+ class SparseFeedForwardNet(nn.Module):
12
+ def __init__(self, channels: int, mlp_ratio: float = 4.0):
13
+ super().__init__()
14
+ self.mlp = nn.Sequential(
15
+ SparseLinear(channels, int(channels * mlp_ratio)),
16
+ SparseGELU(approximate="tanh"),
17
+ SparseLinear(int(channels * mlp_ratio), channels),
18
+ )
19
+
20
+ def forward(self, x: SparseTensor) -> SparseTensor:
21
+ return self.mlp(x)
22
+
23
+
24
+ class SparseTransformerBlock(nn.Module):
25
+ """
26
+ Sparse Transformer block (MSA + FFN).
27
+ """
28
+ def __init__(
29
+ self,
30
+ channels: int,
31
+ num_heads: int,
32
+ mlp_ratio: float = 4.0,
33
+ attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "full",
34
+ window_size: Optional[int] = None,
35
+ shift_sequence: Optional[int] = None,
36
+ shift_window: Optional[Tuple[int, int, int]] = None,
37
+ serialize_mode: Optional[SerializeMode] = None,
38
+ use_checkpoint: bool = False,
39
+ use_rope: bool = False,
40
+ qk_rms_norm: bool = False,
41
+ qkv_bias: bool = True,
42
+ ln_affine: bool = False,
43
+ ):
44
+ super().__init__()
45
+ self.use_checkpoint = use_checkpoint
46
+ self.norm1 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
47
+ self.norm2 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
48
+ self.attn = SparseMultiHeadAttention(
49
+ channels,
50
+ num_heads=num_heads,
51
+ attn_mode=attn_mode,
52
+ window_size=window_size,
53
+ shift_sequence=shift_sequence,
54
+ shift_window=shift_window,
55
+ serialize_mode=serialize_mode,
56
+ qkv_bias=qkv_bias,
57
+ use_rope=use_rope,
58
+ qk_rms_norm=qk_rms_norm,
59
+ )
60
+ self.mlp = SparseFeedForwardNet(
61
+ channels,
62
+ mlp_ratio=mlp_ratio,
63
+ )
64
+
65
+ def _forward(self, x: SparseTensor) -> SparseTensor:
66
+ h = x.replace(self.norm1(x.feats))
67
+ h = self.attn(h)
68
+ x = x + h
69
+ h = x.replace(self.norm2(x.feats))
70
+ h = self.mlp(h)
71
+ x = x + h
72
+ return x
73
+
74
+ def forward(self, x: SparseTensor) -> SparseTensor:
75
+ if self.use_checkpoint:
76
+ return torch.utils.checkpoint.checkpoint(self._forward, x, use_reentrant=False)
77
+ else:
78
+ return self._forward(x)
79
+
80
+
81
+ class SparseTransformerCrossBlock(nn.Module):
82
+ """
83
+ Sparse Transformer cross-attention block (MSA + MCA + FFN).
84
+ """
85
+ def __init__(
86
+ self,
87
+ channels: int,
88
+ ctx_channels: int,
89
+ num_heads: int,
90
+ mlp_ratio: float = 4.0,
91
+ attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "full",
92
+ window_size: Optional[int] = None,
93
+ shift_sequence: Optional[int] = None,
94
+ shift_window: Optional[Tuple[int, int, int]] = None,
95
+ serialize_mode: Optional[SerializeMode] = None,
96
+ use_checkpoint: bool = False,
97
+ use_rope: bool = False,
98
+ qk_rms_norm: bool = False,
99
+ qk_rms_norm_cross: bool = False,
100
+ qkv_bias: bool = True,
101
+ ln_affine: bool = False,
102
+ ):
103
+ super().__init__()
104
+ self.use_checkpoint = use_checkpoint
105
+ self.norm1 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
106
+ self.norm2 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
107
+ self.norm3 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
108
+ self.self_attn = SparseMultiHeadAttention(
109
+ channels,
110
+ num_heads=num_heads,
111
+ type="self",
112
+ attn_mode=attn_mode,
113
+ window_size=window_size,
114
+ shift_sequence=shift_sequence,
115
+ shift_window=shift_window,
116
+ serialize_mode=serialize_mode,
117
+ qkv_bias=qkv_bias,
118
+ use_rope=use_rope,
119
+ qk_rms_norm=qk_rms_norm,
120
+ )
121
+ self.cross_attn = SparseMultiHeadAttention(
122
+ channels,
123
+ ctx_channels=ctx_channels,
124
+ num_heads=num_heads,
125
+ type="cross",
126
+ attn_mode="full",
127
+ qkv_bias=qkv_bias,
128
+ qk_rms_norm=qk_rms_norm_cross,
129
+ )
130
+ self.mlp = SparseFeedForwardNet(
131
+ channels,
132
+ mlp_ratio=mlp_ratio,
133
+ )
134
+
135
+ def _forward(self, x: SparseTensor, context: torch.Tensor):
136
+ h = x.replace(self.norm1(x.feats))
137
+ h = self.self_attn(h)
138
+ x = x + h
139
+ h = x.replace(self.norm2(x.feats))
140
+ h = self.cross_attn(h, context)
141
+ x = x + h
142
+ h = x.replace(self.norm3(x.feats))
143
+ h = self.mlp(h)
144
+ x = x + h
145
+ return x
146
+
147
+ def forward(self, x: SparseTensor, context: torch.Tensor):
148
+ if self.use_checkpoint:
149
+ return torch.utils.checkpoint.checkpoint(self._forward, x, context, use_reentrant=False)
150
+ else:
151
+ return self._forward(x, context)
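A minimal usage sketch for the blocks above (illustrative shapes and device handling; assumes the sparse attention backend is installed and a CUDA device is available):

import torch
from Amodal3R.modules import sparse as sp
from Amodal3R.modules.sparse.transformer.blocks import SparseTransformerCrossBlock

# 1024 active voxels in one sample; coords are (batch_idx, x, y, z), as in the pipeline code
coords = torch.cat([torch.zeros(1024, 1, dtype=torch.int32),
                    torch.randint(0, 64, (1024, 3), dtype=torch.int32)], dim=1).cuda()
x = sp.SparseTensor(feats=torch.randn(1024, 768).cuda(), coords=coords)
context = torch.randn(1, 1369, 1024).cuda()   # dense conditioning tokens (B, L, ctx_channels)

block = SparseTransformerCrossBlock(channels=768, ctx_channels=1024, num_heads=12).cuda()
y = block(x, context)                          # SparseTensor with the same coords, new features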
Amodal3R/modules/sparse/transformer/modulated.py CHANGED
@@ -1,3 +1,459 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:a84a3397d35b5e7bc1476deb16b9a4d8872f014b5239fe50a467a59c6a5936df
3
- size 17209
 
 
 
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ from ..basic import SparseTensor
5
+ from ..attention import SparseMultiHeadAttention, SerializeMode, SparseMultiHeadAttentionWeighted
6
+ from ...norm import LayerNorm32
7
+ from .blocks import SparseFeedForwardNet
8
+
9
+
10
+ class ModulatedSparseTransformerBlock(nn.Module):
11
+ """
12
+ Sparse Transformer block (MSA + FFN) with adaptive layer norm conditioning.
13
+ """
14
+ def __init__(
15
+ self,
16
+ channels: int,
17
+ num_heads: int,
18
+ mlp_ratio: float = 4.0,
19
+ attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "full",
20
+ window_size: Optional[int] = None,
21
+ shift_sequence: Optional[int] = None,
22
+ shift_window: Optional[Tuple[int, int, int]] = None,
23
+ serialize_mode: Optional[SerializeMode] = None,
24
+ use_checkpoint: bool = False,
25
+ use_rope: bool = False,
26
+ qk_rms_norm: bool = False,
27
+ qkv_bias: bool = True,
28
+ share_mod: bool = False,
29
+ ):
30
+ super().__init__()
31
+ self.use_checkpoint = use_checkpoint
32
+ self.share_mod = share_mod
33
+ self.norm1 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
34
+ self.norm2 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
35
+ self.attn = SparseMultiHeadAttention(
36
+ channels,
37
+ num_heads=num_heads,
38
+ attn_mode=attn_mode,
39
+ window_size=window_size,
40
+ shift_sequence=shift_sequence,
41
+ shift_window=shift_window,
42
+ serialize_mode=serialize_mode,
43
+ qkv_bias=qkv_bias,
44
+ use_rope=use_rope,
45
+ qk_rms_norm=qk_rms_norm,
46
+ )
47
+ self.mlp = SparseFeedForwardNet(
48
+ channels,
49
+ mlp_ratio=mlp_ratio,
50
+ )
51
+ if not share_mod:
52
+ self.adaLN_modulation = nn.Sequential(
53
+ nn.SiLU(),
54
+ nn.Linear(channels, 6 * channels, bias=True)
55
+ )
56
+
57
+ def _forward(self, x: SparseTensor, mod: torch.Tensor) -> SparseTensor:
58
+ if self.share_mod:
59
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = mod.chunk(6, dim=1)
60
+ else:
61
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(mod).chunk(6, dim=1)
62
+ h = x.replace(self.norm1(x.feats))
63
+ h = h * (1 + scale_msa) + shift_msa
64
+ h = self.attn(h)
65
+ h = h * gate_msa
66
+ x = x + h
67
+ h = x.replace(self.norm2(x.feats))
68
+ h = h * (1 + scale_mlp) + shift_mlp
69
+ h = self.mlp(h)
70
+ h = h * gate_mlp
71
+ x = x + h
72
+ return x
73
+
74
+ def forward(self, x: SparseTensor, mod: torch.Tensor) -> SparseTensor:
75
+ if self.use_checkpoint:
76
+ return torch.utils.checkpoint.checkpoint(self._forward, x, mod, use_reentrant=False)
77
+ else:
78
+ return self._forward(x, mod)
79
+
80
+
81
+ class ModulatedSparseTransformerCrossBlock(nn.Module):
82
+ """
83
+ Sparse Transformer cross-attention block (MSA + MCA + FFN) with adaptive layer norm conditioning.
84
+ """
85
+ def __init__(
86
+ self,
87
+ channels: int,
88
+ ctx_channels: int,
89
+ num_heads: int,
90
+ mlp_ratio: float = 4.0,
91
+ attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "full",
92
+ window_size: Optional[int] = None,
93
+ shift_sequence: Optional[int] = None,
94
+ shift_window: Optional[Tuple[int, int, int]] = None,
95
+ serialize_mode: Optional[SerializeMode] = None,
96
+ use_checkpoint: bool = False,
97
+ use_rope: bool = False,
98
+ qk_rms_norm: bool = False,
99
+ qk_rms_norm_cross: bool = False,
100
+ qkv_bias: bool = True,
101
+ share_mod: bool = False,
102
+
103
+ ):
104
+ super().__init__()
105
+ self.use_checkpoint = use_checkpoint
106
+ self.share_mod = share_mod
107
+ self.norm1 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
108
+ self.norm2 = LayerNorm32(channels, elementwise_affine=True, eps=1e-6)
109
+ self.norm3 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
110
+ self.self_attn = SparseMultiHeadAttention(
111
+ channels,
112
+ num_heads=num_heads,
113
+ type="self",
114
+ attn_mode=attn_mode,
115
+ window_size=window_size,
116
+ shift_sequence=shift_sequence,
117
+ shift_window=shift_window,
118
+ serialize_mode=serialize_mode,
119
+ qkv_bias=qkv_bias,
120
+ use_rope=use_rope,
121
+ qk_rms_norm=qk_rms_norm,
122
+ )
123
+ self.cross_attn = SparseMultiHeadAttention(
124
+ channels,
125
+ ctx_channels=ctx_channels,
126
+ num_heads=num_heads,
127
+ type="cross",
128
+ attn_mode="full",
129
+ qkv_bias=qkv_bias,
130
+ qk_rms_norm=qk_rms_norm_cross,
131
+ )
132
+ self.mlp = SparseFeedForwardNet(
133
+ channels,
134
+ mlp_ratio=mlp_ratio,
135
+ )
136
+ if not share_mod:
137
+ self.adaLN_modulation = nn.Sequential(
138
+ nn.SiLU(),
139
+ nn.Linear(channels, 6 * channels, bias=True)
140
+ )
141
+
142
+ def _forward(self, x: SparseTensor, mod: torch.Tensor, context: torch.Tensor) -> SparseTensor:
143
+ if self.share_mod:
144
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = mod.chunk(6, dim=1)
145
+ else:
146
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(mod).chunk(6, dim=1)
147
+ h = x.replace(self.norm1(x.feats))
148
+ h = h * (1 + scale_msa) + shift_msa
149
+ h = self.self_attn(h)
150
+ h = h * gate_msa
151
+ x = x + h
152
+ h = x.replace(self.norm2(x.feats))
153
+ h = self.cross_attn(h, context)
154
+ x = x + h
155
+ h = x.replace(self.norm3(x.feats))
156
+ h = h * (1 + scale_mlp) + shift_mlp
157
+ h = self.mlp(h)
158
+ h = h * gate_mlp
159
+ x = x + h
160
+ return x
161
+
162
+ def forward(self, x: SparseTensor, mod: torch.Tensor, context: torch.Tensor) -> SparseTensor:
163
+ if self.use_checkpoint:
164
+ return torch.utils.checkpoint.checkpoint(self._forward, x, mod, context, use_reentrant=False)
165
+ else:
166
+ return self._forward(x, mod, context)
167
+
168
+
169
+
170
+ class ModulatedSparseTransformerCrossBlockMaskAsCond(nn.Module):
171
+ """
172
+ Sparse Transformer cross-attention block (MSA + MCA + FFN) with adaptive layer norm conditioning; cross-attends to image tokens (context['cond']) and, in a second cross-attention, to mask tokens (context['mask']).
173
+ """
174
+ def __init__(
175
+ self,
176
+ channels: int,
177
+ ctx_channels: int,
178
+ ctx_channels_mask: int,
179
+ num_heads: int,
180
+ mlp_ratio: float = 4.0,
181
+ attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "full",
182
+ window_size: Optional[int] = None,
183
+ shift_sequence: Optional[int] = None,
184
+ shift_window: Optional[Tuple[int, int, int]] = None,
185
+ serialize_mode: Optional[SerializeMode] = None,
186
+ use_checkpoint: bool = False,
187
+ use_rope: bool = False,
188
+ qk_rms_norm: bool = False,
189
+ qk_rms_norm_cross: bool = False,
190
+ qkv_bias: bool = True,
191
+ share_mod: bool = False,
192
+ ):
193
+ super().__init__()
194
+ self.use_checkpoint = use_checkpoint
195
+ self.share_mod = share_mod
196
+ self.norm1 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
197
+ self.norm2 = LayerNorm32(channels, elementwise_affine=True, eps=1e-6)
198
+ self.norm3 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
199
+ self.norm_mask = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
200
+ self.self_attn = SparseMultiHeadAttention(
201
+ channels,
202
+ num_heads=num_heads,
203
+ type="self",
204
+ attn_mode=attn_mode,
205
+ window_size=window_size,
206
+ shift_sequence=shift_sequence,
207
+ shift_window=shift_window,
208
+ serialize_mode=serialize_mode,
209
+ qkv_bias=qkv_bias,
210
+ use_rope=use_rope,
211
+ qk_rms_norm=qk_rms_norm,
212
+ )
213
+ self.cross_attn = SparseMultiHeadAttention(
214
+ channels,
215
+ ctx_channels=ctx_channels,
216
+ num_heads=num_heads,
217
+ type="cross",
218
+ attn_mode="full",
219
+ qkv_bias=qkv_bias,
220
+ qk_rms_norm=qk_rms_norm_cross,
221
+ )
222
+ self.cross_attn_mask = SparseMultiHeadAttention(
223
+ channels,
224
+ ctx_channels=ctx_channels_mask,
225
+ num_heads=num_heads,
226
+ type="cross",
227
+ attn_mode="full",
228
+ qkv_bias=qkv_bias,
229
+ qk_rms_norm=qk_rms_norm_cross,
230
+ )
231
+ self.mlp = SparseFeedForwardNet(
232
+ channels,
233
+ mlp_ratio=mlp_ratio,
234
+ )
235
+ if not share_mod:
236
+ self.adaLN_modulation = nn.Sequential(
237
+ nn.SiLU(),
238
+ nn.Linear(channels, 6 * channels, bias=True)
239
+ )
240
+
241
+ def _forward(self, x: SparseTensor, mod: torch.Tensor, context: dict) -> SparseTensor:
242
+ if self.share_mod:
243
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = mod.chunk(6, dim=1)
244
+ else:
245
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(mod).chunk(6, dim=1)
246
+ h = x.replace(self.norm1(x.feats))
247
+ h = h * (1 + scale_msa) + shift_msa
248
+ h = self.self_attn(h)
249
+ h = h * gate_msa
250
+ x = x + h
251
+ h = x.replace(self.norm2(x.feats))
252
+ h = self.cross_attn(h, context['cond'])
253
+ x = x + h
254
+ h = x.replace(self.norm_mask(x.feats))
255
+ h = self.cross_attn_mask(h, context['mask'])
256
+ x = x + h
257
+ h = x.replace(self.norm3(x.feats))
258
+ h = h * (1 + scale_mlp) + shift_mlp
259
+ h = self.mlp(h)
260
+ h = h * gate_mlp
261
+ x = x + h
262
+ return x
263
+
264
+ def forward(self, x: SparseTensor, mod: torch.Tensor, context: dict) -> SparseTensor:
265
+ if self.use_checkpoint:
266
+ return torch.utils.checkpoint.checkpoint(self._forward, x, mod, context, use_reentrant=False)
267
+ else:
268
+ return self._forward(x, mod, context)
269
+
270
+
271
+
272
+ class ModulatedSparseTransformerCrossBlockWeighted(nn.Module):
273
+ """
274
+ Sparse Transformer cross-attention block (MSA + weighted MCA + FFN) with adaptive layer norm conditioning; cross-attention over context['cond'] is weighted by per-token mask weights (context['mask']).
275
+ """
276
+ def __init__(
277
+ self,
278
+ channels: int,
279
+ ctx_channels: int,
280
+ ctx_channels_mask: int,
281
+ num_heads: int,
282
+ mlp_ratio: float = 4.0,
283
+ attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "full",
284
+ window_size: Optional[int] = None,
285
+ shift_sequence: Optional[int] = None,
286
+ shift_window: Optional[Tuple[int, int, int]] = None,
287
+ serialize_mode: Optional[SerializeMode] = None,
288
+ use_checkpoint: bool = False,
289
+ use_rope: bool = False,
290
+ qk_rms_norm: bool = False,
291
+ qk_rms_norm_cross: bool = False,
292
+ qkv_bias: bool = True,
293
+ share_mod: bool = False,
294
+ ):
295
+ super().__init__()
296
+ self.use_checkpoint = use_checkpoint
297
+ self.share_mod = share_mod
298
+ self.norm1 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
299
+ self.norm2 = LayerNorm32(channels, elementwise_affine=True, eps=1e-6)
300
+ self.norm3 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
301
+ self.norm_mask = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
302
+ self.self_attn = SparseMultiHeadAttention(
303
+ channels,
304
+ num_heads=num_heads,
305
+ type="self",
306
+ attn_mode=attn_mode,
307
+ window_size=window_size,
308
+ shift_sequence=shift_sequence,
309
+ shift_window=shift_window,
310
+ serialize_mode=serialize_mode,
311
+ qkv_bias=qkv_bias,
312
+ use_rope=use_rope,
313
+ qk_rms_norm=qk_rms_norm,
314
+ )
315
+ self.cross_attn = SparseMultiHeadAttentionWeighted(
316
+ channels,
317
+ ctx_channels=ctx_channels,
318
+ num_heads=num_heads,
319
+ type="cross",
320
+ attn_mode="full",
321
+ qkv_bias=qkv_bias,
322
+ qk_rms_norm=qk_rms_norm_cross,
323
+ )
324
+ self.mlp = SparseFeedForwardNet(
325
+ channels,
326
+ mlp_ratio=mlp_ratio,
327
+ )
328
+ if not share_mod:
329
+ self.adaLN_modulation = nn.Sequential(
330
+ nn.SiLU(),
331
+ nn.Linear(channels, 6 * channels, bias=True)
332
+ )
333
+
334
+ def _forward(self, x: SparseTensor, mod: torch.Tensor, context: dict) -> SparseTensor:
335
+ if self.share_mod:
336
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = mod.chunk(6, dim=1)
337
+ else:
338
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(mod).chunk(6, dim=1)
339
+ h = x.replace(self.norm1(x.feats))
340
+ h = h * (1 + scale_msa) + shift_msa
341
+ h = self.self_attn(h)
342
+ h = h * gate_msa
343
+ x = x + h
344
+ h = x.replace(self.norm2(x.feats))
345
+ h = self.cross_attn(h, context['cond'], context['mask'])
346
+ x = x + h
347
+ h = x.replace(self.norm3(x.feats))
348
+ h = h * (1 + scale_mlp) + shift_mlp
349
+ h = self.mlp(h)
350
+ h = h * gate_mlp
351
+ x = x + h
352
+ return x
353
+
354
+ def forward(self, x: SparseTensor, mod: torch.Tensor, context: dict) -> SparseTensor:
355
+ if self.use_checkpoint:
356
+ return torch.utils.checkpoint.checkpoint(self._forward, x, mod, context, use_reentrant=False)
357
+ else:
358
+ return self._forward(x, mod, context)
359
+
360
+
361
+
362
+ class ModulatedSparseTransformerCrossBlockMaskAsCondWeighted(nn.Module):
363
+ """
364
+ Sparse Transformer cross-attention block (MSA + weighted MCA + mask MCA + FFN) with adaptive layer norm conditioning; cross-attention over context['cond'] is weighted by context['mask'], followed by a second cross-attention over occlusion-mask tokens (context['mask_occ']).
365
+ """
366
+ def __init__(
367
+ self,
368
+ channels: int,
369
+ ctx_channels: int,
370
+ num_heads: int,
371
+ mlp_ratio: float = 4.0,
372
+ attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "full",
373
+ window_size: Optional[int] = None,
374
+ shift_sequence: Optional[int] = None,
375
+ shift_window: Optional[Tuple[int, int, int]] = None,
376
+ serialize_mode: Optional[SerializeMode] = None,
377
+ use_checkpoint: bool = False,
378
+ use_rope: bool = False,
379
+ qk_rms_norm: bool = False,
380
+ qk_rms_norm_cross: bool = False,
381
+ qkv_bias: bool = True,
382
+ share_mod: bool = False,
383
+ ):
384
+ super().__init__()
385
+ self.use_checkpoint = use_checkpoint
386
+ self.share_mod = share_mod
387
+ self.norm1 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
388
+ self.norm2 = LayerNorm32(channels, elementwise_affine=True, eps=1e-6)
389
+ self.norm3 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
390
+ self.norm_mask = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
391
+ self.self_attn = SparseMultiHeadAttention(
392
+ channels,
393
+ num_heads=num_heads,
394
+ type="self",
395
+ attn_mode=attn_mode,
396
+ window_size=window_size,
397
+ shift_sequence=shift_sequence,
398
+ shift_window=shift_window,
399
+ serialize_mode=serialize_mode,
400
+ qkv_bias=qkv_bias,
401
+ use_rope=use_rope,
402
+ qk_rms_norm=qk_rms_norm,
403
+ )
404
+ self.cross_attn = SparseMultiHeadAttentionWeighted(
405
+ channels,
406
+ ctx_channels=ctx_channels,
407
+ num_heads=num_heads,
408
+ type="cross",
409
+ attn_mode="full",
410
+ qkv_bias=qkv_bias,
411
+ qk_rms_norm=qk_rms_norm_cross,
412
+ )
413
+ self.cross_attn_mask = SparseMultiHeadAttention(
414
+ channels,
415
+ ctx_channels=ctx_channels,
416
+ num_heads=num_heads,
417
+ type="cross",
418
+ attn_mode="full",
419
+ qkv_bias=qkv_bias,
420
+ qk_rms_norm=qk_rms_norm_cross,
421
+ )
422
+ self.mlp = SparseFeedForwardNet(
423
+ channels,
424
+ mlp_ratio=mlp_ratio,
425
+ )
426
+ if not share_mod:
427
+ self.adaLN_modulation = nn.Sequential(
428
+ nn.SiLU(),
429
+ nn.Linear(channels, 6 * channels, bias=True)
430
+ )
431
+
432
+ def _forward(self, x: SparseTensor, mod: torch.Tensor, context: dict) -> SparseTensor:
433
+ if self.share_mod:
434
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = mod.chunk(6, dim=1)
435
+ else:
436
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(mod).chunk(6, dim=1)
437
+ h = x.replace(self.norm1(x.feats))
438
+ h = h * (1 + scale_msa) + shift_msa
439
+ h = self.self_attn(h)
440
+ h = h * gate_msa
441
+ x = x + h
442
+ h = x.replace(self.norm2(x.feats))
443
+ h = self.cross_attn(h, context['cond'], mask_weight=context['mask'])
444
+ x = x + h
445
+ h = x.replace(self.norm_mask(x.feats))
446
+ h = self.cross_attn_mask(h, context['mask_occ'])
447
+ x = x + h
448
+ h = x.replace(self.norm3(x.feats))
449
+ h = h * (1 + scale_mlp) + shift_mlp
450
+ h = self.mlp(h)
451
+ h = h * gate_mlp
452
+ x = x + h
453
+ return x
454
+
455
+ def forward(self, x: SparseTensor, mod: torch.Tensor, context: dict) -> SparseTensor:
456
+ if self.use_checkpoint:
457
+ return torch.utils.checkpoint.checkpoint(self._forward, x, mod, context, use_reentrant=False)
458
+ else:
459
+ return self._forward(x, mod, context)
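For the weighted and mask-as-condition variants above, `context` is a dict rather than a single tensor. A sketch of how it is wired (key names come from the code; `cond_tokens`, `mask_weights`, `occ_tokens`, `block`, `x` and `mod` are placeholders, and the exact form of the mask weights is defined by SparseMultiHeadAttentionWeighted):

context = {
    'cond': cond_tokens,     # (B, L, ctx_channels) image tokens for the weighted cross-attention
    'mask': mask_weights,    # per-token visibility weights, forwarded as mask_weight=...
    'mask_occ': occ_tokens,  # occlusion-mask tokens consumed by cross_attn_mask
}
out = block(x, mod, context)  # x: SparseTensor, mod: (B, channels) adaLN conditioning vector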
Amodal3R/modules/spatial.py CHANGED
@@ -1,3 +1,48 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:cfab71d7106b4fdf6cbdd2885646b5590d13439f9308c5b924c069afcdbf185d
3
- size 1763
 
 
 
1
+ import torch
2
+
3
+
4
+ def pixel_shuffle_3d(x: torch.Tensor, scale_factor: int) -> torch.Tensor:
5
+ """
6
+ 3D pixel shuffle.
7
+ """
8
+ B, C, H, W, D = x.shape
9
+ C_ = C // scale_factor**3
10
+ x = x.reshape(B, C_, scale_factor, scale_factor, scale_factor, H, W, D)
11
+ x = x.permute(0, 1, 5, 2, 6, 3, 7, 4)
12
+ x = x.reshape(B, C_, H*scale_factor, W*scale_factor, D*scale_factor)
13
+ return x
14
+
15
+
16
+ def patchify(x: torch.Tensor, patch_size: int):
17
+ """
18
+ Patchify a tensor.
19
+
20
+ Args:
21
+ x (torch.Tensor): (N, C, *spatial) tensor
22
+ patch_size (int): Patch size
23
+ """
24
+ DIM = x.dim() - 2
25
+ for d in range(2, DIM + 2):
26
+ assert x.shape[d] % patch_size == 0, f"Dimension {d} of input tensor must be divisible by patch size, got {x.shape[d]} and {patch_size}"
27
+
28
+ x = x.reshape(*x.shape[:2], *sum([[x.shape[d] // patch_size, patch_size] for d in range(2, DIM + 2)], []))
29
+ x = x.permute(0, 1, *([2 * i + 3 for i in range(DIM)] + [2 * i + 2 for i in range(DIM)]))
30
+ x = x.reshape(x.shape[0], x.shape[1] * (patch_size ** DIM), *(x.shape[-DIM:]))
31
+ return x
32
+
33
+
34
+ def unpatchify(x: torch.Tensor, patch_size: int):
35
+ """
36
+ Unpatchify a tensor.
37
+
38
+ Args:
39
+ x (torch.Tensor): (N, C, *spatial) tensor
40
+ patch_size (int): Patch size
41
+ """
42
+ DIM = x.dim() - 2
43
+ assert x.shape[1] % (patch_size ** DIM) == 0, f"Second dimension of input tensor must be divisible by patch size to unpatchify, got {x.shape[1]} and {patch_size ** DIM}"
44
+
45
+ x = x.reshape(x.shape[0], x.shape[1] // (patch_size ** DIM), *([patch_size] * DIM), *(x.shape[-DIM:]))
46
+ x = x.permute(0, 1, *(sum([[2 + DIM + i, 2 + i] for i in range(DIM)], [])))
47
+ x = x.reshape(x.shape[0], x.shape[1], *[x.shape[2 + 2 * i] * patch_size for i in range(DIM)])
48
+ return x
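A quick shape check for the helpers above; `patchify` and `unpatchify` are exact inverses of each other (sizes are illustrative):

import torch
from Amodal3R.modules.spatial import pixel_shuffle_3d, patchify, unpatchify

x = torch.randn(2, 8, 16, 16, 16)                     # (N, C, *spatial)
p = patchify(x, 2)                                    # (2, 8 * 2**3, 8, 8, 8)
assert torch.equal(unpatchify(p, 2), x)               # round-trips exactly

y = pixel_shuffle_3d(torch.randn(2, 64, 4, 4, 4), 2)  # (2, 64 // 2**3, 8, 8, 8)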
Amodal3R/modules/transformer/__init__.py CHANGED
@@ -1,3 +1,2 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:debb94976ce5c7fbf196012d9dbd1c6aa70acbc37c8f97cb3ffc94ef90730bfb
3
- size 46
 
1
+ from .blocks import *
2
+ from .modulated import *
 
Amodal3R/modules/transformer/blocks.py CHANGED
@@ -1,3 +1,182 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:3c03f97d24b462bcfd54f33f97dfe056b896f76e77dd391b197c5f576b2a94b9
3
- size 5865
 
 
 
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ from ..attention import MultiHeadAttention
5
+ from ..norm import LayerNorm32
6
+
7
+
8
+ class AbsolutePositionEmbedder(nn.Module):
9
+ """
10
+ Embeds spatial positions into vector representations.
11
+ """
12
+ def __init__(self, channels: int, in_channels: int = 3):
13
+ super().__init__()
14
+ self.channels = channels
15
+ self.in_channels = in_channels
16
+ self.freq_dim = channels // in_channels // 2
17
+ self.freqs = torch.arange(self.freq_dim, dtype=torch.float32) / self.freq_dim
18
+ self.freqs = 1.0 / (10000 ** self.freqs)
19
+
20
+ def _sin_cos_embedding(self, x: torch.Tensor) -> torch.Tensor:
21
+ """
22
+ Create sinusoidal position embeddings.
23
+
24
+ Args:
25
+ x: a 1-D Tensor of N indices
26
+
27
+ Returns:
28
+ an (N, D) Tensor of positional embeddings.
29
+ """
30
+ self.freqs = self.freqs.to(x.device)
31
+ out = torch.outer(x, self.freqs)
32
+ out = torch.cat([torch.sin(out), torch.cos(out)], dim=-1)
33
+ return out
34
+
35
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
36
+ """
37
+ Args:
38
+ x (torch.Tensor): (N, D) tensor of spatial positions
39
+ """
40
+ N, D = x.shape
41
+ assert D == self.in_channels, "Input dimension must match number of input channels"
42
+ embed = self._sin_cos_embedding(x.reshape(-1))
43
+ embed = embed.reshape(N, -1)
44
+ if embed.shape[1] < self.channels:
45
+ embed = torch.cat([embed, torch.zeros(N, self.channels - embed.shape[1], device=embed.device)], dim=-1)
46
+ return embed
47
+
48
+
49
+ class FeedForwardNet(nn.Module):
50
+ def __init__(self, channels: int, mlp_ratio: float = 4.0):
51
+ super().__init__()
52
+ self.mlp = nn.Sequential(
53
+ nn.Linear(channels, int(channels * mlp_ratio)),
54
+ nn.GELU(approximate="tanh"),
55
+ nn.Linear(int(channels * mlp_ratio), channels),
56
+ )
57
+
58
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
59
+ return self.mlp(x)
60
+
61
+
62
+ class TransformerBlock(nn.Module):
63
+ """
64
+ Transformer block (MSA + FFN).
65
+ """
66
+ def __init__(
67
+ self,
68
+ channels: int,
69
+ num_heads: int,
70
+ mlp_ratio: float = 4.0,
71
+ attn_mode: Literal["full", "windowed"] = "full",
72
+ window_size: Optional[int] = None,
73
+ shift_window: Optional[int] = None,
74
+ use_checkpoint: bool = False,
75
+ use_rope: bool = False,
76
+ qk_rms_norm: bool = False,
77
+ qkv_bias: bool = True,
78
+ ln_affine: bool = False,
79
+ ):
80
+ super().__init__()
81
+ self.use_checkpoint = use_checkpoint
82
+ self.norm1 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
83
+ self.norm2 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
84
+ self.attn = MultiHeadAttention(
85
+ channels,
86
+ num_heads=num_heads,
87
+ attn_mode=attn_mode,
88
+ window_size=window_size,
89
+ shift_window=shift_window,
90
+ qkv_bias=qkv_bias,
91
+ use_rope=use_rope,
92
+ qk_rms_norm=qk_rms_norm,
93
+ )
94
+ self.mlp = FeedForwardNet(
95
+ channels,
96
+ mlp_ratio=mlp_ratio,
97
+ )
98
+
99
+ def _forward(self, x: torch.Tensor) -> torch.Tensor:
100
+ h = self.norm1(x)
101
+ h = self.attn(h)
102
+ x = x + h
103
+ h = self.norm2(x)
104
+ h = self.mlp(h)
105
+ x = x + h
106
+ return x
107
+
108
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
109
+ if self.use_checkpoint:
110
+ return torch.utils.checkpoint.checkpoint(self._forward, x, use_reentrant=False)
111
+ else:
112
+ return self._forward(x)
113
+
114
+
115
+ class TransformerCrossBlock(nn.Module):
116
+ """
117
+ Transformer cross-attention block (MSA + MCA + FFN).
118
+ """
119
+ def __init__(
120
+ self,
121
+ channels: int,
122
+ ctx_channels: int,
123
+ num_heads: int,
124
+ mlp_ratio: float = 4.0,
125
+ attn_mode: Literal["full", "windowed"] = "full",
126
+ window_size: Optional[int] = None,
127
+ shift_window: Optional[Tuple[int, int, int]] = None,
128
+ use_checkpoint: bool = False,
129
+ use_rope: bool = False,
130
+ qk_rms_norm: bool = False,
131
+ qk_rms_norm_cross: bool = False,
132
+ qkv_bias: bool = True,
133
+ ln_affine: bool = False,
134
+ ):
135
+ super().__init__()
136
+ self.use_checkpoint = use_checkpoint
137
+ self.norm1 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
138
+ self.norm2 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
139
+ self.norm3 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
140
+ self.self_attn = MultiHeadAttention(
141
+ channels,
142
+ num_heads=num_heads,
143
+ type="self",
144
+ attn_mode=attn_mode,
145
+ window_size=window_size,
146
+ shift_window=shift_window,
147
+ qkv_bias=qkv_bias,
148
+ use_rope=use_rope,
149
+ qk_rms_norm=qk_rms_norm,
150
+ )
151
+ self.cross_attn = MultiHeadAttention(
152
+ channels,
153
+ ctx_channels=ctx_channels,
154
+ num_heads=num_heads,
155
+ type="cross",
156
+ attn_mode="full",
157
+ qkv_bias=qkv_bias,
158
+ qk_rms_norm=qk_rms_norm_cross,
159
+ )
160
+ self.mlp = FeedForwardNet(
161
+ channels,
162
+ mlp_ratio=mlp_ratio,
163
+ )
164
+
165
+ def _forward(self, x: torch.Tensor, context: torch.Tensor):
166
+ h = self.norm1(x)
167
+ h = self.self_attn(h)
168
+ x = x + h
169
+ h = self.norm2(x)
170
+ h = self.cross_attn(h, context)
171
+ x = x + h
172
+ h = self.norm3(x)
173
+ h = self.mlp(h)
174
+ x = x + h
175
+ return x
176
+
177
+ def forward(self, x: torch.Tensor, context: torch.Tensor):
178
+ if self.use_checkpoint:
179
+ return torch.utils.checkpoint.checkpoint(self._forward, x, context, use_reentrant=False)
180
+ else:
181
+ return self._forward(x, context)
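A small sketch of the position embedder defined at the top of this file: frequencies follow the usual 1/10000^k schedule per input channel, and the output is zero-padded when channels is not divisible by 2 * in_channels.

import torch
from Amodal3R.modules.transformer.blocks import AbsolutePositionEmbedder

embedder = AbsolutePositionEmbedder(channels=1024, in_channels=3)
pos = torch.randint(0, 64, (4096, 3)).float()   # (N, 3) voxel coordinates
pe = embedder(pos)                               # (4096, 1024): 170 sin/cos pairs per axis + zero padding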
182
+
Amodal3R/modules/transformer/modulated.py CHANGED
@@ -1,3 +1,434 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:787b0859e4a20bd36b208621cea41c088b623d28c5ae5f8e0a12087cc89f9168
3
- size 15821
 
 
 
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ from ..attention import MultiHeadAttention, MultiHeadAttentionWeighted
5
+ from ..norm import LayerNorm32
6
+ from .blocks import FeedForwardNet
7
+
8
+
9
+ class ModulatedTransformerBlock(nn.Module):
10
+ """
11
+ Transformer block (MSA + FFN) with adaptive layer norm conditioning.
12
+ """
13
+ def __init__(
14
+ self,
15
+ channels: int,
16
+ num_heads: int,
17
+ mlp_ratio: float = 4.0,
18
+ attn_mode: Literal["full", "windowed"] = "full",
19
+ window_size: Optional[int] = None,
20
+ shift_window: Optional[Tuple[int, int, int]] = None,
21
+ use_checkpoint: bool = False,
22
+ use_rope: bool = False,
23
+ qk_rms_norm: bool = False,
24
+ qkv_bias: bool = True,
25
+ share_mod: bool = False,
26
+ ):
27
+ super().__init__()
28
+ self.use_checkpoint = use_checkpoint
29
+ self.share_mod = share_mod
30
+ self.norm1 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
31
+ self.norm2 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
32
+ self.attn = MultiHeadAttention(
33
+ channels,
34
+ num_heads=num_heads,
35
+ attn_mode=attn_mode,
36
+ window_size=window_size,
37
+ shift_window=shift_window,
38
+ qkv_bias=qkv_bias,
39
+ use_rope=use_rope,
40
+ qk_rms_norm=qk_rms_norm,
41
+ )
42
+ self.mlp = FeedForwardNet(
43
+ channels,
44
+ mlp_ratio=mlp_ratio,
45
+ )
46
+ if not share_mod:
47
+ self.adaLN_modulation = nn.Sequential(
48
+ nn.SiLU(),
49
+ nn.Linear(channels, 6 * channels, bias=True)
50
+ )
51
+
52
+ def _forward(self, x: torch.Tensor, mod: torch.Tensor) -> torch.Tensor:
53
+ if self.share_mod:
54
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = mod.chunk(6, dim=1)
55
+ else:
56
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(mod).chunk(6, dim=1)
57
+ h = self.norm1(x)
58
+ h = h * (1 + scale_msa.unsqueeze(1)) + shift_msa.unsqueeze(1)
59
+ h = self.attn(h)
60
+ h = h * gate_msa.unsqueeze(1)
61
+ x = x + h
62
+ h = self.norm2(x)
63
+ h = h * (1 + scale_mlp.unsqueeze(1)) + shift_mlp.unsqueeze(1)
64
+ h = self.mlp(h)
65
+ h = h * gate_mlp.unsqueeze(1)
66
+ x = x + h
67
+ return x
68
+
69
+ def forward(self, x: torch.Tensor, mod: torch.Tensor) -> torch.Tensor:
70
+ if self.use_checkpoint:
71
+ return torch.utils.checkpoint.checkpoint(self._forward, x, mod, use_reentrant=False)
72
+ else:
73
+ return self._forward(x, mod)
74
+
75
+
76
+ class ModulatedTransformerCrossBlock(nn.Module):
77
+ """
78
+ Transformer cross-attention block (MSA + MCA + FFN) with adaptive layer norm conditioning.
79
+ """
80
+ def __init__(
81
+ self,
82
+ channels: int,
83
+ ctx_channels: int,
84
+ num_heads: int,
85
+ mlp_ratio: float = 4.0,
86
+ attn_mode: Literal["full", "windowed"] = "full",
87
+ window_size: Optional[int] = None,
88
+ shift_window: Optional[Tuple[int, int, int]] = None,
89
+ use_checkpoint: bool = False,
90
+ use_rope: bool = False,
91
+ qk_rms_norm: bool = False,
92
+ qk_rms_norm_cross: bool = False,
93
+ qkv_bias: bool = True,
94
+ share_mod: bool = False,
95
+ ):
96
+ super().__init__()
97
+ self.use_checkpoint = use_checkpoint
98
+ self.share_mod = share_mod
99
+ self.norm1 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
100
+ self.norm2 = LayerNorm32(channels, elementwise_affine=True, eps=1e-6)
101
+ self.norm3 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
102
+ self.self_attn = MultiHeadAttention(
103
+ channels,
104
+ num_heads=num_heads,
105
+ type="self",
106
+ attn_mode=attn_mode,
107
+ window_size=window_size,
108
+ shift_window=shift_window,
109
+ qkv_bias=qkv_bias,
110
+ use_rope=use_rope,
111
+ qk_rms_norm=qk_rms_norm,
112
+ )
113
+ self.cross_attn = MultiHeadAttention(
114
+ channels,
115
+ ctx_channels=ctx_channels,
116
+ num_heads=num_heads,
117
+ type="cross",
118
+ attn_mode="full",
119
+ qkv_bias=qkv_bias,
120
+ qk_rms_norm=qk_rms_norm_cross,
121
+ )
122
+ self.mlp = FeedForwardNet(
123
+ channels,
124
+ mlp_ratio=mlp_ratio,
125
+ )
126
+ if not share_mod:
127
+ self.adaLN_modulation = nn.Sequential(
128
+ nn.SiLU(),
129
+ nn.Linear(channels, 6 * channels, bias=True)
130
+ )
131
+
132
+ def _forward(self, x: torch.Tensor, mod: torch.Tensor, context: torch.Tensor):
133
+ if self.share_mod:
134
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = mod.chunk(6, dim=1)
135
+ else:
136
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(mod).chunk(6, dim=1)
137
+ h = self.norm1(x)
138
+ h = h * (1 + scale_msa.unsqueeze(1)) + shift_msa.unsqueeze(1)
139
+ h = self.self_attn(h)
140
+ h = h * gate_msa.unsqueeze(1)
141
+ x = x + h
142
+ h = self.norm2(x)
143
+ h = self.cross_attn(h, context)
144
+ x = x + h
145
+ h = self.norm3(x)
146
+ h = h * (1 + scale_mlp.unsqueeze(1)) + shift_mlp.unsqueeze(1)
147
+ h = self.mlp(h)
148
+ h = h * gate_mlp.unsqueeze(1)
149
+ x = x + h
150
+ return x
151
+
152
+ def forward(self, x: torch.Tensor, mod: torch.Tensor, context: torch.Tensor):
153
+ if self.use_checkpoint:
154
+ return torch.utils.checkpoint.checkpoint(self._forward, x, mod, context, use_reentrant=False)
155
+ else:
156
+ return self._forward(x, mod, context)
157
+
158
+ class ModulatedTransformerCrossBlockMaskAsCond(nn.Module):
159
+ """
160
+ Transformer cross-attention block (MSA + MCA + FFN) with adaptive layer norm conditioning; cross-attends to image tokens (context['cond']) and, in a second cross-attention, to mask tokens (context['mask']).
161
+ """
162
+ def __init__(
163
+ self,
164
+ channels: int,
165
+ ctx_channels: int,
166
+ ctx_channels_mask: int,
167
+ num_heads: int,
168
+ mlp_ratio: float = 4.0,
169
+ attn_mode: Literal["full", "windowed"] = "full",
170
+ window_size: Optional[int] = None,
171
+ shift_window: Optional[Tuple[int, int, int]] = None,
172
+ use_checkpoint: bool = False,
173
+ use_rope: bool = False,
174
+ qk_rms_norm: bool = False,
175
+ qk_rms_norm_cross: bool = False,
176
+ qkv_bias: bool = True,
177
+ share_mod: bool = False,
178
+ ):
179
+ super().__init__()
180
+ self.use_checkpoint = use_checkpoint
181
+ self.share_mod = share_mod
182
+ self.norm1 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
183
+ self.norm2 = LayerNorm32(channels, elementwise_affine=True, eps=1e-6)
184
+ self.norm3 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
185
+ self.norm_mask = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
186
+ self.self_attn = MultiHeadAttention(
187
+ channels,
188
+ num_heads=num_heads,
189
+ type="self",
190
+ attn_mode=attn_mode,
191
+ window_size=window_size,
192
+ shift_window=shift_window,
193
+ qkv_bias=qkv_bias,
194
+ use_rope=use_rope,
195
+ qk_rms_norm=qk_rms_norm,
196
+ )
197
+ self.cross_attn = MultiHeadAttention(
198
+ channels,
199
+ ctx_channels=ctx_channels,
200
+ num_heads=num_heads,
201
+ type="cross",
202
+ attn_mode="full",
203
+ qkv_bias=qkv_bias,
204
+ qk_rms_norm=qk_rms_norm_cross,
205
+ )
206
+ self.cross_attn_mask = MultiHeadAttention(
207
+ channels,
208
+ ctx_channels=ctx_channels_mask,
209
+ num_heads=num_heads,
210
+ type="cross",
211
+ attn_mode="full",
212
+ qkv_bias=qkv_bias,
213
+ qk_rms_norm=qk_rms_norm_cross,
214
+ )
215
+ self.mlp = FeedForwardNet(
216
+ channels,
217
+ mlp_ratio=mlp_ratio,
218
+ )
219
+ if not share_mod:
220
+ self.adaLN_modulation = nn.Sequential(
221
+ nn.SiLU(),
222
+ nn.Linear(channels, 6 * channels, bias=True)
223
+ )
224
+
225
+ def _forward(self, x: torch.Tensor, mod: torch.Tensor, context: dict):
226
+ if self.share_mod:
227
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = mod.chunk(6, dim=1)
228
+ else:
229
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(mod).chunk(6, dim=1)
230
+ h = self.norm1(x)
231
+ h = h * (1 + scale_msa.unsqueeze(1)) + shift_msa.unsqueeze(1)
232
+ h = self.self_attn(h)
233
+ h = h * gate_msa.unsqueeze(1)
234
+ x = x + h
235
+ h = self.norm2(x)
236
+ h = self.cross_attn(h, context['cond'])
237
+ x = x + h
238
+ h = self.norm_mask(x)
239
+ h = self.cross_attn_mask(h, context['mask'])
240
+ x = x + h
241
+ h = self.norm3(x)
242
+ h = h * (1 + scale_mlp.unsqueeze(1)) + shift_mlp.unsqueeze(1)
243
+ h = self.mlp(h)
244
+ h = h * gate_mlp.unsqueeze(1)
245
+ x = x + h
246
+ return x
247
+
248
+ def forward(self, x: torch.Tensor, mod: torch.Tensor, context: dict):
249
+ if self.use_checkpoint:
250
+ return torch.utils.checkpoint.checkpoint(self._forward, x, mod, context, use_reentrant=False)
251
+ else:
252
+ return self._forward(x, mod, context)
253
+
254
+
255
+
256
+ class ModulatedTransformerCrossBlockWeighted(nn.Module):
257
+ """
258
+ Transformer cross-attention block (MSA + weighted MCA + FFN) with adaptive layer norm conditioning; cross-attention over context['cond'] is weighted by per-token mask weights (context['mask']).
259
+ """
260
+ def __init__(
261
+ self,
262
+ channels: int,
263
+ ctx_channels: int,
264
+ ctx_channels_mask: int,
265
+ num_heads: int,
266
+ mlp_ratio: float = 4.0,
267
+ attn_mode: Literal["full", "windowed"] = "full",
268
+ window_size: Optional[int] = None,
269
+ shift_window: Optional[Tuple[int, int, int]] = None,
270
+ use_checkpoint: bool = False,
271
+ use_rope: bool = False,
272
+ qk_rms_norm: bool = False,
273
+ qk_rms_norm_cross: bool = False,
274
+ qkv_bias: bool = True,
275
+ share_mod: bool = False,
276
+ ):
277
+ super().__init__()
278
+ self.use_checkpoint = use_checkpoint
279
+ self.share_mod = share_mod
280
+ self.norm1 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
281
+ self.norm2 = LayerNorm32(channels, elementwise_affine=True, eps=1e-6)
282
+ self.norm3 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
283
+ self.norm_mask = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
284
+ self.self_attn = MultiHeadAttention(
285
+ channels,
286
+ num_heads=num_heads,
287
+ type="self",
288
+ attn_mode=attn_mode,
289
+ window_size=window_size,
290
+ shift_window=shift_window,
291
+ qkv_bias=qkv_bias,
292
+ use_rope=use_rope,
293
+ qk_rms_norm=qk_rms_norm,
294
+ )
295
+ self.cross_attn = MultiHeadAttentionWeighted(
296
+ channels,
297
+ ctx_channels=ctx_channels,
298
+ num_heads=num_heads,
299
+ type="cross",
300
+ attn_mode="full",
301
+ qkv_bias=qkv_bias,
302
+ qk_rms_norm=qk_rms_norm_cross,
303
+ )
304
+ self.mlp = FeedForwardNet(
305
+ channels,
306
+ mlp_ratio=mlp_ratio,
307
+ )
308
+ if not share_mod:
309
+ self.adaLN_modulation = nn.Sequential(
310
+ nn.SiLU(),
311
+ nn.Linear(channels, 6 * channels, bias=True)
312
+ )
313
+
314
+ def _forward(self, x: torch.Tensor, mod: torch.Tensor, context: dict):
315
+ if self.share_mod:
316
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = mod.chunk(6, dim=1)
317
+ else:
318
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(mod).chunk(6, dim=1)
319
+ h = self.norm1(x)
320
+ h = h * (1 + scale_msa.unsqueeze(1)) + shift_msa.unsqueeze(1)
321
+ h = self.self_attn(h)
322
+ h = h * gate_msa.unsqueeze(1)
323
+ x = x + h
324
+ h = self.norm2(x)
325
+ h = self.cross_attn(h, context['cond'], mask_weight=context['mask'])
326
+ x = x + h
327
+ h = self.norm3(x)
328
+ h = h * (1 + scale_mlp.unsqueeze(1)) + shift_mlp.unsqueeze(1)
329
+ h = self.mlp(h)
330
+ h = h * gate_mlp.unsqueeze(1)
331
+ x = x + h
332
+ return x
333
+
334
+ def forward(self, x: torch.Tensor, mod: torch.Tensor, context: dict):
335
+ if self.use_checkpoint:
336
+ return torch.utils.checkpoint.checkpoint(self._forward, x, mod, context, use_reentrant=False)
337
+ else:
338
+ return self._forward(x, mod, context)
339
+
340
+
341
+ class ModulatedTransformerCrossBlockMaskAsCondWeighted(nn.Module):
342
+ """
343
+ Transformer cross-attention block (MSA + weighted MCA + mask MCA + FFN) with adaptive layer norm conditioning; cross-attention over context['cond'] is weighted by context['mask'], followed by a second cross-attention over occlusion-mask tokens (context['mask_occ']).
344
+ """
345
+ def __init__(
346
+ self,
347
+ channels: int,
348
+ ctx_channels: int,
349
+ num_heads: int,
350
+ mlp_ratio: float = 4.0,
351
+ attn_mode: Literal["full", "windowed"] = "full",
352
+ window_size: Optional[int] = None,
353
+ shift_window: Optional[Tuple[int, int, int]] = None,
354
+ use_checkpoint: bool = False,
355
+ use_rope: bool = False,
356
+ qk_rms_norm: bool = False,
357
+ qk_rms_norm_cross: bool = False,
358
+ qkv_bias: bool = True,
359
+ share_mod: bool = False,
360
+ ):
361
+ super().__init__()
362
+ self.use_checkpoint = use_checkpoint
363
+ self.share_mod = share_mod
364
+ self.norm1 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
365
+ self.norm2 = LayerNorm32(channels, elementwise_affine=True, eps=1e-6)
366
+ self.norm3 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
367
+ self.norm_mask = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
368
+ self.self_attn = MultiHeadAttention(
369
+ channels,
370
+ num_heads=num_heads,
371
+ type="self",
372
+ attn_mode=attn_mode,
373
+ window_size=window_size,
374
+ shift_window=shift_window,
375
+ qkv_bias=qkv_bias,
376
+ use_rope=use_rope,
377
+ qk_rms_norm=qk_rms_norm,
378
+ )
379
+ self.cross_attn = MultiHeadAttentionWeighted(
380
+ channels,
381
+ ctx_channels=ctx_channels,
382
+ num_heads=num_heads,
383
+ type="cross",
384
+ attn_mode="full",
385
+ qkv_bias=qkv_bias,
386
+ qk_rms_norm=qk_rms_norm_cross,
387
+ )
388
+ self.cross_attn_mask = MultiHeadAttention(
389
+ channels,
390
+ ctx_channels=ctx_channels,
391
+ num_heads=num_heads,
392
+ type="cross",
393
+ attn_mode="full",
394
+ qkv_bias=qkv_bias,
395
+ qk_rms_norm=qk_rms_norm_cross,
396
+ )
397
+ self.mlp = FeedForwardNet(
398
+ channels,
399
+ mlp_ratio=mlp_ratio,
400
+ )
401
+ if not share_mod:
402
+ self.adaLN_modulation = nn.Sequential(
403
+ nn.SiLU(),
404
+ nn.Linear(channels, 6 * channels, bias=True)
405
+ )
406
+
407
+ def _forward(self, x: torch.Tensor, mod: torch.Tensor, context: dict):
408
+ if self.share_mod:
409
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = mod.chunk(6, dim=1)
410
+ else:
411
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(mod).chunk(6, dim=1)
412
+ h = self.norm1(x)
413
+ h = h * (1 + scale_msa.unsqueeze(1)) + shift_msa.unsqueeze(1)
414
+ h = self.self_attn(h)
415
+ h = h * gate_msa.unsqueeze(1)
416
+ x = x + h
417
+ h = self.norm2(x)
418
+ h = self.cross_attn(h, context['cond'], mask_weight=context['mask'])
419
+ x = x + h
420
+ h = self.norm_mask(x)
421
+ h = self.cross_attn_mask(h, context['mask_occ'])
422
+ x = x + h
423
+ h = self.norm3(x)
424
+ h = h * (1 + scale_mlp.unsqueeze(1)) + shift_mlp.unsqueeze(1)
425
+ h = self.mlp(h)
426
+ h = h * gate_mlp.unsqueeze(1)
427
+ x = x + h
428
+ return x
429
+
430
+ def forward(self, x: torch.Tensor, mod: torch.Tensor, context: dict):
431
+ if self.use_checkpoint:
432
+ return torch.utils.checkpoint.checkpoint(self._forward, x, mod, context, use_reentrant=False)
433
+ else:
434
+ return self._forward(x, mod, context)
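A sketch of how the modulated blocks above are driven (shapes are illustrative; the attention backend may require CUDA): `mod` is a per-sample conditioning vector and, with share_mod=False, each block maps it to six adaLN shift/scale/gate terms.

import torch
from Amodal3R.modules.transformer.modulated import ModulatedTransformerCrossBlock

block = ModulatedTransformerCrossBlock(channels=1024, ctx_channels=1024, num_heads=16)
x = torch.randn(2, 4096, 1024)     # (B, N, channels) tokens
mod = torch.randn(2, 1024)         # (B, channels), chunked into shift/scale/gate for MSA and FFN
ctx = torch.randn(2, 1369, 1024)   # (B, L, ctx_channels) conditioning tokens
y = block(x, mod, ctx)             # (B, N, channels)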
Amodal3R/modules/utils.py CHANGED
@@ -1,3 +1,54 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:acfdc3769ce510f79f1762638063ff796481947d33d26a04f7c061f2eccdbf09
3
- size 1157
 
 
 
1
+ import torch.nn as nn
2
+ from ..modules import sparse as sp
3
+
4
+ FP16_MODULES = (
5
+ nn.Conv1d,
6
+ nn.Conv2d,
7
+ nn.Conv3d,
8
+ nn.ConvTranspose1d,
9
+ nn.ConvTranspose2d,
10
+ nn.ConvTranspose3d,
11
+ nn.Linear,
12
+ sp.SparseConv3d,
13
+ sp.SparseInverseConv3d,
14
+ sp.SparseLinear,
15
+ )
16
+
17
+ def convert_module_to_f16(l):
18
+ """
19
+ Convert primitive modules to float16.
20
+ """
21
+ if isinstance(l, FP16_MODULES):
22
+ for p in l.parameters():
23
+ p.data = p.data.half()
24
+
25
+
26
+ def convert_module_to_f32(l):
27
+ """
28
+ Convert primitive modules to float32, undoing convert_module_to_f16().
29
+ """
30
+ if isinstance(l, FP16_MODULES):
31
+ for p in l.parameters():
32
+ p.data = p.data.float()
33
+
34
+
35
+ def zero_module(module):
36
+ """
37
+ Zero out the parameters of a module and return it.
38
+ """
39
+ for p in module.parameters():
40
+ p.detach().zero_()
41
+ return module
42
+
43
+
44
+ def scale_module(module, scale):
45
+ """
46
+ Scale the parameters of a module and return it.
47
+ """
48
+ for p in module.parameters():
49
+ p.detach().mul_(scale)
50
+ return module
51
+
52
+
53
+ def modulate(x, shift, scale):
54
+ return x * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1)
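An assumed usage sketch for these helpers (this file does not prescribe it): cast the compute-heavy layers to fp16 and zero-initialize a final projection so a block starts out as a plain residual path.

import torch
import torch.nn as nn
from Amodal3R.modules.utils import convert_module_to_f16, zero_module, modulate

proj = zero_module(nn.Linear(1024, 1024))   # starts with all-zero weight and bias
mlp = nn.Sequential(nn.Linear(1024, 4096), nn.GELU(), proj)
mlp.apply(convert_module_to_f16)            # only FP16_MODULES instances are cast

x = torch.randn(2, 16, 1024)
out = modulate(x, shift=torch.zeros(2, 1024), scale=torch.zeros(2, 1024))  # identical to x here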
Amodal3R/pipelines/Amodal3R_image_to_3d.py CHANGED
@@ -1,3 +1,435 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:a67b5b716018c5ef2accf287c75ced7dea128cde5ecc8f961d0acabd38b56774
3
- size 18023
 
 
 
1
+ from typing import *
2
+ from contextlib import contextmanager
3
+ import torch
4
+ import torch.nn as nn
5
+ import torch.nn.functional as F
6
+ import numpy as np
7
+ from tqdm import tqdm
8
+ from easydict import EasyDict as edict
9
+ from torchvision import transforms
10
+ from PIL import Image
11
+ import rembg
12
+ from .base import Pipeline
13
+ from . import samplers
14
+ from ..modules import sparse as sp
15
+ from ..representations import Gaussian, MeshExtractResult
16
+ import cv2
17
+ import utils3d
18
+ from safetensors.torch import load_file, save_file
19
+
20
+
21
+ class mask_patcher(nn.Module):
22
+ def __init__(self):
23
+ super(mask_patcher, self).__init__()
24
+
25
+ def forward(self, mask, patch_size=14):
26
+ mask = F.interpolate(mask.float(), size=(518, 518), mode='nearest') # [B, 1, 518, 518]
27
+
28
+ patches = mask.unfold(2, patch_size, patch_size).unfold(3, patch_size, patch_size)
29
+
30
+ patch_ratio = patches.mean(dim=(-1, -2)) # [B, 1, 37, 37]
31
+
32
+ patch_ratio = patch_ratio.squeeze(1) # [B, 37, 37]
33
+
34
+ return patch_ratio
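+ # In effect (illustrative): a (B, 1, H, W) binary visibility mask is resized to 518x518 and
+ # averaged over non-overlapping 14x14 patches, yielding a (B, 37, 37) grid of per-patch
+ # visibility ratios aligned with the DINOv2 patch tokens.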
35
+
36
+
37
+ class Amodal3RImageTo3DPipeline(Pipeline):
38
+ """
39
+ Pipeline for inferring Amodal3R image-to-3D models.
40
+
41
+ Args:
42
+ models (dict[str, nn.Module]): The models to use in the pipeline.
43
+ sparse_structure_sampler (samplers.Sampler): The sampler for the sparse structure.
44
+ slat_sampler (samplers.Sampler): The sampler for the structured latent.
45
+ slat_normalization (dict): The normalization parameters for the structured latent.
46
+ image_cond_model (str): The name of the image conditioning model.
47
+ """
48
+ def __init__(
49
+ self,
50
+ models: dict[str, nn.Module] = None,
51
+ sparse_structure_sampler: samplers.Sampler = None,
52
+ slat_sampler: samplers.Sampler = None,
53
+ slat_normalization: dict = None,
54
+ image_cond_model: str = None,
55
+ ):
56
+ if models is None:
57
+ return
58
+ super().__init__(models)
59
+ self.sparse_structure_sampler = sparse_structure_sampler
60
+ self.slat_sampler = slat_sampler
61
+ self.sparse_structure_sampler_params = {}
62
+ self.slat_sampler_params = {}
63
+ self.slat_normalization = slat_normalization
64
+ self.rembg_session = None
65
+ self._init_image_cond_model(image_cond_model)
66
+
67
+ @staticmethod
68
+ def from_pretrained(path: str, if_mask_transformer: bool = False) -> "Amodal3RImageTo3DPipeline":
69
+ """
70
+ Load a pretrained model.
71
+
72
+ Args:
73
+ path (str): The path to the model. Can be either local path or a Hugging Face repository.
74
+ """
75
+ pipeline = super(Amodal3RImageTo3DPipeline, Amodal3RImageTo3DPipeline).from_pretrained(path)
76
+ new_pipeline = Amodal3RImageTo3DPipeline()
77
+ new_pipeline.__dict__ = pipeline.__dict__
78
+ args = pipeline._pretrained_args
79
+
80
+ new_pipeline.sparse_structure_sampler = getattr(samplers, args['sparse_structure_sampler']['name'])(**args['sparse_structure_sampler']['args'])
81
+ new_pipeline.sparse_structure_sampler_params = args['sparse_structure_sampler']['params']
82
+
83
+ new_pipeline.slat_sampler = getattr(samplers, args['slat_sampler']['name'])(**args['slat_sampler']['args'])
84
+ new_pipeline.slat_sampler_params = args['slat_sampler']['params']
85
+
86
+ new_pipeline.slat_normalization = args['slat_normalization']
87
+
88
+ new_pipeline._init_image_cond_model(args['image_cond_model'])
89
+
90
+ new_pipeline.mask_patcher = mask_patcher().cuda()
91
+ return new_pipeline
92
+
93
+ def _init_image_cond_model(self, name: str):
94
+ """
95
+ Initialize the image conditioning model.
96
+ """
97
+ dinov2_model = torch.hub.load('facebookresearch/dinov2', name, pretrained=True)
98
+ dinov2_model.eval()
99
+ self.models['image_cond_model'] = dinov2_model
100
+ transform = transforms.Compose([
101
+ transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
102
+ ])
103
+ self.image_cond_model_transform = transform
104
+
105
+ def preprocess_image(self, input: Image.Image) -> Image.Image:
106
+ """
107
+ Preprocess the input image.
108
+ """
109
+ # if has alpha channel, use it directly; otherwise, remove background
110
+ has_alpha = False
111
+ if input.mode == 'RGBA':
112
+ alpha = np.array(input)[:, :, 3]
113
+ if not np.all(alpha == 255):
114
+ has_alpha = True
115
+ if has_alpha:
116
+ output = input
117
+ else:
118
+ input = input.convert('RGB')
119
+ max_size = max(input.size)
120
+ scale = min(1, 1024 / max_size)
121
+ if scale < 1:
122
+ input = input.resize((int(input.width * scale), int(input.height * scale)), Image.Resampling.LANCZOS)
123
+ if getattr(self, 'rembg_session', None) is None:
124
+ self.rembg_session = rembg.new_session('u2net')
125
+ output = rembg.remove(input, session=self.rembg_session)
126
+ output = output.resize((518, 518), Image.Resampling.LANCZOS)
127
+ output = np.array(output).astype(np.float32) / 255
128
+ output = Image.fromarray((output * 255).astype(np.uint8))
129
+ return output
130
+
131
+ def preprocess_image_w_mask(self, input: Image.Image, mask: Image.Image, kernel_size=3) -> Tuple[Image.Image, np.ndarray, np.ndarray]:
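+ """
+ Mask out the occluded region (dilated by `kernel_size`) and the background, resize the
+ masked image to 518x518, and return it together with the combined invalid-pixel mask
+ and the occlusion-only mask (both kept at the original resolution).
+ """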
132
+ image = np.array(input).astype(np.float32) / 255
133
+ mask_ori = np.array(mask).astype(np.float32)
134
+ mask = (mask_ori < 127).astype(np.uint8)
135
+ if kernel_size > 0:
136
+ kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel_size, kernel_size))
137
+ mask_occlude = cv2.dilate(mask, kernel, iterations=1)
138
+ else:
139
+ mask_occlude = mask
140
+ mask_bg = (mask_ori>230).astype(np.uint8)
141
+ mask = mask_occlude | mask_bg
142
+ image = image * (1 - mask[:, :, None])
143
+ image = Image.fromarray((image * 255).astype(np.uint8))
144
+ image = image.resize((518, 518), Image.Resampling.LANCZOS)
145
+ mask_occ = np.zeros(mask.shape)
146
+ mask_occ[mask_occlude==1] = 1
147
+ return image, mask, mask_occ
148
+
149
+ @torch.no_grad()
150
+ def encode_image(self, image: Union[torch.Tensor, list[Image.Image]]) -> torch.Tensor:
151
+ """
152
+ Encode the image.
153
+
154
+ Args:
155
+ image (Union[torch.Tensor, list[Image.Image]]): The image to encode
156
+
157
+ Returns:
158
+ torch.Tensor: The encoded features.
159
+ """
160
+ if isinstance(image, torch.Tensor):
161
+ assert image.ndim == 4, "Image tensor should be batched (B, C, H, W)"
162
+ elif isinstance(image, list):
163
+ assert all(isinstance(i, Image.Image) for i in image), "Image list should be list of PIL images"
164
+ image = [i.resize((518, 518), Image.LANCZOS) for i in image]
165
+ image = [np.array(i.convert('RGB')).astype(np.float32) / 255 for i in image]
166
+ image = [torch.from_numpy(i).permute(2, 0, 1).float() for i in image]
167
+ image = torch.stack(image).to(self.device)
168
+ else:
169
+ raise ValueError(f"Unsupported type of image: {type(image)}")
170
+
171
+ image = self.image_cond_model_transform(image).to(self.device)
172
+ features = self.models['image_cond_model'](image, is_training=True)['x_prenorm']
173
+ patchtokens = F.layer_norm(features, features.shape[-1:])
174
+ return patchtokens
175
+
176
+ def get_cond_w_masks(self, image: Union[torch.Tensor, list[Image.Image]], mask: Union[torch.Tensor, list[Image.Image]], masks_occ: Union[torch.Tensor, list[Image.Image], None], mask_encode_type: Literal['dino', 'repeat', 'patcher'] = 'repeat', stage: Literal['ss', 'slat'] = 'ss') -> dict:
177
+ """
178
+ Get the conditioning information for the model.
179
+
180
+ Args:
181
+ image (Union[torch.Tensor, list[Image.Image]]): The image prompts.
+ mask: Per-image masks of invalid pixels (occluded region plus background).
+ masks_occ: Optional masks covering only the occluded region.
182
+
183
+ Returns:
184
+ dict: The conditioning information
185
+ """
186
+ cond = self.encode_image(image)
187
+ mask = [torch.from_numpy(m).unsqueeze(0).float() for m in mask]
188
+ mask = torch.stack(mask).to(self.device)
189
+ mask = self.mask_patcher(1-mask)
190
+ masked_feat = mask.view(mask.shape[0], -1).unsqueeze(-1).repeat(1, 1, 1024)
191
+ if masks_occ is not None:
192
+ if isinstance(masks_occ, list):
+ # convert a list of numpy masks to a (B, 1, H, W) tensor, mirroring the handling of `mask` above
+ masks_occ = torch.stack([torch.from_numpy(m).unsqueeze(0).float() for m in masks_occ]).to(self.device)
+ masks_occ = self.mask_patcher(masks_occ)
193
+ masks_occ = masks_occ.view(masks_occ.shape[0], -1).unsqueeze(-1).repeat(1, 1, 1024)
194
+ cond = torch.cat([cond, masked_feat], dim=1)
195
+ if masks_occ is not None:
196
+ cond = torch.cat([cond, masks_occ], dim=1)
197
+ neg_cond = torch.zeros_like(cond)
198
+ return {
199
+ 'cond': cond,
200
+ 'neg_cond': neg_cond,
201
+ }
202
+
203
+ def get_cond(self, image: Union[torch.Tensor, list[Image.Image]]) -> dict:
204
+ """
205
+ Get the conditioning information for the model.
206
+
207
+ Args:
208
+ image (Union[torch.Tensor, list[Image.Image]]): The image prompts.
209
+
210
+ Returns:
211
+ dict: The conditioning information
212
+ """
213
+ cond = self.encode_image(image)
214
+ neg_cond = torch.zeros_like(cond)
215
+ return {
216
+ 'cond': cond,
217
+ 'neg_cond': neg_cond,
218
+ }
219
+
220
+ def sample_sparse_structure(
221
+ self,
222
+ cond: dict,
223
+ num_samples: int = 1,
224
+ sampler_params: dict = {},
225
+ ) -> torch.Tensor:
226
+ """
227
+ Sample sparse structures with the given conditioning.
228
+
229
+ Args:
230
+ cond (dict): The conditioning information.
231
+ num_samples (int): The number of samples to generate.
232
+ sampler_params (dict): Additional parameters for the sampler.
233
+ """
234
+ # Sample occupancy latent
235
+ flow_model = self.models['sparse_structure_flow_model']
236
+ reso = flow_model.resolution
237
+ noise = torch.randn(num_samples, flow_model.in_channels, reso, reso, reso).to(self.device)
238
+ sampler_params = {**self.sparse_structure_sampler_params, **sampler_params}
239
+ z_s = self.sparse_structure_sampler.sample(
240
+ flow_model,
241
+ noise,
242
+ **cond,
243
+ **sampler_params,
244
+ verbose=True
245
+ ).samples
246
+ decoder = self.models['sparse_structure_decoder']
247
+ ss = decoder(z_s)
248
+ coords = torch.argwhere(ss>0)[:, [0, 2, 3, 4]].int()
249
+ return coords
250
+
251
+ def decode_slat(
252
+ self,
253
+ slat: sp.SparseTensor,
254
+ formats: List[str] = ['mesh', 'gaussian'],
255
+ if_return_h: bool = False,
256
+ ) -> dict:
257
+ """
258
+ Decode the structured latent.
259
+
260
+ Args:
261
+ slat (sp.SparseTensor): The structured latent.
262
+ formats (List[str]): The formats to decode the structured latent to.
263
+
264
+ Returns:
265
+ dict: The decoded structured latent.
266
+ """
267
+ ret = {}
268
+ if 'mesh' in formats:
269
+ ret['mesh'] = self.models['slat_decoder_mesh'](slat)
270
+ if 'gaussian' in formats:
271
+ ret['gaussian'] = self.models['slat_decoder_gs'](slat, if_return_h=if_return_h)
272
+ return ret
273
+
274
+ def sample_slat(
275
+ self,
276
+ cond: dict,
277
+ coords: torch.Tensor,
278
+ sampler_params: dict = {},
279
+ ) -> sp.SparseTensor:
280
+ """
281
+ Sample structured latent with the given conditioning.
282
+
283
+ Args:
284
+ cond (dict): The conditioning information.
285
+ coords (torch.Tensor): The coordinates of the sparse structure.
286
+ sampler_params (dict): Additional parameters for the sampler.
287
+ """
288
+ # Sample structured latent
289
+ flow_model = self.models['slat_flow_model']
290
+ noise = sp.SparseTensor(
291
+ feats=torch.randn(coords.shape[0], flow_model.in_channels).to(self.device),
292
+ coords=coords,
293
+ )
294
+ sampler_params = {**self.slat_sampler_params, **sampler_params}
295
+ slat = self.slat_sampler.sample(
296
+ flow_model,
297
+ noise,
298
+ **cond,
299
+ **sampler_params,
300
+ verbose=True
301
+ ).samples
302
+
303
+ std = torch.tensor(self.slat_normalization['std'])[None].to(slat.device)
304
+ mean = torch.tensor(self.slat_normalization['mean'])[None].to(slat.device)
305
+
306
+ slat = slat * std + mean
307
+
308
+ return slat
309
+
310
+ @torch.no_grad()
311
+ def run(
312
+ self,
313
+ image: Image.Image,
314
+ num_samples: int = 1,
315
+ seed: int = 42,
316
+ sparse_structure_sampler_params: dict = {},
317
+ slat_sampler_params: dict = {},
318
+ formats: List[str] = ['mesh', 'gaussian'],
319
+ preprocess_image: bool = True,
320
+ ) -> dict:
321
+ """
322
+ Run the pipeline.
323
+
324
+ Args:
325
+ image (Image.Image): The image prompt.
326
+ num_samples (int): The number of samples to generate.
327
+ sparse_structure_sampler_params (dict): Additional parameters for the sparse structure sampler.
328
+ slat_sampler_params (dict): Additional parameters for the structured latent sampler.
329
+ preprocess_image (bool): Whether to preprocess the image.
330
+ """
331
+ if preprocess_image:
332
+ image = self.preprocess_image(image)
333
+ cond = self.get_cond([image])
334
+ torch.manual_seed(seed)
335
+ coords = self.sample_sparse_structure(cond, num_samples, sparse_structure_sampler_params)
336
+ slat = self.sample_slat(cond, coords, slat_sampler_params)
337
+ return self.decode_slat(slat, formats)
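+ # Example (illustrative placeholders):
+ #   pipeline = Amodal3RImageTo3DPipeline.from_pretrained("<checkpoint-or-repo>")
+ #   outputs = pipeline.run(Image.open("<input>.png"))
+ #   outputs['gaussian'], outputs['mesh']   # decoded 3D representations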
338
+
339
+ @contextmanager
340
+ def inject_sampler_multi_image(
341
+ self,
342
+ sampler_name: str,
343
+ num_images: int,
344
+ num_steps: int,
345
+ mode: Literal['stochastic', 'multidiffusion'] = 'stochastic',
346
+ ):
347
+ """
348
+ Inject a sampler with multiple images as condition.
349
+
350
+ Args:
351
+ sampler_name (str): The name of the sampler to inject.
352
+ num_images (int): The number of images to condition on.
353
+ num_steps (int): The number of steps to run the sampler for.
354
+ """
355
+ sampler = getattr(self, sampler_name)
356
+ setattr(sampler, '_old_inference_model', sampler._inference_model)
357
+
358
+ if mode == 'stochastic':
359
+ if num_images > num_steps:
360
+ print(f"\033[93mWarning: number of conditioning images is greater than number of steps for {sampler_name}. "
361
+ "This may lead to performance degradation.\033[0m")
362
+
363
+ cond_indices = (np.arange(num_steps) % num_images).tolist()
364
+ def _new_inference_model(self, model, x_t, t, cond, **kwargs):
365
+ cond_idx = cond_indices.pop(0)
366
+ cond_i = cond[cond_idx:cond_idx+1]
367
+ return self._old_inference_model(model, x_t, t, cond=cond_i, **kwargs)
368
+
369
+ elif mode == 'multidiffusion':
370
+ from .samplers import FlowEulerSampler
371
+ def _new_inference_model(self, model, x_t, t, cond, neg_cond, cfg_strength, cfg_interval, **kwargs):
372
+ if cfg_interval[0] <= t <= cfg_interval[1]:
373
+ preds = []
374
+ for i in range(len(cond)):
375
+ preds.append(FlowEulerSampler._inference_model(self, model, x_t, t, cond[i:i+1], **kwargs))
376
+ pred = sum(preds) / len(preds)
377
+ neg_pred = FlowEulerSampler._inference_model(self, model, x_t, t, neg_cond, **kwargs)
378
+ return (1 + cfg_strength) * pred - cfg_strength * neg_pred
379
+ else:
380
+ preds = []
381
+ for i in range(len(cond)):
382
+ preds.append(FlowEulerSampler._inference_model(self, model, x_t, t, cond[i:i+1], **kwargs))
383
+ pred = sum(preds) / len(preds)
384
+ return pred
385
+
386
+ else:
387
+ raise ValueError(f"Unsupported mode: {mode}")
388
+
389
+ sampler._inference_model = _new_inference_model.__get__(sampler, type(sampler))
390
+
391
+ yield
392
+
393
+ sampler._inference_model = sampler._old_inference_model
394
+ delattr(sampler, '_old_inference_model')
395
+
396
+ @torch.no_grad()
397
+ def run_multi_image(
398
+ self,
399
+ images: List[Image.Image],
400
+ masks: List[Image.Image],
401
+ num_samples: int = 1,
402
+ seed: int = 42,
403
+ sparse_structure_sampler_params: dict = {},
404
+ slat_sampler_params: dict = {},
405
+ formats: List[str] = ['mesh', 'gaussian'],
406
+ preprocess_image: bool = True,
407
+ mode: Literal['stochastic', 'multidiffusion'] = 'stochastic',
408
+ ) -> dict:
409
+ """
410
+ Run the pipeline with multiple images and their visibility masks as conditions.
411
+
412
+ Args:
413
+ images (List[Image.Image]): The multi-view images of the asset.
+ masks (List[Image.Image]): The visibility masks corresponding to the images.
414
+ num_samples (int): The number of samples to generate.
415
+ sparse_structure_sampler_params (dict): Additional parameters for the sparse structure sampler.
416
+ slat_sampler_params (dict): Additional parameters for the structured latent sampler.
417
+ preprocess_image (bool): Whether to preprocess the image.
418
+ """
419
+ if preprocess_image:
420
+ images, masks, masks_occ = zip(*[self.preprocess_image_w_mask(image, mask) for image, mask in zip(images, masks)])
421
+ images = list(images)
422
+ masks = list(masks)
423
+ masks_occ = list(masks_occ)
424
+ cond_stage_1 = self.get_cond_w_masks(images, masks, masks_occ=masks_occ, mask_encode_type="patcher", stage = "ss")
425
+ cond_stage_2 = self.get_cond_w_masks(images, masks, masks_occ=masks_occ, mask_encode_type="patcher", stage = "slat")
426
+ cond_stage_1['neg_cond'] = cond_stage_1['neg_cond'][:1]
427
+ cond_stage_2['neg_cond'] = cond_stage_2['neg_cond'][:1]
428
+ torch.manual_seed(seed)
429
+ ss_steps = {**self.sparse_structure_sampler_params, **sparse_structure_sampler_params}.get('steps')
430
+ with self.inject_sampler_multi_image('sparse_structure_sampler', len(images), ss_steps, mode=mode):
431
+ coords = self.sample_sparse_structure(cond_stage_1, num_samples, sparse_structure_sampler_params)
432
+ slat_steps = {**self.slat_sampler_params, **slat_sampler_params}.get('steps')
433
+ with self.inject_sampler_multi_image('slat_sampler', len(images), slat_steps, mode=mode):
434
+ slat = self.sample_slat(cond_stage_2, coords, slat_sampler_params)
435
+ return self.decode_slat(slat, formats)
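For orientation, here is a minimal usage sketch of the pipeline defined above. The checkpoint id, file names, and the indexing of the decoded outputs are illustrative assumptions, not something this diff pins down:

    # Hedged usage sketch of Amodal3RImageTo3DPipeline (checkpoint id and file paths are hypothetical).
    from PIL import Image
    from Amodal3R.pipelines import Amodal3RImageTo3DPipeline

    pipeline = Amodal3RImageTo3DPipeline.from_pretrained("Sm0kyWu/Amodal3R")  # hypothetical hub id
    pipeline.cuda()

    image = Image.open("occluded_view.png")    # RGB view containing the partially occluded object
    mask = Image.open("visibility_mask.png")   # visibility mask for that view

    outputs = pipeline.run_multi_image(
        [image], [mask],
        seed=42,
        formats=["mesh", "gaussian"],
        mode="stochastic",                     # or "multidiffusion"
    )
    mesh = outputs["mesh"][0]                  # assuming one decoded result per sample
    gaussians = outputs["gaussian"][0]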
Amodal3R/pipelines/__init__.py CHANGED
@@ -1,3 +1,24 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:b727691a2e5a5d1b58bde9fda8c0775e9692d0c1b0e4e00e269e6d1aaa1f65ec
- size 705
+ from . import samplers
+ from .Amodal3R_image_to_3d import Amodal3RImageTo3DPipeline
+
+
+ def from_pretrained(path: str):
+     """
+     Load a pipeline from a model folder or a Hugging Face model hub.
+
+     Args:
+         path: The path to the model. Can be either a local path or a Hugging Face model name.
+     """
+     import os
+     import json
+     is_local = os.path.exists(f"{path}/pipeline.json")
+
+     if is_local:
+         config_file = f"{path}/pipeline.json"
+     else:
+         from huggingface_hub import hf_hub_download
+         config_file = hf_hub_download(path, "pipeline.json")
+
+     with open(config_file, 'r') as f:
+         config = json.load(f)
+     return globals()[config['name']].from_pretrained(path)
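The module-level loader above reads pipeline.json and dispatches on its "name" field, so the expected on-disk layout is roughly the following (everything beyond the "name"/"args" keys used by the loaders in this diff is illustrative):

    # Sketch of the expected model folder, assuming the structure implied by the loaders in this diff.
    # <model_dir>/pipeline.json  ->  {"name": "Amodal3RImageTo3DPipeline", "args": {"models": {...}, ...}}
    # <model_dir>/<subfolder>    ->  weights for each entry in args["models"]
    from Amodal3R import pipelines

    pipe = pipelines.from_pretrained("/path/to/model_dir")   # or a Hugging Face repo id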
Amodal3R/pipelines/base.py CHANGED
@@ -1,3 +1,66 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:a69f11de28f6f067f9baacc41437833cbe4b474ec80c8fe64576be3080ac74db
- size 1746
+ from typing import *
+ import torch
+ import torch.nn as nn
+ from .. import models
+
+
+ class Pipeline:
+     """
+     A base class for pipelines.
+     """
+     def __init__(
+         self,
+         models: dict[str, nn.Module] = None,
+     ):
+         if models is None:
+             return
+         self.models = models
+         for model in self.models.values():
+             model.eval()
+
+     @staticmethod
+     def from_pretrained(path: str) -> "Pipeline":
+         """
+         Load a pretrained model.
+         """
+         import os
+         import json
+         is_local = os.path.exists(f"{path}/pipeline.json")
+
+         if is_local:
+             config_file = f"{path}/pipeline.json"
+         else:
+             from huggingface_hub import hf_hub_download
+             config_file = hf_hub_download(path, "pipeline.json")
+
+         with open(config_file, 'r') as f:
+             args = json.load(f)['args']
+
+         _models = {
+             k: models.from_pretrained(f"{path}/{v}")
+             for k, v in args['models'].items()
+         }
+
+         new_pipeline = Pipeline(_models)
+         new_pipeline._pretrained_args = args
+         return new_pipeline
+
+     @property
+     def device(self) -> torch.device:
+         for model in self.models.values():
+             if hasattr(model, 'device'):
+                 return model.device
+         for model in self.models.values():
+             if hasattr(model, 'parameters'):
+                 return next(model.parameters()).device
+         raise RuntimeError("No device found.")
+
+     def to(self, device: torch.device) -> None:
+         for model in self.models.values():
+             model.to(device)
+
+     def cuda(self) -> None:
+         self.to(torch.device("cuda"))
+
+     def cpu(self) -> None:
+         self.to(torch.device("cpu"))
Amodal3R/pipelines/samplers/__init__.py CHANGED
@@ -1,3 +1,2 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:438e0d327767c4ed05b167347e306f3ed80b4956add8f506126937feba0989ad
- size 121
+ from .base import Sampler
+ from .flow_euler import FlowEulerSampler, FlowEulerCfgSampler, FlowEulerGuidanceIntervalSampler
Amodal3R/pipelines/samplers/base.py CHANGED
@@ -1,3 +1,20 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:be8530b55ea66ac58e8ab23d650f463636dd52cf7e39c7c9c66f69bf72e6a0d1
- size 286
+ from typing import *
+ from abc import ABC, abstractmethod
+
+
+ class Sampler(ABC):
+     """
+     A base class for samplers.
+     """
+
+     @abstractmethod
+     def sample(
+         self,
+         model,
+         **kwargs
+     ):
+         """
+         Sample from a model.
+         """
+         pass
+
Amodal3R/pipelines/samplers/classifier_free_guidance_mixin.py CHANGED
@@ -1,3 +1,12 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:28af94747319a29d68f1dba864e103fcea906a24827646532d5915109de1098d
- size 454
+ from typing import *
+
+
+ class ClassifierFreeGuidanceSamplerMixin:
+     """
+     A mixin class for samplers that apply classifier-free guidance.
+     """
+
+     def _inference_model(self, model, x_t, t, cond, neg_cond, cfg_strength, **kwargs):
+         pred = super()._inference_model(model, x_t, t, cond, **kwargs)
+         neg_pred = super()._inference_model(model, x_t, t, neg_cond, **kwargs)
+         return (1 + cfg_strength) * pred - cfg_strength * neg_pred
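The blend above is the standard classifier-free guidance extrapolation, guided = (1 + s) * pred_cond - s * pred_uncond. A quick numeric check of what the mixin computes (illustrative values only):

    # Tiny numeric check of the classifier-free guidance blend used above.
    import torch

    pred = torch.tensor([1.0])        # conditional prediction
    neg_pred = torch.tensor([0.5])    # unconditional / negative prediction
    cfg_strength = 3.0
    guided = (1 + cfg_strength) * pred - cfg_strength * neg_pred
    print(guided)                     # tensor([2.5]): pushed away from the unconditional prediction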
Amodal3R/pipelines/samplers/flow_euler.py CHANGED
@@ -1,3 +1,199 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:6f088fb78072fab70e4d7433bedf925f1fee910b9795ca12344beb21e2f4032c
- size 7005
+ from typing import *
2
+ import torch
3
+ import numpy as np
4
+ from tqdm import tqdm
5
+ from easydict import EasyDict as edict
6
+ from .base import Sampler
7
+ from .classifier_free_guidance_mixin import ClassifierFreeGuidanceSamplerMixin
8
+ from .guidance_interval_mixin import GuidanceIntervalSamplerMixin
9
+
10
+
11
+ class FlowEulerSampler(Sampler):
12
+ """
13
+ Generate samples from a flow-matching model using Euler sampling.
14
+
15
+ Args:
16
+ sigma_min: The minimum scale of noise in flow.
17
+ """
18
+ def __init__(
19
+ self,
20
+ sigma_min: float,
21
+ ):
22
+ self.sigma_min = sigma_min
23
+
24
+ def _eps_to_xstart(self, x_t, t, eps):
25
+ assert x_t.shape == eps.shape
26
+ return (x_t - (self.sigma_min + (1 - self.sigma_min) * t) * eps) / (1 - t)
27
+
28
+ def _xstart_to_eps(self, x_t, t, x_0):
29
+ assert x_t.shape == x_0.shape
30
+ return (x_t - (1 - t) * x_0) / (self.sigma_min + (1 - self.sigma_min) * t)
31
+
32
+ def _v_to_xstart_eps(self, x_t, t, v):
33
+ assert x_t.shape == v.shape
34
+ eps = (1 - t) * v + x_t
35
+ x_0 = (1 - self.sigma_min) * x_t - (self.sigma_min + (1 - self.sigma_min) * t) * v
36
+ return x_0, eps
37
+
38
+ def _inference_model(self, model, x_t, t, cond=None, **kwargs):
39
+ t = torch.tensor([1000 * t] * x_t.shape[0], device=x_t.device, dtype=torch.float32)
40
+ return model(x_t, t, cond, **kwargs)
41
+
42
+ def _get_model_prediction(self, model, x_t, t, cond=None, **kwargs):
43
+ pred_v = self._inference_model(model, x_t, t, cond, **kwargs)
44
+ pred_x_0, pred_eps = self._v_to_xstart_eps(x_t=x_t, t=t, v=pred_v)
45
+ return pred_x_0, pred_eps, pred_v
46
+
47
+ @torch.no_grad()
48
+ def sample_once(
49
+ self,
50
+ model,
51
+ x_t,
52
+ t: float,
53
+ t_prev: float,
54
+ cond: Optional[Any] = None,
55
+ **kwargs
56
+ ):
57
+ """
58
+ Sample x_{t-1} from the model using Euler method.
59
+
60
+ Args:
61
+ model: The model to sample from.
62
+ x_t: The [N x C x ...] tensor of noisy inputs at time t.
63
+ t: The current timestep.
64
+ t_prev: The previous timestep.
65
+ cond: conditional information.
66
+ **kwargs: Additional arguments for model inference.
67
+
68
+ Returns:
69
+ a dict containing the following
70
+ - 'pred_x_prev': x_{t-1}.
71
+ - 'pred_x_0': a prediction of x_0.
72
+ """
73
+ pred_x_0, pred_eps, pred_v = self._get_model_prediction(model, x_t, t, cond, **kwargs)
74
+ pred_x_prev = x_t - (t - t_prev) * pred_v
75
+ return edict({"pred_x_prev": pred_x_prev, "pred_x_0": pred_x_0})
76
+
77
+ @torch.no_grad()
78
+ def sample(
79
+ self,
80
+ model,
81
+ noise,
82
+ cond: Optional[Any] = None,
83
+ steps: int = 50,
84
+ rescale_t: float = 1.0,
85
+ verbose: bool = True,
86
+ **kwargs
87
+ ):
88
+ """
89
+ Generate samples from the model using Euler method.
90
+
91
+ Args:
92
+ model: The model to sample from.
93
+ noise: The initial noise tensor.
94
+ cond: conditional information.
95
+ steps: The number of steps to sample.
96
+ rescale_t: The rescale factor for t.
97
+ verbose: If True, show a progress bar.
98
+ **kwargs: Additional arguments for model_inference.
99
+
100
+ Returns:
101
+ a dict containing the following
102
+ - 'samples': the model samples.
103
+ - 'pred_x_t': a list of prediction of x_t.
104
+ - 'pred_x_0': a list of prediction of x_0.
105
+ """
106
+ sample = noise
107
+ t_seq = np.linspace(1, 0, steps + 1)
108
+ t_seq = rescale_t * t_seq / (1 + (rescale_t - 1) * t_seq)
109
+ t_pairs = list((t_seq[i], t_seq[i + 1]) for i in range(steps))
110
+ ret = edict({"samples": None, "pred_x_t": [], "pred_x_0": []})
111
+ for t, t_prev in tqdm(t_pairs, desc="Sampling", disable=not verbose):
112
+ out = self.sample_once(model, sample, t, t_prev, cond, **kwargs)
113
+ sample = out.pred_x_prev
114
+ ret.pred_x_t.append(out.pred_x_prev)
115
+ ret.pred_x_0.append(out.pred_x_0)
116
+ ret.samples = sample
117
+ return ret
118
+
119
+
120
+ class FlowEulerCfgSampler(ClassifierFreeGuidanceSamplerMixin, FlowEulerSampler):
121
+ """
122
+ Generate samples from a flow-matching model using Euler sampling with classifier-free guidance.
123
+ """
124
+ @torch.no_grad()
125
+ def sample(
126
+ self,
127
+ model,
128
+ noise,
129
+ cond,
130
+ neg_cond,
131
+ steps: int = 50,
132
+ rescale_t: float = 1.0,
133
+ cfg_strength: float = 3.0,
134
+ verbose: bool = True,
135
+ **kwargs
136
+ ):
137
+ """
138
+ Generate samples from the model using Euler method.
139
+
140
+ Args:
141
+ model: The model to sample from.
142
+ noise: The initial noise tensor.
143
+ cond: conditional information.
144
+ neg_cond: negative conditional information.
145
+ steps: The number of steps to sample.
146
+ rescale_t: The rescale factor for t.
147
+ cfg_strength: The strength of classifier-free guidance.
148
+ verbose: If True, show a progress bar.
149
+ **kwargs: Additional arguments for model_inference.
150
+
151
+ Returns:
152
+ a dict containing the following
153
+ - 'samples': the model samples.
154
+ - 'pred_x_t': a list of prediction of x_t.
155
+ - 'pred_x_0': a list of prediction of x_0.
156
+ """
157
+ return super().sample(model, noise, cond, steps, rescale_t, verbose, neg_cond=neg_cond, cfg_strength=cfg_strength, **kwargs)
158
+
159
+
160
+ class FlowEulerGuidanceIntervalSampler(GuidanceIntervalSamplerMixin, FlowEulerSampler):
161
+ """
162
+ Generate samples from a flow-matching model using Euler sampling with classifier-free guidance and interval.
163
+ """
164
+ @torch.no_grad()
165
+ def sample(
166
+ self,
167
+ model,
168
+ noise,
169
+ cond,
170
+ neg_cond,
171
+ steps: int = 50,
172
+ rescale_t: float = 1.0,
173
+ cfg_strength: float = 3.0,
174
+ cfg_interval: Tuple[float, float] = (0.0, 1.0),
175
+ verbose: bool = True,
176
+ **kwargs
177
+ ):
178
+ """
179
+ Generate samples from the model using Euler method.
180
+
181
+ Args:
182
+ model: The model to sample from.
183
+ noise: The initial noise tensor.
184
+ cond: conditional information.
185
+ neg_cond: negative conditional information.
186
+ steps: The number of steps to sample.
187
+ rescale_t: The rescale factor for t.
188
+ cfg_strength: The strength of classifier-free guidance.
189
+ cfg_interval: The interval for classifier-free guidance.
190
+ verbose: If True, show a progress bar.
191
+ **kwargs: Additional arguments for model_inference.
192
+
193
+ Returns:
194
+ a dict containing the following
195
+ - 'samples': the model samples.
196
+ - 'pred_x_t': a list of prediction of x_t.
197
+ - 'pred_x_0': a list of prediction of x_0.
198
+ """
199
+ return super().sample(model, noise, cond, steps, rescale_t, verbose, neg_cond=neg_cond, cfg_strength=cfg_strength, cfg_interval=cfg_interval, **kwargs)
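FlowEulerSampler only assumes that model(x_t, t, cond) returns a velocity field, so its Euler update x_{t_prev} = x_t - (t - t_prev) * v can be exercised with a toy velocity that points straight at a fixed target. The sketch below is illustrative only; the toy model stands in for the flow transformers loaded by the pipeline:

    import torch
    from Amodal3R.pipelines.samplers import FlowEulerSampler

    target = torch.full((1, 4), 2.0)

    class ToyModel:
        # The sampler rescales t by 1000 before calling the model, so undo that here.
        def __call__(self, x_t, t, cond=None, **kwargs):
            t = (t / 1000.0).view(-1, *([1] * (x_t.dim() - 1)))
            return (x_t - target) / t    # velocity of the straight path between `target` and the noise

    sampler = FlowEulerSampler(sigma_min=1e-5)
    out = sampler.sample(ToyModel(), noise=torch.randn(1, 4), steps=10, verbose=False)
    print(out.samples)                   # recovers `target` (up to floating point)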
Amodal3R/pipelines/samplers/guidance_interval_mixin.py CHANGED
@@ -1,3 +1,15 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:6d290aef03b58b2967e5edaaee3578cdde39e4471689dca3429ffb08ff632c21
- size 629
+ from typing import *
+
+
+ class GuidanceIntervalSamplerMixin:
+     """
+     A mixin class for samplers that apply classifier-free guidance with interval.
+     """
+
+     def _inference_model(self, model, x_t, t, cond, neg_cond, cfg_strength, cfg_interval, **kwargs):
+         if cfg_interval[0] <= t <= cfg_interval[1]:
+             pred = super()._inference_model(model, x_t, t, cond, **kwargs)
+             neg_pred = super()._inference_model(model, x_t, t, neg_cond, **kwargs)
+             return (1 + cfg_strength) * pred - cfg_strength * neg_pred
+         else:
+             return super()._inference_model(model, x_t, t, cond, **kwargs)
Amodal3R/renderers/__init__.py CHANGED
@@ -1,3 +1,29 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:8b9722782b2f4d0d25fbbdc43415ade6039d02ecd4a3ecb55cade31b142fe5f6
- size 841
+ import importlib
+
+ __attributes = {
+     'GaussianRenderer': 'gaussian_render',
+     'MeshRenderer': 'mesh_renderer',
+ }
+
+ __submodules = []
+
+ __all__ = list(__attributes.keys()) + __submodules
+
+ def __getattr__(name):
+     if name not in globals():
+         if name in __attributes:
+             module_name = __attributes[name]
+             module = importlib.import_module(f".{module_name}", __name__)
+             globals()[name] = getattr(module, name)
+         elif name in __submodules:
+             module = importlib.import_module(f".{name}", __name__)
+             globals()[name] = module
+         else:
+             raise AttributeError(f"module {__name__} has no attribute {name}")
+     return globals()[name]
+
+
+ # For Pylance
+ if __name__ == '__main__':
+     from .gaussian_render import GaussianRenderer
+     from .mesh_renderer import MeshRenderer
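The module-level __getattr__ above defers the heavy renderer imports (and their CUDA extensions) until the attribute is first accessed, e.g.:

    # Importing the package is cheap; the submodule import happens on first attribute access.
    from Amodal3R import renderers

    renderer_cls = renderers.GaussianRenderer   # triggers the import of Amodal3R.renderers.gaussian_render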
Amodal3R/renderers/gaussian_render.py CHANGED
@@ -1,3 +1,231 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:1e9344700e86ac5d32c1a3343bedb4eef106b8ba961d412555202f2da4e75fc5
- size 8126
+ #
2
+ # Copyright (C) 2023, Inria
3
+ # GRAPHDECO research group, https://team.inria.fr/graphdeco
4
+ # All rights reserved.
5
+ #
6
+ # This software is free for non-commercial, research and evaluation use
7
+ # under the terms of the LICENSE.md file.
8
+ #
9
+ # For inquiries contact [email protected]
10
+ #
11
+
12
+ import torch
13
+ import math
14
+ from easydict import EasyDict as edict
15
+ import numpy as np
16
+ from ..representations.gaussian import Gaussian
17
+ from .sh_utils import eval_sh
18
+ import torch.nn.functional as F
19
20
+
21
+
22
+ def intrinsics_to_projection(
23
+ intrinsics: torch.Tensor,
24
+ near: float,
25
+ far: float,
26
+ ) -> torch.Tensor:
27
+ """
28
+ OpenCV intrinsics to OpenGL perspective matrix
29
+
30
+ Args:
31
+ intrinsics (torch.Tensor): [3, 3] OpenCV intrinsics matrix
32
+ near (float): near plane to clip
33
+ far (float): far plane to clip
34
+ Returns:
35
+ (torch.Tensor): [4, 4] OpenGL perspective matrix
36
+ """
37
+ fx, fy = intrinsics[0, 0], intrinsics[1, 1]
38
+ cx, cy = intrinsics[0, 2], intrinsics[1, 2]
39
+ ret = torch.zeros((4, 4), dtype=intrinsics.dtype, device=intrinsics.device)
40
+ ret[0, 0] = 2 * fx
41
+ ret[1, 1] = 2 * fy
42
+ ret[0, 2] = 2 * cx - 1
43
+ ret[1, 2] = - 2 * cy + 1
44
+ ret[2, 2] = far / (far - near)
45
+ ret[2, 3] = near * far / (near - far)
46
+ ret[3, 2] = 1.
47
+ return ret
48
+
49
+
50
+ def render(viewpoint_camera, pc : Gaussian, pipe, bg_color : torch.Tensor, scaling_modifier = 1.0, override_color = None):
51
+ """
52
+ Render the scene.
53
+
54
+ Background tensor (bg_color) must be on GPU!
55
+ """
56
+ # lazy import
57
+ if 'GaussianRasterizer' not in globals():
58
+ from diff_gaussian_rasterization import GaussianRasterizer, GaussianRasterizationSettings
59
+
60
+ # Create zero tensor. We will use it to make pytorch return gradients of the 2D (screen-space) means
61
+ screenspace_points = torch.zeros_like(pc.get_xyz, dtype=pc.get_xyz.dtype, requires_grad=True, device="cuda") + 0
62
+ try:
63
+ screenspace_points.retain_grad()
64
+ except:
65
+ pass
66
+ # Set up rasterization configuration
67
+ tanfovx = math.tan(viewpoint_camera.FoVx * 0.5)
68
+ tanfovy = math.tan(viewpoint_camera.FoVy * 0.5)
69
+
70
+ kernel_size = pipe.kernel_size
71
+ subpixel_offset = torch.zeros((int(viewpoint_camera.image_height), int(viewpoint_camera.image_width), 2), dtype=torch.float32, device="cuda")
72
+
73
+ raster_settings = GaussianRasterizationSettings(
74
+ image_height=int(viewpoint_camera.image_height),
75
+ image_width=int(viewpoint_camera.image_width),
76
+ tanfovx=tanfovx,
77
+ tanfovy=tanfovy,
78
+ kernel_size=kernel_size,
79
+ subpixel_offset=subpixel_offset,
80
+ bg=bg_color,
81
+ scale_modifier=scaling_modifier,
82
+ viewmatrix=viewpoint_camera.world_view_transform,
83
+ projmatrix=viewpoint_camera.full_proj_transform,
84
+ sh_degree=pc.active_sh_degree,
85
+ campos=viewpoint_camera.camera_center,
86
+ prefiltered=False,
87
+ debug=pipe.debug
88
+ )
89
+
90
+ rasterizer = GaussianRasterizer(raster_settings=raster_settings)
91
+
92
+ means3D = pc.get_xyz
93
+ means2D = screenspace_points
94
+ opacity = pc.get_opacity
95
+
96
+ # If precomputed 3d covariance is provided, use it. If not, then it will be computed from
97
+ # scaling / rotation by the rasterizer.
98
+ scales = None
99
+ rotations = None
100
+ cov3D_precomp = None
101
+ if pipe.compute_cov3D_python:
102
+ cov3D_precomp = pc.get_covariance(scaling_modifier)
103
+ else:
104
+ scales = pc.get_scaling
105
+ rotations = pc.get_rotation
106
+
107
+ # If precomputed colors are provided, use them. Otherwise, if it is desired to precompute colors
108
+ # from SHs in Python, do it. If not, then SH -> RGB conversion will be done by rasterizer.
109
+ shs = None
110
+ colors_precomp = None
111
+ if override_color is None:
112
+ if pipe.convert_SHs_python:
113
+ shs_view = pc.get_features.transpose(1, 2).view(-1, 3, (pc.max_sh_degree+1)**2)
114
+ dir_pp = (pc.get_xyz - viewpoint_camera.camera_center.repeat(pc.get_features.shape[0], 1))
115
+ dir_pp_normalized = dir_pp/dir_pp.norm(dim=1, keepdim=True)
116
+ sh2rgb = eval_sh(pc.active_sh_degree, shs_view, dir_pp_normalized)
117
+ colors_precomp = torch.clamp_min(sh2rgb + 0.5, 0.0)
118
+ else:
119
+ shs = pc.get_features
120
+ else:
121
+ colors_precomp = override_color
122
+
123
+ # Rasterize visible Gaussians to image, obtain their radii (on screen).
124
+ rendered_image, radii = rasterizer(
125
+ means3D = means3D,
126
+ means2D = means2D,
127
+ shs = shs,
128
+ colors_precomp = colors_precomp,
129
+ opacities = opacity,
130
+ scales = scales,
131
+ rotations = rotations,
132
+ cov3D_precomp = cov3D_precomp
133
+ )
134
+
135
+ # Those Gaussians that were frustum culled or had a radius of 0 were not visible.
136
+ # They will be excluded from value updates used in the splitting criteria.
137
+ return edict({"render": rendered_image,
138
+ "viewspace_points": screenspace_points,
139
+ "visibility_filter" : radii > 0,
140
+ "radii": radii})
141
+
142
+
143
+ class GaussianRenderer:
144
+ """
145
+ Renderer for the 3D Gaussian representation.
146
+
147
+ Args:
148
+ rendering_options (dict): Rendering options.
149
+ """
150
+
151
+ def __init__(self, rendering_options={}) -> None:
152
+ self.pipe = edict({
153
+ "kernel_size": 0.1,
154
+ "convert_SHs_python": False,
155
+ "compute_cov3D_python": False,
156
+ "scale_modifier": 1.0,
157
+ "debug": False
158
+ })
159
+ self.rendering_options = edict({
160
+ "resolution": None,
161
+ "near": None,
162
+ "far": None,
163
+ "ssaa": 1,
164
+ "bg_color": 'random',
165
+ })
166
+ self.rendering_options.update(rendering_options)
167
+ self.bg_color = None
168
+
169
+ def render(
170
+ self,
171
+ gaussian: Gaussian,
172
+ extrinsics: torch.Tensor,
173
+ intrinsics: torch.Tensor,
174
+ colors_overwrite: torch.Tensor = None
175
+ ) -> edict:
176
+ """
177
+ Render the Gaussian representation.
178
+
179
+ Args:
180
+ gaussian (Gaussian): The Gaussian model to render.
181
+ extrinsics (torch.Tensor): (4, 4) camera extrinsics
182
+ intrinsics (torch.Tensor): (3, 3) camera intrinsics
183
+ colors_overwrite (torch.Tensor): (N, 3) override color
184
+
185
+ Returns:
186
+ edict containing:
187
+ color (torch.Tensor): (3, H, W) rendered color image
188
+ """
189
+ resolution = self.rendering_options["resolution"]
190
+ near = self.rendering_options["near"]
191
+ far = self.rendering_options["far"]
192
+ ssaa = self.rendering_options["ssaa"]
193
+
194
+ if self.rendering_options["bg_color"] == 'random':
195
+ self.bg_color = torch.zeros(3, dtype=torch.float32, device="cuda")
196
+ if np.random.rand() < 0.5:
197
+ self.bg_color += 1
198
+ else:
199
+ self.bg_color = torch.tensor(self.rendering_options["bg_color"], dtype=torch.float32, device="cuda")
200
+
201
+ view = extrinsics
202
+ perspective = intrinsics_to_projection(intrinsics, near, far)
203
+ camera = torch.inverse(view)[:3, 3]
204
+ focalx = intrinsics[0, 0]
205
+ focaly = intrinsics[1, 1]
206
+ fovx = 2 * torch.atan(0.5 / focalx)
207
+ fovy = 2 * torch.atan(0.5 / focaly)
208
+
209
+ camera_dict = edict({
210
+ "image_height": resolution * ssaa,
211
+ "image_width": resolution * ssaa,
212
+ "FoVx": fovx,
213
+ "FoVy": fovy,
214
+ "znear": near,
215
+ "zfar": far,
216
+ "world_view_transform": view.T.contiguous(),
217
+ "projection_matrix": perspective.T.contiguous(),
218
+ "full_proj_transform": (perspective @ view).T.contiguous(),
219
+ "camera_center": camera
220
+ })
221
+
222
+ # Render
223
+ render_ret = render(camera_dict, gaussian, self.pipe, self.bg_color, override_color=colors_overwrite, scaling_modifier=self.pipe.scale_modifier)
224
+
225
+ if ssaa > 1:
226
+ render_ret.render = F.interpolate(render_ret.render[None], size=(resolution, resolution), mode='bilinear', align_corners=False, antialias=True).squeeze()
227
+
228
+ ret = edict({
229
+ 'color': render_ret['render']
230
+ })
231
+ return ret
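A hedged usage sketch of the renderer above. Note that the intrinsics are expected in normalized form (the code derives the field of view as 2 * atan(0.5 / fx)); the camera tensors and near/far values here are placeholders:

    # Illustrative only: the pose, intrinsics, and `gaussians` object come from elsewhere in the pipeline.
    import torch
    from Amodal3R.renderers import GaussianRenderer

    renderer = GaussianRenderer({
        "resolution": 512,
        "near": 0.8,
        "far": 1.6,
        "ssaa": 2,
        "bg_color": (1.0, 1.0, 1.0),   # or 'random', as handled above
    })
    extrinsics = torch.eye(4, device="cuda")                     # [4, 4] world-to-camera, placeholder pose
    intrinsics = torch.tensor([[1.1, 0.0, 0.5],
                               [0.0, 1.1, 0.5],
                               [0.0, 0.0, 1.0]], device="cuda")  # normalized fx, fy, cx, cy
    out = renderer.render(gaussians, extrinsics, intrinsics)     # `gaussians` from the pipeline output
    image = out.color                                            # (3, 512, 512) tensor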
Amodal3R/renderers/mesh_renderer.py CHANGED
@@ -1,3 +1,137 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:c3bcb6ba03a93ef66d9c484ce6e3a4799b8b44fc12c42db95a2482d9a285116b
- size 5491
+ import torch
2
+ try:
3
+ import kaolin as kal
4
+ import nvdiffrast.torch as dr
5
+ except ImportError:
6
+ print("Kaolin and nvdiffrast are not installed. Please install them to use the mesh renderer.")
7
+ from easydict import EasyDict as edict
8
+ from ..representations.mesh import MeshExtractResult
9
+ import torch.nn.functional as F
10
+
11
+
12
+ def intrinsics_to_projection(
13
+ intrinsics: torch.Tensor,
14
+ near: float,
15
+ far: float,
16
+ ) -> torch.Tensor:
17
+ """
18
+ OpenCV intrinsics to OpenGL perspective matrix
19
+
20
+ Args:
21
+ intrinsics (torch.Tensor): [3, 3] OpenCV intrinsics matrix
22
+ near (float): near plane to clip
23
+ far (float): far plane to clip
24
+ Returns:
25
+ (torch.Tensor): [4, 4] OpenGL perspective matrix
26
+ """
27
+ fx, fy = intrinsics[0, 0], intrinsics[1, 1]
28
+ cx, cy = intrinsics[0, 2], intrinsics[1, 2]
29
+ ret = torch.zeros((4, 4), dtype=intrinsics.dtype, device=intrinsics.device)
30
+ ret[0, 0] = 2 * fx
31
+ ret[1, 1] = 2 * fy
32
+ ret[0, 2] = 2 * cx - 1
33
+ ret[1, 2] = - 2 * cy + 1
34
+ ret[2, 2] = far / (far - near)
35
+ ret[2, 3] = near * far / (near - far)
36
+ ret[3, 2] = 1.
37
+ return ret
38
+
39
+
40
+ class MeshRenderer:
41
+ """
42
+ Renderer for the Mesh representation.
43
+
44
+ Args:
45
+ rendering_options (dict): Rendering options.
46
+ device (str): The device on which the nvdiffrast CUDA rasterization context is created.
47
+ """
48
+ def __init__(self, rendering_options={}, device='cuda'):
49
+ self.rendering_options = edict({
50
+ "resolution": None,
51
+ "near": None,
52
+ "far": None,
53
+ "ssaa": 1
54
+ })
55
+ self.rendering_options.update(rendering_options)
56
+ self.glctx = dr.RasterizeCudaContext(device=device)
57
+ self.device = device
58
+
59
+ def render(
60
+ self,
61
+ mesh : MeshExtractResult,
62
+ extrinsics: torch.Tensor,
63
+ intrinsics: torch.Tensor,
64
+ return_types = ["mask", "normal", "depth"]
65
+ ) -> edict:
66
+ """
67
+ Render the mesh.
68
+
69
+ Args:
70
+ mesh : meshmodel
71
+ extrinsics (torch.Tensor): (4, 4) camera extrinsics
72
+ intrinsics (torch.Tensor): (3, 3) camera intrinsics
73
+ return_types (list): list of return types, can be "mask", "depth", "normal_map", "normal", "color"
74
+
75
+ Returns:
76
+ edict based on return_types containing:
77
+ color (torch.Tensor): [3, H, W] rendered color image
78
+ depth (torch.Tensor): [H, W] rendered depth image
79
+ normal (torch.Tensor): [3, H, W] rendered normal image
80
+ normal_map (torch.Tensor): [3, H, W] rendered normal map image
81
+ mask (torch.Tensor): [H, W] rendered mask image
82
+ """
83
+ resolution = self.rendering_options["resolution"]
84
+ near = self.rendering_options["near"]
85
+ far = self.rendering_options["far"]
86
+ ssaa = self.rendering_options["ssaa"]
87
+
88
+ if mesh.vertices.shape[0] == 0 or mesh.faces.shape[0] == 0:
89
+ default_img = torch.zeros((1, resolution, resolution, 3), dtype=torch.float32, device=self.device)
90
+ ret_dict = {k : default_img if k in ['normal', 'normal_map', 'color'] else default_img[..., :1] for k in return_types}
91
+ return ret_dict
92
+
93
+ perspective = intrinsics_to_projection(intrinsics, near, far)
94
+
95
+ RT = extrinsics.unsqueeze(0)
96
+ full_proj = (perspective @ extrinsics).unsqueeze(0)
97
+
98
+ vertices = mesh.vertices.unsqueeze(0)
99
+
100
+ vertices_homo = torch.cat([vertices, torch.ones_like(vertices[..., :1])], dim=-1)
101
+ vertices_camera = torch.bmm(vertices_homo, RT.transpose(-1, -2))
102
+ vertices_clip = torch.bmm(vertices_homo, full_proj.transpose(-1, -2))
103
+ faces_int = mesh.faces.int()
104
+ rast, _ = dr.rasterize(
105
+ self.glctx, vertices_clip, faces_int, (resolution * ssaa, resolution * ssaa))
106
+
107
+ out_dict = edict()
108
+ for type in return_types:
109
+ img = None
110
+ if type == "mask" :
111
+ img = dr.antialias((rast[..., -1:] > 0).float(), rast, vertices_clip, faces_int)
112
+ elif type == "depth":
113
+ img = dr.interpolate(vertices_camera[..., 2:3].contiguous(), rast, faces_int)[0]
114
+ img = dr.antialias(img, rast, vertices_clip, faces_int)
115
+ elif type == "normal" :
116
+ img = dr.interpolate(
117
+ mesh.face_normal.reshape(1, -1, 3), rast,
118
+ torch.arange(mesh.faces.shape[0] * 3, device=self.device, dtype=torch.int).reshape(-1, 3)
119
+ )[0]
120
+ img = dr.antialias(img, rast, vertices_clip, faces_int)
121
+ # normalize norm pictures
122
+ img = (img + 1) / 2
123
+ elif type == "normal_map" :
124
+ img = dr.interpolate(mesh.vertex_attrs[:, 3:].contiguous(), rast, faces_int)[0]
125
+ img = dr.antialias(img, rast, vertices_clip, faces_int)
126
+ elif type == "color" :
127
+ img = dr.interpolate(mesh.vertex_attrs[:, :3].contiguous(), rast, faces_int)[0]
128
+ img = dr.antialias(img, rast, vertices_clip, faces_int)
129
+
130
+ if ssaa > 1:
131
+ img = F.interpolate(img.permute(0, 3, 1, 2), (resolution, resolution), mode='bilinear', align_corners=False, antialias=True)
132
+ img = img.squeeze()
133
+ else:
134
+ img = img.permute(0, 3, 1, 2).squeeze()
135
+ out_dict[type] = img
136
+
137
+ return out_dict
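The mesh renderer follows the same camera convention; a short, illustrative call, continuing the placeholders from the previous sketch:

    # Illustrative only; `mesh` is a MeshExtractResult (e.g. outputs["mesh"][0] from the pipeline)
    # and `extrinsics` / `intrinsics` follow the same convention as the Gaussian renderer above.
    from Amodal3R.renderers import MeshRenderer

    renderer = MeshRenderer({"resolution": 512, "near": 1.0, "far": 100.0, "ssaa": 2}, device="cuda")
    out = renderer.render(mesh, extrinsics, intrinsics, return_types=["normal", "depth", "mask"])
    normal_img = out["normal"]    # (3, 512, 512); already remapped from [-1, 1] to [0, 1]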
Amodal3R/renderers/sh_utils.py CHANGED
@@ -1,3 +1,118 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:7d1ff267546390635e6d1f68c4f88c4a8b052482d5c9be1d32f06bc69e9a96e7
- size 4371
+ # Copyright 2021 The PlenOctree Authors.
2
+ # Redistribution and use in source and binary forms, with or without
3
+ # modification, are permitted provided that the following conditions are met:
4
+ #
5
+ # 1. Redistributions of source code must retain the above copyright notice,
6
+ # this list of conditions and the following disclaimer.
7
+ #
8
+ # 2. Redistributions in binary form must reproduce the above copyright notice,
9
+ # this list of conditions and the following disclaimer in the documentation
10
+ # and/or other materials provided with the distribution.
11
+ #
12
+ # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
13
+ # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
14
+ # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
15
+ # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
16
+ # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
17
+ # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
18
+ # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
19
+ # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
20
+ # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
21
+ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
22
+ # POSSIBILITY OF SUCH DAMAGE.
23
+
24
+ import torch
25
+
26
+ C0 = 0.28209479177387814
27
+ C1 = 0.4886025119029199
28
+ C2 = [
29
+ 1.0925484305920792,
30
+ -1.0925484305920792,
31
+ 0.31539156525252005,
32
+ -1.0925484305920792,
33
+ 0.5462742152960396
34
+ ]
35
+ C3 = [
36
+ -0.5900435899266435,
37
+ 2.890611442640554,
38
+ -0.4570457994644658,
39
+ 0.3731763325901154,
40
+ -0.4570457994644658,
41
+ 1.445305721320277,
42
+ -0.5900435899266435
43
+ ]
44
+ C4 = [
45
+ 2.5033429417967046,
46
+ -1.7701307697799304,
47
+ 0.9461746957575601,
48
+ -0.6690465435572892,
49
+ 0.10578554691520431,
50
+ -0.6690465435572892,
51
+ 0.47308734787878004,
52
+ -1.7701307697799304,
53
+ 0.6258357354491761,
54
+ ]
55
+
56
+
57
+ def eval_sh(deg, sh, dirs):
58
+ """
59
+ Evaluate spherical harmonics at unit directions
60
+ using hardcoded SH polynomials.
61
+ Works with torch/np/jnp.
62
+ ... Can be 0 or more batch dimensions.
63
+ Args:
64
+ deg: int SH deg. Currently, 0-4 supported
65
+ sh: jnp.ndarray SH coeffs [..., C, (deg + 1) ** 2]
66
+ dirs: jnp.ndarray unit directions [..., 3]
67
+ Returns:
68
+ [..., C]
69
+ """
70
+ assert deg <= 4 and deg >= 0
71
+ coeff = (deg + 1) ** 2
72
+ assert sh.shape[-1] >= coeff
73
+
74
+ result = C0 * sh[..., 0]
75
+ if deg > 0:
76
+ x, y, z = dirs[..., 0:1], dirs[..., 1:2], dirs[..., 2:3]
77
+ result = (result -
78
+ C1 * y * sh[..., 1] +
79
+ C1 * z * sh[..., 2] -
80
+ C1 * x * sh[..., 3])
81
+
82
+ if deg > 1:
83
+ xx, yy, zz = x * x, y * y, z * z
84
+ xy, yz, xz = x * y, y * z, x * z
85
+ result = (result +
86
+ C2[0] * xy * sh[..., 4] +
87
+ C2[1] * yz * sh[..., 5] +
88
+ C2[2] * (2.0 * zz - xx - yy) * sh[..., 6] +
89
+ C2[3] * xz * sh[..., 7] +
90
+ C2[4] * (xx - yy) * sh[..., 8])
91
+
92
+ if deg > 2:
93
+ result = (result +
94
+ C3[0] * y * (3 * xx - yy) * sh[..., 9] +
95
+ C3[1] * xy * z * sh[..., 10] +
96
+ C3[2] * y * (4 * zz - xx - yy)* sh[..., 11] +
97
+ C3[3] * z * (2 * zz - 3 * xx - 3 * yy) * sh[..., 12] +
98
+ C3[4] * x * (4 * zz - xx - yy) * sh[..., 13] +
99
+ C3[5] * z * (xx - yy) * sh[..., 14] +
100
+ C3[6] * x * (xx - 3 * yy) * sh[..., 15])
101
+
102
+ if deg > 3:
103
+ result = (result + C4[0] * xy * (xx - yy) * sh[..., 16] +
104
+ C4[1] * yz * (3 * xx - yy) * sh[..., 17] +
105
+ C4[2] * xy * (7 * zz - 1) * sh[..., 18] +
106
+ C4[3] * yz * (7 * zz - 3) * sh[..., 19] +
107
+ C4[4] * (zz * (35 * zz - 30) + 3) * sh[..., 20] +
108
+ C4[5] * xz * (7 * zz - 3) * sh[..., 21] +
109
+ C4[6] * (xx - yy) * (7 * zz - 1) * sh[..., 22] +
110
+ C4[7] * xz * (xx - 3 * yy) * sh[..., 23] +
111
+ C4[8] * (xx * (xx - 3 * yy) - yy * (3 * xx - yy)) * sh[..., 24])
112
+ return result
113
+
114
+ def RGB2SH(rgb):
115
+ return (rgb - 0.5) / C0
116
+
117
+ def SH2RGB(sh):
118
+ return sh * C0 + 0.5
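Since RGB2SH and SH2RGB are exact inverses and degree-0 evaluation reduces to the constant C0 basis, the helpers can be sanity-checked in a few lines (illustrative only):

    # Degree-0 round trip: eval_sh reduces to C0 * sh, and RGB2SH / SH2RGB invert each other.
    import torch
    from Amodal3R.renderers.sh_utils import eval_sh, RGB2SH, SH2RGB

    rgb = torch.tensor([[0.2, 0.5, 0.8]])
    sh0 = RGB2SH(rgb)                          # DC spherical-harmonic coefficient per channel
    assert torch.allclose(SH2RGB(sh0), rgb)

    dirs = torch.tensor([[0.0, 0.0, 1.0]])     # unit direction, unused at degree 0
    val = eval_sh(0, sh0.unsqueeze(-1), dirs)  # [..., C] = C0 * sh0 = rgb - 0.5
    assert torch.allclose(val + 0.5, rgb)      # same +0.5 shift applied in the Gaussian renderer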