Upload 5 files
- uno/flux/math.py +45 -0
- uno/flux/model.py +222 -0
- uno/flux/pipeline.py +322 -0
- uno/flux/sampling.py +252 -0
- uno/flux/util.py +411 -0
uno/flux/math.py
ADDED
@@ -0,0 +1,45 @@
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates. All rights reserved.
# Copyright (c) 2024 Black Forest Labs and The XLabs-AI Team. All rights reserved.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

#     http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch
from einops import rearrange
from torch import Tensor


def attention(q: Tensor, k: Tensor, v: Tensor, pe: Tensor) -> Tensor:
    q, k = apply_rope(q, k, pe)

    x = torch.nn.functional.scaled_dot_product_attention(q, k, v)
    x = rearrange(x, "B H L D -> B L (H D)")

    return x


def rope(pos: Tensor, dim: int, theta: int) -> Tensor:
    assert dim % 2 == 0
    scale = torch.arange(0, dim, 2, dtype=torch.float64, device=pos.device) / dim
    omega = 1.0 / (theta**scale)
    out = torch.einsum("...n,d->...nd", pos, omega)
    out = torch.stack([torch.cos(out), -torch.sin(out), torch.sin(out), torch.cos(out)], dim=-1)
    out = rearrange(out, "b n d (i j) -> b n d i j", i=2, j=2)
    return out.float()


def apply_rope(xq: Tensor, xk: Tensor, freqs_cis: Tensor) -> tuple[Tensor, Tensor]:
    xq_ = xq.float().reshape(*xq.shape[:-1], -1, 1, 2)
    xk_ = xk.float().reshape(*xk.shape[:-1], -1, 1, 2)
    xq_out = freqs_cis[..., 0] * xq_[..., 0] + freqs_cis[..., 1] * xq_[..., 1]
    xk_out = freqs_cis[..., 0] * xk_[..., 0] + freqs_cis[..., 1] * xk_[..., 1]
    return xq_out.reshape(*xq.shape).type_as(xq), xk_out.reshape(*xk.shape).type_as(xk)
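
Usage sketch (not part of the upload; shapes are illustrative assumptions): rope builds per-axis rotation matrices from position ids, and attention applies them to the query/key tensors before scaled dot-product attention.

    # Illustrative only: q/k/v follow the "B H L D" layout expected by attention().
    import torch
    from uno.flux.math import attention, rope

    B, H, L, D = 1, 24, 64, 128                       # batch, heads, tokens, head dim
    q, k, v = (torch.randn(B, H, L, D) for _ in range(3))

    pos = torch.arange(L, dtype=torch.float64)[None]  # (B, L) position ids for one axis
    pe = rope(pos, D, theta=10_000)                   # (B, L, D/2, 2, 2) rotation matrices
    pe = pe.unsqueeze(1)                              # broadcast over the head dimension

    out = attention(q, k, v, pe)                      # (B, L, H * D)
    print(out.shape)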
uno/flux/model.py
ADDED
@@ -0,0 +1,222 @@
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates. All rights reserved.
# Copyright (c) 2024 Black Forest Labs and The XLabs-AI Team. All rights reserved.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

#     http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from dataclasses import dataclass

import torch
from torch import Tensor, nn

from .modules.layers import DoubleStreamBlock, EmbedND, LastLayer, MLPEmbedder, SingleStreamBlock, timestep_embedding


@dataclass
class FluxParams:
    in_channels: int
    vec_in_dim: int
    context_in_dim: int
    hidden_size: int
    mlp_ratio: float
    num_heads: int
    depth: int
    depth_single_blocks: int
    axes_dim: list[int]
    theta: int
    qkv_bias: bool
    guidance_embed: bool


class Flux(nn.Module):
    """
    Transformer model for flow matching on sequences.
    """
    _supports_gradient_checkpointing = True

    def __init__(self, params: FluxParams):
        super().__init__()

        self.params = params
        self.in_channels = params.in_channels
        self.out_channels = self.in_channels
        if params.hidden_size % params.num_heads != 0:
            raise ValueError(
                f"Hidden size {params.hidden_size} must be divisible by num_heads {params.num_heads}"
            )
        pe_dim = params.hidden_size // params.num_heads
        if sum(params.axes_dim) != pe_dim:
            raise ValueError(f"Got {params.axes_dim} but expected positional dim {pe_dim}")
        self.hidden_size = params.hidden_size
        self.num_heads = params.num_heads
        self.pe_embedder = EmbedND(dim=pe_dim, theta=params.theta, axes_dim=params.axes_dim)
        self.img_in = nn.Linear(self.in_channels, self.hidden_size, bias=True)
        self.time_in = MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size)
        self.vector_in = MLPEmbedder(params.vec_in_dim, self.hidden_size)
        self.guidance_in = (
            MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size) if params.guidance_embed else nn.Identity()
        )
        self.txt_in = nn.Linear(params.context_in_dim, self.hidden_size)

        self.double_blocks = nn.ModuleList(
            [
                DoubleStreamBlock(
                    self.hidden_size,
                    self.num_heads,
                    mlp_ratio=params.mlp_ratio,
                    qkv_bias=params.qkv_bias,
                )
                for _ in range(params.depth)
            ]
        )

        self.single_blocks = nn.ModuleList(
            [
                SingleStreamBlock(self.hidden_size, self.num_heads, mlp_ratio=params.mlp_ratio)
                for _ in range(params.depth_single_blocks)
            ]
        )

        self.final_layer = LastLayer(self.hidden_size, 1, self.out_channels)
        self.gradient_checkpointing = False

    def _set_gradient_checkpointing(self, module, value=False):
        if hasattr(module, "gradient_checkpointing"):
            module.gradient_checkpointing = value

    @property
    def attn_processors(self):
        # set recursively
        processors = {}  # type: dict[str, nn.Module]

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor):
        r"""
        Sets the attention processor to use to compute attention.

        Parameters:
            processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
                The instantiated processor class or a dictionary of processor classes that will be set as the processor
                for **all** `Attention` layers.

                If `processor` is a dict, the key needs to define the path to the corresponding cross attention
                processor. This is strongly recommended when setting trainable attention processors.

        """
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def forward(
        self,
        img: Tensor,
        img_ids: Tensor,
        txt: Tensor,
        txt_ids: Tensor,
        timesteps: Tensor,
        y: Tensor,
        guidance: Tensor | None = None,
        ref_img: Tensor | None = None,
        ref_img_ids: Tensor | None = None,
    ) -> Tensor:
        if img.ndim != 3 or txt.ndim != 3:
            raise ValueError("Input img and txt tensors must have 3 dimensions.")

        # running on sequences img
        img = self.img_in(img)
        vec = self.time_in(timestep_embedding(timesteps, 256))
        if self.params.guidance_embed:
            if guidance is None:
                raise ValueError("Didn't get guidance strength for guidance distilled model.")
            vec = vec + self.guidance_in(timestep_embedding(guidance, 256))
        vec = vec + self.vector_in(y)
        txt = self.txt_in(txt)

        ids = torch.cat((txt_ids, img_ids), dim=1)

        # concat ref_img/img
        img_end = img.shape[1]
        if ref_img is not None:
            if isinstance(ref_img, tuple) or isinstance(ref_img, list):
                img_in = [img] + [self.img_in(ref) for ref in ref_img]
                img_ids = [ids] + [ref_ids for ref_ids in ref_img_ids]
                img = torch.cat(img_in, dim=1)
                ids = torch.cat(img_ids, dim=1)
            else:
                img = torch.cat((img, self.img_in(ref_img)), dim=1)
                ids = torch.cat((ids, ref_img_ids), dim=1)
        pe = self.pe_embedder(ids)

        for index_block, block in enumerate(self.double_blocks):
            if self.training and self.gradient_checkpointing:
                img, txt = torch.utils.checkpoint.checkpoint(
                    block,
                    img=img,
                    txt=txt,
                    vec=vec,
                    pe=pe,
                    use_reentrant=False,
                )
            else:
                img, txt = block(
                    img=img,
                    txt=txt,
                    vec=vec,
                    pe=pe
                )

        img = torch.cat((txt, img), 1)
        for block in self.single_blocks:
            if self.training and self.gradient_checkpointing:
                img = torch.utils.checkpoint.checkpoint(
                    block,
                    img, vec=vec, pe=pe,
                    use_reentrant=False
                )
            else:
                img = block(img, vec=vec, pe=pe)
        img = img[:, txt.shape[1] :, ...]
        # index img
        img = img[:, :img_end, ...]

        img = self.final_layer(img, vec)  # (N, T, patch_size ** 2 * out_channels)
        return img
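
Construction sketch (not part of the upload; the hyperparameter values mirror the "flux-dev" entry defined in uno/flux/util.py below):

    # Illustrative only: instantiate the Flux transformer with flux-dev sized params.
    import torch
    from uno.flux.model import Flux, FluxParams

    params = FluxParams(
        in_channels=64, vec_in_dim=768, context_in_dim=4096, hidden_size=3072,
        mlp_ratio=4.0, num_heads=24, depth=19, depth_single_blocks=38,
        axes_dim=[16, 56, 56], theta=10_000, qkv_bias=True, guidance_embed=True,
    )
    model = Flux(params).to(torch.bfloat16)

    # forward() expects packed sequences: img is (B, L_img, 64), txt is (B, L_txt, 4096),
    # img_ids/txt_ids hold 3-axis position ids, y is the pooled CLIP vector (B, 768),
    # and guidance must be passed because guidance_embed=True for flux-dev.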
uno/flux/pipeline.py
ADDED
@@ -0,0 +1,322 @@
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates. All rights reserved.
# Copyright (c) 2024 Black Forest Labs and The XLabs-AI Team. All rights reserved.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

#     http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from typing import Literal

import torch
from einops import rearrange
from PIL import ExifTags, Image
import torchvision.transforms.functional as TVF

from uno.flux.modules.layers import (
    DoubleStreamBlockLoraProcessor,
    DoubleStreamBlockProcessor,
    SingleStreamBlockLoraProcessor,
    SingleStreamBlockProcessor,
)
from uno.flux.sampling import denoise, get_noise, get_schedule, prepare_multi_ip, unpack
from uno.flux.util import (
    get_lora_rank,
    load_ae,
    load_checkpoint,
    load_clip,
    load_flow_model,
    load_flow_model_only_lora,
    load_flow_model_quintized,
    load_t5,
)


def find_nearest_scale(image_h, image_w, predefined_scales):
    """
    Find the predefined scale closest to the image's height and width.

    :param image_h: image height
    :param image_w: image width
    :param predefined_scales: list of predefined scales [(h1, w1), (h2, w2), ...]
    :return: the nearest predefined scale (h, w)
    """
    # Compute the aspect ratio of the input image
    image_ratio = image_h / image_w

    # Track the smallest difference and the nearest scale found so far
    min_diff = float('inf')
    nearest_scale = None

    # Iterate over all predefined scales to find the one whose aspect ratio is closest to the input image's
    for scale_h, scale_w in predefined_scales:
        predefined_ratio = scale_h / scale_w
        diff = abs(predefined_ratio - image_ratio)

        if diff < min_diff:
            min_diff = diff
            nearest_scale = (scale_h, scale_w)

    return nearest_scale

def preprocess_ref(raw_image: Image.Image, long_size: int = 512):
    # Get the width and height of the original image
    image_w, image_h = raw_image.size

    # Compute the new long and short sides
    if image_w >= image_h:
        new_w = long_size
        new_h = int((long_size / image_w) * image_h)
    else:
        new_h = long_size
        new_w = int((long_size / image_h) * image_w)

    # Resize proportionally to the new width and height
    raw_image = raw_image.resize((new_w, new_h), resample=Image.LANCZOS)
    target_w = new_w // 16 * 16
    target_h = new_h // 16 * 16

    # Compute the crop origin for a center crop
    left = (new_w - target_w) // 2
    top = (new_h - target_h) // 2
    right = left + target_w
    bottom = top + target_h

    # Perform the center crop
    raw_image = raw_image.crop((left, top, right, bottom))

    # Convert to RGB mode
    raw_image = raw_image.convert("RGB")
    return raw_image

class UNOPipeline:
    def __init__(
        self,
        model_type: str,
        device: torch.device,
        offload: bool = False,
        only_lora: bool = False,
        lora_rank: int = 16
    ):
        self.device = device
        self.offload = offload
        self.model_type = model_type

        self.clip = load_clip(self.device)
        self.t5 = load_t5(self.device, max_length=512)
        self.ae = load_ae(model_type, device="cpu" if offload else self.device)
        self.use_fp8 = "fp8" in model_type
        if only_lora:
            self.model = load_flow_model_only_lora(
                model_type,
                device="cpu" if offload else self.device,
                lora_rank=lora_rank,
                use_fp8=self.use_fp8
            )
        else:
            self.model = load_flow_model(model_type, device="cpu" if offload else self.device)


    def load_ckpt(self, ckpt_path):
        if ckpt_path is not None:
            from safetensors.torch import load_file as load_sft
            print("Loading checkpoint to replace old keys")
            # load_sft doesn't support torch.device
            if ckpt_path.endswith('safetensors'):
                sd = load_sft(ckpt_path, device='cpu')
                missing, unexpected = self.model.load_state_dict(sd, strict=False, assign=True)
            else:
                dit_state = torch.load(ckpt_path, map_location='cpu')
                sd = {}
                for k in dit_state.keys():
                    sd[k.replace('module.','')] = dit_state[k]
                missing, unexpected = self.model.load_state_dict(sd, strict=False, assign=True)
                self.model.to(str(self.device))
            print(f"missing keys: {missing}\n\n\n\n\nunexpected keys: {unexpected}")

    def set_lora(self, local_path: str = None, repo_id: str = None,
                 name: str = None, lora_weight: int = 0.7):
        checkpoint = load_checkpoint(local_path, repo_id, name)
        self.update_model_with_lora(checkpoint, lora_weight)

    def set_lora_from_collection(self, lora_type: str = "realism", lora_weight: int = 0.7):
        checkpoint = load_checkpoint(
            None, self.hf_lora_collection, self.lora_types_to_names[lora_type]
        )
        self.update_model_with_lora(checkpoint, lora_weight)

    def update_model_with_lora(self, checkpoint, lora_weight):
        rank = get_lora_rank(checkpoint)
        lora_attn_procs = {}

        for name, _ in self.model.attn_processors.items():
            lora_state_dict = {}
            for k in checkpoint.keys():
                if name in k:
                    lora_state_dict[k[len(name) + 1:]] = checkpoint[k] * lora_weight

            if len(lora_state_dict):
                if name.startswith("single_blocks"):
                    lora_attn_procs[name] = SingleStreamBlockLoraProcessor(dim=3072, rank=rank)
                else:
                    lora_attn_procs[name] = DoubleStreamBlockLoraProcessor(dim=3072, rank=rank)
                lora_attn_procs[name].load_state_dict(lora_state_dict)
                lora_attn_procs[name].to(self.device)
            else:
                if name.startswith("single_blocks"):
                    lora_attn_procs[name] = SingleStreamBlockProcessor()
                else:
                    lora_attn_procs[name] = DoubleStreamBlockProcessor()

        self.model.set_attn_processor(lora_attn_procs)


    def __call__(
        self,
        prompt: str,
        width: int = 512,
        height: int = 512,
        guidance: float = 4,
        num_steps: int = 50,
        seed: int = 123456789,
        **kwargs
    ):
        width = 16 * (width // 16)
        height = 16 * (height // 16)

        device_type = self.device if isinstance(self.device, str) else self.device.type
        with torch.autocast(enabled=self.use_fp8, device_type=device_type, dtype=torch.bfloat16):
            return self.forward(
                prompt,
                width,
                height,
                guidance,
                num_steps,
                seed,
                **kwargs
            )

    @torch.inference_mode()
    def gradio_generate(
        self,
        prompt: str,
        width: int,
        height: int,
        guidance: float,
        num_steps: int,
        seed: int,
        image_prompt1: Image.Image,
        image_prompt2: Image.Image,
        image_prompt3: Image.Image,
        image_prompt4: Image.Image,
    ):
        ref_imgs = [image_prompt1, image_prompt2, image_prompt3, image_prompt4]
        ref_imgs = [img for img in ref_imgs if isinstance(img, Image.Image)]
        ref_long_side = 512 if len(ref_imgs) <= 1 else 320
        ref_imgs = [preprocess_ref(img, ref_long_side) for img in ref_imgs]

        # ✅ If seed is -1 (user wants random), sample a long int
        if seed == -1 or seed is None:
            seed = int(torch.randint(0, 2**63 - 1, (1,)).item())
        else:
            seed = int(seed)  # make sure it's a Python int

        print(f"🧪 [DEBUG] Using seed: {seed} for image generation")

        img = self(prompt=prompt, width=width, height=height, guidance=guidance,
                   num_steps=num_steps, seed=seed, ref_imgs=ref_imgs)

        filename = f"output/gradio/{seed}_{prompt[:20]}.png"
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        exif_data = Image.Exif()
        exif_data[ExifTags.Base.Make] = "UNO"
        exif_data[ExifTags.Base.Model] = self.model_type
        info = f"{prompt=}, {seed=}, {width=}, {height=}, {guidance=}, {num_steps=}"
        exif_data[ExifTags.Base.ImageDescription] = info
        img.save(filename, format="png", exif=exif_data)
        return img, filename

    @torch.inference_mode
    def forward(
        self,
        prompt: str,
        width: int,
        height: int,
        guidance: float,
        num_steps: int,
        seed: int,
        ref_imgs: list[Image.Image] | None = None,
        pe: Literal['d', 'h', 'w', 'o'] = 'd',
    ):

        # ✅ Ensure seed is always a valid integer
        if seed == -1 or seed is None:
            seed = int(torch.randint(0, 2**63 - 1, (1,)).item())
        else:
            seed = int(seed)
        print(f"🧪 [DEBUG] Using seed: {seed}")

        x = get_noise(
            1, height, width, device=self.device,
            dtype=torch.bfloat16, seed=seed
        )
        timesteps = get_schedule(
            num_steps,
            (width // 8) * (height // 8) // (16 * 16),
            shift=True,
        )
        if self.offload:
            self.ae.encoder = self.ae.encoder.to(self.device)
        x_1_refs = [
            self.ae.encode(
                (TVF.to_tensor(ref_img) * 2.0 - 1.0)
                .unsqueeze(0).to(self.device, torch.float32)
            ).to(torch.bfloat16)
            for ref_img in ref_imgs
        ]

        if self.offload:
            self.offload_model_to_cpu(self.ae.encoder)
            self.t5, self.clip = self.t5.to(self.device), self.clip.to(self.device)
        inp_cond = prepare_multi_ip(
            t5=self.t5, clip=self.clip,
            img=x,
            prompt=prompt, ref_imgs=x_1_refs, pe=pe
        )

        if self.offload:
            self.offload_model_to_cpu(self.t5, self.clip)
            self.model = self.model.to(self.device)

        x = denoise(
            self.model,
            **inp_cond,
            timesteps=timesteps,
            guidance=guidance,
        )

        if self.offload:
            self.offload_model_to_cpu(self.model)
            self.ae.decoder.to(x.device)
        x = unpack(x.float(), height, width)
        x = self.ae.decode(x)
        self.offload_model_to_cpu(self.ae.decoder)

        x1 = x.clamp(-1, 1)
        x1 = rearrange(x1[-1], "c h w -> h w c")
        output_img = Image.fromarray((127.5 * (x1 + 1.0)).cpu().byte().numpy())
        return output_img

    def offload_model_to_cpu(self, *models):
        if not self.offload: return
        for model in models:
            model.cpu()
            torch.cuda.empty_cache()
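
Usage sketch (not part of the upload; assumes a CUDA device, access to the FLUX weights via HF download or the FLUX_DEV/AE env vars read in uno/flux/util.py, and a reference image at a hypothetical path):

    # Illustrative only: run the UNO pipeline end to end with one reference image.
    import torch
    from PIL import Image
    from uno.flux.pipeline import UNOPipeline, preprocess_ref

    pipeline = UNOPipeline("flux-dev", device=torch.device("cuda"), offload=True, only_lora=True)

    # preprocess_ref resizes/crops to multiples of 16, as gradio_generate does before calling the pipeline
    ref = preprocess_ref(Image.open("assets/ref.png"), long_size=512)  # hypothetical path
    image = pipeline(
        prompt="a plush toy on a wooden desk",
        width=512, height=512,
        guidance=4, num_steps=25,
        seed=-1,                         # -1 triggers the random-seed branch in forward()
        ref_imgs=[ref],
    )
    image.save("uno_output.png")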
uno/flux/sampling.py
ADDED
@@ -0,0 +1,252 @@
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates. All rights reserved.
# Copyright (c) 2024 Black Forest Labs and The XLabs-AI Team. All rights reserved.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

#     http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
from typing import Literal

import torch
from einops import rearrange, repeat
from torch import Tensor
from tqdm import tqdm

from .model import Flux
from .modules.conditioner import HFEmbedder


def get_noise(
    num_samples: int,
    height: int,
    width: int,
    device: torch.device,
    dtype: torch.dtype,
    seed: int,
):
    return torch.randn(
        num_samples,
        16,
        # allow for packing
        2 * math.ceil(height / 16),
        2 * math.ceil(width / 16),
        device=device,
        dtype=dtype,
        generator=torch.Generator(device=device).manual_seed(seed),
    )


def prepare(
    t5: HFEmbedder,
    clip: HFEmbedder,
    img: Tensor,
    prompt: str | list[str],
    ref_img: None | Tensor=None,
    pe: Literal['d', 'h', 'w', 'o'] ='d'
) -> dict[str, Tensor]:
    assert pe in ['d', 'h', 'w', 'o']
    bs, c, h, w = img.shape
    if bs == 1 and not isinstance(prompt, str):
        bs = len(prompt)

    img = rearrange(img, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=2, pw=2)
    if img.shape[0] == 1 and bs > 1:
        img = repeat(img, "1 ... -> bs ...", bs=bs)

    img_ids = torch.zeros(h // 2, w // 2, 3)
    img_ids[..., 1] = img_ids[..., 1] + torch.arange(h // 2)[:, None]
    img_ids[..., 2] = img_ids[..., 2] + torch.arange(w // 2)[None, :]
    img_ids = repeat(img_ids, "h w c -> b (h w) c", b=bs)

    if ref_img is not None:
        _, _, ref_h, ref_w = ref_img.shape
        ref_img = rearrange(ref_img, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=2, pw=2)
        if ref_img.shape[0] == 1 and bs > 1:
            ref_img = repeat(ref_img, "1 ... -> bs ...", bs=bs)
        ref_img_ids = torch.zeros(ref_h // 2, ref_w // 2, 3)
        # offset the ref img ids by the main image's maxima along height and width
        h_offset = h // 2 if pe in {'d', 'h'} else 0
        w_offset = w // 2 if pe in {'d', 'w'} else 0
        ref_img_ids[..., 1] = ref_img_ids[..., 1] + torch.arange(ref_h // 2)[:, None] + h_offset
        ref_img_ids[..., 2] = ref_img_ids[..., 2] + torch.arange(ref_w // 2)[None, :] + w_offset
        ref_img_ids = repeat(ref_img_ids, "h w c -> b (h w) c", b=bs)

    if isinstance(prompt, str):
        prompt = [prompt]
    txt = t5(prompt)
    if txt.shape[0] == 1 and bs > 1:
        txt = repeat(txt, "1 ... -> bs ...", bs=bs)
    txt_ids = torch.zeros(bs, txt.shape[1], 3)

    vec = clip(prompt)
    if vec.shape[0] == 1 and bs > 1:
        vec = repeat(vec, "1 ... -> bs ...", bs=bs)

    if ref_img is not None:
        return {
            "img": img,
            "img_ids": img_ids.to(img.device),
            "ref_img": ref_img,
            "ref_img_ids": ref_img_ids.to(img.device),
            "txt": txt.to(img.device),
            "txt_ids": txt_ids.to(img.device),
            "vec": vec.to(img.device),
        }
    else:
        return {
            "img": img,
            "img_ids": img_ids.to(img.device),
            "txt": txt.to(img.device),
            "txt_ids": txt_ids.to(img.device),
            "vec": vec.to(img.device),
        }

def prepare_multi_ip(
    t5: HFEmbedder,
    clip: HFEmbedder,
    img: Tensor,
    prompt: str | list[str],
    ref_imgs: list[Tensor] | None = None,
    pe: Literal['d', 'h', 'w', 'o'] = 'd'
) -> dict[str, Tensor]:
    assert pe in ['d', 'h', 'w', 'o']
    bs, c, h, w = img.shape
    if bs == 1 and not isinstance(prompt, str):
        bs = len(prompt)

    img = rearrange(img, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=2, pw=2)
    if img.shape[0] == 1 and bs > 1:
        img = repeat(img, "1 ... -> bs ...", bs=bs)

    img_ids = torch.zeros(h // 2, w // 2, 3)
    img_ids[..., 1] = img_ids[..., 1] + torch.arange(h // 2)[:, None]
    img_ids[..., 2] = img_ids[..., 2] + torch.arange(w // 2)[None, :]
    img_ids = repeat(img_ids, "h w c -> b (h w) c", b=bs)

    ref_img_ids = []
    ref_imgs_list = []
    pe_shift_w, pe_shift_h = w // 2, h // 2
    for ref_img in ref_imgs:
        _, _, ref_h1, ref_w1 = ref_img.shape
        ref_img = rearrange(ref_img, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=2, pw=2)
        if ref_img.shape[0] == 1 and bs > 1:
            ref_img = repeat(ref_img, "1 ... -> bs ...", bs=bs)
        ref_img_ids1 = torch.zeros(ref_h1 // 2, ref_w1 // 2, 3)
        # offset the ref img ids by the current maxima along height and width
        h_offset = pe_shift_h if pe in {'d', 'h'} else 0
        w_offset = pe_shift_w if pe in {'d', 'w'} else 0
        ref_img_ids1[..., 1] = ref_img_ids1[..., 1] + torch.arange(ref_h1 // 2)[:, None] + h_offset
        ref_img_ids1[..., 2] = ref_img_ids1[..., 2] + torch.arange(ref_w1 // 2)[None, :] + w_offset
        ref_img_ids1 = repeat(ref_img_ids1, "h w c -> b (h w) c", b=bs)
        ref_img_ids.append(ref_img_ids1)
        ref_imgs_list.append(ref_img)

        # update the pe shift
        pe_shift_h += ref_h1 // 2
        pe_shift_w += ref_w1 // 2

    if isinstance(prompt, str):
        prompt = [prompt]
    txt = t5(prompt)
    if txt.shape[0] == 1 and bs > 1:
        txt = repeat(txt, "1 ... -> bs ...", bs=bs)
    txt_ids = torch.zeros(bs, txt.shape[1], 3)

    vec = clip(prompt)
    if vec.shape[0] == 1 and bs > 1:
        vec = repeat(vec, "1 ... -> bs ...", bs=bs)

    return {
        "img": img,
        "img_ids": img_ids.to(img.device),
        "ref_img": tuple(ref_imgs_list),
        "ref_img_ids": [ref_img_id.to(img.device) for ref_img_id in ref_img_ids],
        "txt": txt.to(img.device),
        "txt_ids": txt_ids.to(img.device),
        "vec": vec.to(img.device),
    }


def time_shift(mu: float, sigma: float, t: Tensor):
    return math.exp(mu) / (math.exp(mu) + (1 / t - 1) ** sigma)


def get_lin_function(
    x1: float = 256, y1: float = 0.5, x2: float = 4096, y2: float = 1.15
):
    m = (y2 - y1) / (x2 - x1)
    b = y1 - m * x1
    return lambda x: m * x + b


def get_schedule(
    num_steps: int,
    image_seq_len: int,
    base_shift: float = 0.5,
    max_shift: float = 1.15,
    shift: bool = True,
) -> list[float]:
    # extra step for zero
    timesteps = torch.linspace(1, 0, num_steps + 1)

    # shifting the schedule to favor high timesteps for higher signal images
    if shift:
        # estimate mu based on linear interpolation between two points
        mu = get_lin_function(y1=base_shift, y2=max_shift)(image_seq_len)
        timesteps = time_shift(mu, 1.0, timesteps)

    return timesteps.tolist()


def denoise(
    model: Flux,
    # model input
    img: Tensor,
    img_ids: Tensor,
    txt: Tensor,
    txt_ids: Tensor,
    vec: Tensor,
    # sampling parameters
    timesteps: list[float],
    guidance: float = 4.0,
    ref_img: Tensor=None,
    ref_img_ids: Tensor=None,
):
    i = 0
    guidance_vec = torch.full((img.shape[0],), guidance, device=img.device, dtype=img.dtype)
    for t_curr, t_prev in tqdm(zip(timesteps[:-1], timesteps[1:]), total=len(timesteps) - 1):
        t_vec = torch.full((img.shape[0],), t_curr, dtype=img.dtype, device=img.device)
        pred = model(
            img=img,
            img_ids=img_ids,
            ref_img=ref_img,
            ref_img_ids=ref_img_ids,
            txt=txt,
            txt_ids=txt_ids,
            y=vec,
            timesteps=t_vec,
            guidance=guidance_vec
        )
        img = img + (t_prev - t_curr) * pred
        i += 1
    return img


def unpack(x: Tensor, height: int, width: int) -> Tensor:
    return rearrange(
        x,
        "b (h w) (c ph pw) -> b c (h ph) (w pw)",
        h=math.ceil(height / 16),
        w=math.ceil(width / 16),
        ph=2,
        pw=2,
    )
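
Schedule sketch (not part of the upload; the sequence length is an illustrative value): get_schedule returns num_steps + 1 timesteps from 1.0 down to 0.0, shifted toward high noise for longer image sequences, and denoise walks adjacent pairs of that list with an Euler update.

    # Illustrative only: inspect the shifted timestep schedule consumed by denoise().
    from uno.flux.sampling import get_schedule

    timesteps = get_schedule(num_steps=25, image_seq_len=1024, shift=True)
    print(len(timesteps))   # 26 values, starting at 1.0 and ending at 0.0
    print(timesteps[:3])    # early steps stay near 1.0 because of the shift

    # denoise() then applies img = img + (t_prev - t_curr) * pred
    # for each adjacent (t_curr, t_prev) pair in this list.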
uno/flux/util.py
ADDED
@@ -0,0 +1,411 @@
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates. All rights reserved.
# Copyright (c) 2024 Black Forest Labs and The XLabs-AI Team. All rights reserved.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

#     http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from dataclasses import dataclass

import torch
import json
import numpy as np
from huggingface_hub import hf_hub_download
from safetensors import safe_open
from safetensors.torch import load_file as load_sft

from .model import Flux, FluxParams
from .modules.autoencoder import AutoEncoder, AutoEncoderParams
from .modules.conditioner import HFEmbedder

import re
from uno.flux.modules.layers import DoubleStreamBlockLoraProcessor, SingleStreamBlockLoraProcessor

def load_model(ckpt, device='cpu'):
    if ckpt.endswith('safetensors'):
        from safetensors import safe_open
        pl_sd = {}
        with safe_open(ckpt, framework="pt", device=device) as f:
            for k in f.keys():
                pl_sd[k] = f.get_tensor(k)
    else:
        pl_sd = torch.load(ckpt, map_location=device)
    return pl_sd

def load_safetensors(path):
    tensors = {}
    with safe_open(path, framework="pt", device="cpu") as f:
        for key in f.keys():
            tensors[key] = f.get_tensor(key)
    return tensors

def get_lora_rank(checkpoint):
    for k in checkpoint.keys():
        if k.endswith(".down.weight"):
            return checkpoint[k].shape[0]

def load_checkpoint(local_path, repo_id, name):
    if local_path is not None:
        if '.safetensors' in local_path:
            print(f"Loading .safetensors checkpoint from {local_path}")
            checkpoint = load_safetensors(local_path)
        else:
            print(f"Loading checkpoint from {local_path}")
            checkpoint = torch.load(local_path, map_location='cpu')
    elif repo_id is not None and name is not None:
        print(f"Loading checkpoint {name} from repo id {repo_id}")
        checkpoint = load_from_repo_id(repo_id, name)
    else:
        raise ValueError(
            "LOADING ERROR: you must specify local_path or repo_id with name in HF to download"
        )
    return checkpoint


def c_crop(image):
    width, height = image.size
    new_size = min(width, height)
    left = (width - new_size) / 2
    top = (height - new_size) / 2
    right = (width + new_size) / 2
    bottom = (height + new_size) / 2
    return image.crop((left, top, right, bottom))

def pad64(x):
    return int(np.ceil(float(x) / 64.0) * 64 - x)

def HWC3(x):
    assert x.dtype == np.uint8
    if x.ndim == 2:
        x = x[:, :, None]
    assert x.ndim == 3
    H, W, C = x.shape
    assert C == 1 or C == 3 or C == 4
    if C == 3:
        return x
    if C == 1:
        return np.concatenate([x, x, x], axis=2)
    if C == 4:
        color = x[:, :, 0:3].astype(np.float32)
        alpha = x[:, :, 3:4].astype(np.float32) / 255.0
        y = color * alpha + 255.0 * (1.0 - alpha)
        y = y.clip(0, 255).astype(np.uint8)
        return y

@dataclass
class ModelSpec:
    params: FluxParams
    ae_params: AutoEncoderParams
    ckpt_path: str | None
    ae_path: str | None
    repo_id: str | None
    repo_flow: str | None
    repo_ae: str | None
    repo_id_ae: str | None


configs = {
    "flux-dev": ModelSpec(
        repo_id="black-forest-labs/FLUX.1-dev",
        repo_id_ae="black-forest-labs/FLUX.1-dev",
        repo_flow="flux1-dev.safetensors",
        repo_ae="ae.safetensors",
        ckpt_path=os.getenv("FLUX_DEV"),
        params=FluxParams(
            in_channels=64,
            vec_in_dim=768,
            context_in_dim=4096,
            hidden_size=3072,
            mlp_ratio=4.0,
            num_heads=24,
            depth=19,
            depth_single_blocks=38,
            axes_dim=[16, 56, 56],
            theta=10_000,
            qkv_bias=True,
            guidance_embed=True,
        ),
        ae_path=os.getenv("AE"),
        ae_params=AutoEncoderParams(
            resolution=256,
            in_channels=3,
            ch=128,
            out_ch=3,
            ch_mult=[1, 2, 4, 4],
            num_res_blocks=2,
            z_channels=16,
            scale_factor=0.3611,
            shift_factor=0.1159,
        ),
    ),
    "flux-dev-fp8": ModelSpec(
        repo_id="black-forest-labs/FLUX.1-dev",
        repo_id_ae="black-forest-labs/FLUX.1-dev",
        repo_flow="flux1-dev.safetensors",
        repo_ae="ae.safetensors",
        ckpt_path=os.getenv("FLUX_DEV_FP8"),
        params=FluxParams(
            in_channels=64,
            vec_in_dim=768,
            context_in_dim=4096,
            hidden_size=3072,
            mlp_ratio=4.0,
            num_heads=24,
            depth=19,
            depth_single_blocks=38,
            axes_dim=[16, 56, 56],
            theta=10_000,
            qkv_bias=True,
            guidance_embed=True,
        ),
        ae_path=os.getenv("AE"),
        ae_params=AutoEncoderParams(
            resolution=256,
            in_channels=3,
            ch=128,
            out_ch=3,
            ch_mult=[1, 2, 4, 4],
            num_res_blocks=2,
            z_channels=16,
            scale_factor=0.3611,
            shift_factor=0.1159,
        ),
    ),
    "flux-schnell": ModelSpec(
        repo_id="black-forest-labs/FLUX.1-schnell",
        repo_id_ae="black-forest-labs/FLUX.1-dev",
        repo_flow="flux1-schnell.safetensors",
        repo_ae="ae.safetensors",
        ckpt_path=os.getenv("FLUX_SCHNELL"),
        params=FluxParams(
            in_channels=64,
            vec_in_dim=768,
            context_in_dim=4096,
            hidden_size=3072,
            mlp_ratio=4.0,
            num_heads=24,
            depth=19,
            depth_single_blocks=38,
            axes_dim=[16, 56, 56],
            theta=10_000,
            qkv_bias=True,
            guidance_embed=False,
        ),
        ae_path=os.getenv("AE"),
        ae_params=AutoEncoderParams(
            resolution=256,
            in_channels=3,
            ch=128,
            out_ch=3,
            ch_mult=[1, 2, 4, 4],
            num_res_blocks=2,
            z_channels=16,
            scale_factor=0.3611,
            shift_factor=0.1159,
        ),
    ),
}


def print_load_warning(missing: list[str], unexpected: list[str]) -> None:
    if len(missing) > 0 and len(unexpected) > 0:
        print(f"Got {len(missing)} missing keys:\n\t" + "\n\t".join(missing))
        print("\n" + "-" * 79 + "\n")
        print(f"Got {len(unexpected)} unexpected keys:\n\t" + "\n\t".join(unexpected))
    elif len(missing) > 0:
        print(f"Got {len(missing)} missing keys:\n\t" + "\n\t".join(missing))
    elif len(unexpected) > 0:
        print(f"Got {len(unexpected)} unexpected keys:\n\t" + "\n\t".join(unexpected))

def load_from_repo_id(repo_id, checkpoint_name):
    ckpt_path = hf_hub_download(repo_id, checkpoint_name)
    sd = load_sft(ckpt_path, device='cpu')
    return sd

def load_flow_model(name: str, device: str | torch.device = "cuda", hf_download: bool = True):
    # Loading Flux
    print("Init model")
    ckpt_path = configs[name].ckpt_path
    if (
        ckpt_path is None
        and configs[name].repo_id is not None
        and configs[name].repo_flow is not None
        and hf_download
    ):
        ckpt_path = hf_hub_download(configs[name].repo_id, configs[name].repo_flow)

    with torch.device("meta" if ckpt_path is not None else device):
        model = Flux(configs[name].params).to(torch.bfloat16)

    if ckpt_path is not None:
        print("Loading checkpoint")
        # load_sft doesn't support torch.device
        sd = load_model(ckpt_path, device=str(device))
        missing, unexpected = model.load_state_dict(sd, strict=False, assign=True)
        print_load_warning(missing, unexpected)
    return model

def load_flow_model_only_lora(
    name: str,
    device: str | torch.device = "cuda",
    hf_download: bool = True,
    lora_rank: int = 16,
    use_fp8: bool = False
):
    # Loading Flux
    print("Init model")
    ckpt_path = configs[name].ckpt_path
    if (
        ckpt_path is None
        and configs[name].repo_id is not None
        and configs[name].repo_flow is not None
        and hf_download
    ):
        ckpt_path = hf_hub_download(configs[name].repo_id, configs[name].repo_flow.replace("sft", "safetensors"))

    if hf_download:
        try:
            lora_ckpt_path = hf_hub_download("bytedance-research/UNO", "dit_lora.safetensors")
        except:
            lora_ckpt_path = os.environ.get("LORA", None)
    else:
        lora_ckpt_path = os.environ.get("LORA", None)

    with torch.device("meta" if ckpt_path is not None else device):
        model = Flux(configs[name].params)


    model = set_lora(model, lora_rank, device="meta" if lora_ckpt_path is not None else device)

    if ckpt_path is not None:
        print("Loading lora")
        lora_sd = load_sft(lora_ckpt_path, device=str(device)) if lora_ckpt_path.endswith("safetensors")\
            else torch.load(lora_ckpt_path, map_location='cpu')

        print("Loading main checkpoint")
        # load_sft doesn't support torch.device

        if ckpt_path.endswith('safetensors'):
            if use_fp8:
                print(
                    "####\n"
                    "We are in fp8 mode right now, since the fp8 checkpoint of XLabs-AI/flux-dev-fp8 seems broken\n"
                    "we convert the fp8 checkpoint on flight from bf16 checkpoint\n"
                    "If your storage is constrained"
                    "you can save the fp8 checkpoint and replace the bf16 checkpoint by yourself\n"
                )
                sd = load_sft(ckpt_path, device="cpu")
                sd = {k: v.to(dtype=torch.float8_e4m3fn, device=device) for k, v in sd.items()}
            else:
                sd = load_sft(ckpt_path, device=str(device))

            sd.update(lora_sd)
            missing, unexpected = model.load_state_dict(sd, strict=False, assign=True)
        else:
            dit_state = torch.load(ckpt_path, map_location='cpu')
            sd = {}
            for k in dit_state.keys():
                sd[k.replace('module.','')] = dit_state[k]
            sd.update(lora_sd)
            missing, unexpected = model.load_state_dict(sd, strict=False, assign=True)
            model.to(str(device))
        print_load_warning(missing, unexpected)
    return model


def set_lora(
    model: Flux,
    lora_rank: int,
    double_blocks_indices: list[int] | None = None,
    single_blocks_indices: list[int] | None = None,
    device: str | torch.device = "cpu",
) -> Flux:
    double_blocks_indices = list(range(model.params.depth)) if double_blocks_indices is None else double_blocks_indices
    single_blocks_indices = list(range(model.params.depth_single_blocks)) if single_blocks_indices is None \
        else single_blocks_indices

    lora_attn_procs = {}
    with torch.device(device):
        for name, attn_processor in model.attn_processors.items():
            match = re.search(r'\.(\d+)\.', name)
            if match:
                layer_index = int(match.group(1))

            if name.startswith("double_blocks") and layer_index in double_blocks_indices:
                lora_attn_procs[name] = DoubleStreamBlockLoraProcessor(dim=model.params.hidden_size, rank=lora_rank)
            elif name.startswith("single_blocks") and layer_index in single_blocks_indices:
                lora_attn_procs[name] = SingleStreamBlockLoraProcessor(dim=model.params.hidden_size, rank=lora_rank)
            else:
                lora_attn_procs[name] = attn_processor
        model.set_attn_processor(lora_attn_procs)
    return model


def load_flow_model_quintized(name: str, device: str | torch.device = "cuda", hf_download: bool = True):
    # Loading Flux
    from optimum.quanto import requantize
    print("Init model")
    ckpt_path = configs[name].ckpt_path
    if (
        ckpt_path is None
        and configs[name].repo_id is not None
        and configs[name].repo_flow is not None
        and hf_download
    ):
        ckpt_path = hf_hub_download(configs[name].repo_id, configs[name].repo_flow)
        # json_path = hf_hub_download(configs[name].repo_id, 'flux_dev_quantization_map.json')


    model = Flux(configs[name].params).to(torch.bfloat16)

    print("Loading checkpoint")
    # load_sft doesn't support torch.device
    sd = load_sft(ckpt_path, device='cpu')
    sd = {k: v.to(dtype=torch.float8_e4m3fn, device=device) for k, v in sd.items()}
    model.load_state_dict(sd, assign=True)
    return model
    with open(json_path, "r") as f:
        quantization_map = json.load(f)
    print("Start a quantization process...")
    requantize(model, sd, quantization_map, device=device)
    print("Model is quantized!")
    return model

def load_t5(device: str | torch.device = "cuda", max_length: int = 512) -> HFEmbedder:
    # max length 64, 128, 256 and 512 should work (if your sequence is short enough)
    version = os.environ.get("T5", "xlabs-ai/xflux_text_encoders")
    return HFEmbedder(version, max_length=max_length, torch_dtype=torch.bfloat16).to(device)

def load_clip(device: str | torch.device = "cuda") -> HFEmbedder:
    version = os.environ.get("CLIP", "openai/clip-vit-large-patch14")
    return HFEmbedder(version, max_length=77, torch_dtype=torch.bfloat16).to(device)


def load_ae(name: str, device: str | torch.device = "cuda", hf_download: bool = True) -> AutoEncoder:
    ckpt_path = configs[name].ae_path
    if (
        ckpt_path is None
        and configs[name].repo_id is not None
        and configs[name].repo_ae is not None
        and hf_download
    ):
        ckpt_path = hf_hub_download(configs[name].repo_id_ae, configs[name].repo_ae)

    # Loading the autoencoder
    print("Init AE")
    with torch.device("meta" if ckpt_path is not None else device):
        ae = AutoEncoder(configs[name].ae_params)

    if ckpt_path is not None:
        sd = load_sft(ckpt_path, device=str(device))
        missing, unexpected = ae.load_state_dict(sd, strict=False, assign=True)
        print_load_warning(missing, unexpected)
    return ae
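
Loader sketch (not part of the upload; downloading the flux-dev weights needs Hugging Face access to the gated repo unless the FLUX_DEV / AE / LORA env vars point at local checkpoints):

    # Illustrative only: assemble the UNO components directly from the helpers above.
    import torch
    from uno.flux.util import configs, load_ae, load_clip, load_flow_model_only_lora, load_t5

    device = torch.device("cuda")
    clip = load_clip(device)                 # CLIP text encoder, max_length=77
    t5 = load_t5(device, max_length=512)     # T5 text encoder
    ae = load_ae("flux-dev", device=device)  # FLUX autoencoder
    model = load_flow_model_only_lora(       # base DiT plus UNO LoRA attention processors
        "flux-dev", device=device, use_fp8=False,
    )                                        # lora_rank must match the dit_lora checkpoint

    print(configs["flux-dev"].params.depth, configs["flux-dev"].params.depth_single_blocks)  # 19 38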