Spaces: Running on Zero
Upload folder using huggingface_hub
This view is limited to 50 files because it contains too many changes. See the raw diff for the full changeset.
- .gitignore +19 -0
- OSS/OSS.py +357 -0
- OSS/__init__.py +0 -0
- OSS/model_wrap.py +124 -0
- OSS/utils.py +22 -0
- app.py +306 -0
- app_os.py +90 -0
- generate-pi-i2v-myinfer-oss-stu.py +452 -0
- generate-pi-i2v-myinfer-oss-tea.py +453 -0
- generate-pi-i2v.py +418 -0
- get-med.py +25 -0
- note-webui.txt +10 -0
- preprocess/extract-clip.py +57 -0
- preprocess/extract-t5.py +46 -0
- preprocess/extract-vae1.py +62 -0
- preprocess/extract-vae_all.py +64 -0
- pyproject.toml +41 -0
- req-fastvideo.txt +59 -0
- requirements.txt +15 -0
- scripts/distill/distill_cog.sh +40 -0
- scripts/distill/distill_cog720-49.sh +40 -0
- scripts/distill/distill_cog720-49mix246adv.sh +40 -0
- scripts/distill/distill_cog720-49mix26.sh +42 -0
- scripts/distill/distill_cog720-49mix26b.sh +42 -0
- scripts/distill/distill_hunyuan.sh +40 -0
- scripts/distill/distill_mochi.sh +38 -0
- scripts/finetune/finetune_wan.sh +39 -0
- scripts/huggingface/download_hf.py +39 -0
- scripts/huggingface/upload_hf.py +9 -0
- scripts/inference/inference_diffusers_hunyuan.sh +20 -0
- scripts/inference/inference_hunyuan.sh +19 -0
- scripts/inference/inference_mochi_sp.sh +19 -0
- scripts/preprocess/preprocess_cog_data.sh +35 -0
- scripts/preprocess/preprocess_hunyuan_data.sh +33 -0
- scripts/preprocess/preprocess_mochi_data.sh +33 -0
- wan/__init__.py +3 -0
- wan/configs/__init__.py +44 -0
- wan/configs/shared_config.py +19 -0
- wan/configs/wan_i2v_14B.py +35 -0
- wan/configs/wan_t2v_14B.py +29 -0
- wan/configs/wan_t2v_1_3B.py +29 -0
- wan/distributed/__init__.py +0 -0
- wan/distributed/fsdp.py +32 -0
- wan/distributed/xdit_context_parallel.py +192 -0
- wan/image2video.py +345 -0
- wan/image2video_if_oss.py +380 -0
- wan/image2video_mdinfer_oss_stu.py +454 -0
- wan/image2video_mdinfer_oss_tea.py +513 -0
- wan/modules/__init__.py +16 -0
- wan/modules/attention.py +208 -0
.gitignore
ADDED
@@ -0,0 +1,19 @@
*output*
*__pycache__*
*samples*
*runs*
*checkpoints*
master_ip
*logs*
*.DS_Store
.idea
.ipynb_checkpoints
*ckpts*
*kernel_meta*
*egg*
*wandb*
output_root*
fastvideo
outputs*
wandb*
*test_data*
OSS/OSS.py
ADDED
@@ -0,0 +1,357 @@
import torch
import numpy as np
import logging


from .utils import _broadcast_tensor


def cal_medium(oss_steps_all):
    ave_steps = []
    for k in range(len(oss_steps_all[0])):
        l = []
        for i in range(len(oss_steps_all)):
            l.append(oss_steps_all[i][k])
        l.sort()
        ave_steps.append((l[len(l)//2] + l[len(l)//2 - 1])//2)
    return ave_steps


@torch.no_grad()
def infer_OSS(oss_steps, model, z, class_emb, device, renorm_flag=False, max_amp=None, min_amp=None, float32=True, model_kwargs=None):
    # z [B,C,H,W]

    N = len(oss_steps)
    B = z.shape[0]
    oss_steps = [0] + oss_steps
    ori_z = z.clone()

    # First is zero
    timesteps = model.fm_steps

    if model_kwargs is None:
        model_kwargs = {}

    for i in reversed(range(1, N+1)):
        logging.info(f"steps {i}")
        t = torch.ones((B,), device=device, dtype=torch.long) * oss_steps[i]

        if renorm_flag:
            max_s = torch.quantile(z.reshape((z.shape[0], -1)), 0.95, dim=1)
            min_s = torch.quantile(z.reshape((z.shape[0], -1)), 0.05, dim=1)

            max_amp_tmp = max_amp[oss_steps[i]-1]
            min_amp_tmp = min_amp[oss_steps[i]-1]

            ak = (max_amp_tmp - min_amp_tmp) / (max_s - min_s)
            ab = min_amp_tmp - ak * min_s

            z = _broadcast_tensor(ak, z.shape) * z + _broadcast_tensor(ab, z.shape)

        vt = model(z, t, class_emb, model_kwargs)
        if float32:
            z = z.to(torch.float32)
        z = z + (timesteps[oss_steps[i-1]] - timesteps[oss_steps[i]]) * vt
        if float32:
            z = z.to(ori_z.dtype)

    return z


@torch.no_grad()
def search_OSS_video(model, z, batch_size, class_emb, device, teacher_steps=200, student_steps=5, norm=2, model_kwargs=None, frame_type="6", channel_type="4", random_channel=False, float32=True):
    # z [B,C,H,W]
    # model_kwargs doesn't contain class_embedding, which is another separate input
    # the class_embedding is the same for all the searching samples here.

    B = batch_size
    N = teacher_steps
    STEP = student_steps
    assert z.shape[0] == B
    channel_size = z.shape[1]
    frame_size = z.shape[2]

    # First is zero
    timesteps = model.fm_steps

    if model_kwargs is None:
        model_kwargs = {}

    # Compute the teacher trajectory
    traj_tea = torch.stack([torch.ones_like(z)]*(N+1), dim=0)
    traj_tea[N] = z
    z_tea = z.clone()

    for i in reversed(range(1, N+1)):
        print("teachering,%s" % i)

        t = torch.ones((B,), device=device, dtype=torch.long) * i
        vt = model(z_tea, t, class_emb, model_kwargs)
        if float32:
            z_tea = z_tea.to(torch.float32)
        z_tea = z_tea + vt * (timesteps[i-1] - timesteps[i])
        if float32:
            z_tea = z_tea.to(z.dtype)

        traj_tea[i-1] = z_tea.clone()

    # solving dynamic programming
    all_steps = []

    for i_batch in range(B):  # process each image separately
        z_cur = z[i_batch].clone().unsqueeze(0)
        if class_emb is not None:
            class_emb_cur = class_emb[i_batch].clone().unsqueeze(0)
        else:
            class_emb_cur = None

        tracestep = [torch.ones(N+1, device=device, dtype=torch.long)*N for _ in range(STEP+1)]
        dp = torch.ones((STEP+1, N+1), device=device)*torch.inf
        z_prev = torch.cat([z_cur]*(N+1), dim=0)
        z_next = z_prev.clone()

        for k in range(STEP):
            print("studenting,%s" % k)
            logging.info(f"Doing k step solving {k}")

            if random_channel and channel_type != "all":
                channel_select = np.random.choice(range(channel_size), size=int(channel_type), replace=False)

            for i in reversed(range(1, N+1)):
                z_i = z_prev[i].unsqueeze(0)
                t = torch.ones((1,), device=device, dtype=torch.long) * i
                vt = model(z_i, t, class_emb_cur, model_kwargs)

                for j in reversed(range(0, i)):
                    if float32:
                        z_i = z_i.to(torch.float32)
                    z_nxt = z_i + vt * (timesteps[j] - timesteps[i])
                    if float32:
                        z_nxt = z_nxt.to(z.dtype)

                    if random_channel:
                        pass
                    elif channel_type == "all":
                        channel_select = list(range(channel_size))
                    else:
                        channel_select = list(range(int(channel_type)))

                    if frame_type == "all":
                        frame_select = list(range(frame_size))
                    else:
                        frame_select = torch.linspace(0, frame_size-1, int(frame_type), dtype=torch.long).tolist()

                    channel_select_tensor = torch.tensor(channel_select, dtype=torch.long, device=device)
                    frame_select_tensor = torch.tensor(frame_select, dtype=torch.long, device=device)

                    z_nxt_select = z_nxt[0, channel_select_tensor, ...]
                    traj_tea_select = traj_tea[j, i_batch, channel_select_tensor, ...]

                    z_nxt_select = z_nxt_select[:, frame_select_tensor, ...]
                    traj_tea_select = traj_tea_select[:, frame_select_tensor, ...]

                    cost = (torch.abs(z_nxt_select - traj_tea_select))**norm
                    cost = cost.mean()

                    if cost < dp[k][j]:
                        dp[k][j] = cost
                        tracestep[k][j] = i
                        z_next[j] = z_nxt

            dp[k+1] = dp[k].clone()
            tracestep[k+1] = tracestep[k].clone()
            z_prev = z_next.clone()

            logging.info(f"finish {k} steps")

            cur_step = [0]
            for kk in reversed(range(k+1)):
                j = cur_step[-1]
                cur_step.append(int(tracestep[kk][j].item()))

            logging.info(cur_step)

        # trace back
        final_step = [0]
        for k in reversed(range(STEP)):
            j = final_step[-1]
            final_step.append(int(tracestep[k][j].item()))
        logging.info(final_step)
        all_steps.append(final_step[1:])

    return all_steps[0]


@torch.no_grad()
def search_OSS(model, z, batch_size, class_emb, device, teacher_steps=200, student_steps=5, model_kwargs=None):
    # z [B,C,H,W]
    # model_kwargs doesn't contain class_embedding, which is another separate input
    # the class_embedding is the same for all the searching samples here.

    B = batch_size
    N = teacher_steps
    STEP = student_steps
    assert z.shape[0] == B

    # First is zero
    timesteps = model.fm_steps

    if model_kwargs is None:
        model_kwargs = {}

    # Compute the teacher trajectory
    traj_tea = torch.stack([torch.ones_like(z)]*(N+1), dim=0)
    traj_tea[N] = z
    z_tea = z.clone()

    for i in reversed(range(1, N+1)):
        t = torch.ones((B,), device=device, dtype=torch.long) * i
        vt = model(z_tea, t, class_emb, model_kwargs)
        z_tea = z_tea + vt * (timesteps[i-1] - timesteps[i])
        traj_tea[i-1] = z_tea.clone()

    # solving dynamic programming
    all_steps = []

    for i_batch in range(B):  # process each image separately
        z_cur = z[i_batch].clone().unsqueeze(0)
        if class_emb is not None:
            class_emb_cur = class_emb[i_batch].clone().unsqueeze(0)
        else:
            class_emb_cur = None
        tracestep = [torch.ones(N+1, device=device, dtype=torch.long)*N for _ in range(STEP+1)]
        dp = torch.ones((STEP+1, N+1), device=device)*torch.inf
        z_prev = torch.cat([z_cur]*(N+1), dim=0)
        z_next = z_prev.clone()

        for k in range(STEP):
            logging.info(f"Doing k step solving {k}")
            for i in reversed(range(1, N+1)):
                z_i = z_prev[i].unsqueeze(0)
                t = torch.ones((1,), device=device, dtype=torch.long) * i
                vt = model(z_i, t, class_emb_cur, model_kwargs)

                for j in reversed(range(0, i)):
                    z_j = z_i + vt * (timesteps[j] - timesteps[i])
                    cost = (z_j - traj_tea[j, i_batch])**2
                    cost = cost.mean()

                    if cost < dp[k][j]:
                        dp[k][j] = cost
                        tracestep[k][j] = i
                        z_next[j] = z_j

            dp[k+1] = dp[k].clone()
            tracestep[k+1] = tracestep[k].clone()
            z_prev = z_next.clone()

        # trace back
        final_step = [0]
        for k in reversed(range(STEP)):
            j = final_step[-1]
            final_step.append(int(tracestep[k][j].item()))
        logging.info(final_step)
        all_steps.append(final_step[1:])

    return all_steps


@torch.no_grad()
def search_OSS_batch(model, z, batch_size, class_emb, device, teacher_steps=200, student_steps=5, model_kwargs=None):
    # z [B,C,H,W]
    # model_kwargs doesn't contain class_embedding, which is another separate input
    # the class_embedding is the same for all the searching samples here.

    B = batch_size
    N = teacher_steps
    STEP = student_steps

    # First is zero
    timesteps = model.fm_steps

    if model_kwargs is None:
        model_kwargs = {}

    # Compute the teacher trajectory
    traj_tea = torch.stack([torch.ones_like(z)]*(N+1), dim=0)
    traj_tea[N] = z
    z_tea = z.clone()

    for i in reversed(range(1, N+1)):
        t = torch.ones((B,), device=device, dtype=torch.long) * i
        vt = model(z_tea, t, class_emb, model_kwargs)
        z_tea = z_tea + (timesteps[i-1] - timesteps[i]) * vt
        traj_tea[i-1] = z_tea.clone()

    times = torch.linspace(0, N, N+1, device=device).long()
    t_in = torch.linspace(1, N, N, device=device).long()
    # solving dynamic programming
    all_steps = []

    for i_batch in range(B):  # process each image separately
        z_cur = z[i_batch].clone().unsqueeze(0)
        if class_emb is not None:
            class_emb_cur = class_emb[i_batch].clone().unsqueeze(0).expand(N)
        else:
            class_emb_cur = None
        tracestep = [torch.ones(N+1, device=device, dtype=torch.long)*N for _ in range(STEP+1)]
        dp = torch.ones((STEP+1, N+1), device=device)*torch.inf
        z_prev = torch.cat([z_cur]*(N+1), dim=0)
        z_next = z_prev.clone()

        for k in range(STEP):
            logging.info(f"Doing k step solving {k}")
            vt = model(z_prev[1:], t_in, class_emb_cur, model_kwargs)

            for i in reversed(range(1, N+1)):
                t = torch.ones((i,), device=device, dtype=torch.long) * i
                z_i_batch = torch.stack([z_prev[i]]*i, dim=0)
                dt = timesteps[times[:i]] - timesteps[t]
                z_j_batch = z_i_batch[:i] + _broadcast_tensor(dt, z_i_batch.shape) * vt[i-1].unsqueeze(0)

                cost = (z_j_batch - traj_tea[:i, i_batch, ...])**2
                cost = cost.mean(dim=tuple(range(1, len(cost.shape))))
                mask = cost < dp[k, :i]

                dp[k, :i] = torch.where(mask, cost, dp[k, :i])
                tracestep[k][:i] = torch.where(mask, i, tracestep[k][:i])
                expanded_mask = _broadcast_tensor(mask, z_i_batch.shape)
                z_next[:i] = torch.where(expanded_mask, z_j_batch, z_next[:i])

            dp[k+1] = dp[k].clone()
            tracestep[k+1] = tracestep[k].clone()
            z_prev = z_next.clone()

        # trace back
        final_step = [0]
        for k in reversed(range(STEP)):
            j = final_step[-1]
            final_step.append(int(tracestep[k][j].item()))
        logging.info(final_step)
        all_steps.append(final_step[1:])

    return all_steps
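For orientation, the intended flow is: run the search once against a wrapped teacher model to obtain a per-sample step schedule, then re-run sampling with infer_OSS using only those steps. Below is a minimal, self-contained sketch of that flow; ToyFlowModel is a hypothetical stand-in, not part of this repo, and any callable exposing an fm_steps schedule and the (z, t, class_emb, model_kwargs) signature used above would work the same way.

import torch
from OSS.OSS import search_OSS, infer_OSS

class ToyFlowModel:
    """Hypothetical stand-in: a constant velocity field pointing toward zero."""
    def __init__(self, teacher_steps, device):
        # fm_steps[0] = 0 (clean sample) ... fm_steps[N] = 1 (pure noise),
        # matching the indexing that search_OSS / infer_OSS rely on.
        self.fm_steps = torch.linspace(0, 1, teacher_steps + 1, device=device)

    def __call__(self, z, t, class_emb, model_kwargs):
        return -z

device = "cpu"
teacher_steps, student_steps = 20, 5
model = ToyFlowModel(teacher_steps, device)

z = torch.randn(2, 4, 8, 8, device=device)            # [B, C, H, W] starting noise
steps = search_OSS(model, z, batch_size=2, class_emb=None, device=device,
                   teacher_steps=teacher_steps, student_steps=student_steps)
print(steps)                                            # one step list per sample

# Re-sample the first item using only its searched steps.
sample = infer_OSS(steps[0], model, z[:1], class_emb=None, device=device)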
OSS/__init__.py
ADDED
File without changes
OSS/model_wrap.py
ADDED
@@ -0,0 +1,124 @@
import pdb

import torch
import numpy as np
from .utils import _broadcast_tensor, _extract_into_tensor


class _WrappedModel_DiT:
    def __init__(self, model, diffusion, device=None, class_emb_null=None):
        self.model = model
        self.diffusion = diffusion
        self._predict_xstart_from_eps = diffusion._predict_xstart_from_eps

        self.diffusion_t_map = list(diffusion.use_timesteps)
        self.diffusion_t_map.sort()

        self.diffusion_t = [self.diffusion_t_map[i] for i in range(diffusion.num_timesteps)]  # list(range(diffusion.num_timesteps))
        self.diffusion_t = np.array(self.diffusion_t)

        self.diffusion_sqrt_alpha_cumprod = np.array([diffusion.sqrt_alphas_cumprod[i] for i in range(diffusion.num_timesteps)])
        self.fm_steps = [(1 - self.diffusion_sqrt_alpha_cumprod[i]**2)**0.5 / (self.diffusion_sqrt_alpha_cumprod[i] + (1 - self.diffusion_sqrt_alpha_cumprod[i]**2)**0.5) for i in range(len(self.diffusion_t))]

        self.fm_steps = torch.tensor([0] + self.fm_steps, device=device)
        self.y_null = class_emb_null

    def __call__(self, x, t, y, kwargs):

        N = len(self.diffusion_t)
        B, C, H, W = x.shape
        diffusion_x = torch.zeros_like(x)
        diffusion_t = _extract_into_tensor(self.diffusion_t, t-1, t.shape).long()

        t_fm = self.fm_steps[t]
        diffusion_x_tmp = _extract_into_tensor(self.diffusion.sqrt_alphas_cumprod, t-1, x.shape) * x / (1 + 1e-4 - _broadcast_tensor(t_fm, x.shape))
        diffusion_x_tmp = diffusion_x_tmp.to(torch.float)
        diffusion_x = torch.where(_broadcast_tensor(t, x.shape) == N, x, diffusion_x_tmp)

        y_null_batch = torch.cat([self.y_null[0].unsqueeze(0)]*B, dim=0)
        y_new = torch.cat([y, y_null_batch], 0)

        model_output = self.model(torch.cat([diffusion_x, diffusion_x], dim=0), torch.cat([diffusion_t, diffusion_t], dim=0), y_new, **kwargs)
        model_output = model_output[:B]
        model_output, _ = torch.split(model_output, C, dim=1)
        x0_diffusion = self._predict_xstart_from_eps(x_t=diffusion_x, t=t-1, eps=model_output)
        vt = (x - x0_diffusion) / (_broadcast_tensor(t_fm, x.shape))
        vt = vt.to(diffusion_x.dtype)
        return vt


class _WrappedModel_Sora:
    def __init__(self, model, guidance_scale, y_null, timesteps, num_timesteps, mask_t):
        self.model = model
        self.guidance_scale = guidance_scale
        self.y_null = y_null

        self.timesteps = [torch.tensor([0], device=model.device)] + timesteps[::-1]
        self.timesteps = torch.cat(self.timesteps, dim=0)
        self.fm_steps = [x/num_timesteps for x in self.timesteps]
        self.mask_t = mask_t

    def __call__(self, x, t, y, kwargs):
        y = torch.cat([y, self.y_null], dim=0)

        t_in = self.timesteps[t]

        x_in = torch.cat([x, x], dim=0)
        # breakpoint()
        mask_t_upper = self.mask_t >= t_in.unsqueeze(1)
        kwargs["x_mask"] = mask_t_upper.repeat(2, 1)

        t_in = torch.cat([t_in, t_in], dim=0)
        with torch.no_grad():
            pred = self.model(x_in, t_in, y, **kwargs).chunk(2, dim=1)[0]
        # breakpoint()
        pred_cond, pred_uncond = pred.chunk(2, dim=0)
        v_pred = pred_uncond + self.guidance_scale * (pred_cond - pred_uncond)

        return -v_pred


class _WrappedModel_Wan:
    def __init__(self, model, timesteps, num_timesteps, context_null, guide_scale):
        self.model = model
        self.context_null = context_null
        self.guide_scale = guide_scale
        fm_steps = torch.cat([timesteps, torch.zeros_like(timesteps[0]).view(1)])
        self.time_steps = torch.flip(fm_steps, dims=[0])
        self.fm_steps = self.time_steps/num_timesteps

    def __call__(self, x, t, y, kwargs):
        self.time_steps = self.time_steps.to(t.device)
        t = self.time_steps[t]
        noise_pred_cond = self.model(x, t=t, context=y, **kwargs)[0]
        noise_pred_uncond = self.model(x, t=t, context=self.context_null, **kwargs)[0]
        noise_pred = noise_pred_uncond + self.guide_scale * (noise_pred_cond - noise_pred_uncond)
        return noise_pred


class _WrappedModel_FLUX:
    def __init__(self, model, timesteps, num_timesteps):
        self.model = model
        fm_steps = torch.cat([timesteps, torch.zeros_like(timesteps[0]).view(1)])
        self.time_steps = torch.flip(fm_steps, dims=[0])
        self.fm_steps = self.time_steps/num_timesteps

    def __call__(self, x, t, y, kwargs):
        t = self.time_steps[t]
        t = t.expand(x.shape[0]).to(x.dtype) / 1000
        pred = self.model(hidden_states=x, timestep=t, **kwargs)[0]
        return pred
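All four wrappers expose the same minimal contract that OSS/OSS.py relies on: an fm_steps schedule indexed by the integer OSS step (0 = clean, last = noise) and a __call__(x, t, y, kwargs) that returns the model prediction at that step. The sketch below exercises the Wan wrapper with dummy stand-ins just to show the plumbing; DummyWanModel, the 1000-step grid, and the placeholder context shapes are assumptions for illustration, not the real Wan transformer or text encoder.

import torch
from OSS.model_wrap import _WrappedModel_Wan

class DummyWanModel:
    """Hypothetical stand-in for the Wan DiT; returns one prediction per latent."""
    def __call__(self, x, t, context, **kwargs):
        return [torch.zeros_like(xi) for xi in x]

num_train_timesteps = 1000
timesteps = torch.linspace(num_train_timesteps, 1, steps=40).round()   # descending scheduler grid (assumed)

wrapped = _WrappedModel_Wan(
    DummyWanModel(),
    timesteps,
    num_train_timesteps,
    context_null=[torch.zeros(77, 4096)],   # placeholder encoded empty prompt
    guide_scale=5.0,
)

x = [torch.randn(16, 1, 32, 32)]            # latent list, one entry per sample
t = torch.tensor([40], dtype=torch.long)    # integer OSS step index (noisiest step here)
v = wrapped(x, t, [torch.zeros(77, 4096)], {})
print(wrapped.fm_steps[:3], v.shape)        # ascending schedule in [0, 1]; CFG-combined prediction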
OSS/utils.py
ADDED
@@ -0,0 +1,22 @@

import torch


def _broadcast_tensor(a, broadcast_shape):
    while len(a.shape) < len(broadcast_shape):
        a = a[..., None]
    return a.expand(broadcast_shape)

def _extract_into_tensor(arr, timesteps, broadcast_shape):
    """
    Extract values from a 1-D numpy array for a batch of indices.
    :param arr: the 1-D numpy array.
    :param timesteps: a tensor of indices into the array to extract.
    :param broadcast_shape: a larger shape of K dimensions with the batch
                            dimension equal to the length of timesteps.
    :return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
    """
    res = torch.from_numpy(arr).to(device=timesteps.device)[timesteps].float()
    while len(res.shape) < len(broadcast_shape):
        res = res[..., None]
    return res + torch.zeros(broadcast_shape, device=timesteps.device)
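A quick illustration of what the two helpers do (the tensors and schedule values here are made up):

import numpy as np
import torch
from OSS.utils import _broadcast_tensor, _extract_into_tensor

scale = torch.tensor([0.5, 2.0])                 # one scalar per batch element
z = torch.randn(2, 4, 8, 8)
scaled = _broadcast_tensor(scale, z.shape) * z   # scale expanded to [2, 4, 8, 8]

alphas = np.linspace(1.0, 0.0, 50)               # any per-step 1-D schedule
t = torch.tensor([0, 10], dtype=torch.long)
a_t = _extract_into_tensor(alphas, t, z.shape)   # per-sample value, broadcast to z's shape
print(scaled.shape, a_t.shape)                   # torch.Size([2, 4, 8, 8]) twice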
app.py
ADDED
@@ -0,0 +1,306 @@
# -*- coding: utf-8 -*-
'''
The frame count is selectable:
49 -> 3 s
57 -> 3.5 s
65 -> 4 s
73 -> 4.5 s
81 -> 5 s
Acceleration modes
40/tea40/10 (not supported for now)

image2video_teacache1 is the version that supports optionally passing the tea parameter and a duration in seconds.
'''
# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved
import logging, os
os.makedirs("/root/weights", exist_ok=True)
cmd = "huggingface-cli download IndexTeam/AnisoraV3 --include=\"14B/*\" --local-dir=/root/weights --token %s" % os.environ['token']
os.system(cmd)

# os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"
from time import time as ttime
import argparse
from datetime import datetime
import logging
import sys
import warnings
from fastapi import FastAPI
import uvicorn
import gradio as gr
warnings.filterwarnings('ignore')

import torch, random
import torch.distributed as dist
from PIL import Image

import wan
from wan.image2video_if_oss import WanI2V
from wan.configs import WAN_CONFIGS, SIZE_CONFIGS, MAX_AREA_CONFIGS, SUPPORTED_SIZES
from wan.utils.prompt_extend import DashScopePromptExpander, QwenPromptExpander
from wan.utils.utils import cache_video, cache_image, str2bool

value2speed = {
    "原版": 0,    # original (no acceleration)
    "加速版": 1,  # accelerated
}
EXAMPLE_PROMPT = {
    "t2v-1.3B": {
        "prompt": "Two anthropomorphic cats in comfy boxing gear and bright gloves fight intensely on a spotlighted stage.",
    },
    "t2v-14B": {
        "prompt": "Two anthropomorphic cats in comfy boxing gear and bright gloves fight intensely on a spotlighted stage.",
    },
    "t2i-14B": {
        "prompt": "一个朴素端庄的美人",
    },
    "i2v-14B": {
        "prompt":
            "Summer beach vacation style, a white cat wearing sunglasses sits on a surfboard. The fluffy-furred feline gazes directly at the camera with a relaxed expression. Blurred beach scenery forms the background featuring crystal-clear waters, distant green hills, and a blue sky dotted with white clouds. The cat assumes a naturally relaxed posture, as if savoring the sea breeze and warm sunlight. A close-up shot highlights the feline's intricate details and the refreshing atmosphere of the seaside.",
        "image":
            "examples/i2v_input.JPG",
    },
}


def _validate_args(args):
    # Basic check
    assert args.ckpt_dir is not None, "Please specify the checkpoint directory."
    assert args.task in WAN_CONFIGS, f"Unsupported task: {args.task}"
    assert args.task in EXAMPLE_PROMPT, f"Unsupported task: {args.task}"

    # The default sampling steps are 40 for image-to-video tasks and 50 for text-to-video tasks.
    if args.sample_steps is None:
        args.sample_steps = 40 if "i2v" in args.task else 50

    if args.sample_shift is None:
        args.sample_shift = 5.0
        if "i2v" in args.task and args.size in ["832*480", "480*832"]:
            args.sample_shift = 3.0

    # The default number of frames is 1 for text-to-image tasks and 81 for other tasks.
    if args.frame_num is None:
        args.frame_num = 1 if "t2i" in args.task else 81

    # T2I frame_num check
    if "t2i" in args.task:
        assert args.frame_num == 1, f"Unsupported frame_num {args.frame_num} for task {args.task}"

    args.base_seed = args.base_seed if args.base_seed >= 0 else random.randint(
        0, sys.maxsize)
    # Size check
    assert args.size in SUPPORTED_SIZES[
        args.task], f"Unsupported size {args.size} for task {args.task}, supported sizes are: {', '.join(SUPPORTED_SIZES[args.task])}"


def _parse_args():
    parser = argparse.ArgumentParser(
        description="Generate a image or video from a text prompt or image using Wan"
    )
    parser.add_argument(
        "--task",
        type=str,
        default="t2v-14B",
        choices=list(WAN_CONFIGS.keys()),
        help="The task to run.")
    parser.add_argument(
        "--size",
        type=str,
        default="1280*720",
        choices=list(SIZE_CONFIGS.keys()),
        help="The area (width*height) of the generated video. For the I2V task, the aspect ratio of the output video will follow that of the input image."
    )
    parser.add_argument(
        "--frame_num",
        type=int,
        default=None,
        help="How many frames to sample from a image or video. The number should be 4n+1"
    )
    parser.add_argument(
        "--ckpt_dir",
        type=str,
        default=None,
        help="The path to the checkpoint directory.")
    parser.add_argument(
        "--offload_model",
        type=str2bool,
        default=None,
        help="Whether to offload the model to CPU after each model forward, reducing GPU memory usage."
    )
    parser.add_argument(
        "--ulysses_size",
        type=int,
        default=1,
        help="The size of the ulysses parallelism in DiT.")
    parser.add_argument(
        "--ring_size",
        type=int,
        default=1,
        help="The size of the ring attention parallelism in DiT.")
    parser.add_argument(
        "--t5_fsdp",
        action="store_true",
        default=False,
        help="Whether to use FSDP for T5.")
    parser.add_argument(
        "--t5_cpu",
        action="store_true",
        default=False,
        help="Whether to place T5 model on CPU.")
    parser.add_argument(
        "--dit_fsdp",
        action="store_true",
        default=False,
        help="Whether to use FSDP for DiT.")
    parser.add_argument(
        "--save_file",
        type=str,
        default=None,
        help="The file to save the generated image or video to.")
    parser.add_argument(
        "--prompt",
        type=str,
        default=None,
        help="The prompt to generate the image or video from.")
    parser.add_argument(
        "--use_prompt_extend",
        action="store_true",
        default=False,
        help="Whether to use prompt extend.")
    parser.add_argument(
        "--prompt_extend_method",
        type=str,
        default="local_qwen",
        choices=["dashscope", "local_qwen"],
        help="The prompt extend method to use.")
    parser.add_argument(
        "--prompt_extend_model",
        type=str,
        default=None,
        help="The prompt extend model to use.")
    parser.add_argument(
        "--prompt_extend_target_lang",
        type=str,
        default="ch",
        choices=["ch", "en"],
        help="The target language of prompt extend.")
    parser.add_argument(
        "--base_seed",
        type=int,
        default=-1,
        help="The seed to use for generating the image or video.")
    parser.add_argument(
        "--image",
        type=str,
        default=None,
        help="The image to generate the video from.")
    parser.add_argument(
        "--sample_solver",
        type=str,
        default='unipc',
        choices=['unipc', 'dpm++'],
        help="The solver used to sample.")
    parser.add_argument(
        "--sample_steps", type=int, default=None, help="The sampling steps.")
    parser.add_argument(
        "--sample_shift",
        type=float,
        default=None,
        help="Sampling shift factor for flow matching schedulers.")
    parser.add_argument(
        "--sample_guide_scale",
        type=float,
        default=5.0,
        help="Classifier free guidance scale.")

    args = parser.parse_args()

    _validate_args(args)

    return args


def _init_logging(rank):
    # logging
    if rank == 0:
        # set format
        logging.basicConfig(
            level=logging.INFO,
            format="[%(asctime)s] %(levelname)s: %(message)s",
            handlers=[logging.StreamHandler(stream=sys.stdout)])
    else:
        logging.basicConfig(level=logging.ERROR)

def generate(args):
    rank = int(os.getenv("RANK", 0))
    world_size = int(os.getenv("WORLD_SIZE", 1))
    local_rank = int(os.getenv("LOCAL_RANK", 0))
    device = local_rank
    _init_logging(rank)

    if args.offload_model is None:
        args.offload_model = False if world_size > 1 else True
        logging.info(
            f"offload_model is not specified, set to {args.offload_model}.")

    cfg = WAN_CONFIGS[args.task]
    if args.ulysses_size > 1:
        assert cfg.num_heads % args.ulysses_size == 0, f"`num_heads` must be divisible by `ulysses_size`."

    logging.info(f"Generation job args: {args}")
    logging.info(f"Generation model config: {cfg}")

    if dist.is_initialized():
        base_seed = [args.base_seed] if rank == 0 else [None]
        dist.broadcast_object_list(base_seed, src=0)
        args.base_seed = base_seed[0]

    logging.info("Creating WanI2V pipeline.")
    # wan_i2v = wan.WanI2V(
    wan_i2v = WanI2V(
        config=cfg,
        checkpoint_dir=args.ckpt_dir,
        device_id=device,
        rank=rank,
        t5_fsdp=args.t5_fsdp,
        dit_fsdp=args.dit_fsdp,
        use_usp=(args.ulysses_size > 1 or args.ring_size > 1),
        t5_cpu=args.t5_cpu,
    )

    def generate_i2v(prompt, img, seed, nf, speed):
        logging.info("Generating video ...")
        save_file = "output/%s-%s-%s-%s.mp4" % (seed, nf, speed, int(ttime()))
        video = wan_i2v.generate(
            prompt,
            img,
            max_area=MAX_AREA_CONFIGS[args.size],
            frame_num=int(nf)*16+1,  # args.frame_num
            shift=args.sample_shift,
            sample_solver=args.sample_solver,
            sampling_steps=args.sample_steps,
            guide_scale=args.sample_guide_scale,
            seed=seed,  # args.base_seed,
            offload_model=args.offload_model,
            speed=value2speed[speed]
        )
        if rank == 0:
            video_update = gr.update(visible=True, value=save_file)
            seed_update = gr.update(visible=True, value=seed)
            cache_video(
                tensor=video[None],
                save_file=save_file,
                fps=cfg.sample_fps,
                nrow=1,
                normalize=True,
                value_range=(-1, 1))
            return save_file, video_update, seed_update

    if rank == 0:
        from app_os import DEMO
        demo = DEMO(generate_i2v).demo
        demo.launch()


if __name__ == "__main__":
    args = _parse_args()
    generate(args)
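The seconds slider from the UI reaches the pipeline as frame_num = int(nf)*16+1, which matches the table in the module docstring only for whole seconds because int() truncates the 0.5-step slider values. A purely illustrative check of that mapping:

# Illustrative only: how the Gradio seconds slider becomes a frame count in app.py.
for nf in [3, 3.5, 4, 4.5, 5]:
    frame_num = int(nf) * 16 + 1
    print(nf, "->", frame_num)
# 3 -> 49, 3.5 -> 49, 4 -> 65, 4.5 -> 65, 5 -> 81
# (reaching the half-second entries 57 and 73 from the docstring would need
#  something like round(nf * 16) + 1 instead)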
app_os.py
ADDED
@@ -0,0 +1,90 @@
# -*- coding: utf-8 -*-
"""
This is the main file for the Gradio web demo. It uses the CogVideoX-5B model to generate videos.
Set the environment variable OPENAI_API_KEY to use the OpenAI API to enhance the prompt.

Usage:
    OpenAI_API_KEY=your_openai_api_key OPENAI_BASE_URL=https://api.openai.com/v1 python inference/gradio_web_demo.py
"""

import logging
import math
import os
import sys
from fastapi.responses import PlainTextResponse
from PIL import Image
from huggingface_hub.utils.tqdm import progress_bar_states
from numpy import ndarray

current_dir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(current_dir, '../'))
import random
import threading
import time

import cv2
import tempfile
import imageio_ffmpeg
import gradio as gr

from datetime import datetime, timedelta

os.makedirs("./output", exist_ok=True)
os.makedirs("./input", exist_ok=True)
os.makedirs("./gradio_tmp", exist_ok=True)

class DEMO:
    def __init__(self, generate):
        with gr.Blocks() as self.demo:
            gr.Markdown("""
            <div style="text-align: center; font-size: 32px; font-weight: bold; margin-bottom: 20px;">
                AniSora-Bilibili动画视频生成模型
            </div>
            """)  # title: "AniSora - Bilibili anime video generation model"
            with gr.Row():
                with gr.Column():
                    with gr.Accordion("I2V: Image Input (cannot be used simultaneously with video input)", open=True):
                        image_input = gr.Image(label="Input Image")
                        prompt = gr.Textbox(label="Prompt (Less than 200 Words)", placeholder="Enter your prompt here", lines=5)
                        nf = gr.Slider(label="秒数", minimum=3, maximum=5, step=0.5, value=5)           # label: "seconds"
                        speed = gr.Radio(label="加速模式", value='加速版', choices=['原版', '加速版'])  # label: "acceleration mode"; choices: original / accelerated
                    with gr.Group():
                        with gr.Column():
                            with gr.Row():
                                seed_param = gr.Number(
                                    label="Inference Seed (Enter a positive number, -1 for random)", value=233
                                )

                    generate_button = gr.Button("🎬 Generate Video")

                with gr.Column():
                    video_output = gr.Video(label="Generated Video")
                    with gr.Row():
                        download_video_button = gr.File(label="📥 Download Video", visible=False)
                        seed_text = gr.Number(label="Seed Used for Video Generation", visible=False)

            generate_button.click(
                generate,
                inputs=[prompt, image_input, seed_param, nf, speed],
                outputs=[video_output, download_video_button, seed_text],
            )


if __name__ == "__main__":
    from fastapi import FastAPI
    import uvicorn

    app = FastAPI()


    @app.get('/v2/health/ready')
    def health():
        return ""


    # NOTE: DEMO requires a generate callable; app.py shows the intended wiring.
    demoo = DEMO()
    demo = demoo.demo
    demo.queue(max_size=15)
    app = gr.mount_gradio_app(app, demo, path="/api/adhoc/ttv/demo")
    uvicorn.run(app, host="0.0.0.0", port=26780)
    # demo.launch(server_name="0.0.0.0",server_port=16780)
generate-pi-i2v-myinfer-oss-stu.py
ADDED
@@ -0,0 +1,452 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# -*- coding: utf-8 -*-
|
2 |
+
|
3 |
+
# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
|
4 |
+
import os
|
5 |
+
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"
|
6 |
+
import argparse
|
7 |
+
from datetime import datetime
|
8 |
+
import logging
|
9 |
+
import sys
|
10 |
+
import warnings
|
11 |
+
|
12 |
+
warnings.filterwarnings('ignore')
|
13 |
+
|
14 |
+
import torch, random
|
15 |
+
import torch.distributed as dist
|
16 |
+
from PIL import Image
|
17 |
+
|
18 |
+
import wan
|
19 |
+
from wan.image2video_mdinfer_oss_stu import WanI2V
|
20 |
+
from wan.text2video import WanT2V
|
21 |
+
from wan.configs import WAN_CONFIGS, SIZE_CONFIGS, MAX_AREA_CONFIGS, SUPPORTED_SIZES
|
22 |
+
from wan.utils.prompt_extend import DashScopePromptExpander, QwenPromptExpander
|
23 |
+
from wan.utils.utils import cache_video, cache_image, str2bool
|
24 |
+
|
25 |
+
EXAMPLE_PROMPT = {
|
26 |
+
"t2v-1.3B": {
|
27 |
+
"prompt": "Two anthropomorphic cats in comfy boxing gear and bright gloves fight intensely on a spotlighted stage.",
|
28 |
+
},
|
29 |
+
"t2v-14B": {
|
30 |
+
"prompt": "Two anthropomorphic cats in comfy boxing gear and bright gloves fight intensely on a spotlighted stage.",
|
31 |
+
},
|
32 |
+
"t2i-14B": {
|
33 |
+
"prompt": "一个朴素端庄的美人",
|
34 |
+
},
|
35 |
+
"i2v-14B": {
|
36 |
+
"prompt":
|
37 |
+
"Summer beach vacation style, a white cat wearing sunglasses sits on a surfboard. The fluffy-furred feline gazes directly at the camera with a relaxed expression. Blurred beach scenery forms the background featuring crystal-clear waters, distant green hills, and a blue sky dotted with white clouds. The cat assumes a naturally relaxed posture, as if savoring the sea breeze and warm sunlight. A close-up shot highlights the feline's intricate details and the refreshing atmosphere of the seaside.",
|
38 |
+
"image":
|
39 |
+
"examples/i2v_input.JPG",
|
40 |
+
},
|
41 |
+
}
|
42 |
+
|
43 |
+
|
44 |
+
def _validate_args(args):
|
45 |
+
# Basic check
|
46 |
+
assert args.ckpt_dir is not None, "Please specify the checkpoint directory."
|
47 |
+
assert args.task in WAN_CONFIGS, f"Unsupport task: {args.task}"
|
48 |
+
assert args.task in EXAMPLE_PROMPT, f"Unsupport task: {args.task}"
|
49 |
+
|
50 |
+
# The default sampling steps are 40 for image-to-video tasks and 50 for text-to-video tasks.
|
51 |
+
if args.sample_steps is None:
|
52 |
+
args.sample_steps = 40 if "i2v" in args.task else 50
|
53 |
+
|
54 |
+
if args.sample_shift is None:
|
55 |
+
args.sample_shift = 5.0
|
56 |
+
if "i2v" in args.task and args.size in ["832*480", "480*832"]:
|
57 |
+
args.sample_shift = 3.0
|
58 |
+
|
59 |
+
# The default number of frames are 1 for text-to-image tasks and 81 for other tasks.
|
60 |
+
if args.frame_num is None:
|
61 |
+
args.frame_num = 1 if "t2i" in args.task else 81
|
62 |
+
|
63 |
+
# T2I frame_num check
|
64 |
+
if "t2i" in args.task:
|
65 |
+
assert args.frame_num == 1, f"Unsupport frame_num {args.frame_num} for task {args.task}"
|
66 |
+
|
67 |
+
args.base_seed = args.base_seed if args.base_seed >= 0 else random.randint(
|
68 |
+
0, sys.maxsize)
|
69 |
+
# Size check
|
70 |
+
assert args.size in SUPPORTED_SIZES[
|
71 |
+
args.
|
72 |
+
task], f"Unsupport size {args.size} for task {args.task}, supported sizes are: {', '.join(SUPPORTED_SIZES[args.task])}"
|
73 |
+
|
74 |
+
args.sample_steps=96#64########todo
|
75 |
+
|
76 |
+
def _parse_args():
|
77 |
+
parser = argparse.ArgumentParser(
|
78 |
+
description="Generate a image or video from a text prompt or image using Wan"
|
79 |
+
)
|
80 |
+
parser.add_argument(
|
81 |
+
"--task",
|
82 |
+
type=str,
|
83 |
+
default="t2v-14B",
|
84 |
+
choices=list(WAN_CONFIGS.keys()),
|
85 |
+
help="The task to run.")
|
86 |
+
parser.add_argument(
|
87 |
+
"--size",
|
88 |
+
type=str,
|
89 |
+
default="1280*720",
|
90 |
+
choices=list(SIZE_CONFIGS.keys()),
|
91 |
+
help="The area (width*height) of the generated video. For the I2V task, the aspect ratio of the output video will follow that of the input image."
|
92 |
+
)
|
93 |
+
parser.add_argument(
|
94 |
+
"--frame_num",
|
95 |
+
type=int,
|
96 |
+
default=None,
|
97 |
+
help="How many frames to sample from a image or video. The number should be 4n+1"
|
98 |
+
)
|
99 |
+
parser.add_argument(
|
100 |
+
"--ckpt_dir",
|
101 |
+
type=str,
|
102 |
+
default=None,
|
103 |
+
help="The path to the checkpoint directory.")
|
104 |
+
parser.add_argument(
|
105 |
+
"--offload_model",
|
106 |
+
type=str2bool,
|
107 |
+
default=None,
|
108 |
+
help="Whether to offload the model to CPU after each model forward, reducing GPU memory usage."
|
109 |
+
)
|
110 |
+
parser.add_argument(
|
111 |
+
"--ulysses_size",
|
112 |
+
type=int,
|
113 |
+
default=1,
|
114 |
+
help="The size of the ulysses parallelism in DiT.")
|
115 |
+
parser.add_argument(
|
116 |
+
"--ring_size",
|
117 |
+
type=int,
|
118 |
+
default=1,
|
119 |
+
help="The size of the ring attention parallelism in DiT.")
|
120 |
+
parser.add_argument(
|
121 |
+
"--t5_fsdp",
|
122 |
+
action="store_true",
|
123 |
+
default=False,
|
124 |
+
help="Whether to use FSDP for T5.")
|
125 |
+
parser.add_argument(
|
126 |
+
"--t5_cpu",
|
127 |
+
action="store_true",
|
128 |
+
default=False,
|
129 |
+
help="Whether to place T5 model on CPU.")
|
130 |
+
parser.add_argument(
|
131 |
+
"--dit_fsdp",
|
132 |
+
action="store_true",
|
133 |
+
default=False,
|
134 |
+
help="Whether to use FSDP for DiT.")
|
135 |
+
parser.add_argument(
|
136 |
+
"--save_file",
|
137 |
+
type=str,
|
138 |
+
default=None,
|
139 |
+
help="The file to save the generated image or video to.")
|
140 |
+
parser.add_argument(
|
141 |
+
"--prompt",
|
142 |
+
type=str,
|
143 |
+
default=None,
|
144 |
+
help="The prompt to generate the image or video from.")
|
145 |
+
parser.add_argument(
|
146 |
+
"--use_prompt_extend",
|
147 |
+
action="store_true",
|
148 |
+
default=False,
|
149 |
+
help="Whether to use prompt extend.")
|
150 |
+
parser.add_argument(
|
151 |
+
"--student_steps",
|
152 |
+
type=int,
|
153 |
+
default=20,
|
154 |
+
help="The student steps during searching!")
|
155 |
+
parser.add_argument(
|
156 |
+
"--norm",
|
157 |
+
type=float,
|
158 |
+
default=2.0,
|
159 |
+
help="Norm of the cost function.")
|
160 |
+
parser.add_argument(
|
161 |
+
"--frame_type",
|
162 |
+
type=str,
|
163 |
+
default='all',
|
164 |
+
help="The cost frames of video.")
|
165 |
+
parser.add_argument(
|
166 |
+
"--channel_type",
|
167 |
+
type=str,
|
168 |
+
default="all",
|
169 |
+
choices=['2', '4', '8','12',"all"],
|
170 |
+
help="The cost channel of video.")
|
171 |
+
parser.add_argument(
|
172 |
+
"--prompt_extend_method",
|
173 |
+
type=str,
|
174 |
+
default="local_qwen",
|
175 |
+
choices=["dashscope", "local_qwen"],
|
176 |
+
help="The prompt extend method to use.")
|
177 |
+
parser.add_argument(
|
178 |
+
"--prompt_extend_model",
|
179 |
+
type=str,
|
180 |
+
default=None,
|
181 |
+
help="The prompt extend model to use.")
|
182 |
+
parser.add_argument(
|
183 |
+
"--prompt_extend_target_lang",
|
184 |
+
type=str,
|
185 |
+
default="ch",
|
186 |
+
choices=["ch", "en"],
|
187 |
+
help="The target language of prompt extend.")
|
188 |
+
parser.add_argument(
|
189 |
+
"--base_seed",
|
190 |
+
type=int,
|
191 |
+
default=-1,
|
192 |
+
help="The seed to use for generating the image or video.")
|
193 |
+
parser.add_argument(
|
194 |
+
"--image",
|
195 |
+
type=str,
|
196 |
+
default=None,
|
197 |
+
help="The image to generate the video from.")
|
198 |
+
parser.add_argument(
|
199 |
+
"--sample_solver",
|
200 |
+
type=str,
|
201 |
+
default='unipc',
|
202 |
+
choices=['unipc', 'dpm++'],
|
203 |
+
help="The solver used to sample.")
|
204 |
+
parser.add_argument(
|
205 |
+
"--sample_steps", type=int, default=None, help="The sampling steps.")
|
206 |
+
parser.add_argument(
|
207 |
+
"--sample_shift",
|
208 |
+
type=float,
|
209 |
+
default=None,
|
210 |
+
help="Sampling shift factor for flow matching schedulers.")
|
211 |
+
parser.add_argument(
|
212 |
+
"--sample_guide_scale",
|
213 |
+
type=float,
|
214 |
+
default=5.0,
|
215 |
+
help="Classifier free guidance scale.")
|
216 |
+
|
217 |
+
args = parser.parse_args()
|
218 |
+
|
219 |
+
_validate_args(args)
|
220 |
+
|
221 |
+
return args
|
222 |
+
|
223 |
+
|
224 |
+
def _init_logging(rank):
|
225 |
+
# logging
|
226 |
+
if rank == 0:
|
227 |
+
# set format
|
228 |
+
logging.basicConfig(
|
229 |
+
level=logging.INFO,
|
230 |
+
format="[%(asctime)s] %(levelname)s: %(message)s",
|
231 |
+
handlers=[logging.StreamHandler(stream=sys.stdout)])
|
232 |
+
else:
|
233 |
+
logging.basicConfig(level=logging.ERROR)
|
234 |
+
|
235 |
+
|
236 |
+
def generate(args):
|
237 |
+
rank = int(os.getenv("RANK", 0))
|
238 |
+
world_size = int(os.getenv("WORLD_SIZE", 1))
|
239 |
+
local_rank = int(os.getenv("LOCAL_RANK", 0))
|
240 |
+
device = local_rank
|
241 |
+
_init_logging(rank)
|
242 |
+
|
243 |
+
if args.offload_model is None:
|
244 |
+
args.offload_model = False if world_size > 1 else True
|
245 |
+
logging.info(
|
246 |
+
f"offload_model is not specified, set to {args.offload_model}.")
|
247 |
+
if world_size > 1:
|
248 |
+
torch.cuda.set_device(local_rank)
|
249 |
+
dist.init_process_group(
|
250 |
+
backend="nccl",
|
251 |
+
init_method="env://",
|
252 |
+
rank=rank,
|
253 |
+
world_size=world_size)
|
254 |
+
else:
|
255 |
+
assert not (
|
256 |
+
args.t5_fsdp or args.dit_fsdp
|
257 |
+
), f"t5_fsdp and dit_fsdp are not supported in non-distributed environments."
|
258 |
+
assert not (
|
259 |
+
args.ulysses_size > 1 or args.ring_size > 1
|
260 |
+
), f"context parallel are not supported in non-distributed environments."
|
261 |
+
|
262 |
+
if args.ulysses_size > 1 or args.ring_size > 1:
|
263 |
+
assert args.ulysses_size * args.ring_size == world_size, f"The number of ulysses_size and ring_size should be equal to the world size."
|
264 |
+
from xfuser.core.distributed import (initialize_model_parallel,
|
265 |
+
init_distributed_environment)
|
266 |
+
init_distributed_environment(
|
267 |
+
rank=dist.get_rank(), world_size=dist.get_world_size())
|
268 |
+
|
269 |
+
initialize_model_parallel(
|
270 |
+
sequence_parallel_degree=dist.get_world_size(),
|
271 |
+
ring_degree=args.ring_size,
|
272 |
+
ulysses_degree=args.ulysses_size,
|
273 |
+
)
|
274 |
+
|
275 |
+
if args.use_prompt_extend:
|
276 |
+
if args.prompt_extend_method == "dashscope":
|
277 |
+
prompt_expander = DashScopePromptExpander(
|
278 |
+
model_name=args.prompt_extend_model, is_vl="i2v" in args.task)
|
279 |
+
elif args.prompt_extend_method == "local_qwen":
|
280 |
+
prompt_expander = QwenPromptExpander(
|
281 |
+
model_name=args.prompt_extend_model,
|
282 |
+
is_vl="i2v" in args.task,
|
283 |
+
device=rank)
|
284 |
+
else:
|
285 |
+
raise NotImplementedError(
|
286 |
+
f"Unsupport prompt_extend_method: {args.prompt_extend_method}")
|
287 |
+
|
288 |
+
cfg = WAN_CONFIGS[args.task]
|
289 |
+
if args.ulysses_size > 1:
|
290 |
+
assert cfg.num_heads % args.ulysses_size == 0, f"`num_heads` must be divisible by `ulysses_size`."
|
291 |
+
|
292 |
+
logging.info(f"Generation job args: {args}")
|
293 |
+
logging.info(f"Generation model config: {cfg}")
|
294 |
+
|
295 |
+
if dist.is_initialized():
|
296 |
+
base_seed = [args.base_seed] if rank == 0 else [None]
|
297 |
+
dist.broadcast_object_list(base_seed, src=0)
|
298 |
+
args.base_seed = base_seed[0]
|
299 |
+
|
300 |
+
if "t2v" in args.task or "t2i" in args.task:
|
301 |
+
opt_dir=args.image
|
302 |
+
with open(args.prompt,"r")as f:
|
303 |
+
lines=f.read().strip("\n").split("\n")
|
304 |
+
# if args.prompt is None:
|
305 |
+
# args.prompt = EXAMPLE_PROMPT[args.task]["prompt"]
|
306 |
+
|
307 |
+
logging.info("Creating WanT2V pipeline.")
|
308 |
+
# wan_t2v = wan.WanT2V(
|
309 |
+
wan_t2v = WanT2V(
|
310 |
+
config=cfg,
|
311 |
+
checkpoint_dir=args.ckpt_dir,
|
312 |
+
device_id=device,
|
313 |
+
rank=rank,
|
314 |
+
t5_fsdp=args.t5_fsdp,
|
315 |
+
dit_fsdp=args.dit_fsdp,
|
316 |
+
use_usp=(args.ulysses_size > 1 or args.ring_size > 1),
|
317 |
+
t5_cpu=args.t5_cpu,
|
318 |
+
)
|
319 |
+
for idx,line in enumerate(lines):
|
320 |
+
args.save_file="%s/%s.mp4"%(opt_dir,idx)
|
321 |
+
prompt,image=line.split("@@")
|
322 |
+
args.image=image
|
323 |
+
args.prompt=prompt
|
324 |
+
logging.info(f"Input prompt: {args.prompt}")
|
325 |
+
if args.use_prompt_extend:
|
326 |
+
logging.info("Extending prompt ...")
|
327 |
+
if rank == 0:
|
328 |
+
prompt_output = prompt_expander(
|
329 |
+
args.prompt,
|
330 |
+
tar_lang=args.prompt_extend_target_lang,
|
331 |
+
seed=args.base_seed)
|
332 |
+
if prompt_output.status == False:
|
333 |
+
logging.info(
|
334 |
+
f"Extending prompt failed: {prompt_output.message}")
|
335 |
+
logging.info("Falling back to original prompt.")
|
336 |
+
input_prompt = args.prompt
|
337 |
+
else:
|
338 |
+
input_prompt = prompt_output.prompt
|
339 |
+
input_prompt = [input_prompt]
|
340 |
+
else:
|
341 |
+
input_prompt = [None]
|
342 |
+
if dist.is_initialized():
|
343 |
+
dist.broadcast_object_list(input_prompt, src=0)
|
344 |
+
args.prompt = input_prompt[0]
|
345 |
+
logging.info(f"Extended prompt: {args.prompt}")
|
346 |
+
|
347 |
+
logging.info(
|
348 |
+
f"Generating {'image' if 't2i' in args.task else 'video'} ...")
|
349 |
+
video = wan_t2v.generate(
|
350 |
+
args.prompt,
|
351 |
+
size=SIZE_CONFIGS[args.size],
|
352 |
+
frame_num=args.frame_num,
|
353 |
+
shift=args.sample_shift,
|
354 |
+
sample_solver=args.sample_solver,
|
355 |
+
sampling_steps=args.sample_steps,
|
356 |
+
guide_scale=args.sample_guide_scale,
|
357 |
+
seed=args.base_seed,
|
358 |
+
offload_model=args.offload_model)
|
359 |
+
if rank==0:
|
360 |
+
cache_video(
|
361 |
+
tensor=video[None],
|
362 |
+
save_file=args.save_file,
|
363 |
+
fps=cfg.sample_fps,
|
364 |
+
nrow=1,
|
365 |
+
normalize=True,
|
366 |
+
value_range=(-1, 1))
|
367 |
+
else:
|
368 |
+
if args.prompt is None:
|
369 |
+
args.prompt = EXAMPLE_PROMPT[args.task]["prompt"]
|
370 |
+
if args.image is None:
|
371 |
+
args.image = EXAMPLE_PROMPT[args.task]["image"]
|
372 |
+
logging.info(f"Input prompt: {args.prompt}")
|
373 |
+
logging.info(f"Input image: {args.image}")
|
374 |
+
|
375 |
+
opt_dir=args.image
|
376 |
+
with open(args.prompt,"r",encoding="gbk")as f:
|
377 |
+
lines=f.read().strip("\n").split("\n")
|
378 |
+
logging.info("Creating WanI2V pipeline.")
|
379 |
+
# wan_i2v = wan.WanI2V(
|
380 |
+
wan_i2v = WanI2V(
|
381 |
+
config=cfg,
|
382 |
+
checkpoint_dir=args.ckpt_dir,
|
383 |
+
device_id=device,
|
384 |
+
rank=rank,
|
385 |
+
t5_fsdp=args.t5_fsdp,
|
386 |
+
dit_fsdp=args.dit_fsdp,
|
387 |
+
use_usp=(args.ulysses_size > 1 or args.ring_size > 1),
|
388 |
+
t5_cpu=args.t5_cpu,
|
389 |
+
)
|
390 |
+
|
391 |
+
for idx,line in enumerate(lines):
|
392 |
+
args.save_file="%s/%s.mp4"%(opt_dir,idx)
|
393 |
+
prompt,image=line.split("@@")
|
394 |
+
args.image=image
|
395 |
+
args.prompt=prompt
|
396 |
+
img = Image.open(args.image).convert("RGB")
|
397 |
+
if args.use_prompt_extend:
|
398 |
+
logging.info("Extending prompt ...")
|
399 |
+
if rank == 0:
|
400 |
+
prompt_output = prompt_expander(
|
401 |
+
args.prompt,
|
402 |
+
tar_lang=args.prompt_extend_target_lang,
|
403 |
+
image=img,
|
404 |
+
seed=args.base_seed)
|
405 |
+
if prompt_output.status == False:
|
406 |
+
logging.info(
|
407 |
+
f"Extending prompt failed: {prompt_output.message}")
|
408 |
+
logging.info("Falling back to original prompt.")
|
409 |
+
input_prompt = args.prompt
|
410 |
+
else:
|
411 |
+
input_prompt = prompt_output.prompt
|
412 |
+
input_prompt = [input_prompt]
|
413 |
+
else:
|
414 |
+
input_prompt = [None]
|
415 |
+
if dist.is_initialized():
|
416 |
+
dist.broadcast_object_list(input_prompt, src=0)
|
417 |
+
args.prompt = input_prompt[0]
|
418 |
+
logging.info(f"Extended prompt: {args.prompt}")
|
419 |
+
logging.info("Generating video ...")
|
420 |
+
if os.path.exists(args.save_file)==False:
|
421 |
+
video = wan_i2v.generate(
|
422 |
+
args.prompt,
|
423 |
+
img,
|
424 |
+
max_area=MAX_AREA_CONFIGS[args.size],
|
425 |
+
frame_num=args.frame_num,
|
426 |
+
shift=args.sample_shift,
|
427 |
+
sample_solver=args.sample_solver,
|
428 |
+
sampling_steps=args.sample_steps,
|
429 |
+
guide_scale=args.sample_guide_scale,
|
430 |
+
seed=args.base_seed,
|
431 |
+
offload_model=args.offload_model,
|
432 |
+
|
433 |
+
student_steps=16,
|
434 |
+
norm=2,
|
435 |
+
frame_type="4",
|
436 |
+
channel_type="all",
|
437 |
+
|
438 |
+
)
|
439 |
+
if rank==0:
|
440 |
+
cache_video(
|
441 |
+
tensor=video[None],
|
442 |
+
save_file=args.save_file,
|
443 |
+
fps=cfg.sample_fps,
|
444 |
+
nrow=1,
|
445 |
+
normalize=True,
|
446 |
+
value_range=(-1, 1))
|
447 |
+
logging.info("Finished.")
|
448 |
+
|
449 |
+
|
450 |
+
if __name__ == "__main__":
|
451 |
+
args = _parse_args()
|
452 |
+
generate(args)
|
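(Usage sketch for the student script above, with hypothetical paths: --prompt points to a text file whose lines are formatted as prompt@@image_path, and --image is reused as the output directory, so clips are saved as <dir>/0.mp4, <dir>/1.mp4, ... The OSS settings student_steps=16, norm=2, frame_type="4", channel_type="all" are hard-coded in the generate() call rather than exposed as flags.)

python generate-pi-i2v-myinfer-oss-stu.py --task i2v-14B --size 1280*720 --ckpt_dir ./Wan2.1-I2V-14B-720P --prompt ./prompts.txt --image ./outputs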
generate-pi-i2v-myinfer-oss-tea.py
ADDED
@@ -0,0 +1,453 @@
1 |
+
# -*- coding: utf-8 -*-
|
2 |
+
|
3 |
+
# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
|
4 |
+
import os
|
5 |
+
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"
|
6 |
+
import argparse
|
7 |
+
from datetime import datetime
|
8 |
+
import logging
|
9 |
+
import sys
|
10 |
+
import warnings
|
11 |
+
|
12 |
+
warnings.filterwarnings('ignore')
|
13 |
+
|
14 |
+
import torch, random
|
15 |
+
import torch.distributed as dist
|
16 |
+
from PIL import Image
|
17 |
+
|
18 |
+
import wan
|
19 |
+
from wan.image2video_mdinfer_oss_tea import WanI2V
|
20 |
+
from wan.text2video import WanT2V
|
21 |
+
from wan.configs import WAN_CONFIGS, SIZE_CONFIGS, MAX_AREA_CONFIGS, SUPPORTED_SIZES
|
22 |
+
from wan.utils.prompt_extend import DashScopePromptExpander, QwenPromptExpander
|
23 |
+
from wan.utils.utils import cache_video, cache_image, str2bool
|
24 |
+
|
25 |
+
EXAMPLE_PROMPT = {
|
26 |
+
"t2v-1.3B": {
|
27 |
+
"prompt": "Two anthropomorphic cats in comfy boxing gear and bright gloves fight intensely on a spotlighted stage.",
|
28 |
+
},
|
29 |
+
"t2v-14B": {
|
30 |
+
"prompt": "Two anthropomorphic cats in comfy boxing gear and bright gloves fight intensely on a spotlighted stage.",
|
31 |
+
},
|
32 |
+
"t2i-14B": {
|
33 |
+
"prompt": "一个朴素端庄的美人",
|
34 |
+
},
|
35 |
+
"i2v-14B": {
|
36 |
+
"prompt":
|
37 |
+
"Summer beach vacation style, a white cat wearing sunglasses sits on a surfboard. The fluffy-furred feline gazes directly at the camera with a relaxed expression. Blurred beach scenery forms the background featuring crystal-clear waters, distant green hills, and a blue sky dotted with white clouds. The cat assumes a naturally relaxed posture, as if savoring the sea breeze and warm sunlight. A close-up shot highlights the feline's intricate details and the refreshing atmosphere of the seaside.",
|
38 |
+
"image":
|
39 |
+
"examples/i2v_input.JPG",
|
40 |
+
},
|
41 |
+
}
|
42 |
+
|
43 |
+
|
44 |
+
def _validate_args(args):
|
45 |
+
# Basic check
|
46 |
+
assert args.ckpt_dir is not None, "Please specify the checkpoint directory."
|
47 |
+
assert args.task in WAN_CONFIGS, f"Unsupported task: {args.task}"
|
48 |
+
assert args.task in EXAMPLE_PROMPT, f"Unsupported task: {args.task}"
|
49 |
+
|
50 |
+
# The default sampling steps are 40 for image-to-video tasks and 50 for text-to-video tasks.
|
51 |
+
if args.sample_steps is None:
|
52 |
+
args.sample_steps = 40 if "i2v" in args.task else 50
|
53 |
+
|
54 |
+
if args.sample_shift is None:
|
55 |
+
args.sample_shift = 5.0
|
56 |
+
if "i2v" in args.task and args.size in ["832*480", "480*832"]:
|
57 |
+
args.sample_shift = 3.0
|
58 |
+
|
59 |
+
# The default number of frames is 1 for text-to-image tasks and 81 for other tasks.
|
60 |
+
if args.frame_num is None:
|
61 |
+
args.frame_num = 1 if "t2i" in args.task else 81
|
62 |
+
|
63 |
+
# T2I frame_num check
|
64 |
+
if "t2i" in args.task:
|
65 |
+
assert args.frame_num == 1, f"Unsupported frame_num {args.frame_num} for task {args.task}"
|
66 |
+
|
67 |
+
args.base_seed = args.base_seed if args.base_seed >= 0 else random.randint(
|
68 |
+
0, sys.maxsize)
|
69 |
+
# Size check
|
70 |
+
assert args.size in SUPPORTED_SIZES[
|
71 |
+
args.
|
72 |
+
task], f"Unsupport size {args.size} for task {args.task}, supported sizes are: {', '.join(SUPPORTED_SIZES[args.task])}"
|
73 |
+
|
74 |
+
|
75 |
+
def _parse_args():
|
76 |
+
parser = argparse.ArgumentParser(
|
77 |
+
description="Generate a image or video from a text prompt or image using Wan"
|
78 |
+
)
|
79 |
+
parser.add_argument(
|
80 |
+
"--task",
|
81 |
+
type=str,
|
82 |
+
default="t2v-14B",
|
83 |
+
choices=list(WAN_CONFIGS.keys()),
|
84 |
+
help="The task to run.")
|
85 |
+
parser.add_argument(
|
86 |
+
"--size",
|
87 |
+
type=str,
|
88 |
+
default="1280*720",
|
89 |
+
choices=list(SIZE_CONFIGS.keys()),
|
90 |
+
help="The area (width*height) of the generated video. For the I2V task, the aspect ratio of the output video will follow that of the input image."
|
91 |
+
)
|
92 |
+
parser.add_argument(
|
93 |
+
"--frame_num",
|
94 |
+
type=int,
|
95 |
+
default=None,
|
96 |
+
help="How many frames to sample from a image or video. The number should be 4n+1"
|
97 |
+
)
|
98 |
+
parser.add_argument(
|
99 |
+
"--ckpt_dir",
|
100 |
+
type=str,
|
101 |
+
default=None,
|
102 |
+
help="The path to the checkpoint directory.")
|
103 |
+
parser.add_argument(
|
104 |
+
"--offload_model",
|
105 |
+
type=str2bool,
|
106 |
+
default=None,
|
107 |
+
help="Whether to offload the model to CPU after each model forward, reducing GPU memory usage."
|
108 |
+
)
|
109 |
+
parser.add_argument(
|
110 |
+
"--ulysses_size",
|
111 |
+
type=int,
|
112 |
+
default=1,
|
113 |
+
help="The size of the ulysses parallelism in DiT.")
|
114 |
+
parser.add_argument(
|
115 |
+
"--ring_size",
|
116 |
+
type=int,
|
117 |
+
default=1,
|
118 |
+
help="The size of the ring attention parallelism in DiT.")
|
119 |
+
parser.add_argument(
|
120 |
+
"--t5_fsdp",
|
121 |
+
action="store_true",
|
122 |
+
default=False,
|
123 |
+
help="Whether to use FSDP for T5.")
|
124 |
+
parser.add_argument(
|
125 |
+
"--t5_cpu",
|
126 |
+
action="store_true",
|
127 |
+
default=False,
|
128 |
+
help="Whether to place T5 model on CPU.")
|
129 |
+
parser.add_argument(
|
130 |
+
"--dit_fsdp",
|
131 |
+
action="store_true",
|
132 |
+
default=False,
|
133 |
+
help="Whether to use FSDP for DiT.")
|
134 |
+
parser.add_argument(
|
135 |
+
"--save_file",
|
136 |
+
type=str,
|
137 |
+
default=None,
|
138 |
+
help="The file to save the generated image or video to.")
|
139 |
+
parser.add_argument(
|
140 |
+
"--prompt",
|
141 |
+
type=str,
|
142 |
+
default=None,
|
143 |
+
help="The prompt to generate the image or video from.")
|
144 |
+
parser.add_argument(
|
145 |
+
"--use_prompt_extend",
|
146 |
+
action="store_true",
|
147 |
+
default=False,
|
148 |
+
help="Whether to use prompt extend.")
|
149 |
+
parser.add_argument(
|
150 |
+
"--student_steps",
|
151 |
+
type=int,
|
152 |
+
default=20,
|
153 |
+
help="The student steps during searching!")
|
154 |
+
parser.add_argument(
|
155 |
+
"--norm",
|
156 |
+
type=float,
|
157 |
+
default=2.0,
|
158 |
+
help="Norm of the cost function.")
|
159 |
+
parser.add_argument(
|
160 |
+
"--frame_type",
|
161 |
+
type=str,
|
162 |
+
default='all',
|
163 |
+
help="The cost frames of video.")
|
164 |
+
parser.add_argument(
|
165 |
+
"--channel_type",
|
166 |
+
type=str,
|
167 |
+
default="all",
|
168 |
+
choices=['2', '4', '8','12',"all"],
|
169 |
+
help="The cost channel of video.")
|
170 |
+
parser.add_argument(
|
171 |
+
"--prompt_extend_method",
|
172 |
+
type=str,
|
173 |
+
default="local_qwen",
|
174 |
+
choices=["dashscope", "local_qwen"],
|
175 |
+
help="The prompt extend method to use.")
|
176 |
+
parser.add_argument(
|
177 |
+
"--prompt_extend_model",
|
178 |
+
type=str,
|
179 |
+
default=None,
|
180 |
+
help="The prompt extend model to use.")
|
181 |
+
parser.add_argument(
|
182 |
+
"--prompt_extend_target_lang",
|
183 |
+
type=str,
|
184 |
+
default="ch",
|
185 |
+
choices=["ch", "en"],
|
186 |
+
help="The target language of prompt extend.")
|
187 |
+
parser.add_argument(
|
188 |
+
"--base_seed",
|
189 |
+
type=int,
|
190 |
+
default=-1,
|
191 |
+
help="The seed to use for generating the image or video.")
|
192 |
+
parser.add_argument(
|
193 |
+
"--image",
|
194 |
+
type=str,
|
195 |
+
default=None,
|
196 |
+
help="The image to generate the video from.")
|
197 |
+
parser.add_argument(
|
198 |
+
"--sample_solver",
|
199 |
+
type=str,
|
200 |
+
default='unipc',
|
201 |
+
choices=['unipc', 'dpm++'],
|
202 |
+
help="The solver used to sample.")
|
203 |
+
parser.add_argument(
|
204 |
+
"--sample_steps", type=int, default=None, help="The sampling steps.")
|
205 |
+
parser.add_argument(
|
206 |
+
"--sample_shift",
|
207 |
+
type=float,
|
208 |
+
default=None,
|
209 |
+
help="Sampling shift factor for flow matching schedulers.")
|
210 |
+
parser.add_argument(
|
211 |
+
"--sample_guide_scale",
|
212 |
+
type=float,
|
213 |
+
default=5.0,
|
214 |
+
help="Classifier free guidance scale.")
|
215 |
+
|
216 |
+
args = parser.parse_args()
|
217 |
+
|
218 |
+
_validate_args(args)
|
219 |
+
|
220 |
+
return args
|
221 |
+
|
222 |
+
|
223 |
+
def _init_logging(rank):
|
224 |
+
# logging
|
225 |
+
if rank == 0:
|
226 |
+
# set format
|
227 |
+
logging.basicConfig(
|
228 |
+
level=logging.INFO,
|
229 |
+
format="[%(asctime)s] %(levelname)s: %(message)s",
|
230 |
+
handlers=[logging.StreamHandler(stream=sys.stdout)])
|
231 |
+
else:
|
232 |
+
logging.basicConfig(level=logging.ERROR)
|
233 |
+
|
234 |
+
|
235 |
+
def generate(args):
|
236 |
+
rank = int(os.getenv("RANK", 0))
|
237 |
+
world_size = int(os.getenv("WORLD_SIZE", 1))
|
238 |
+
local_rank = int(os.getenv("LOCAL_RANK", 0))
|
239 |
+
device = local_rank
|
240 |
+
_init_logging(rank)
|
241 |
+
|
242 |
+
if args.offload_model is None:
|
243 |
+
args.offload_model = False if world_size > 1 else True
|
244 |
+
logging.info(
|
245 |
+
f"offload_model is not specified, set to {args.offload_model}.")
|
246 |
+
if world_size > 1:
|
247 |
+
torch.cuda.set_device(local_rank)
|
248 |
+
dist.init_process_group(
|
249 |
+
backend="nccl",
|
250 |
+
init_method="env://",
|
251 |
+
rank=rank,
|
252 |
+
world_size=world_size)
|
253 |
+
else:
|
254 |
+
assert not (
|
255 |
+
args.t5_fsdp or args.dit_fsdp
|
256 |
+
), f"t5_fsdp and dit_fsdp are not supported in non-distributed environments."
|
257 |
+
assert not (
|
258 |
+
args.ulysses_size > 1 or args.ring_size > 1
|
259 |
+
), f"context parallel are not supported in non-distributed environments."
|
260 |
+
|
261 |
+
if args.ulysses_size > 1 or args.ring_size > 1:
|
262 |
+
assert args.ulysses_size * args.ring_size == world_size, f"The product of ulysses_size and ring_size should equal the world size."
|
263 |
+
from xfuser.core.distributed import (initialize_model_parallel,
|
264 |
+
init_distributed_environment)
|
265 |
+
init_distributed_environment(
|
266 |
+
rank=dist.get_rank(), world_size=dist.get_world_size())
|
267 |
+
|
268 |
+
initialize_model_parallel(
|
269 |
+
sequence_parallel_degree=dist.get_world_size(),
|
270 |
+
ring_degree=args.ring_size,
|
271 |
+
ulysses_degree=args.ulysses_size,
|
272 |
+
)
|
273 |
+
|
274 |
+
if args.use_prompt_extend:
|
275 |
+
if args.prompt_extend_method == "dashscope":
|
276 |
+
prompt_expander = DashScopePromptExpander(
|
277 |
+
model_name=args.prompt_extend_model, is_vl="i2v" in args.task)
|
278 |
+
elif args.prompt_extend_method == "local_qwen":
|
279 |
+
prompt_expander = QwenPromptExpander(
|
280 |
+
model_name=args.prompt_extend_model,
|
281 |
+
is_vl="i2v" in args.task,
|
282 |
+
device=rank)
|
283 |
+
else:
|
284 |
+
raise NotImplementedError(
|
285 |
+
f"Unsupport prompt_extend_method: {args.prompt_extend_method}")
|
286 |
+
|
287 |
+
cfg = WAN_CONFIGS[args.task]
|
288 |
+
if args.ulysses_size > 1:
|
289 |
+
assert cfg.num_heads % args.ulysses_size == 0, f"`num_heads` must be divisible by `ulysses_size`."
|
290 |
+
|
291 |
+
logging.info(f"Generation job args: {args}")
|
292 |
+
logging.info(f"Generation model config: {cfg}")
|
293 |
+
|
294 |
+
if dist.is_initialized():
|
295 |
+
base_seed = [args.base_seed] if rank == 0 else [None]
|
296 |
+
dist.broadcast_object_list(base_seed, src=0)
|
297 |
+
args.base_seed = base_seed[0]
|
298 |
+
|
299 |
+
if "t2v" in args.task or "t2i" in args.task:
|
300 |
+
opt_dir=args.image
|
301 |
+
with open(args.prompt,"r")as f:
|
302 |
+
lines=f.read().strip("\n").split("\n")
|
303 |
+
# if args.prompt is None:
|
304 |
+
# args.prompt = EXAMPLE_PROMPT[args.task]["prompt"]
|
305 |
+
|
306 |
+
logging.info("Creating WanT2V pipeline.")
|
307 |
+
# wan_t2v = wan.WanT2V(
|
308 |
+
wan_t2v = WanT2V(
|
309 |
+
config=cfg,
|
310 |
+
checkpoint_dir=args.ckpt_dir,
|
311 |
+
device_id=device,
|
312 |
+
rank=rank,
|
313 |
+
t5_fsdp=args.t5_fsdp,
|
314 |
+
dit_fsdp=args.dit_fsdp,
|
315 |
+
use_usp=(args.ulysses_size > 1 or args.ring_size > 1),
|
316 |
+
t5_cpu=args.t5_cpu,
|
317 |
+
)
|
318 |
+
for idx,line in enumerate(lines):
|
319 |
+
args.save_file="%s/%s.mp4"%(opt_dir,idx)
|
320 |
+
prompt,image=line.split("@@")
|
321 |
+
args.image=image
|
322 |
+
args.prompt=prompt
|
323 |
+
logging.info(f"Input prompt: {args.prompt}")
|
324 |
+
if args.use_prompt_extend:
|
325 |
+
logging.info("Extending prompt ...")
|
326 |
+
if rank == 0:
|
327 |
+
prompt_output = prompt_expander(
|
328 |
+
args.prompt,
|
329 |
+
tar_lang=args.prompt_extend_target_lang,
|
330 |
+
seed=args.base_seed)
|
331 |
+
if prompt_output.status == False:
|
332 |
+
logging.info(
|
333 |
+
f"Extending prompt failed: {prompt_output.message}")
|
334 |
+
logging.info("Falling back to original prompt.")
|
335 |
+
input_prompt = args.prompt
|
336 |
+
else:
|
337 |
+
input_prompt = prompt_output.prompt
|
338 |
+
input_prompt = [input_prompt]
|
339 |
+
else:
|
340 |
+
input_prompt = [None]
|
341 |
+
if dist.is_initialized():
|
342 |
+
dist.broadcast_object_list(input_prompt, src=0)
|
343 |
+
args.prompt = input_prompt[0]
|
344 |
+
logging.info(f"Extended prompt: {args.prompt}")
|
345 |
+
|
346 |
+
logging.info(
|
347 |
+
f"Generating {'image' if 't2i' in args.task else 'video'} ...")
|
348 |
+
video = wan_t2v.generate(
|
349 |
+
args.prompt,
|
350 |
+
size=SIZE_CONFIGS[args.size],
|
351 |
+
frame_num=args.frame_num,
|
352 |
+
shift=args.sample_shift,
|
353 |
+
sample_solver=args.sample_solver,
|
354 |
+
sampling_steps=args.sample_steps,
|
355 |
+
guide_scale=args.sample_guide_scale,
|
356 |
+
seed=args.base_seed,
|
357 |
+
offload_model=args.offload_model)
|
358 |
+
if rank==0:
|
359 |
+
cache_video(
|
360 |
+
tensor=video[None],
|
361 |
+
save_file=args.save_file,
|
362 |
+
fps=cfg.sample_fps,
|
363 |
+
nrow=1,
|
364 |
+
normalize=True,
|
365 |
+
value_range=(-1, 1))
|
366 |
+
else:
|
367 |
+
if args.prompt is None:
|
368 |
+
args.prompt = EXAMPLE_PROMPT[args.task]["prompt"]
|
369 |
+
if args.image is None:
|
370 |
+
args.image = EXAMPLE_PROMPT[args.task]["image"]
|
371 |
+
logging.info(f"Input prompt: {args.prompt}")
|
372 |
+
logging.info(f"Input image: {args.image}")
|
373 |
+
|
374 |
+
opt_dir=args.image
|
375 |
+
with open(args.prompt,"r",encoding="gbk")as f:
|
376 |
+
lines=f.read().strip("\n").split("\n")
|
377 |
+
logging.info("Creating WanI2V pipeline.")
|
378 |
+
# wan_i2v = wan.WanI2V(
|
379 |
+
wan_i2v = WanI2V(
|
380 |
+
config=cfg,
|
381 |
+
checkpoint_dir=args.ckpt_dir,
|
382 |
+
device_id=device,
|
383 |
+
rank=rank,
|
384 |
+
t5_fsdp=args.t5_fsdp,
|
385 |
+
dit_fsdp=args.dit_fsdp,
|
386 |
+
use_usp=(args.ulysses_size > 1 or args.ring_size > 1),
|
387 |
+
t5_cpu=args.t5_cpu,
|
388 |
+
)
|
389 |
+
|
390 |
+
for idx,line in enumerate(lines):
|
391 |
+
# args.save_file="%s/%s.mp4"%(opt_dir,idx)
|
392 |
+
args.save_file="%s-%s.mp4"%(opt_dir,idx)
|
393 |
+
prompt,image=line.split("@@")
|
394 |
+
args.image=image
|
395 |
+
args.prompt=prompt
|
396 |
+
img = Image.open(args.image).convert("RGB")
|
397 |
+
if args.use_prompt_extend:
|
398 |
+
logging.info("Extending prompt ...")
|
399 |
+
if rank == 0:
|
400 |
+
prompt_output = prompt_expander(
|
401 |
+
args.prompt,
|
402 |
+
tar_lang=args.prompt_extend_target_lang,
|
403 |
+
image=img,
|
404 |
+
seed=args.base_seed)
|
405 |
+
if prompt_output.status == False:
|
406 |
+
logging.info(
|
407 |
+
f"Extending prompt failed: {prompt_output.message}")
|
408 |
+
logging.info("Falling back to original prompt.")
|
409 |
+
input_prompt = args.prompt
|
410 |
+
else:
|
411 |
+
input_prompt = prompt_output.prompt
|
412 |
+
input_prompt = [input_prompt]
|
413 |
+
else:
|
414 |
+
input_prompt = [None]
|
415 |
+
if dist.is_initialized():
|
416 |
+
dist.broadcast_object_list(input_prompt, src=0)
|
417 |
+
args.prompt = input_prompt[0]
|
418 |
+
logging.info(f"Extended prompt: {args.prompt}")
|
419 |
+
logging.info("Generating video ...")
|
420 |
+
if os.path.exists(args.save_file)==False:
|
421 |
+
video = wan_i2v.generate(
|
422 |
+
args,
|
423 |
+
args.prompt,
|
424 |
+
img,
|
425 |
+
max_area=MAX_AREA_CONFIGS[args.size],
|
426 |
+
frame_num=args.frame_num,
|
427 |
+
shift=args.sample_shift,
|
428 |
+
sample_solver=args.sample_solver,
|
429 |
+
sampling_steps=args.sample_steps,
|
430 |
+
guide_scale=args.sample_guide_scale,
|
431 |
+
seed=args.base_seed,
|
432 |
+
offload_model=args.offload_model,
|
433 |
+
|
434 |
+
student_steps=args.student_steps,#12,
|
435 |
+
norm=2,
|
436 |
+
frame_type="4",
|
437 |
+
channel_type="all",
|
438 |
+
|
439 |
+
)
|
440 |
+
if rank==0:
|
441 |
+
cache_video(
|
442 |
+
tensor=video[None],
|
443 |
+
save_file=args.save_file,
|
444 |
+
fps=cfg.sample_fps,
|
445 |
+
nrow=1,
|
446 |
+
normalize=True,
|
447 |
+
value_range=(-1, 1))
|
448 |
+
logging.info("Finished.")
|
449 |
+
|
450 |
+
|
451 |
+
if __name__ == "__main__":
|
452 |
+
args = _parse_args()
|
453 |
+
generate(args)
|
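(Usage sketch for the teacher-side search script above, with hypothetical paths. Unlike the student script, the search controls are exposed as flags (--student_steps, --norm, --frame_type, --channel_type), though the generate() call currently forwards only student_steps and hard-codes norm=2, frame_type="4", channel_type="all". Here --image acts as an output prefix, so results land at <prefix>-0.mp4, <prefix>-1.mp4, ... and each line of prompts.txt is prompt@@image_path.)

python generate-pi-i2v-myinfer-oss-tea.py --task i2v-14B --size 1280*720 --ckpt_dir ./Wan2.1-I2V-14B-720P --prompt ./prompts.txt --image ./outputs/run1 --student_steps 16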
generate-pi-i2v.py
ADDED
@@ -0,0 +1,418 @@
1 |
+
# -*- coding: utf-8 -*-
|
2 |
+
|
3 |
+
# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
|
4 |
+
import os
|
5 |
+
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"
|
6 |
+
import argparse
|
7 |
+
from datetime import datetime
|
8 |
+
import logging
|
9 |
+
import sys
|
10 |
+
import warnings
|
11 |
+
|
12 |
+
warnings.filterwarnings('ignore')
|
13 |
+
|
14 |
+
import torch, random
|
15 |
+
import torch.distributed as dist
|
16 |
+
from PIL import Image
|
17 |
+
|
18 |
+
import wan
|
19 |
+
from wan.configs import WAN_CONFIGS, SIZE_CONFIGS, MAX_AREA_CONFIGS, SUPPORTED_SIZES
|
20 |
+
from wan.utils.prompt_extend import DashScopePromptExpander, QwenPromptExpander
|
21 |
+
from wan.utils.utils import cache_video, cache_image, str2bool
|
22 |
+
|
23 |
+
EXAMPLE_PROMPT = {
|
24 |
+
"t2v-1.3B": {
|
25 |
+
"prompt": "Two anthropomorphic cats in comfy boxing gear and bright gloves fight intensely on a spotlighted stage.",
|
26 |
+
},
|
27 |
+
"t2v-14B": {
|
28 |
+
"prompt": "Two anthropomorphic cats in comfy boxing gear and bright gloves fight intensely on a spotlighted stage.",
|
29 |
+
},
|
30 |
+
"t2i-14B": {
|
31 |
+
"prompt": "一个朴素端庄的美人",
|
32 |
+
},
|
33 |
+
"i2v-14B": {
|
34 |
+
"prompt":
|
35 |
+
"Summer beach vacation style, a white cat wearing sunglasses sits on a surfboard. The fluffy-furred feline gazes directly at the camera with a relaxed expression. Blurred beach scenery forms the background featuring crystal-clear waters, distant green hills, and a blue sky dotted with white clouds. The cat assumes a naturally relaxed posture, as if savoring the sea breeze and warm sunlight. A close-up shot highlights the feline's intricate details and the refreshing atmosphere of the seaside.",
|
36 |
+
"image":
|
37 |
+
"examples/i2v_input.JPG",
|
38 |
+
},
|
39 |
+
}
|
40 |
+
|
41 |
+
|
42 |
+
def _validate_args(args):
|
43 |
+
# Basic check
|
44 |
+
assert args.ckpt_dir is not None, "Please specify the checkpoint directory."
|
45 |
+
assert args.task in WAN_CONFIGS, f"Unsupported task: {args.task}"
|
46 |
+
assert args.task in EXAMPLE_PROMPT, f"Unsupported task: {args.task}"
|
47 |
+
|
48 |
+
# The default sampling steps are 40 for image-to-video tasks and 50 for text-to-video tasks.
|
49 |
+
if args.sample_steps is None:
|
50 |
+
args.sample_steps = 40 if "i2v" in args.task else 50
|
51 |
+
|
52 |
+
if args.sample_shift is None:
|
53 |
+
args.sample_shift = 5.0
|
54 |
+
if "i2v" in args.task and args.size in ["832*480", "480*832"]:
|
55 |
+
args.sample_shift = 3.0
|
56 |
+
|
57 |
+
# The default number of frames is 1 for text-to-image tasks and 81 for other tasks.
|
58 |
+
if args.frame_num is None:
|
59 |
+
args.frame_num = 1 if "t2i" in args.task else 81
|
60 |
+
|
61 |
+
# T2I frame_num check
|
62 |
+
if "t2i" in args.task:
|
63 |
+
assert args.frame_num == 1, f"Unsupported frame_num {args.frame_num} for task {args.task}"
|
64 |
+
|
65 |
+
args.base_seed = args.base_seed if args.base_seed >= 0 else random.randint(
|
66 |
+
0, sys.maxsize)
|
67 |
+
# Size check
|
68 |
+
assert args.size in SUPPORTED_SIZES[
|
69 |
+
args.
|
70 |
+
task], f"Unsupport size {args.size} for task {args.task}, supported sizes are: {', '.join(SUPPORTED_SIZES[args.task])}"
|
71 |
+
|
72 |
+
|
73 |
+
def _parse_args():
|
74 |
+
parser = argparse.ArgumentParser(
|
75 |
+
description="Generate a image or video from a text prompt or image using Wan"
|
76 |
+
)
|
77 |
+
parser.add_argument(
|
78 |
+
"--task",
|
79 |
+
type=str,
|
80 |
+
default="t2v-14B",
|
81 |
+
choices=list(WAN_CONFIGS.keys()),
|
82 |
+
help="The task to run.")
|
83 |
+
parser.add_argument(
|
84 |
+
"--size",
|
85 |
+
type=str,
|
86 |
+
default="1280*720",
|
87 |
+
choices=list(SIZE_CONFIGS.keys()),
|
88 |
+
help="The area (width*height) of the generated video. For the I2V task, the aspect ratio of the output video will follow that of the input image."
|
89 |
+
)
|
90 |
+
parser.add_argument(
|
91 |
+
"--frame_num",
|
92 |
+
type=int,
|
93 |
+
default=None,
|
94 |
+
help="How many frames to sample from a image or video. The number should be 4n+1"
|
95 |
+
)
|
96 |
+
parser.add_argument(
|
97 |
+
"--ckpt_dir",
|
98 |
+
type=str,
|
99 |
+
default=None,
|
100 |
+
help="The path to the checkpoint directory.")
|
101 |
+
parser.add_argument(
|
102 |
+
"--offload_model",
|
103 |
+
type=str2bool,
|
104 |
+
default=None,
|
105 |
+
help="Whether to offload the model to CPU after each model forward, reducing GPU memory usage."
|
106 |
+
)
|
107 |
+
parser.add_argument(
|
108 |
+
"--ulysses_size",
|
109 |
+
type=int,
|
110 |
+
default=1,
|
111 |
+
help="The size of the ulysses parallelism in DiT.")
|
112 |
+
parser.add_argument(
|
113 |
+
"--ring_size",
|
114 |
+
type=int,
|
115 |
+
default=1,
|
116 |
+
help="The size of the ring attention parallelism in DiT.")
|
117 |
+
parser.add_argument(
|
118 |
+
"--t5_fsdp",
|
119 |
+
action="store_true",
|
120 |
+
default=False,
|
121 |
+
help="Whether to use FSDP for T5.")
|
122 |
+
parser.add_argument(
|
123 |
+
"--t5_cpu",
|
124 |
+
action="store_true",
|
125 |
+
default=False,
|
126 |
+
help="Whether to place T5 model on CPU.")
|
127 |
+
parser.add_argument(
|
128 |
+
"--dit_fsdp",
|
129 |
+
action="store_true",
|
130 |
+
default=False,
|
131 |
+
help="Whether to use FSDP for DiT.")
|
132 |
+
parser.add_argument(
|
133 |
+
"--save_file",
|
134 |
+
type=str,
|
135 |
+
default=None,
|
136 |
+
help="The file to save the generated image or video to.")
|
137 |
+
parser.add_argument(
|
138 |
+
"--prompt",
|
139 |
+
type=str,
|
140 |
+
default=None,
|
141 |
+
help="The prompt to generate the image or video from.")
|
142 |
+
parser.add_argument(
|
143 |
+
"--use_prompt_extend",
|
144 |
+
action="store_true",
|
145 |
+
default=False,
|
146 |
+
help="Whether to use prompt extend.")
|
147 |
+
parser.add_argument(
|
148 |
+
"--prompt_extend_method",
|
149 |
+
type=str,
|
150 |
+
default="local_qwen",
|
151 |
+
choices=["dashscope", "local_qwen"],
|
152 |
+
help="The prompt extend method to use.")
|
153 |
+
parser.add_argument(
|
154 |
+
"--prompt_extend_model",
|
155 |
+
type=str,
|
156 |
+
default=None,
|
157 |
+
help="The prompt extend model to use.")
|
158 |
+
parser.add_argument(
|
159 |
+
"--prompt_extend_target_lang",
|
160 |
+
type=str,
|
161 |
+
default="ch",
|
162 |
+
choices=["ch", "en"],
|
163 |
+
help="The target language of prompt extend.")
|
164 |
+
parser.add_argument(
|
165 |
+
"--base_seed",
|
166 |
+
type=int,
|
167 |
+
default=-1,
|
168 |
+
help="The seed to use for generating the image or video.")
|
169 |
+
parser.add_argument(
|
170 |
+
"--image",
|
171 |
+
type=str,
|
172 |
+
default=None,
|
173 |
+
help="The image to generate the video from.")
|
174 |
+
parser.add_argument(
|
175 |
+
"--sample_solver",
|
176 |
+
type=str,
|
177 |
+
default='unipc',
|
178 |
+
choices=['unipc', 'dpm++'],
|
179 |
+
help="The solver used to sample.")
|
180 |
+
parser.add_argument(
|
181 |
+
"--sample_steps", type=int, default=None, help="The sampling steps.")
|
182 |
+
parser.add_argument(
|
183 |
+
"--sample_shift",
|
184 |
+
type=float,
|
185 |
+
default=None,
|
186 |
+
help="Sampling shift factor for flow matching schedulers.")
|
187 |
+
parser.add_argument(
|
188 |
+
"--sample_guide_scale",
|
189 |
+
type=float,
|
190 |
+
default=5.0,
|
191 |
+
help="Classifier free guidance scale.")
|
192 |
+
|
193 |
+
args = parser.parse_args()
|
194 |
+
|
195 |
+
_validate_args(args)
|
196 |
+
|
197 |
+
return args
|
198 |
+
|
199 |
+
|
200 |
+
def _init_logging(rank):
|
201 |
+
# logging
|
202 |
+
if rank == 0:
|
203 |
+
# set format
|
204 |
+
logging.basicConfig(
|
205 |
+
level=logging.INFO,
|
206 |
+
format="[%(asctime)s] %(levelname)s: %(message)s",
|
207 |
+
handlers=[logging.StreamHandler(stream=sys.stdout)])
|
208 |
+
else:
|
209 |
+
logging.basicConfig(level=logging.ERROR)
|
210 |
+
|
211 |
+
|
212 |
+
def generate(args):
|
213 |
+
rank = int(os.getenv("RANK", 0))
|
214 |
+
world_size = int(os.getenv("WORLD_SIZE", 1))
|
215 |
+
local_rank = int(os.getenv("LOCAL_RANK", 0))
|
216 |
+
device = local_rank
|
217 |
+
_init_logging(rank)
|
218 |
+
|
219 |
+
if args.offload_model is None:
|
220 |
+
args.offload_model = False if world_size > 1 else True
|
221 |
+
logging.info(
|
222 |
+
f"offload_model is not specified, set to {args.offload_model}.")
|
223 |
+
if world_size > 1:
|
224 |
+
torch.cuda.set_device(local_rank)
|
225 |
+
dist.init_process_group(
|
226 |
+
backend="nccl",
|
227 |
+
init_method="env://",
|
228 |
+
rank=rank,
|
229 |
+
world_size=world_size)
|
230 |
+
else:
|
231 |
+
assert not (
|
232 |
+
args.t5_fsdp or args.dit_fsdp
|
233 |
+
), f"t5_fsdp and dit_fsdp are not supported in non-distributed environments."
|
234 |
+
assert not (
|
235 |
+
args.ulysses_size > 1 or args.ring_size > 1
|
236 |
+
), f"context parallel are not supported in non-distributed environments."
|
237 |
+
|
238 |
+
if args.ulysses_size > 1 or args.ring_size > 1:
|
239 |
+
assert args.ulysses_size * args.ring_size == world_size, f"The product of ulysses_size and ring_size should equal the world size."
|
240 |
+
from xfuser.core.distributed import (initialize_model_parallel,
|
241 |
+
init_distributed_environment)
|
242 |
+
init_distributed_environment(
|
243 |
+
rank=dist.get_rank(), world_size=dist.get_world_size())
|
244 |
+
|
245 |
+
initialize_model_parallel(
|
246 |
+
sequence_parallel_degree=dist.get_world_size(),
|
247 |
+
ring_degree=args.ring_size,
|
248 |
+
ulysses_degree=args.ulysses_size,
|
249 |
+
)
|
250 |
+
|
251 |
+
if args.use_prompt_extend:
|
252 |
+
if args.prompt_extend_method == "dashscope":
|
253 |
+
prompt_expander = DashScopePromptExpander(
|
254 |
+
model_name=args.prompt_extend_model, is_vl="i2v" in args.task)
|
255 |
+
elif args.prompt_extend_method == "local_qwen":
|
256 |
+
prompt_expander = QwenPromptExpander(
|
257 |
+
model_name=args.prompt_extend_model,
|
258 |
+
is_vl="i2v" in args.task,
|
259 |
+
device=rank)
|
260 |
+
else:
|
261 |
+
raise NotImplementedError(
|
262 |
+
f"Unsupport prompt_extend_method: {args.prompt_extend_method}")
|
263 |
+
|
264 |
+
cfg = WAN_CONFIGS[args.task]
|
265 |
+
if args.ulysses_size > 1:
|
266 |
+
assert cfg.num_heads % args.ulysses_size == 0, f"`num_heads` must be divisible by `ulysses_size`."
|
267 |
+
|
268 |
+
logging.info(f"Generation job args: {args}")
|
269 |
+
logging.info(f"Generation model config: {cfg}")
|
270 |
+
|
271 |
+
if dist.is_initialized():
|
272 |
+
base_seed = [args.base_seed] if rank == 0 else [None]
|
273 |
+
dist.broadcast_object_list(base_seed, src=0)
|
274 |
+
args.base_seed = base_seed[0]
|
275 |
+
|
276 |
+
if "t2v" in args.task or "t2i" in args.task:
|
277 |
+
opt_dir=args.image
|
278 |
+
with open(args.prompt,"r")as f:
|
279 |
+
lines=f.read().strip("\n").split("\n")
|
280 |
+
# if args.prompt is None:
|
281 |
+
# args.prompt = EXAMPLE_PROMPT[args.task]["prompt"]
|
282 |
+
for idx,line in enumerate(lines):
|
283 |
+
args.save_file="%s/%s.mp4"%(opt_dir,idx)
|
284 |
+
prompt,image=line.split("@@")
|
285 |
+
args.image=image
|
286 |
+
args.prompt=prompt
|
287 |
+
logging.info(f"Input prompt: {args.prompt}")
|
288 |
+
if args.use_prompt_extend:
|
289 |
+
logging.info("Extending prompt ...")
|
290 |
+
if rank == 0:
|
291 |
+
prompt_output = prompt_expander(
|
292 |
+
args.prompt,
|
293 |
+
tar_lang=args.prompt_extend_target_lang,
|
294 |
+
seed=args.base_seed)
|
295 |
+
if prompt_output.status == False:
|
296 |
+
logging.info(
|
297 |
+
f"Extending prompt failed: {prompt_output.message}")
|
298 |
+
logging.info("Falling back to original prompt.")
|
299 |
+
input_prompt = args.prompt
|
300 |
+
else:
|
301 |
+
input_prompt = prompt_output.prompt
|
302 |
+
input_prompt = [input_prompt]
|
303 |
+
else:
|
304 |
+
input_prompt = [None]
|
305 |
+
if dist.is_initialized():
|
306 |
+
dist.broadcast_object_list(input_prompt, src=0)
|
307 |
+
args.prompt = input_prompt[0]
|
308 |
+
logging.info(f"Extended prompt: {args.prompt}")
|
309 |
+
|
310 |
+
logging.info("Creating WanT2V pipeline.")
|
311 |
+
wan_t2v = wan.WanT2V(
|
312 |
+
config=cfg,
|
313 |
+
checkpoint_dir=args.ckpt_dir,
|
314 |
+
device_id=device,
|
315 |
+
rank=rank,
|
316 |
+
t5_fsdp=args.t5_fsdp,
|
317 |
+
dit_fsdp=args.dit_fsdp,
|
318 |
+
use_usp=(args.ulysses_size > 1 or args.ring_size > 1),
|
319 |
+
t5_cpu=args.t5_cpu,
|
320 |
+
)
|
321 |
+
logging.info(
|
322 |
+
f"Generating {'image' if 't2i' in args.task else 'video'} ...")
|
323 |
+
video = wan_t2v.generate(
|
324 |
+
args.prompt,
|
325 |
+
size=SIZE_CONFIGS[args.size],
|
326 |
+
frame_num=args.frame_num,
|
327 |
+
shift=args.sample_shift,
|
328 |
+
sample_solver=args.sample_solver,
|
329 |
+
sampling_steps=args.sample_steps,
|
330 |
+
guide_scale=args.sample_guide_scale,
|
331 |
+
seed=args.base_seed,
|
332 |
+
offload_model=args.offload_model)
|
333 |
+
if rank==0:
|
334 |
+
cache_video(
|
335 |
+
tensor=video[None],
|
336 |
+
save_file=args.save_file,
|
337 |
+
fps=cfg.sample_fps,
|
338 |
+
nrow=1,
|
339 |
+
normalize=True,
|
340 |
+
value_range=(-1, 1))
|
341 |
+
else:
|
342 |
+
if args.prompt is None:
|
343 |
+
args.prompt = EXAMPLE_PROMPT[args.task]["prompt"]
|
344 |
+
if args.image is None:
|
345 |
+
args.image = EXAMPLE_PROMPT[args.task]["image"]
|
346 |
+
logging.info(f"Input prompt: {args.prompt}")
|
347 |
+
logging.info(f"Input image: {args.image}")
|
348 |
+
|
349 |
+
opt_dir=args.image
|
350 |
+
with open(args.prompt,"r",encoding="gbk")as f:
|
351 |
+
lines=f.read().strip("\n").split("\n")
|
352 |
+
logging.info("Creating WanI2V pipeline.")
|
353 |
+
wan_i2v = wan.WanI2V(
|
354 |
+
config=cfg,
|
355 |
+
checkpoint_dir=args.ckpt_dir,
|
356 |
+
device_id=device,
|
357 |
+
rank=rank,
|
358 |
+
t5_fsdp=args.t5_fsdp,
|
359 |
+
dit_fsdp=args.dit_fsdp,
|
360 |
+
use_usp=(args.ulysses_size > 1 or args.ring_size > 1),
|
361 |
+
t5_cpu=args.t5_cpu,
|
362 |
+
)
|
363 |
+
|
364 |
+
for idx,line in enumerate(lines):
|
365 |
+
args.save_file="%s/%s.mp4"%(opt_dir,idx)
|
366 |
+
prompt,image=line.split("@@")
|
367 |
+
args.image=image
|
368 |
+
args.prompt=prompt
|
369 |
+
img = Image.open(args.image).convert("RGB")
|
370 |
+
if args.use_prompt_extend:
|
371 |
+
logging.info("Extending prompt ...")
|
372 |
+
if rank == 0:
|
373 |
+
prompt_output = prompt_expander(
|
374 |
+
args.prompt,
|
375 |
+
tar_lang=args.prompt_extend_target_lang,
|
376 |
+
image=img,
|
377 |
+
seed=args.base_seed)
|
378 |
+
if prompt_output.status == False:
|
379 |
+
logging.info(
|
380 |
+
f"Extending prompt failed: {prompt_output.message}")
|
381 |
+
logging.info("Falling back to original prompt.")
|
382 |
+
input_prompt = args.prompt
|
383 |
+
else:
|
384 |
+
input_prompt = prompt_output.prompt
|
385 |
+
input_prompt = [input_prompt]
|
386 |
+
else:
|
387 |
+
input_prompt = [None]
|
388 |
+
if dist.is_initialized():
|
389 |
+
dist.broadcast_object_list(input_prompt, src=0)
|
390 |
+
args.prompt = input_prompt[0]
|
391 |
+
logging.info(f"Extended prompt: {args.prompt}")
|
392 |
+
logging.info("Generating video ...")
|
393 |
+
if os.path.exists(args.save_file)==False:
|
394 |
+
video = wan_i2v.generate(
|
395 |
+
args.prompt,
|
396 |
+
img,
|
397 |
+
max_area=MAX_AREA_CONFIGS[args.size],
|
398 |
+
frame_num=args.frame_num,
|
399 |
+
shift=args.sample_shift,
|
400 |
+
sample_solver=args.sample_solver,
|
401 |
+
sampling_steps=args.sample_steps,
|
402 |
+
guide_scale=args.sample_guide_scale,
|
403 |
+
seed=args.base_seed,
|
404 |
+
offload_model=args.offload_model)
|
405 |
+
if rank==0:
|
406 |
+
cache_video(
|
407 |
+
tensor=video[None],
|
408 |
+
save_file=args.save_file,
|
409 |
+
fps=cfg.sample_fps,
|
410 |
+
nrow=1,
|
411 |
+
normalize=True,
|
412 |
+
value_range=(-1, 1))
|
413 |
+
logging.info("Finished.")
|
414 |
+
|
415 |
+
|
416 |
+
if __name__ == "__main__":
|
417 |
+
args = _parse_args()
|
418 |
+
generate(args)
|
get-med.py
ADDED
@@ -0,0 +1,25 @@
datas = [
    [3, 12, 38, 55, 68, 72, 76, 79, 82, 84, 87, 89, 91, 92, 94, 96],
    [2, 5, 9, 16, 29, 42, 58, 68, 75, 79, 83, 88, 92, 94, 95, 96],
    [1, 3, 7, 13, 24, 36, 44, 54, 63, 71, 78, 85, 89, 93, 95, 96],
    [3, 10, 39, 60, 73, 77, 81, 84, 86, 88, 90, 92, 93, 94, 95, 96],
    [2, 5, 11, 21, 36, 51, 65, 73, 80, 85, 88, 91, 93, 94, 95, 96],
    [1, 3, 6, 11, 20, 36, 49, 65, 71, 77, 83, 87, 91, 94, 95, 96],
    [2, 6, 15, 26, 39, 49, 57, 64, 70, 74, 80, 86, 90, 93, 95, 96],
    [1, 4, 9, 17, 30, 49, 63, 71, 81, 86, 89, 91, 93, 94, 95, 96],
    [4, 13, 29, 45, 58, 67, 73, 76, 80, 84, 88, 90, 92, 94, 95, 96],
]

def calculate_median(arr):
    # Median of a list; for an even-length input the upper of the two middle values is returned.
    sorted_arr = sorted(arr)
    n = len(sorted_arr)
    middle_index = n // 2
    return sorted_arr[middle_index]

# Element-wise median across the step schedules listed above.
opt = []
leng = len(datas[0])
len_datas = len(datas)
for i in range(leng):
    tmp = [datas[j][i] for j in range(len_datas)]
    opt.append(calculate_median(tmp))
print(opt)
#544P#96-12#[5, 20, 54, 63, 71, 79, 82, 87, 91, 94, 95, 96]
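(A quick check of calculate_median as defined above, to make the tie-breaking explicit: with an even-length input it returns the upper of the two middle values rather than their average.)

print(calculate_median([1, 2, 3, 4]))     # -> 3, not 2.5
print(calculate_median([3, 1, 5, 2, 4]))  # -> 3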
note-webui.txt
ADDED
@@ -0,0 +1,10 @@
python generate-pi-i2v-app-os.py --task i2v-14B --size 960*544 --ckpt_dir /data/docker/eryu51mmd

python generate-pi-i2v-app-os.py --task i2v-14B --size 960*544 --ckpt_dir /data/docker/eryu-mysp2v2-500/lns
python generate-pi-i2v-app-os.py --task i2v-14B --size 960*544 --ckpt_dir /data/docker/eryu0417

todo:
add motion
preprocess/extract-clip.py
ADDED
@@ -0,0 +1,57 @@
1 |
+
'''
|
2 |
+
nohup python preprocess/extract-clip.py 0 2 >> clip.log 2>&1 &
|
3 |
+
nohup python preprocess/extract-clip.py 1 2 >> clip.log 2>&1 &
|
4 |
+
|
5 |
+
0/1: which part of the work this process handles
|
6 |
+
2: the total number of parts the work is split into.
|
7 |
+
'''
|
8 |
+
root_mp4s="test_data/mp4root"
|
9 |
+
h,w=480,832
|
10 |
+
opt_root="output_root/clip"
|
11 |
+
checkpoint_dir="/DATA/bvac/personal/wan21/Wan2.1-I2V-14B-720P"
|
12 |
+
|
13 |
+
|
14 |
+
import os,sys,traceback
|
15 |
+
import pdb
|
16 |
+
all=int(sys.argv[2])
|
17 |
+
i_part=int(sys.argv[1])
|
18 |
+
# os.environ["CUDA_VISIBLE_DEVICES"]=str(int(sys.argv[1])%4)
|
19 |
+
os.environ["CUDA_VISIBLE_DEVICES"]=sys.argv[1]
|
20 |
+
import pdb,torch
|
21 |
+
from wan.modules.clip import CLIPModel
|
22 |
+
device="cuda"
|
23 |
+
clip = CLIPModel(
|
24 |
+
dtype=torch.float16,
|
25 |
+
device=device,
|
26 |
+
checkpoint_path=os.path.join(checkpoint_dir,'models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth'),
|
27 |
+
tokenizer_path=os.path.join(checkpoint_dir, 'xlm-roberta-large'))
|
28 |
+
|
29 |
+
clip.model = clip.model.to(device)
|
30 |
+
from decord import VideoReader
|
31 |
+
import torchvision.transforms.functional as TF
|
32 |
+
def read_img(path):
|
33 |
+
vr = VideoReader(uri=path, height=-1, width=-1)
|
34 |
+
temp_frms = vr.get_batch([2])
|
35 |
+
return (TF.to_tensor(temp_frms.asnumpy().astype("float32")[0])/255).sub_(0.5).div_(0.5).to(device)
|
36 |
+
|
37 |
+
os.makedirs(opt_root,exist_ok=True)
|
38 |
+
def go(todos):
|
39 |
+
for path in todos:
|
40 |
+
try:
|
41 |
+
name=os.path.basename(path).replace(".mp4",".pt")
|
42 |
+
if os.path.exists("%s/%s"%(opt_root,name)):continue
|
43 |
+
img = read_img(path)
|
44 |
+
clip_context = clip.visual([img[:, None, :, :]])
|
45 |
+
save_path="%s/%s"%(opt_root,name)
|
46 |
+
torch.save(clip_context, save_path)
|
47 |
+
except:
|
48 |
+
print(path,traceback.format_exc())
|
49 |
+
|
50 |
+
todo=[]
|
51 |
+
for name in os.listdir(root_mp4s):
|
52 |
+
todo.append("%s/%s"%(root_mp4s,name))
|
53 |
+
todo=sorted(todo)
|
54 |
+
todo=todo[i_part::all]
|
55 |
+
go(todo)
|
56 |
+
|
57 |
+
|
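(The two nohup launches in the header above shard the file list purely by list striding: each process keeps every N-th path starting at its own index. A minimal sketch of the same idea, with made-up file names:)

files = sorted(["a.mp4", "b.mp4", "c.mp4", "d.mp4", "e.mp4"])
all_parts = 2
print(files[0::all_parts])  # part 0 -> ['a.mp4', 'c.mp4', 'e.mp4']
print(files[1::all_parts])  # part 1 -> ['b.mp4', 'd.mp4']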
preprocess/extract-t5.py
ADDED
@@ -0,0 +1,46 @@
1 |
+
'''
|
2 |
+
nohup python preprocess/extract-t5.py 0 2 >> t5.log 2>&1 &
|
3 |
+
nohup python preprocess/extract-t5.py 1 2 >> t5.log 2>&1 &
|
4 |
+
|
5 |
+
0/1: which part of the work this process handles
|
6 |
+
2: the total number of parts the work is split into.
|
7 |
+
'''
|
8 |
+
txt_path="test_data/train_data_prompts.txt"
|
9 |
+
opt_root="output_root/t5"
|
10 |
+
checkpoint_dir="/DATA/bvac/personal/wan21/Wan2.1-I2V-14B-720P"
|
11 |
+
|
12 |
+
import os,sys,traceback
|
13 |
+
all=int(sys.argv[2])
|
14 |
+
i_part=int(sys.argv[1])
|
15 |
+
# os.environ["CUDA_VISIBLE_DEVICES"]=str(int(sys.argv[1])%4)
|
16 |
+
os.environ["CUDA_VISIBLE_DEVICES"]=sys.argv[1]
|
17 |
+
import pdb,torch
|
18 |
+
from wan.modules.t5 import T5EncoderModel
|
19 |
+
device="cuda"
|
20 |
+
text_encoder = T5EncoderModel(
|
21 |
+
text_len=512,
|
22 |
+
dtype=torch.bfloat16,
|
23 |
+
device=torch.device('cpu'),
|
24 |
+
checkpoint_path=os.path.join(checkpoint_dir, 'models_t5_umt5-xxl-enc-bf16.pth'),
|
25 |
+
tokenizer_path=os.path.join(checkpoint_dir, 'google/umt5-xxl'),
|
26 |
+
shard_fn=None,
|
27 |
+
)
|
28 |
+
text_encoder.model=text_encoder.model.to(device)
|
29 |
+
os.makedirs(opt_root,exist_ok=True)
|
30 |
+
def go(todos):
|
31 |
+
for name,text in todos:
|
32 |
+
try:
|
33 |
+
if os.path.exists("%s/%s"%(opt_root,name)):continue
|
34 |
+
context = text_encoder([text], device)[0].cpu()#torch.Size([138, 4096])#"In this scene, a man with a beard is seen tending to a woman who lies in bed, her face illuminated by the soft glow of a nearby light source. The man, dressed in a blue robe adorned with intricate designs, holds a bowl, possibly containing a healing potion or a magical elixir. The woman, clad in a pink garment, appears to be resting or possibly unwell, as she lies on her side with her eyes closed. The setting suggests a historical or medieval context, with the dimly lit room and the man's attire evoking a sense of timelessness and mystery. "
|
35 |
+
save_path="%s/%s"%(opt_root,name)
|
36 |
+
torch.save(context,save_path)
|
37 |
+
except:
|
38 |
+
print(text,traceback.format_exc())
|
39 |
+
|
40 |
+
|
41 |
+
todo=[]
|
42 |
+
with open(txt_path,"r")as f:lines=f.read().strip("\n").split("\n")
|
43 |
+
for line in lines:
|
44 |
+
todo.append(line.split("|"))
|
45 |
+
todo=sorted(todo)[i_part::all]
|
46 |
+
go(todo)
|
preprocess/extract-vae1.py
ADDED
@@ -0,0 +1,62 @@
1 |
+
'''
|
2 |
+
nohup python preprocess/extract-vae1.py 0 2 >> vae1.log 2>&1 &
|
3 |
+
nohup python preprocess/extract-vae1.py 1 2 >> vae1.log 2>&1 &
|
4 |
+
|
5 |
+
0/1: which part of the work this process handles
|
6 |
+
2: the total number of parts the work is split into.
|
7 |
+
'''
|
8 |
+
root_mp4s="test_data/mp4root"
|
9 |
+
h,w=480,832
|
10 |
+
num_frames = 49
|
11 |
+
opt_root="output_root/vae1"
|
12 |
+
checkpoint_dir="/DATA/bvac/personal/wan21/Wan2.1-I2V-14B-480P"
|
13 |
+
|
14 |
+
|
15 |
+
|
16 |
+
|
17 |
+
import os,sys,traceback
|
18 |
+
import pdb
|
19 |
+
os.environ["CUDA_VISIBLE_DEVICES"]=sys.argv[1]
|
20 |
+
all=int(sys.argv[2])
|
21 |
+
i_part=int(os.environ["CUDA_VISIBLE_DEVICES"])
|
22 |
+
import pdb,torch
|
23 |
+
from wan.modules.vae import WanVAE
|
24 |
+
device="cuda"
|
25 |
+
vae = WanVAE(vae_pth=os.path.join(checkpoint_dir, 'Wan2.1_VAE.pth'),device=device)
|
26 |
+
|
27 |
+
from decord import VideoReader
|
28 |
+
import torchvision.transforms.functional as TF
|
29 |
+
def read_img(path):
|
30 |
+
vr = VideoReader(uri=path, height=-1, width=-1)
|
31 |
+
temp_frms = vr.get_batch([2])
|
32 |
+
return (TF.to_tensor(temp_frms.asnumpy().astype("float32")[0])/255).sub_(0.5).div_(0.5).to(device)
|
33 |
+
|
34 |
+
os.makedirs(opt_root,exist_ok=True)
|
35 |
+
def go(todos):
|
36 |
+
for path in todos:
|
37 |
+
try:
|
38 |
+
name=os.path.basename(path).replace(".mp4",".pt")
|
39 |
+
if os.path.exists("%s/%s"%(opt_root,name)):continue
|
40 |
+
img = read_img(path)
|
41 |
+
tensorr = vae.encode([
|
42 |
+
torch.concat([
|
43 |
+
torch.nn.functional.interpolate(
|
44 |
+
img[None].cpu(), size=(h, w), mode='bicubic').transpose(
|
45 |
+
0, 1),
|
46 |
+
torch.zeros(3, num_frames-1, h, w)
|
47 |
+
], dim=1).to(device)
|
48 |
+
])[0].cpu() # torch.Size([16, 21, 90, 160])#21->13
|
49 |
+
save_path="%s/%s"%(opt_root,name)
|
50 |
+
torch.save(tensorr, save_path)
|
51 |
+
except:
|
52 |
+
print(path,traceback.format_exc())
|
53 |
+
|
54 |
+
todo=[]
|
55 |
+
for name in os.listdir(root_mp4s):
|
56 |
+
todo.append("%s/%s"%(root_mp4s,name))
|
57 |
+
todo=sorted(todo)
|
58 |
+
todo=todo[i_part::all]
|
59 |
+
go(todo)
|
60 |
+
|
61 |
+
|
62 |
+
|
preprocess/extract-vae_all.py
ADDED
@@ -0,0 +1,64 @@
1 |
+
'''
|
2 |
+
nohup python preprocess/extract-vae_all.py 0 2 >> t5.log 2>&1 &
|
3 |
+
nohup python preprocess/extract-vae_all.py 1 2 >> t5.log 2>&1 &
|
4 |
+
|
5 |
+
0/1: which part of the work this process handles
|
6 |
+
2: the total number of parts the work is split into.
|
7 |
+
'''
|
8 |
+
root_mp4s="test_data/mp4root"
|
9 |
+
h,w=480,832
|
10 |
+
num_frames = 49
|
11 |
+
opt_root="output_root/vae_all"
|
12 |
+
checkpoint_dir="/DATA/bvac/personal/wan21/Wan2.1-I2V-14B-720P"
|
13 |
+
|
14 |
+
|
15 |
+
import os,sys,traceback,numpy as np
|
16 |
+
import pdb
|
17 |
+
os.environ["CUDA_VISIBLE_DEVICES"]=sys.argv[1]
|
18 |
+
all=int(sys.argv[2])
|
19 |
+
i_part=int(os.environ["CUDA_VISIBLE_DEVICES"])
|
20 |
+
import pdb,torch
|
21 |
+
from wan.modules.vae import WanVAE
|
22 |
+
device="cuda"
|
23 |
+
vae = WanVAE(vae_pth=os.path.join(checkpoint_dir, 'Wan2.1_VAE.pth'),device=device)
|
24 |
+
from decord import VideoReader
|
25 |
+
import torchvision.transforms.functional as TF
|
26 |
+
def read_img(path):
|
27 |
+
vr = VideoReader(uri=path, height=-1, width=-1)
|
28 |
+
actual_fps=vr.get_avg_fps()
|
29 |
+
start=2
|
30 |
+
wanted_fps=16
|
31 |
+
end = int(start + num_frames / wanted_fps * actual_fps)
|
32 |
+
indices = np.arange(start, end, (end - start) / num_frames).astype(int)
|
33 |
+
# print(100000,start,end,indices,len(indices),actual_fps,len(vr))
|
34 |
+
temp_frms = vr.get_batch(indices)
|
35 |
+
temp_frms = torch.from_numpy(temp_frms.asnumpy()).to(device).float()/255
|
36 |
+
temp_frms-=0.5
|
37 |
+
temp_frms/=0.5#torch.Size([49, 1080, 1920, 3])
|
38 |
+
temp_frms=temp_frms.permute(3,0,1,2)#torch.Size([3, 49, 1080, 1920])
|
39 |
+
return temp_frms
|
40 |
+
|
41 |
+
|
42 |
+
os.makedirs(opt_root,exist_ok=True)
|
43 |
+
def go(todos):
|
44 |
+
for path in todos:
|
45 |
+
try:
|
46 |
+
name=os.path.basename(path).rsplit('.', maxsplit=1)[0]+'.pt'
|
47 |
+
if os.path.exists("%s/%s"%(opt_root,name)):continue
|
48 |
+
img = read_img(path)
|
49 |
+
aa=torch.nn.functional.interpolate(img, size=(h, w), mode='bicubic')
|
50 |
+
tensorr = vae.encode(aa.unsqueeze(0))[0].cpu() # torch.Size([16, 21, 90, 160])#21->13
|
51 |
+
save_path="%s/%s"%(opt_root,name)
|
52 |
+
torch.save(tensorr, save_path)
|
53 |
+
except:
|
54 |
+
print(path,traceback.format_exc())
|
55 |
+
|
56 |
+
todo=[]
|
57 |
+
for name in os.listdir(root_mp4s):
|
58 |
+
todo.append("%s/%s"%(root_mp4s,name))
|
59 |
+
todo=sorted(todo)
|
60 |
+
todo=todo[i_part::all]
|
61 |
+
go(todo)
|
62 |
+
|
63 |
+
|
64 |
+
|
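(Worked example of the frame-index computation in read_img above: for a 32 fps source, end = int(2 + 49 / 16 * 32) = 100 and the stride is (100 - 2) / 49 = 2.0, so np.arange picks frames 2, 4, ..., 98, i.e. exactly 49 frames covering roughly three seconds of source video, which is the clip resampled to the wanted 16 fps.)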
pyproject.toml
ADDED
@@ -0,0 +1,41 @@
1 |
+
[build-system]
|
2 |
+
requires = ["setuptools"]
|
3 |
+
build-backend = "setuptools.build_meta"
|
4 |
+
|
5 |
+
[project]
|
6 |
+
name = "fastvideo"
|
7 |
+
version = "1.2.0"
|
8 |
+
description = "FastVideo"
|
9 |
+
readme = "README1.md"
|
10 |
+
requires-python = ">=3.8"
|
11 |
+
classifiers = [
|
12 |
+
"Programming Language :: Python :: 3",
|
13 |
+
"License :: OSI Approved :: Apache Software License",
|
14 |
+
]
|
15 |
+
dependencies = [
|
16 |
+
#"transformers==4.46.1", "accelerate==1.0.1", "tokenizers==0.20.1", "albumentations==1.4.20", "av==13.1.0",
|
17 |
+
#"decord==0.6.0", "einops==0.8.0", "fastapi==0.115.3", "gdown==5.2.0", "h5py==3.12.1", "idna==3.6", "imageio==2.36.0",
|
18 |
+
#"matplotlib==3.9.2", "numpy==1.26.3", "omegaconf==2.3.0", "opencv-python==4.10.0.84", "opencv-python-headless==4.10.0.84",
|
19 |
+
#"pandas==2.2.3", "pillow==10.2.0", "pydub==0.25.1", "pytorch-lightning==2.4.0", "pytorchvideo==0.1.5", "PyYAML==6.0.1",
|
20 |
+
#"regex==2024.9.11", "requests==2.31.0", "scikit-learn==1.5.2", "scipy==1.14.1", "six==1.16.0", "test-tube==0.7.5",
|
21 |
+
#"timm==1.0.11", "torchdiffeq==0.2.4", "torchmetrics==1.5.1", "tqdm==4.66.5", "urllib3==2.2.0", "uvicorn==0.32.0",
|
22 |
+
#"scikit-video==1.1.11", "imageio-ffmpeg==0.5.1", "sentencepiece==0.2.0", "beautifulsoup4==4.12.3", "ftfy==6.3.0",
|
23 |
+
#"moviepy==1.0.3", "wandb==0.18.5", "tensorboard==2.18.0", "pydantic==2.9.2", "gradio==5.3.0", "huggingface_hub==0.26.1", "protobuf==5.28.3",
|
24 |
+
#"watch", "gpustat", "peft==0.13.2", "liger_kernel==0.4.1", "einops==0.8.0", "wheel==0.44.0", "loguru", "diffusers==0.32.0", "bitsandbytes"]
|
25 |
+
#"watch", "gpustat", "bitsandbytes"
|
26 |
+
]
|
27 |
+
|
28 |
+
|
29 |
+
[tool.setuptools.packages.find]
|
30 |
+
exclude = ["assets*", "docker*", "docs", "scripts*"]
|
31 |
+
|
32 |
+
[tool.wheel]
|
33 |
+
exclude = ["assets*", "docker*", "docs", "scripts*"]
|
34 |
+
|
35 |
+
[tool.mypy]
|
36 |
+
warn_return_any = true
|
37 |
+
warn_unused_configs = true
|
38 |
+
ignore_missing_imports = true
|
39 |
+
disallow_untyped_calls = true
|
40 |
+
check_untyped_defs = true
|
41 |
+
no_implicit_optional = true
|
req-fastvideo.txt
ADDED
@@ -0,0 +1,59 @@
1 |
+
transformers>=4.46.1
|
2 |
+
accelerate>=1.0.1
|
3 |
+
tokenizers>=0.20.1
|
4 |
+
albumentations>=1.4.20
|
5 |
+
av>=13.1.0
|
6 |
+
decord>=0.6.0
|
7 |
+
einops>=0.8.0
|
8 |
+
fastapi>=0.115.3
|
9 |
+
gdown>=5.2.0
|
10 |
+
h5py>=3.12.1
|
11 |
+
idna>=3.6
|
12 |
+
imageio>=2.36.0
|
13 |
+
matplotlib>=3.9.2
|
14 |
+
numpy>=1.26.3
|
15 |
+
omegaconf>=2.3.0
|
16 |
+
opencv-python>=4.10.0.84
|
17 |
+
opencv-python-headless>=4.10.0.84
|
18 |
+
pandas>=2.2.3
|
19 |
+
pillow>=10.2.0
|
20 |
+
pydub>=0.25.1
|
21 |
+
pytorch-lightning>=2.4.0
|
22 |
+
pytorchvideo>=0.1.5
|
23 |
+
PyYAML>=6.0.1
|
24 |
+
regex>=2024.9.11
|
25 |
+
requests>=2.31.0
|
26 |
+
scikit-learn>=1.5.2
|
27 |
+
scipy>=1.14.1
|
28 |
+
six>=1.16.0
|
29 |
+
test-tube>=0.7.5
|
30 |
+
timm>=1.0.11
|
31 |
+
torchdiffeq>=0.2.4
|
32 |
+
torchmetrics>=1.5.1
|
33 |
+
tqdm>=4.66.5
|
34 |
+
urllib3>=2.2.0
|
35 |
+
uvicorn>=0.32.0
|
36 |
+
scikit-video>=1.1.11
|
37 |
+
imageio-ffmpeg>=0.5.1
|
38 |
+
sentencepiece>=0.2.0
|
39 |
+
beautifulsoup4>=4.12.3
|
40 |
+
ftfy>=6.3.0
|
41 |
+
moviepy>=1.0.3
|
42 |
+
wandb>=0.18.5
|
43 |
+
tensorboard>=2.18.0
|
44 |
+
pydantic>=2.9.2
|
45 |
+
gradio>=5.3.0
|
46 |
+
huggingface_hub>=0.26.1
|
47 |
+
protobuf>=5.28.3
|
48 |
+
watch
|
49 |
+
gpustat
|
50 |
+
peft>=0.13.2
|
51 |
+
liger_kernel>=0.4.1
|
52 |
+
einops>=0.8.0
|
53 |
+
wheel>=0.44.0
|
54 |
+
loguru
|
55 |
+
torch
|
56 |
+
torchvision
|
57 |
+
ninja
|
58 |
+
safetensors
|
59 |
+
packaging
|
requirements.txt
ADDED
@@ -0,0 +1,15 @@
torch>=2.4.0
torchvision>=0.19.0
opencv-python>=4.9.0.80
diffusers>=0.31.0
transformers>=4.49.0
tokenizers>=0.20.3
accelerate>=1.1.1
tqdm
imageio
easydict
ftfy
dashscope
imageio-ffmpeg
gradio>=5.0.0
numpy>=1.23.5,<2
scripts/distill/distill_cog.sh
ADDED
@@ -0,0 +1,40 @@
1 |
+
export WANDB_BASE_URL="https://api.wandb.ai"
|
2 |
+
export WANDB_MODE=online
|
3 |
+
export HF_ENDPOINT="https://hf-mirror.com"
|
4 |
+
export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True
|
5 |
+
#based on fm1500
|
6 |
+
torchrun --nnodes 1 --nproc_per_node 8 \
|
7 |
+
fastvideo/distill_i2v_cog.py \
|
8 |
+
--seed 42 \
|
9 |
+
--pretrained_model_name_or_path /DATA/bvac/personal/opensora/zhipu/pretrained/danzhen-i2v-97f-720P-16fps/danzhen-i2v-97f-720P-16fps \
|
10 |
+
--dit_model_name_or_path /DATA/bvac/personal/opensora/zhipu/pretrained/danzhen-i2v-97f-720P-16fps/danzhen-i2v-97f-720P-16fps \
|
11 |
+
--model_type "cog" \
|
12 |
+
--cache_dir .cache \
|
13 |
+
--data_json_path /DATA/bvac/personal/fastvideo/FastVideo-main/data/Image-Vid-Finetune-cog480P-49f12fps-fixvae/videos2caption.json \
|
14 |
+
--validation_prompt_dir /DATA/bvac/personal/fastvideo/FastVideo-main/data/Image-Vid-Finetune-cog480P-49f12fps-fixvae/validation \
|
15 |
+
--gradient_checkpointing \
|
16 |
+
--train_batch_size=1 \
|
17 |
+
--num_latent_t 24 \
|
18 |
+
--sp_size 1 \
|
19 |
+
--train_sp_batch_size 1 \
|
20 |
+
--dataloader_num_workers 4 \
|
21 |
+
--gradient_accumulation_steps=1 \
|
22 |
+
--max_train_steps=20000 \
|
23 |
+
--learning_rate=3e-6 \
|
24 |
+
--mixed_precision="bf16" \
|
25 |
+
--checkpointing_steps=400 \
|
26 |
+
--validation_steps 15000000 \
|
27 |
+
--validation_sampling_steps "2,4,8" \
|
28 |
+
--checkpoints_total_limit 3 \
|
29 |
+
--allow_tf32 \
|
30 |
+
--ema_start_step 0 \
|
31 |
+
--cfg 0.0 \
|
32 |
+
--log_validation \
|
33 |
+
--output_dir=outputs-test-cog_i2v_5B-wukong49-distill-3e-6-fixvae2 \
|
34 |
+
--tracker_project_name video3w480Ptest_cog49i2v_fix_distill-3e-6-fixvae2 \
|
35 |
+
--num_frames 49 \
|
36 |
+
--shift 17 \
|
37 |
+
--validation_guidance_scale "1.0" \
|
38 |
+
--num_euler_timesteps 50 \
|
39 |
+
--multi_phased_distill_schedule "4000-1" \
|
40 |
+
--not_apply_cfg_solver
|
scripts/distill/distill_cog720-49.sh
ADDED
@@ -0,0 +1,40 @@
1 |
+
export WANDB_BASE_URL="https://api.wandb.ai"
|
2 |
+
export WANDB_MODE=online
|
3 |
+
export HF_ENDPOINT="https://hf-mirror.com"
|
4 |
+
export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True
|
5 |
+
#based on fm1500
|
6 |
+
torchrun --nnodes 3 --nproc_per_node 8 --node_rank=${NODE_RANK} --master_addr=10.156.32.11 --master_port=14431 \
|
7 |
+
fastvideo/distill_i2v_cog720-49.py \
|
8 |
+
--seed 42 \
|
9 |
+
--pretrained_model_name_or_path /DATA/bvac/personal/opensora/zhipu/pretrained/danzhen-i2v-97f-720P-16fps/danzhen-i2v-97f-720P-16fps-1600steps_bs24-fm6000 \
|
10 |
+
--dit_model_name_or_path /DATA/bvac/personal/opensora/zhipu/pretrained/danzhen-i2v-97f-720P-16fps/danzhen-i2v-97f-720P-16fps-1600steps_bs24-fm6000 \
|
11 |
+
--model_type "cog" \
|
12 |
+
--cache_dir /data/docker/data/video14w/.cache \
|
13 |
+
--data_json_path /data/docker/data/video14w/outputs/videos2caption.json \
|
14 |
+
--validation_prompt_dir /data/docker/data/video14w/outputs/validation \
|
15 |
+
--gradient_checkpointing \
|
16 |
+
--train_batch_size=1 \
|
17 |
+
--num_latent_t 24 \
|
18 |
+
--sp_size 1 \
|
19 |
+
--train_sp_batch_size 1 \
|
20 |
+
--dataloader_num_workers 4 \
|
21 |
+
--gradient_accumulation_steps=1 \
|
22 |
+
--max_train_steps=20000 \
|
23 |
+
--learning_rate=3e-6 \
|
24 |
+
--mixed_precision="bf16" \
|
25 |
+
--checkpointing_steps=250 \
|
26 |
+
--validation_steps 15000000 \
|
27 |
+
--validation_sampling_steps "2,4,8" \
|
28 |
+
--checkpoints_total_limit 3 \
|
29 |
+
--allow_tf32 \
|
30 |
+
--ema_start_step 0 \
|
31 |
+
--cfg 0.0 \
|
32 |
+
--log_validation \
|
33 |
+
--output_dir=outputs-cog_i2v_5B_down_vae_eryu49_3gpus_fix_text_rot_fvfm6k_distill \
|
34 |
+
--tracker_project_name cog_i2v_5B_down_vae_eryu49_3gpus_fix_text_rot_fvfm6k_distill \
|
35 |
+
--num_frames 49 \
|
36 |
+
--shift 17 \
|
37 |
+
--validation_guidance_scale "1.0" \
|
38 |
+
--num_euler_timesteps 50 \
|
39 |
+
--multi_phased_distill_schedule "4000-1" \
|
40 |
+
--not_apply_cfg_solver
|
scripts/distill/distill_cog720-49mix246adv.sh
ADDED
@@ -0,0 +1,40 @@
export WANDB_BASE_URL="https://api.wandb.ai"
export WANDB_MODE=online
export HF_ENDPOINT="https://hf-mirror.com"
export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True
# based on fm1500
torchrun --nnodes 3 --nproc_per_node 8 --node_rank=${NODE_RANK} --master_addr=10.156.32.11 --master_port=14431 \
    fastvideo/distill_adv-cog720-49.py \
    --seed 42 \
    --pretrained_model_name_or_path /DATA/bvac/personal/opensora/zhipu/pretrained/danzhen-i2v-97f-720P-16fps/danzhen-i2v-97f-720P-16fps-fm246mix \
    --dit_model_name_or_path /DATA/bvac/personal/opensora/zhipu/pretrained/danzhen-i2v-97f-720P-16fps/danzhen-i2v-97f-720P-16fps-fm246mix \
    --model_type "cog" \
    --cache_dir /data/docker/data/video14w/.cache \
    --data_json_path /data/docker/data/video14w/outputs/videos2caption.json \
    --validation_prompt_dir /data/docker/data/video14w/outputs/validation \
    --gradient_checkpointing \
    --train_batch_size=1 \
    --num_latent_t 24 \
    --sp_size 1 \
    --train_sp_batch_size 1 \
    --dataloader_num_workers 4 \
    --gradient_accumulation_steps=1 \
    --max_train_steps=20000 \
    --learning_rate=1e-6 \
    --mixed_precision="bf16" \
    --checkpointing_steps=64 \
    --validation_steps 15000000 \
    --validation_sampling_steps "2,4,8" \
    --checkpoints_total_limit 3 \
    --allow_tf32 \
    --ema_start_step 0 \
    --cfg 0.0 \
    --log_validation \
    --output_dir=outputs-cog_i2v_5B_down_vae_eryu49_3gpus_fix_text_rot_fvfm_mix246_distill1e6adv \
    --tracker_project_name cog_i2v_5B_down_vae_eryu49_3gpus_fix_text_rot_fvfm_mix246_distill1e6adv \
    --num_frames 49 \
    --shift 17 \
    --validation_guidance_scale "1.0" \
    --num_euler_timesteps 50 \
    --multi_phased_distill_schedule "4000-1" \
    --not_apply_cfg_solver
scripts/distill/distill_cog720-49mix26.sh
ADDED
@@ -0,0 +1,42 @@
export WANDB_BASE_URL="https://api.wandb.ai"
export WANDB_MODE=online
export HF_ENDPOINT="https://hf-mirror.com"
export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True


torchrun --nnodes 5 --nproc_per_node 8 --node_rank=${NODE_RANK} --master_addr=10.156.32.11 --master_port=14431 \
    fastvideo/distill_i2v_cog720-4133-nonegrid.py \
    --seed 42 \
    --pretrained_model_name_or_path /DATA/bvac/personal/opensora/zhipu/pretrained/mix26 \
    --dit_model_name_or_path /DATA/bvac/personal/opensora/zhipu/pretrained/mix26 \
    --model_type "cog" \
    --cache_dir /data/docker/data/video14w/.cache \
    --data_json_path2 /DATA/bvac/personal/fastvideo/make-data/all33-41-49/videos2caption41.json \
    --data_json_path3 /DATA/bvac/personal/fastvideo/make-data/all33-41-49/videos2caption33.json \
    --validation_prompt_dir /data/docker/data/video14w/outputs/validation \
    --gradient_checkpointing \
    --train_batch_size=1 \
    --num_latent_t 24 \
    --sp_size 1 \
    --train_sp_batch_size 1 \
    --dataloader_num_workers 4 \
    --gradient_accumulation_steps=1 \
    --max_train_steps=20000 \
    --learning_rate=1.5e-6 \
    --mixed_precision="bf16" \
    --checkpointing_steps=64 \
    --validation_steps 15000000 \
    --validation_sampling_steps "2,4,8" \
    --checkpoints_total_limit 3 \
    --allow_tf32 \
    --ema_start_step 0 \
    --cfg 0.0 \
    --log_validation \
    --output_dir=outputs-cog_i2sv_5B_down_vae_eryu4133_5gpus_fix_text_rot_fvfm_mix26_distill1d5e6 \
    --tracker_project_name cog_i2v_5B_down_vae_eryu4133_5gpus_fix_text_rot_fvfm_mix26_distill1d5e6 \
    --num_frames 41 \
    --shift 17 \
    --validation_guidance_scale "1.0" \
    --num_euler_timesteps 50 \
    --multi_phased_distill_schedule "4000-1" \
    --not_apply_cfg_solver
scripts/distill/distill_cog720-49mix26b.sh
ADDED
@@ -0,0 +1,42 @@
export WANDB_BASE_URL="https://api.wandb.ai"
export WANDB_MODE=online
export HF_ENDPOINT="https://hf-mirror.com"
export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True


torchrun --nnodes 4 --nproc_per_node 8 --node_rank=${NODE_RANK} --master_addr=10.156.32.11 --master_port=14431 \
    fastvideo/distill_i2v_cog720-4133-nonegrid.py \
    --seed 42 \
    --pretrained_model_name_or_path /DATA/bvac/personal/opensora/zhipu/pretrained/cog_i2sv_5B_down_vae_eryu4133_5gpus_fix_text_rot_fvfm_mix26_distill1d5e6/mix5221 \
    --dit_model_name_or_path /DATA/bvac/personal/opensora/zhipu/pretrained/cog_i2sv_5B_down_vae_eryu4133_5gpus_fix_text_rot_fvfm_mix26_distill1d5e6/mix5221 \
    --model_type "cog" \
    --cache_dir /data/docker/data/video14w/.cache \
    --data_json_path2 /DATA/bvac/personal/fastvideo/make-data/all33-41-49/videos2caption41.json \
    --data_json_path3 /DATA/bvac/personal/fastvideo/make-data/all33-41-49/videos2caption33.json \
    --validation_prompt_dir /data/docker/data/video14w/outputs/validation \
    --gradient_checkpointing \
    --train_batch_size=1 \
    --num_latent_t 24 \
    --sp_size 1 \
    --train_sp_batch_size 1 \
    --dataloader_num_workers 4 \
    --gradient_accumulation_steps=1 \
    --max_train_steps=20000 \
    --learning_rate=1e-6 \
    --mixed_precision="bf16" \
    --checkpointing_steps=64 \
    --validation_steps 15000000 \
    --validation_sampling_steps "2,4,8" \
    --checkpoints_total_limit 3 \
    --allow_tf32 \
    --ema_start_step 0 \
    --cfg 0.0 \
    --log_validation \
    --output_dir=outputs-cog_i2sv_5B_down_vae_eryu4133_4gpus_fix_text_rot_fvfm_mix26_distill1e6b \
    --tracker_project_name cog_i2v_5B_down_vae_eryu4133_4gpus_fix_text_rot_fvfm_mix26_distill1e6b \
    --num_frames 41 \
    --shift 17 \
    --validation_guidance_scale "1.0" \
    --num_euler_timesteps 50 \
    --multi_phased_distill_schedule "4000-1" \
    --not_apply_cfg_solver
scripts/distill/distill_hunyuan.sh
ADDED
@@ -0,0 +1,40 @@
export WANDB_BASE_URL="https://api.wandb.ai"
export WANDB_MODE=online

DATA_DIR=./data

torchrun --nnodes 1 --nproc_per_node 8 \
    fastvideo/distill.py \
    --seed 42 \
    --pretrained_model_name_or_path $DATA_DIR/hunyuan \
    --dit_model_name_or_path $DATA_DIR/hunyuan/hunyuan-video-t2v-720p/transformers/mp_rank_00_model_states.pt \
    --model_type "hunyuan" \
    --cache_dir "$DATA_DIR/.cache" \
    --data_json_path "$DATA_DIR/Hunyuan-30K-Distill-Data/videos2caption.json" \
    --validation_prompt_dir "$DATA_DIR/Hunyuan-Distill-Data/validation" \
    --gradient_checkpointing \
    --train_batch_size=1 \
    --num_latent_t 24 \
    --sp_size 1 \
    --train_sp_batch_size 1 \
    --dataloader_num_workers 4 \
    --gradient_accumulation_steps=1 \
    --max_train_steps=2000 \
    --learning_rate=1e-6 \
    --mixed_precision="bf16" \
    --checkpointing_steps=64 \
    --validation_steps 64 \
    --validation_sampling_steps "2,4,8" \
    --checkpoints_total_limit 3 \
    --allow_tf32 \
    --ema_start_step 0 \
    --cfg 0.0 \
    --log_validation \
    --output_dir="$DATA_DIR/outputs/hy_phase1_shift17_bs_32" \
    --tracker_project_name Hunyuan_Distill \
    --num_frames 93 \
    --shift 17 \
    --validation_guidance_scale "1.0" \
    --num_euler_timesteps 50 \
    --multi_phased_distill_schedule "4000-1" \
    --not_apply_cfg_solver
scripts/distill/distill_mochi.sh
ADDED
@@ -0,0 +1,38 @@
export WANDB_BASE_URL="https://api.wandb.ai"
export WANDB_MODE=online

torchrun --nnodes 1 --nproc_per_node 4 \
    fastvideo/distill.py \
    --seed 42 \
    --pretrained_model_name_or_path data/mochi \
    --model_type "mochi" \
    --cache_dir data/.cache \
    --data_json_path data/Merge-30k-Data/video2caption.json \
    --validation_prompt_dir data/Image-Vid-Finetune-Mochi/validation \
    --gradient_checkpointing \
    --train_batch_size=1 \
    --num_latent_t 28 \
    --sp_size 4 \
    --train_sp_batch_size 2 \
    --dataloader_num_workers 4 \
    --gradient_accumulation_steps=1 \
    --max_train_steps=4000 \
    --learning_rate=1e-6 \
    --mixed_precision=bf16 \
    --checkpointing_steps=64 \
    --validation_steps 1 \
    --validation_sampling_steps 8 \
    --checkpoints_total_limit 3 \
    --allow_tf32 \
    --ema_start_step 0 \
    --cfg 0.0 \
    --log_validation \
    --output_dir="data/outputs/lq_euler_50_thres0.1_lrg_0.75_phase1_lr1e-6_repro" \
    --tracker_project_name PCM \
    --num_frames 163 \
    --scheduler_type pcm_linear_quadratic \
    --validation_guidance_scale 0.5,1.5,2.5 \
    --num_euler_timesteps 50 \
    --linear_quadratic_threshold 0.1 \
    --linear_range 0.75 \
    --multi_phased_distill_schedule 4000-1
scripts/finetune/finetune_wan.sh
ADDED
@@ -0,0 +1,39 @@
export WANDB_BASE_URL="https://api.wandb.ai"
#export WANDB_MODE=online
export WANDB_MODE=offline
export WANDB_API_KEY=xxx
export HF_ENDPOINT="https://hf-mirror.com"
export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True
export TORCH_NCCL_TRACE_BUFFER_SIZE=1048576 # 1MB
export TORCH_NCCL_ASYNC_ERROR_HANDLING=1
export CUBLAS_WORKSPACE_CONFIG=:4096:8

torchrun --nnodes 1 --nproc_per_node 8 --node_rank=0 --master_addr=127.0.0.1 --master_port=24431 \
    fastvideo/train.py \
    --seed 142 \
    --pretrained_model_name_or_path /DATA/bvac/personal/wan21/Wan2.1-I2V-14B-480P \
    --model_type "wan" \
    --data_json_path test_data/data.json \
    --gradient_checkpointing \
    --train_batch_size=1 \
    --num_latent_t 240 \
    --sp_size 8 \
    --train_sp_batch_size 1 \
    --dataloader_num_workers 4 \
    --gradient_accumulation_steps=1 \
    --max_train_steps=20000 \
    --learning_rate=1e-5 \
    --mixed_precision=bf16 \
    --checkpointing_steps=400 \
    --validation_steps 20000 \
    --validation_sampling_steps 64 \
    --checkpoints_total_limit 3 \
    --allow_tf32 \
    --ema_start_step 0 \
    --cfg 0.0 \
    --ema_decay 0.999 \
    --log_validation \
    --output_dir=outputs-sp8 \
    --tracker_project_name sp8 \
    --validation_guidance_scale "1.0" \
    --group_frame
scripts/huggingface/download_hf.py
ADDED
@@ -0,0 +1,39 @@
from huggingface_hub import snapshot_download, hf_hub_download
import argparse

# set args for repo_id, local_dir, repo_type

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Download a dataset or model from the Hugging Face Hub"
    )
    parser.add_argument(
        "--repo_id", type=str, help="The ID of the repository to download"
    )
    parser.add_argument(
        "--local_dir",
        type=str,
        help="The local directory to download the repository to",
    )
    parser.add_argument(
        "--repo_type",
        type=str,
        help="The type of repository to download (dataset or model)",
    )
    parser.add_argument("--file_name", type=str, help="The file name to download")
    args = parser.parse_args()
    if args.file_name:
        hf_hub_download(
            repo_id=args.repo_id,
            filename=args.file_name,
            repo_type=args.repo_type,
            local_dir=args.local_dir,
        )
    else:
        snapshot_download(
            repo_id=args.repo_id,
            local_dir=args.local_dir,
            repo_type=args.repo_type,
            local_dir_use_symlinks=False,
            resume_download=True,
        )
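A minimal usage sketch for this downloader CLI; the repository IDs and paths below are placeholders, not names taken from this repository.

# Placeholder invocations of scripts/huggingface/download_hf.py:
#   python scripts/huggingface/download_hf.py --repo_id <org>/<model> --local_dir data/<model> --repo_type model
#   python scripts/huggingface/download_hf.py --repo_id <org>/<dataset> --local_dir data --repo_type dataset --file_name <file>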
scripts/huggingface/upload_hf.py
ADDED
@@ -0,0 +1,9 @@
from huggingface_hub import HfApi

api = HfApi()

api.upload_folder(
    folder_path="data/Black-Myth-Taylor-Src",
    repo_id="FastVideo/Image-Vid-Finetune-Src",
    repo_type="dataset",
)
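The folder and repository above are hard-coded. A parameterized variant is sketched below; it is illustrative only (not part of this repository) and uses the same HfApi.upload_folder call.

# Illustrative variant: same upload_folder call, with paths taken from the command line.
import argparse
from huggingface_hub import HfApi

parser = argparse.ArgumentParser()
parser.add_argument("--folder_path", type=str, required=True)
parser.add_argument("--repo_id", type=str, required=True)
parser.add_argument("--repo_type", type=str, default="dataset")
args = parser.parse_args()

HfApi().upload_folder(
    folder_path=args.folder_path,
    repo_id=args.repo_id,
    repo_type=args.repo_type,
)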
scripts/inference/inference_diffusers_hunyuan.sh
ADDED
@@ -0,0 +1,20 @@
#!/bin/bash

num_gpus=1
export MODEL_BASE="data/FastHunyuan-diffusers"
torchrun --nnodes=1 --nproc_per_node=$num_gpus --master_port 12345 \
    fastvideo/sample/sample_t2v_diffusers_hunyuan.py \
    --height 720 \
    --width 1280 \
    --num_frames 45 \
    --num_inference_steps 6 \
    --guidance_scale 1 \
    --embedded_cfg_scale 6 \
    --flow_shift 17 \
    --flow-reverse \
    --prompt ./assets/prompt.txt \
    --seed 1024 \
    --output_path outputs_video/hunyuan_quant/nf4/ \
    --model_path $MODEL_BASE \
    --quantization "nf4" \
    --cpu_offload
scripts/inference/inference_hunyuan.sh
ADDED
@@ -0,0 +1,19 @@
#!/bin/bash

num_gpus=4
export MODEL_BASE=data/FastHunyuan
torchrun --nnodes=1 --nproc_per_node=$num_gpus --master_port 29503 \
    fastvideo/sample/sample_t2v_hunyuan.py \
    --height 720 \
    --width 1280 \
    --num_frames 125 \
    --num_inference_steps 6 \
    --guidance_scale 1 \
    --embedded_cfg_scale 6 \
    --flow_shift 17 \
    --flow-reverse \
    --prompt ./assets/prompt.txt \
    --seed 1024 \
    --output_path outputs_video/hunyuan/cfg6/ \
    --model_path $MODEL_BASE \
    --dit-weight ${MODEL_BASE}/hunyuan-video-t2v-720p/transformers/mp_rank_00_model_states.pt
scripts/inference/inference_mochi_sp.sh
ADDED
@@ -0,0 +1,19 @@
#!/bin/bash

num_gpus=4

torchrun --nnodes=1 --nproc_per_node=$num_gpus --master_port 29503 \
    fastvideo/sample/sample_t2v_mochi.py \
    --model_path data/FastMochi-diffusers \
    --prompt_path "assets/prompt.txt" \
    --num_frames 163 \
    --height 480 \
    --width 848 \
    --num_inference_steps 8 \
    --guidance_scale 1.5 \
    --output_path outputs_video/mochi_sp/ \
    --seed 1024 \
    --scheduler_type "pcm_linear_quadratic" \
    --linear_threshold 0.1 \
    --linear_range 0.75
scripts/preprocess/preprocess_cog_data.sh
ADDED
@@ -0,0 +1,35 @@
# export WANDB_MODE="offline"
###### test-t2v-5B-wukong
export HF_ENDPOINT="https://hf-mirror.com"
GPU_NUM=4 # 2,4,8
MODEL_PATH="/DATA/bvac/personal/opensora/zhipu/pretrained/hf2b/cache/models--THUDM--CogVideoX-5b/snapshots/8d6ea3f817438460b25595a120f109b88d5fdfad"
MODEL_TYPE="cog"
DATA_MERGE_PATH="/DATA/bvac/personal/fastvideo/FastVideo-main/data/Image-Vid-Finetune-Src/merge.txt"
OUTPUT_DIR="/DATA/bvac/personal/fastvideo/FastVideo-main/data/Image-Vid-Finetune-cog480P-49f8fps"
VALIDATION_PATH="/DATA/bvac/personal/fastvideo/FastVideo-main/assets/prompt1.txt"

torchrun --nproc_per_node=$GPU_NUM \
    fastvideo/data_preprocess/preprocess_vae_latents.py \
    --model_path $MODEL_PATH \
    --data_merge_path $DATA_MERGE_PATH \
    --train_batch_size=1 \
    --max_height=480 \
    --max_width=720 \
    --num_frames=49 \
    --dataloader_num_workers 1 \
    --output_dir=$OUTPUT_DIR \
    --model_type $MODEL_TYPE \
    --train_fps 12

torchrun --nproc_per_node=$GPU_NUM \
    fastvideo/data_preprocess/preprocess_text_embeddings.py \
    --model_type $MODEL_TYPE \
    --model_path $MODEL_PATH \
    --output_dir=$OUTPUT_DIR

torchrun --nproc_per_node=$GPU_NUM \
    fastvideo/data_preprocess/preprocess_validation_text_embeddings.py \
    --model_type $MODEL_TYPE \
    --model_path $MODEL_PATH \
    --output_dir=$OUTPUT_DIR \
    --validation_prompt_txt $VALIDATION_PATH
scripts/preprocess/preprocess_hunyuan_data.sh
ADDED
@@ -0,0 +1,33 @@
# export WANDB_MODE="offline"
GPU_NUM=1 # 2,4,8
MODEL_PATH="data/hunyuan"
MODEL_TYPE="hunyuan"
DATA_MERGE_PATH="data/Image-Vid-Finetune-Src/merge.txt"
OUTPUT_DIR="data/Image-Vid-Finetune-HunYuan"
VALIDATION_PATH="assets/prompt.txt"

torchrun --nproc_per_node=$GPU_NUM \
    fastvideo/data_preprocess/preprocess_vae_latents.py \
    --model_path $MODEL_PATH \
    --data_merge_path $DATA_MERGE_PATH \
    --train_batch_size=1 \
    --max_height=480 \
    --max_width=848 \
    --num_frames=93 \
    --dataloader_num_workers 1 \
    --output_dir=$OUTPUT_DIR \
    --model_type $MODEL_TYPE \
    --train_fps 24

torchrun --nproc_per_node=$GPU_NUM \
    fastvideo/data_preprocess/preprocess_text_embeddings.py \
    --model_type $MODEL_TYPE \
    --model_path $MODEL_PATH \
    --output_dir=$OUTPUT_DIR

torchrun --nproc_per_node=1 \
    fastvideo/data_preprocess/preprocess_validation_text_embeddings.py \
    --model_type $MODEL_TYPE \
    --model_path $MODEL_PATH \
    --output_dir=$OUTPUT_DIR \
    --validation_prompt_txt $VALIDATION_PATH
scripts/preprocess/preprocess_mochi_data.sh
ADDED
@@ -0,0 +1,33 @@
# export WANDB_MODE="offline"
GPU_NUM=1 # 2,4,8
MODEL_PATH="data/FastMochi-diffusers"
MODEL_TYPE="mochi"
DATA_MERGE_PATH="data/Image-Vid-Finetune-Src/merge.txt"
OUTPUT_DIR="data/Image-Vid-Finetune-Mochi"
VALIDATION_PATH="assets/prompt.txt"

torchrun --nproc_per_node=$GPU_NUM \
    fastvideo/data_preprocess/preprocess_vae_latents.py \
    --model_path $MODEL_PATH \
    --data_merge_path $DATA_MERGE_PATH \
    --train_batch_size=1 \
    --max_height=480 \
    --max_width=848 \
    --num_frames=93 \
    --dataloader_num_workers 1 \
    --output_dir=$OUTPUT_DIR \
    --model_type $MODEL_TYPE \
    --train_fps 24

torchrun --nproc_per_node=$GPU_NUM \
    fastvideo/data_preprocess/preprocess_text_embeddings.py \
    --model_type $MODEL_TYPE \
    --model_path $MODEL_PATH \
    --output_dir=$OUTPUT_DIR

torchrun --nproc_per_node=1 \
    fastvideo/data_preprocess/preprocess_validation_text_embeddings.py \
    --model_type $MODEL_TYPE \
    --model_path $MODEL_PATH \
    --output_dir=$OUTPUT_DIR \
    --validation_prompt_txt $VALIDATION_PATH
wan/__init__.py
ADDED
@@ -0,0 +1,3 @@
from . import configs, distributed, modules
from .image2video import WanI2V
from .text2video import WanT2V
wan/configs/__init__.py
ADDED
@@ -0,0 +1,44 @@
# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
import copy
import os

os.environ['TOKENIZERS_PARALLELISM'] = 'false'

from .wan_i2v_14B import i2v_14B
from .wan_t2v_1_3B import t2v_1_3B
from .wan_t2v_14B import t2v_14B

# the config of t2i_14B is the same as t2v_14B
t2i_14B = copy.deepcopy(t2v_14B)
t2i_14B.__name__ = 'Config: Wan T2I 14B'

WAN_CONFIGS = {
    't2v-14B': t2v_14B,
    't2v-1.3B': t2v_1_3B,
    'i2v-14B': i2v_14B,
    't2i-14B': t2i_14B,
}

SIZE_CONFIGS = {
    '720*1280': (720, 1280),
    '1280*720': (1280, 720),
    '480*832': (480, 832),
    '832*480': (832, 480),
    '1024*1024': (1024, 1024),
    '960*544': (960, 544),
}

MAX_AREA_CONFIGS = {
    '720*1280': 720 * 1280,
    '1280*720': 1280 * 720,
    '480*832': 480 * 832,
    '832*480': 832 * 480,
    '960*544': 960 * 544,
}

SUPPORTED_SIZES = {
    't2v-14B': ('720*1280', '1280*720', '480*832', '832*480'),
    't2v-1.3B': ('480*832', '832*480'),
    'i2v-14B': ('720*1280', '1280*720', '480*832', '832*480', '960*544'),
    't2i-14B': tuple(SIZE_CONFIGS.keys()),
}
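A short sketch of how these lookup tables fit together; the task and size strings are illustrative values taken from the keys defined above, and the snippet assumes the module is imported as wan.configs.

# Illustrative only: pick a task and a size key defined above.
task = 'i2v-14B'
size = '832*480'
assert size in SUPPORTED_SIZES[task]   # each task accepts only certain size strings
cfg = WAN_CONFIGS[task]                # EasyDict of model hyper-parameters (dim, num_layers, ...)
dims = SIZE_CONFIGS[size]              # (832, 480), as stored in the table
max_area = MAX_AREA_CONFIGS[size]      # pixel budget used by the I2V pipeline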
wan/configs/shared_config.py
ADDED
@@ -0,0 +1,19 @@
# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
import torch
from easydict import EasyDict

#------------------------ Wan shared config ------------------------#
wan_shared_cfg = EasyDict()

# t5
wan_shared_cfg.t5_model = 'umt5_xxl'
wan_shared_cfg.t5_dtype = torch.bfloat16
wan_shared_cfg.text_len = 512

# transformer
wan_shared_cfg.param_dtype = torch.bfloat16

# inference
wan_shared_cfg.num_train_timesteps = 1000
wan_shared_cfg.sample_fps = 16
wan_shared_cfg.sample_neg_prompt = '色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走'
wan/configs/wan_i2v_14B.py
ADDED
@@ -0,0 +1,35 @@
# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
import torch
from easydict import EasyDict

from .shared_config import wan_shared_cfg

#------------------------ Wan I2V 14B ------------------------#

i2v_14B = EasyDict(__name__='Config: Wan I2V 14B')
i2v_14B.update(wan_shared_cfg)

i2v_14B.t5_checkpoint = 'models_t5_umt5-xxl-enc-bf16.pth'
i2v_14B.t5_tokenizer = 'google/umt5-xxl'

# clip
i2v_14B.clip_model = 'clip_xlm_roberta_vit_h_14'
i2v_14B.clip_dtype = torch.float16
i2v_14B.clip_checkpoint = 'models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth'
i2v_14B.clip_tokenizer = 'xlm-roberta-large'

# vae
i2v_14B.vae_checkpoint = 'Wan2.1_VAE.pth'
i2v_14B.vae_stride = (4, 8, 8)

# transformer
i2v_14B.patch_size = (1, 2, 2)
i2v_14B.dim = 5120
i2v_14B.ffn_dim = 13824
i2v_14B.freq_dim = 256
i2v_14B.num_heads = 40
i2v_14B.num_layers = 40
i2v_14B.window_size = (-1, -1)
i2v_14B.qk_norm = True
i2v_14B.cross_attn_norm = True
i2v_14B.eps = 1e-6
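A minimal sketch of the merge semantics used above: EasyDict.update copies the shared fields (t5_dtype, text_len, num_train_timesteps, ...) into the per-model config, and the assignments that follow add or override model-specific entries. The values below are toy placeholders.

# Toy illustration of the EasyDict inheritance pattern used by these config files.
from easydict import EasyDict

base = EasyDict(text_len=512, num_train_timesteps=1000)  # stands in for wan_shared_cfg
cfg = EasyDict(__name__='example config')
cfg.update(base)        # inherit the shared defaults
cfg.dim = 5120          # model-specific field added afterwards
print(cfg.text_len, cfg.dim)  # -> 512 5120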
wan/configs/wan_t2v_14B.py
ADDED
@@ -0,0 +1,29 @@
# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
from easydict import EasyDict

from .shared_config import wan_shared_cfg

#------------------------ Wan T2V 14B ------------------------#

t2v_14B = EasyDict(__name__='Config: Wan T2V 14B')
t2v_14B.update(wan_shared_cfg)

# t5
t2v_14B.t5_checkpoint = 'models_t5_umt5-xxl-enc-bf16.pth'
t2v_14B.t5_tokenizer = 'google/umt5-xxl'

# vae
t2v_14B.vae_checkpoint = 'Wan2.1_VAE.pth'
t2v_14B.vae_stride = (4, 8, 8)

# transformer
t2v_14B.patch_size = (1, 2, 2)
t2v_14B.dim = 5120
t2v_14B.ffn_dim = 13824
t2v_14B.freq_dim = 256
t2v_14B.num_heads = 40
t2v_14B.num_layers = 40
t2v_14B.window_size = (-1, -1)
t2v_14B.qk_norm = True
t2v_14B.cross_attn_norm = True
t2v_14B.eps = 1e-6
wan/configs/wan_t2v_1_3B.py
ADDED
@@ -0,0 +1,29 @@
# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
from easydict import EasyDict

from .shared_config import wan_shared_cfg

#------------------------ Wan T2V 1.3B ------------------------#

t2v_1_3B = EasyDict(__name__='Config: Wan T2V 1.3B')
t2v_1_3B.update(wan_shared_cfg)

# t5
t2v_1_3B.t5_checkpoint = 'models_t5_umt5-xxl-enc-bf16.pth'
t2v_1_3B.t5_tokenizer = 'google/umt5-xxl'

# vae
t2v_1_3B.vae_checkpoint = 'Wan2.1_VAE.pth'
t2v_1_3B.vae_stride = (4, 8, 8)

# transformer
t2v_1_3B.patch_size = (1, 2, 2)
t2v_1_3B.dim = 1536
t2v_1_3B.ffn_dim = 8960
t2v_1_3B.freq_dim = 256
t2v_1_3B.num_heads = 12
t2v_1_3B.num_layers = 30
t2v_1_3B.window_size = (-1, -1)
t2v_1_3B.qk_norm = True
t2v_1_3B.cross_attn_norm = True
t2v_1_3B.eps = 1e-6
wan/distributed/__init__.py
ADDED
File without changes
wan/distributed/fsdp.py
ADDED
@@ -0,0 +1,32 @@
# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
from functools import partial

import torch
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp import MixedPrecision, ShardingStrategy
from torch.distributed.fsdp.wrap import lambda_auto_wrap_policy


def shard_model(
    model,
    device_id,
    param_dtype=torch.bfloat16,
    reduce_dtype=torch.float32,
    buffer_dtype=torch.float32,
    process_group=None,
    sharding_strategy=ShardingStrategy.FULL_SHARD,
    sync_module_states=True,
):
    model = FSDP(
        module=model,
        process_group=process_group,
        sharding_strategy=sharding_strategy,
        auto_wrap_policy=partial(
            lambda_auto_wrap_policy, lambda_fn=lambda m: m in model.blocks),
        mixed_precision=MixedPrecision(
            param_dtype=param_dtype,
            reduce_dtype=reduce_dtype,
            buffer_dtype=buffer_dtype),
        device_id=device_id,
        sync_module_states=sync_module_states)
    return model
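A hypothetical call site, noted here because the lambda auto-wrap policy above shards each entry of model.blocks as its own FSDP unit, so the wrapped module must expose a .blocks list (as WanModel does) and torch.distributed must already be initialized (e.g. via torchrun).

# Hypothetical usage sketch (assumes an initialized process group and a model with .blocks):
#   model = WanModel.from_pretrained(checkpoint_dir)
#   model = shard_model(model, device_id=local_rank)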
wan/distributed/xdit_context_parallel.py
ADDED
@@ -0,0 +1,192 @@
# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
import torch
import torch.cuda.amp as amp
from xfuser.core.distributed import (get_sequence_parallel_rank,
                                     get_sequence_parallel_world_size,
                                     get_sp_group)
from xfuser.core.long_ctx_attention import xFuserLongContextAttention

from ..modules.model import sinusoidal_embedding_1d


def pad_freqs(original_tensor, target_len):
    seq_len, s1, s2 = original_tensor.shape
    pad_size = target_len - seq_len
    padding_tensor = torch.ones(
        pad_size,
        s1,
        s2,
        dtype=original_tensor.dtype,
        device=original_tensor.device)
    padded_tensor = torch.cat([original_tensor, padding_tensor], dim=0)
    return padded_tensor


@amp.autocast(enabled=False)
def rope_apply(x, grid_sizes, freqs):
    """
    x: [B, L, N, C].
    grid_sizes: [B, 3].
    freqs: [M, C // 2].
    """
    s, n, c = x.size(1), x.size(2), x.size(3) // 2
    # split freqs
    freqs = freqs.split([c - 2 * (c // 3), c // 3, c // 3], dim=1)

    # loop over samples
    output = []
    for i, (f, h, w) in enumerate(grid_sizes.tolist()):
        seq_len = f * h * w

        # precompute multipliers
        x_i = torch.view_as_complex(x[i, :s].to(torch.float64).reshape(
            s, n, -1, 2))
        freqs_i = torch.cat([
            freqs[0][:f].view(f, 1, 1, -1).expand(f, h, w, -1),
            freqs[1][:h].view(1, h, 1, -1).expand(f, h, w, -1),
            freqs[2][:w].view(1, 1, w, -1).expand(f, h, w, -1)
        ], dim=-1).reshape(seq_len, 1, -1)

        # apply rotary embedding
        sp_size = get_sequence_parallel_world_size()
        sp_rank = get_sequence_parallel_rank()
        freqs_i = pad_freqs(freqs_i, s * sp_size)
        s_per_rank = s
        freqs_i_rank = freqs_i[(sp_rank * s_per_rank):((sp_rank + 1) *
                                                       s_per_rank), :, :]
        x_i = torch.view_as_real(x_i * freqs_i_rank).flatten(2)
        x_i = torch.cat([x_i, x[i, s:]])

        # append to collection
        output.append(x_i)
    return torch.stack(output).float()


def usp_dit_forward(
    self,
    x,
    t,
    context,
    seq_len,
    clip_fea=None,
    y=None,
):
    """
    x: A list of videos each with shape [C, T, H, W].
    t: [B].
    context: A list of text embeddings each with shape [L, C].
    """
    if self.model_type == 'i2v':
        assert clip_fea is not None and y is not None
    # params
    device = self.patch_embedding.weight.device
    if self.freqs.device != device:
        self.freqs = self.freqs.to(device)

    if y is not None:
        x = [torch.cat([u, v], dim=0) for u, v in zip(x, y)]

    # embeddings
    x = [self.patch_embedding(u.unsqueeze(0)) for u in x]
    grid_sizes = torch.stack(
        [torch.tensor(u.shape[2:], dtype=torch.long) for u in x])
    x = [u.flatten(2).transpose(1, 2) for u in x]
    seq_lens = torch.tensor([u.size(1) for u in x], dtype=torch.long)
    assert seq_lens.max() <= seq_len
    x = torch.cat([
        torch.cat([u, u.new_zeros(1, seq_len - u.size(1), u.size(2))], dim=1)
        for u in x
    ])

    # time embeddings
    with amp.autocast(dtype=torch.float32):
        e = self.time_embedding(
            sinusoidal_embedding_1d(self.freq_dim, t).float())
        e0 = self.time_projection(e).unflatten(1, (6, self.dim))
        assert e.dtype == torch.float32 and e0.dtype == torch.float32

    # context
    context_lens = None
    context = self.text_embedding(
        torch.stack([
            torch.cat([u, u.new_zeros(self.text_len - u.size(0), u.size(1))])
            for u in context
        ]))

    if clip_fea is not None:
        context_clip = self.img_emb(clip_fea)  # bs x 257 x dim
        context = torch.concat([context_clip, context], dim=1)

    # arguments
    kwargs = dict(
        e=e0,
        seq_lens=seq_lens,
        grid_sizes=grid_sizes,
        freqs=self.freqs,
        context=context,
        context_lens=context_lens)

    # Context Parallel
    x = torch.chunk(
        x, get_sequence_parallel_world_size(),
        dim=1)[get_sequence_parallel_rank()]

    for block in self.blocks:
        x = block(x, **kwargs)

    # head
    x = self.head(x, e)

    # Context Parallel
    x = get_sp_group().all_gather(x, dim=1)

    # unpatchify
    x = self.unpatchify(x, grid_sizes)
    return [u.float() for u in x]


def usp_attn_forward(self,
                     x,
                     seq_lens,
                     grid_sizes,
                     freqs,
                     dtype=torch.bfloat16):
    b, s, n, d = *x.shape[:2], self.num_heads, self.head_dim
    half_dtypes = (torch.float16, torch.bfloat16)

    def half(x):
        return x if x.dtype in half_dtypes else x.to(dtype)

    # query, key, value function
    def qkv_fn(x):
        q = self.norm_q(self.q(x)).view(b, s, n, d)
        k = self.norm_k(self.k(x)).view(b, s, n, d)
        v = self.v(x).view(b, s, n, d)
        return q, k, v

    q, k, v = qkv_fn(x)
    q = rope_apply(q, grid_sizes, freqs)
    k = rope_apply(k, grid_sizes, freqs)

    # TODO: We should use unpaded q,k,v for attention.
    # k_lens = seq_lens // get_sequence_parallel_world_size()
    # if k_lens is not None:
    #     q = torch.cat([u[:l] for u, l in zip(q, k_lens)]).unsqueeze(0)
    #     k = torch.cat([u[:l] for u, l in zip(k, k_lens)]).unsqueeze(0)
    #     v = torch.cat([u[:l] for u, l in zip(v, k_lens)]).unsqueeze(0)

    x = xFuserLongContextAttention()(
        None,
        query=half(q),
        key=half(k),
        value=half(v),
        window_size=self.window_size)

    # TODO: padding after attention.
    # x = torch.cat([x, x.new_zeros(b, s - x.size(1), n, d)], dim=1)

    # output
    x = x.flatten(2)
    x = self.o(x)
    return x
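A toy illustration, not part of the repository, of the context-parallel split performed in usp_dit_forward above: the padded token sequence is chunked along the sequence dimension, each rank processes one slice, and the all_gather on the sp group reassembles the full sequence before unpatchify.

# Toy sketch of the sequence-parallel split/merge pattern (placeholder tensors).
import torch

seq_len, sp_size = 8, 4                    # padded length is assumed divisible by sp_size
x = torch.arange(seq_len).view(1, seq_len, 1)
shards = torch.chunk(x, sp_size, dim=1)    # rank r keeps shards[r], as in usp_dit_forward
restored = torch.cat(shards, dim=1)        # what get_sp_group().all_gather(x, dim=1) reassembles
assert torch.equal(restored, x)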
wan/image2video.py
ADDED
@@ -0,0 +1,345 @@
# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
import gc
import logging
import math
import os
import random
import sys
import types
from contextlib import contextmanager
from functools import partial

import numpy as np
import torch
import torch.cuda.amp as amp
import torch.distributed as dist
import torchvision.transforms.functional as TF
from tqdm import tqdm

from .distributed.fsdp import shard_model
from .modules.clip import CLIPModel
from .modules.model_infer import WanModel
from .modules.t5 import T5EncoderModel
from .modules.vae import WanVAE
from .utils.fm_solvers import (FlowDPMSolverMultistepScheduler,
                               get_sampling_sigmas, retrieve_timesteps)
from .utils.fm_solvers_unipc import FlowUniPCMultistepScheduler


class WanI2V:

    def __init__(
        self,
        config,
        checkpoint_dir,
        device_id=0,
        rank=0,
        t5_fsdp=False,
        dit_fsdp=False,
        use_usp=False,
        t5_cpu=False,
        init_on_cpu=True,
    ):
        r"""
        Initializes the image-to-video generation model components.

        Args:
            config (EasyDict):
                Object containing model parameters initialized from config.py
            checkpoint_dir (`str`):
                Path to directory containing model checkpoints
            device_id (`int`, *optional*, defaults to 0):
                Id of target GPU device
            rank (`int`, *optional*, defaults to 0):
                Process rank for distributed training
            t5_fsdp (`bool`, *optional*, defaults to False):
                Enable FSDP sharding for T5 model
            dit_fsdp (`bool`, *optional*, defaults to False):
                Enable FSDP sharding for DiT model
            use_usp (`bool`, *optional*, defaults to False):
                Enable distribution strategy of USP.
            t5_cpu (`bool`, *optional*, defaults to False):
                Whether to place T5 model on CPU. Only works without t5_fsdp.
            init_on_cpu (`bool`, *optional*, defaults to True):
                Enable initializing Transformer Model on CPU. Only works without FSDP or USP.
        """
        self.device = torch.device(f"cuda:{device_id}")
        self.config = config
        self.rank = rank
        self.use_usp = use_usp
        self.t5_cpu = t5_cpu

        self.num_train_timesteps = config.num_train_timesteps
        self.param_dtype = config.param_dtype

        shard_fn = partial(shard_model, device_id=device_id)
        self.text_encoder = T5EncoderModel(
            text_len=config.text_len,
            dtype=config.t5_dtype,
            device=torch.device('cpu'),
            checkpoint_path=os.path.join(checkpoint_dir, config.t5_checkpoint),
            tokenizer_path=os.path.join(checkpoint_dir, config.t5_tokenizer),
            shard_fn=shard_fn if t5_fsdp else None,
        )

        self.vae_stride = config.vae_stride
        self.patch_size = config.patch_size
        self.vae = WanVAE(
            vae_pth=os.path.join(checkpoint_dir, config.vae_checkpoint),
            device=self.device)

        self.clip = CLIPModel(
            dtype=config.clip_dtype,
            device=self.device,
            checkpoint_path=os.path.join(checkpoint_dir,
                                         config.clip_checkpoint),
            tokenizer_path=os.path.join(checkpoint_dir, config.clip_tokenizer))

        logging.info(f"Creating WanModel from {checkpoint_dir}")
        self.model = WanModel.from_pretrained(checkpoint_dir)
        self.model.eval().requires_grad_(False)

        if t5_fsdp or dit_fsdp or use_usp:
            init_on_cpu = False

        if use_usp:
            from xfuser.core.distributed import \
                get_sequence_parallel_world_size

            from .distributed.xdit_context_parallel import (usp_attn_forward,
                                                            usp_dit_forward)
            for block in self.model.blocks:
                block.self_attn.forward = types.MethodType(
                    usp_attn_forward, block.self_attn)
            self.model.forward = types.MethodType(usp_dit_forward, self.model)
            self.sp_size = get_sequence_parallel_world_size()
        else:
            self.sp_size = 1

        if dist.is_initialized():
            dist.barrier()
        if dit_fsdp:
            self.model = shard_fn(self.model)
        else:
            if not init_on_cpu:
                self.model = self.model.to(self.device)

        self.sample_neg_prompt = config.sample_neg_prompt

    def generate(self,
                 input_prompt,
                 img,
                 max_area=720 * 1280,
                 frame_num=81,
                 shift=5.0,
                 sample_solver='unipc',
                 sampling_steps=40,
                 guide_scale=5.0,
                 n_prompt="",
                 seed=-1,
                 offload_model=True):
        r"""
        Generates video frames from input image and text prompt using diffusion process.

        Args:
            input_prompt (`str`):
                Text prompt for content generation.
            img (PIL.Image.Image):
                Input image tensor. Shape: [3, H, W]
            max_area (`int`, *optional*, defaults to 720*1280):
                Maximum pixel area for latent space calculation. Controls video resolution scaling
            frame_num (`int`, *optional*, defaults to 81):
                How many frames to sample from a video. The number should be 4n+1
            shift (`float`, *optional*, defaults to 5.0):
                Noise schedule shift parameter. Affects temporal dynamics
                [NOTE]: If you want to generate a 480p video, it is recommended to set the shift value to 3.0.
            sample_solver (`str`, *optional*, defaults to 'unipc'):
                Solver used to sample the video.
            sampling_steps (`int`, *optional*, defaults to 40):
                Number of diffusion sampling steps. Higher values improve quality but slow generation
            guide_scale (`float`, *optional*, defaults to 5.0):
                Classifier-free guidance scale. Controls prompt adherence vs. creativity
            n_prompt (`str`, *optional*, defaults to ""):
                Negative prompt for content exclusion. If not given, use `config.sample_neg_prompt`
            seed (`int`, *optional*, defaults to -1):
                Random seed for noise generation. If -1, use random seed
            offload_model (`bool`, *optional*, defaults to True):
                If True, offloads models to CPU during generation to save VRAM

        Returns:
            torch.Tensor:
                Generated video frames tensor. Dimensions: (C, N, H, W) where:
                - C: Color channels (3 for RGB)
                - N: Number of frames (81)
                - H: Frame height (from max_area)
                - W: Frame width (from max_area)
        """
        img = TF.to_tensor(img).sub_(0.5).div_(0.5).to(self.device)

        F = frame_num
        h, w = img.shape[1:]
        aspect_ratio = h / w
        lat_h = round(
            np.sqrt(max_area * aspect_ratio) // self.vae_stride[1] //
            self.patch_size[1] * self.patch_size[1])
        lat_w = round(
            np.sqrt(max_area / aspect_ratio) // self.vae_stride[2] //
            self.patch_size[2] * self.patch_size[2])
        h = lat_h * self.vae_stride[1]
        w = lat_w * self.vae_stride[2]

        max_seq_len = ((F - 1) // self.vae_stride[0] + 1) * lat_h * lat_w // (
            self.patch_size[1] * self.patch_size[2])
        max_seq_len = int(math.ceil(max_seq_len / self.sp_size)) * self.sp_size

        seed = seed if seed >= 0 else random.randint(0, sys.maxsize)
        seed_g = torch.Generator(device=self.device)
        seed_g.manual_seed(seed)
        noise = torch.randn(
            16,
            F // 4 + 1,
            lat_h,
            lat_w,
            dtype=torch.float32,
            generator=seed_g,
            device=self.device)

        msk = torch.ones(1, F, lat_h, lat_w, device=self.device)
        msk[:, 1:] = 0
        msk = torch.concat([
            torch.repeat_interleave(msk[:, 0:1], repeats=4, dim=1), msk[:, 1:]
        ], dim=1)
        msk = msk.view(1, msk.shape[1] // 4, 4, lat_h, lat_w)
        msk = msk.transpose(1, 2)[0]

        if n_prompt == "":
            n_prompt = self.sample_neg_prompt

        # preprocess
        if not self.t5_cpu:
            self.text_encoder.model = self.text_encoder.model.to(self.device)
            context = self.text_encoder([input_prompt], self.device)
            context_null = self.text_encoder([n_prompt], self.device)
            if offload_model:
                self.text_encoder.model = self.text_encoder.model.cpu()
        else:
            context = self.text_encoder([input_prompt], torch.device('cpu'))
            context_null = self.text_encoder([n_prompt], torch.device('cpu'))
            context = [t.to(self.device) for t in context]
            context_null = [t.to(self.device) for t in context_null]

        self.clip.model = self.clip.model.to(self.device)
        clip_context = self.clip.visual([img[:, None, :, :]])
        if offload_model:
            self.clip.model = self.clip.model.cpu()
            torch.cuda.empty_cache()
        y = self.vae.encode([
            torch.concat([
                torch.nn.functional.interpolate(
                    img[None].cpu(), size=(h, w), mode='bicubic').transpose(
                        0, 1),
                # torch.zeros(3, 80, h, w)
                torch.zeros(3, 48, h, w)
            ], dim=1).to(self.device)
        ])[0]
        y = torch.concat([msk, y])

        @contextmanager
        def noop_no_sync():
            yield

        no_sync = getattr(self.model, 'no_sync', noop_no_sync)

        # evaluation mode
        with amp.autocast(dtype=self.param_dtype), torch.no_grad(), no_sync():

            if sample_solver == 'unipc':
                sample_scheduler = FlowUniPCMultistepScheduler(
                    num_train_timesteps=self.num_train_timesteps,
                    shift=1,
                    use_dynamic_shifting=False)
                sample_scheduler.set_timesteps(
                    sampling_steps, device=self.device, shift=shift)
                timesteps = sample_scheduler.timesteps
            elif sample_solver == 'dpm++':
                sample_scheduler = FlowDPMSolverMultistepScheduler(
                    num_train_timesteps=self.num_train_timesteps,
                    shift=1,
                    use_dynamic_shifting=False)
                sampling_sigmas = get_sampling_sigmas(sampling_steps, shift)
                timesteps, _ = retrieve_timesteps(
                    sample_scheduler,
                    device=self.device,
                    sigmas=sampling_sigmas)
            else:
                raise NotImplementedError("Unsupported solver.")

            # sample videos
            latent = noise

            arg_c = {
                'context': [context[0]],
                'clip_fea': clip_context,
                'seq_len': max_seq_len,
                'y': [y],
            }

            arg_null = {
                'context': context_null,
                'clip_fea': clip_context,
                'seq_len': max_seq_len,
                'y': [y],
            }

            if offload_model:
                torch.cuda.empty_cache()

            self.model = self.model.to(self.device)
            for _, t in enumerate(tqdm(timesteps)):
                latent_model_input = [latent.to(self.device)]
                timestep = [t]

                timestep = torch.stack(timestep).to(self.device)
                # print(timestep)
                noise_pred_cond = self.model(latent_model_input, t=timestep, **arg_c)[0].to(torch.device('cpu') if offload_model else self.device)
                # noise_pred_cond = latent_model_input[0]
                if offload_model:
                    torch.cuda.empty_cache()
                noise_pred_uncond = self.model(latent_model_input, t=timestep, **arg_null)[0].to(torch.device('cpu') if offload_model else self.device)

                # noise_pred_uncond = latent_model_input[0]
                if offload_model:
                    torch.cuda.empty_cache()
                noise_pred = noise_pred_uncond + guide_scale * (
                    noise_pred_cond - noise_pred_uncond)

                latent = latent.to(
                    torch.device('cpu') if offload_model else self.device)

                temp_x0 = sample_scheduler.step(
                    noise_pred.unsqueeze(0),
                    t,
                    latent.unsqueeze(0),
                    return_dict=False,
                    generator=seed_g)[0]
                latent = temp_x0.squeeze(0)

                x0 = [latent.to(self.device)]
                del latent_model_input, timestep

            if offload_model:
                self.model = self.model.cpu()
                torch.cuda.empty_cache()

            if self.rank == 0:
                videos = self.vae.decode(x0)

        del noise, latent
        del sample_scheduler
        if offload_model:
            gc.collect()
            torch.cuda.synchronize()
        if dist.is_initialized():
            dist.barrier()

        return videos[0] if self.rank == 0 else None
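A worked example, illustrative only, of the latent-shape arithmetic in generate() above, assuming vae_stride=(4, 8, 8) and patch_size=(1, 2, 2) from the i2v_14B config, a 480p-area budget, and no sequence parallelism (sp_size=1).

# Standalone sketch of the lat_h/lat_w/max_seq_len computation in generate().
import math
import numpy as np

max_area, aspect_ratio = 832 * 480, 480 / 832          # example input dimensions
vae_stride, patch_size, F, sp_size = (4, 8, 8), (1, 2, 2), 81, 1

lat_h = round(np.sqrt(max_area * aspect_ratio) // vae_stride[1] //
              patch_size[1] * patch_size[1])
lat_w = round(np.sqrt(max_area / aspect_ratio) // vae_stride[2] //
              patch_size[2] * patch_size[2])
max_seq_len = ((F - 1) // vae_stride[0] + 1) * lat_h * lat_w // (
    patch_size[1] * patch_size[2])
max_seq_len = int(math.ceil(max_seq_len / sp_size)) * sp_size
print(lat_h, lat_w, max_seq_len)  # -> 60 104 32760 for these inputs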
wan/image2video_if_oss.py
ADDED
@@ -0,0 +1,380 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
|
2 |
+
import sys
|
3 |
+
sys.path.append('../OSS')
|
4 |
+
from OSS.OSS import search_OSS_video, infer_OSS
|
5 |
+
from OSS.model_wrap import _WrappedModel_Wan
|
6 |
+
import gc
|
7 |
+
import logging
|
8 |
+
import math
|
9 |
+
import os
|
10 |
+
import random
|
11 |
+
import sys
|
12 |
+
import types
|
13 |
+
from contextlib import contextmanager
|
14 |
+
from functools import partial
|
15 |
+
|
16 |
+
import numpy as np
|
17 |
+
import torch
|
18 |
+
import torch.cuda.amp as amp
|
19 |
+
import torch.distributed as dist
|
20 |
+
import torchvision.transforms.functional as TF
|
21 |
+
from tqdm import tqdm
|
22 |
+
|
23 |
+
from .distributed.fsdp import shard_model
|
24 |
+
from .modules.clip import CLIPModel
|
25 |
+
from .modules.model_infer import WanModel
|
26 |
+
from .modules.t5 import T5EncoderModel
|
27 |
+
from .modules.vae import WanVAE
|
28 |
+
from .utils.fm_solvers import (FlowDPMSolverMultistepScheduler,
|
29 |
+
get_sampling_sigmas, retrieve_timesteps)
|
30 |
+
from .utils.fm_solvers_unipc import FlowUniPCMultistepScheduler
|
31 |
+
|
32 |
+
|
33 |
+
class WanI2V:
|
34 |
+
|
35 |
+
def __init__(
|
36 |
+
self,
|
37 |
+
config,
|
38 |
+
checkpoint_dir,
|
39 |
+
device_id=0,
|
40 |
+
rank=0,
|
41 |
+
t5_fsdp=False,
|
42 |
+
dit_fsdp=False,
|
43 |
+
use_usp=False,
|
44 |
+
t5_cpu=False,
|
45 |
+
init_on_cpu=True,
|
46 |
+
):
|
47 |
+
r"""
|
48 |
+
Initializes the image-to-video generation model components.
|
49 |
+
|
50 |
+
Args:
|
51 |
+
config (EasyDict):
|
52 |
+
Object containing model parameters initialized from config.py
|
53 |
+
checkpoint_dir (`str`):
|
54 |
+
Path to directory containing model checkpoints
|
55 |
+
device_id (`int`, *optional*, defaults to 0):
|
56 |
+
Id of target GPU device
|
57 |
+
rank (`int`, *optional*, defaults to 0):
|
58 |
+
Process rank for distributed training
|
59 |
+
t5_fsdp (`bool`, *optional*, defaults to False):
|
60 |
+
Enable FSDP sharding for T5 model
|
61 |
+
dit_fsdp (`bool`, *optional*, defaults to False):
|
62 |
+
Enable FSDP sharding for DiT model
|
63 |
+
use_usp (`bool`, *optional*, defaults to False):
|
64 |
+
Enable distribution strategy of USP.
|
65 |
+
t5_cpu (`bool`, *optional*, defaults to False):
|
66 |
+
Whether to place T5 model on CPU. Only works without t5_fsdp.
|
67 |
+
init_on_cpu (`bool`, *optional*, defaults to True):
|
68 |
+
Enable initializing Transformer Model on CPU. Only works without FSDP or USP.
|
69 |
+
"""
|
70 |
+
self.device = torch.device(f"cuda:{device_id}")
|
71 |
+
self.config = config
|
72 |
+
self.rank = rank
|
73 |
+
self.use_usp = use_usp
|
74 |
+
self.t5_cpu = t5_cpu
|
75 |
+
|
76 |
+
self.num_train_timesteps = config.num_train_timesteps
|
77 |
+
self.param_dtype = config.param_dtype
|
78 |
+
|
79 |
+
shard_fn = partial(shard_model, device_id=device_id)
|
80 |
+
self.text_encoder = T5EncoderModel(
|
81 |
+
text_len=config.text_len,
|
82 |
+
dtype=config.t5_dtype,
|
83 |
+
device=torch.device('cpu'),
|
84 |
+
checkpoint_path=os.path.join(checkpoint_dir, config.t5_checkpoint),
|
85 |
+
tokenizer_path=os.path.join(checkpoint_dir, config.t5_tokenizer),
|
86 |
+
shard_fn=shard_fn if t5_fsdp else None,
|
87 |
+
)
|
88 |
+
|
89 |
+
self.vae_stride = config.vae_stride
|
90 |
+
self.patch_size = config.patch_size
|
91 |
+
self.vae = WanVAE(
|
92 |
+
vae_pth=os.path.join(checkpoint_dir, config.vae_checkpoint),
|
93 |
+
device=self.device)
|
94 |
+
|
95 |
+
self.clip = CLIPModel(
|
96 |
+
dtype=config.clip_dtype,
|
97 |
+
device=self.device,
|
98 |
+
checkpoint_path=os.path.join(checkpoint_dir,
|
99 |
+
config.clip_checkpoint),
|
100 |
+
tokenizer_path=os.path.join(checkpoint_dir, config.clip_tokenizer))
|
101 |
+
|
102 |
+
logging.info(f"Creating WanModel from {checkpoint_dir}")
|
103 |
+
self.model = WanModel.from_pretrained(checkpoint_dir)
|
104 |
+
self.model.eval().requires_grad_(False)
|
105 |
+
|
106 |
+
if t5_fsdp or dit_fsdp or use_usp:
|
107 |
+
init_on_cpu = False
|
108 |
+
|
109 |
+
if use_usp:
|
110 |
+
from xfuser.core.distributed import \
|
111 |
+
get_sequence_parallel_world_size
|
112 |
+
|
113 |
+
from .distributed.xdit_context_parallel import (usp_attn_forward,
|
114 |
+
usp_dit_forward)
|
115 |
+
for block in self.model.blocks:
|
116 |
+
block.self_attn.forward = types.MethodType(
|
117 |
+
usp_attn_forward, block.self_attn)
|
118 |
+
self.model.forward = types.MethodType(usp_dit_forward, self.model)
|
119 |
+
self.sp_size = get_sequence_parallel_world_size()
|
120 |
+
else:
|
121 |
+
self.sp_size = 1
|
122 |
+
|
123 |
+
if dist.is_initialized():
|
124 |
+
dist.barrier()
|
125 |
+
if dit_fsdp:
|
126 |
+
self.model = shard_fn(self.model)
|
127 |
+
else:
|
128 |
+
if not init_on_cpu:
|
129 |
+
self.model=self.model.to(self.device)
|
130 |
+
|
131 |
+
self.sample_neg_prompt = config.sample_neg_prompt
|
132 |
+
|
133 |
+
def generate(self,
|
134 |
+
input_prompt,
|
135 |
+
img,
|
136 |
+
max_area=720 * 1280,
|
137 |
+
frame_num=81,
|
138 |
+
shift=5.0,
|
139 |
+
sample_solver='unipc',
|
140 |
+
sampling_steps=40,
|
141 |
+
guide_scale=5.0,
|
142 |
+
n_prompt="",
|
143 |
+
seed=-1,
|
144 |
+
offload_model=True, speed=0):
|
145 |
+
r"""
|
146 |
+
Generates video frames from an input image and a text prompt using a diffusion process.
|
147 |
+
|
148 |
+
Args:
|
149 |
+
input_prompt (`str`):
|
150 |
+
Text prompt for content generation.
|
151 |
+
img (PIL.Image.Image):
|
152 |
+
Input image tensor. Shape: [3, H, W]
|
153 |
+
max_area (`int`, *optional*, defaults to 720*1280):
|
154 |
+
Maximum pixel area for latent space calculation. Controls video resolution scaling
|
155 |
+
frame_num (`int`, *optional*, defaults to 81):
|
156 |
+
How many frames to sample from a video. The number should be 4n+1
|
157 |
+
shift (`float`, *optional*, defaults to 5.0):
|
158 |
+
Noise schedule shift parameter. Affects temporal dynamics
|
159 |
+
[NOTE]: If you want to generate a 480p video, it is recommended to set the shift value to 3.0.
|
160 |
+
sample_solver (`str`, *optional*, defaults to 'unipc'):
|
161 |
+
Solver used to sample the video.
|
162 |
+
sampling_steps (`int`, *optional*, defaults to 40):
|
163 |
+
Number of diffusion sampling steps. Higher values improve quality but slow generation
|
164 |
+
guide_scale (`float`, *optional*, defaults to 5.0):
|
165 |
+
Classifier-free guidance scale. Controls prompt adherence vs. creativity
|
166 |
+
n_prompt (`str`, *optional*, defaults to ""):
|
167 |
+
Negative prompt for content exclusion. If not given, use `config.sample_neg_prompt`
|
168 |
+
seed (`int`, *optional*, defaults to -1):
|
169 |
+
Random seed for noise generation. If -1, use random seed
|
170 |
+
offload_model (`bool`, *optional*, defaults to True):
|
171 |
+
If True, offloads models to CPU during generation to save VRAM
|
172 |
+
|
173 |
+
Returns:
|
174 |
+
torch.Tensor:
|
175 |
+
Generated video frames tensor. Dimensions: (C, N, H, W) where:
|
176 |
+
- C: Color channels (3 for RGB)
|
177 |
+
- N: Number of frames (81)
|
178 |
+
- H: Frame height (from max_area)
|
179 |
+
- W: Frame width (from max_area)
|
180 |
+
"""
|
181 |
+
img = TF.to_tensor(img).sub_(0.5).div_(0.5).to(self.device)
|
182 |
+
|
183 |
+
F = frame_num
|
184 |
+
h, w = img.shape[1:]
|
185 |
+
aspect_ratio = h / w
|
186 |
+
lat_h = round(
|
187 |
+
np.sqrt(max_area * aspect_ratio) // self.vae_stride[1] //
|
188 |
+
self.patch_size[1] * self.patch_size[1])
|
189 |
+
lat_w = round(
|
190 |
+
np.sqrt(max_area / aspect_ratio) // self.vae_stride[2] //
|
191 |
+
self.patch_size[2] * self.patch_size[2])
|
192 |
+
h = lat_h * self.vae_stride[1]
|
193 |
+
w = lat_w * self.vae_stride[2]
|
194 |
+
|
195 |
+
max_seq_len = ((F - 1) // self.vae_stride[0] + 1) * lat_h * lat_w // (
|
196 |
+
self.patch_size[1] * self.patch_size[2])
|
197 |
+
max_seq_len = int(math.ceil(max_seq_len / self.sp_size)) * self.sp_size
|
198 |
+
|
199 |
+
seed = seed if seed >= 0 else random.randint(0, sys.maxsize)
|
200 |
+
seed_g = torch.Generator(device=self.device)
|
201 |
+
seed_g.manual_seed(seed)
|
202 |
+
noise = torch.randn(
|
203 |
+
16,
|
204 |
+
F//4+1,
|
205 |
+
lat_h,
|
206 |
+
lat_w,
|
207 |
+
dtype=torch.float32,
|
208 |
+
generator=seed_g,
|
209 |
+
device=self.device)
|
210 |
+
|
211 |
+
msk = torch.ones(1, F, lat_h, lat_w, device=self.device)
|
212 |
+
msk[:, 1:] = 0
|
213 |
+
msk = torch.concat([
|
214 |
+
torch.repeat_interleave(msk[:, 0:1], repeats=4, dim=1), msk[:, 1:]
|
215 |
+
],dim=1)
|
216 |
+
msk = msk.view(1, msk.shape[1] // 4, 4, lat_h, lat_w)
|
217 |
+
msk = msk.transpose(1, 2)[0]
|
218 |
+
|
219 |
+
if n_prompt == "":
|
220 |
+
n_prompt = self.sample_neg_prompt
|
221 |
+
|
222 |
+
# preprocess
|
223 |
+
if not self.t5_cpu:
|
224 |
+
self.text_encoder.model=self.text_encoder.model.to(self.device)
|
225 |
+
context = self.text_encoder([input_prompt], self.device)
|
226 |
+
context_null = self.text_encoder([n_prompt], self.device)
|
227 |
+
if offload_model:
|
228 |
+
self.text_encoder.model=self.text_encoder.model.cpu()
|
229 |
+
else:
|
230 |
+
context = self.text_encoder([input_prompt], torch.device('cpu'))
|
231 |
+
context_null = self.text_encoder([n_prompt], torch.device('cpu'))
|
232 |
+
context = [t.to(self.device) for t in context]
|
233 |
+
context_null = [t.to(self.device) for t in context_null]
|
234 |
+
|
235 |
+
self.clip.model=self.clip.model.to(self.device)
|
236 |
+
clip_context = self.clip.visual([img[:, None, :, :]])
|
237 |
+
if offload_model:
|
238 |
+
self.clip.model=self.clip.model.cpu()
|
239 |
+
torch.cuda.empty_cache()
|
240 |
+
y = self.vae.encode([
|
241 |
+
torch.concat([
|
242 |
+
torch.nn.functional.interpolate(
|
243 |
+
img[None].cpu(), size=(h, w), mode='bicubic').transpose(
|
244 |
+
0, 1),
|
245 |
+
torch.zeros(3, F-1, h, w)
|
246 |
+
],dim=1).to(self.device)
|
247 |
+
])[0]
|
248 |
+
y = torch.concat([msk, y])
|
249 |
+
|
250 |
+
@contextmanager
|
251 |
+
def noop_no_sync():
|
252 |
+
yield
|
253 |
+
|
254 |
+
no_sync = getattr(self.model, 'no_sync', noop_no_sync)
|
255 |
+
|
256 |
+
# evaluation mode
|
257 |
+
with amp.autocast(dtype=self.param_dtype), torch.no_grad(), no_sync():
|
258 |
+
# sample videos
|
259 |
+
latents = latent = noise
|
260 |
+
|
261 |
+
arg_c = {
|
262 |
+
'context': [context[0]],
|
263 |
+
'clip_fea': clip_context,
|
264 |
+
'seq_len': max_seq_len,
|
265 |
+
'y': [y],
|
266 |
+
}
|
267 |
+
|
268 |
+
arg_null = {
|
269 |
+
'context': context_null,
|
270 |
+
'clip_fea': clip_context,
|
271 |
+
'seq_len': max_seq_len,
|
272 |
+
'y': [y],
|
273 |
+
}
|
274 |
+
|
275 |
+
if offload_model:
|
276 |
+
torch.cuda.empty_cache()
|
277 |
+
|
278 |
+
self.model=self.model.to(self.device)
|
279 |
+
if speed==0:
|
280 |
+
if sample_solver == 'unipc':
|
281 |
+
sample_scheduler = FlowUniPCMultistepScheduler(
|
282 |
+
num_train_timesteps=self.num_train_timesteps,
|
283 |
+
shift=1,
|
284 |
+
use_dynamic_shifting=False)
|
285 |
+
sample_scheduler.set_timesteps(
|
286 |
+
sampling_steps, device=self.device, shift=shift)
|
287 |
+
timesteps = sample_scheduler.timesteps
|
288 |
+
elif sample_solver == 'dpm++':
|
289 |
+
sample_scheduler = FlowDPMSolverMultistepScheduler(
|
290 |
+
num_train_timesteps=self.num_train_timesteps,
|
291 |
+
shift=1,
|
292 |
+
use_dynamic_shifting=False)
|
293 |
+
sampling_sigmas = get_sampling_sigmas(sampling_steps, shift)
|
294 |
+
timesteps, _ = retrieve_timesteps(
|
295 |
+
sample_scheduler,
|
296 |
+
device=self.device,
|
297 |
+
sigmas=sampling_sigmas)
|
298 |
+
else:
|
299 |
+
raise NotImplementedError("Unsupported solver.")
|
300 |
+
for _, t in enumerate(tqdm(timesteps)):
|
301 |
+
latent_model_input = [latent.to(self.device)]
|
302 |
+
timestep = [t]
|
303 |
+
|
304 |
+
timestep = torch.stack(timestep).to(self.device)
|
305 |
+
# print(timestep)
|
306 |
+
noise_pred_cond = self.model(latent_model_input, t=timestep, **arg_c)[0].to(torch.device('cpu') if offload_model else self.device)
|
307 |
+
# noise_pred_cond = latent_model_input[0]
|
308 |
+
if offload_model:
|
309 |
+
torch.cuda.empty_cache()
|
310 |
+
noise_pred_uncond = self.model(latent_model_input, t=timestep, **arg_null)[0].to(torch.device('cpu') if offload_model else self.device)
|
311 |
+
|
312 |
+
# noise_pred_uncond = latent_model_input[0]
|
313 |
+
if offload_model:
|
314 |
+
torch.cuda.empty_cache()
|
315 |
+
noise_pred = noise_pred_uncond + guide_scale * (
|
316 |
+
noise_pred_cond - noise_pred_uncond)
|
317 |
+
|
318 |
+
latent = latent.to(
|
319 |
+
torch.device('cpu') if offload_model else self.device)
|
320 |
+
|
321 |
+
temp_x0 = sample_scheduler.step(
|
322 |
+
noise_pred.unsqueeze(0),
|
323 |
+
t,
|
324 |
+
latent.unsqueeze(0),
|
325 |
+
return_dict=False,
|
326 |
+
generator=seed_g)[0]
|
327 |
+
latent = temp_x0.squeeze(0)
|
328 |
+
|
329 |
+
x0 = [latent.to(self.device)]
|
330 |
+
del latent_model_input, timestep
|
331 |
+
else:
|
332 |
+
n_ts=96
|
333 |
+
if sample_solver == 'unipc':
|
334 |
+
sample_scheduler = FlowUniPCMultistepScheduler(
|
335 |
+
num_train_timesteps=self.num_train_timesteps,
|
336 |
+
shift=1,
|
337 |
+
use_dynamic_shifting=False)
|
338 |
+
sample_scheduler.set_timesteps(
|
339 |
+
n_ts, device=self.device, shift=shift)
|
340 |
+
timesteps = sample_scheduler.timesteps
|
341 |
+
elif sample_solver == 'dpm++':
|
342 |
+
sample_scheduler = FlowDPMSolverMultistepScheduler(
|
343 |
+
num_train_timesteps=self.num_train_timesteps,
|
344 |
+
shift=1,
|
345 |
+
use_dynamic_shifting=False)
|
346 |
+
sampling_sigmas = get_sampling_sigmas(n_ts, shift)
|
347 |
+
timesteps, _ = retrieve_timesteps(
|
348 |
+
sample_scheduler,
|
349 |
+
device=self.device,
|
350 |
+
sigmas=sampling_sigmas)
|
351 |
+
else:
|
352 |
+
raise NotImplementedError("Unsupported solver.")
|
353 |
+
# pre-process
|
354 |
+
model = _WrappedModel_Wan(self.model, timesteps, self.num_train_timesteps, context_null, guide_scale)
|
355 |
+
model_kwargs = {
|
356 |
+
'seq_len': max_seq_len,
|
357 |
+
'y': [y],
|
358 |
+
'clip_fea': clip_context,
|
359 |
+
}
|
360 |
+
latents = latents.unsqueeze(0)
|
361 |
+
|
362 |
+
oss_steps = [2, 6, 14, 28, 44, 56, 66, 74, 79, 84, 87, 90, 93, 94, 95, 96]  # oss544Pmed96-16
|
363 |
+
x0 = infer_OSS(oss_steps, model, latents, context, self.device, model_kwargs=model_kwargs)
|
364 |
+
|
365 |
+
if offload_model:
|
366 |
+
self.model=self.model.cpu()
|
367 |
+
torch.cuda.empty_cache()
|
368 |
+
|
369 |
+
if self.rank == 0:
|
370 |
+
videos = self.vae.decode(x0)
|
371 |
+
|
372 |
+
del noise, latent, latents
|
373 |
+
del sample_scheduler
|
374 |
+
if offload_model:
|
375 |
+
gc.collect()
|
376 |
+
torch.cuda.synchronize()
|
377 |
+
if dist.is_initialized():
|
378 |
+
dist.barrier()
|
379 |
+
|
380 |
+
return videos[0] if self.rank == 0 else None
|
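The latent-size arithmetic used in generate() above is easy to check in isolation. Below is a minimal, self-contained sketch (not part of the repo; the helper name latent_shape is made up for illustration), assuming the default Wan strides vae_stride=(4, 8, 8) and patch_size=(1, 2, 2):

import math
import numpy as np

def latent_shape(max_area=720 * 1280, frame_num=81, aspect_ratio=720 / 1280,
                 vae_stride=(4, 8, 8), patch_size=(1, 2, 2), sp_size=1):
    # Latent height/width are snapped down to multiples of the spatial patch size.
    lat_h = round(np.sqrt(max_area * aspect_ratio) // vae_stride[1] //
                  patch_size[1] * patch_size[1])
    lat_w = round(np.sqrt(max_area / aspect_ratio) // vae_stride[2] //
                  patch_size[2] * patch_size[2])
    # Pixel resolution that is actually generated.
    h, w = lat_h * vae_stride[1], lat_w * vae_stride[2]
    # Token count seen by the DiT, padded up to a multiple of the sequence-parallel size.
    seq_len = ((frame_num - 1) // vae_stride[0] + 1) * lat_h * lat_w // (
        patch_size[1] * patch_size[2])
    seq_len = int(math.ceil(seq_len / sp_size)) * sp_size
    return lat_h, lat_w, h, w, seq_len

print(latent_shape())  # (90, 160, 720, 1280, 75600) for 81 frames at 720x1280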
wan/image2video_mdinfer_oss_stu.py
ADDED
@@ -0,0 +1,454 @@
1 |
+
# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
|
2 |
+
import sys
|
3 |
+
sys.path.append('../OSS')
|
4 |
+
from OSS.OSS import search_OSS_video, infer_OSS
|
5 |
+
from OSS.model_wrap import _WrappedModel_Wan
|
6 |
+
import gc
|
7 |
+
import logging
|
8 |
+
import math
|
9 |
+
import os
|
10 |
+
import pdb
|
11 |
+
import random
|
12 |
+
import sys
|
13 |
+
import types
|
14 |
+
from contextlib import contextmanager
|
15 |
+
from functools import partial
|
16 |
+
|
17 |
+
import numpy as np
|
18 |
+
import torch
|
19 |
+
import torch.cuda.amp as amp
|
20 |
+
import torch.distributed as dist
|
21 |
+
import torchvision.transforms.functional as TF
|
22 |
+
from tqdm import tqdm
|
23 |
+
|
24 |
+
from .distributed.fsdp import shard_model
|
25 |
+
from .modules.clip import CLIPModel
|
26 |
+
from .modules.model_infer import WanModel
|
27 |
+
from .modules.t5 import T5EncoderModel
|
28 |
+
from .modules.vae import WanVAE
|
29 |
+
# from .utils.fm_solvers import (FlowDPMSolverMultistepScheduler,get_sampling_sigmas, retrieve_timesteps)
|
30 |
+
from .utils.fm_solvers import (FlowDPMSolverMultistepScheduler)
|
31 |
+
from .utils.fm_solvers_unipc import FlowUniPCMultistepScheduler
|
32 |
+
|
33 |
+
from diffusers import FlowMatchEulerDiscreteScheduler
|
34 |
+
|
35 |
+
import inspect
|
36 |
+
import math
|
37 |
+
from typing import Callable, Dict, List, Optional, Tuple, Union
|
38 |
+
|
39 |
+
import torch
|
40 |
+
import numpy as np
|
41 |
+
import random
|
42 |
+
def set_seed(seed):
|
43 |
+
if seed == -1:
|
44 |
+
seed = random.randint(0, 1000000)
|
45 |
+
seed = int(seed)
|
46 |
+
random.seed(seed)
|
47 |
+
os.environ["PYTHONHASHSEED"] = str(seed)
|
48 |
+
np.random.seed(seed)
|
49 |
+
torch.manual_seed(seed)
|
50 |
+
torch.cuda.manual_seed(seed)
|
51 |
+
class FlowMatchScheduler():
|
52 |
+
|
53 |
+
def __init__(self, num_inference_steps=100, num_train_timesteps=1000, shift=3.0, sigma_max=1.0, sigma_min=0.003 / 1.002, inverse_timesteps=False, extra_one_step=False, reverse_sigmas=False):
|
54 |
+
self.num_train_timesteps = num_train_timesteps
|
55 |
+
self.shift = shift
|
56 |
+
self.sigma_max = sigma_max
|
57 |
+
self.sigma_min = sigma_min
|
58 |
+
self.inverse_timesteps = inverse_timesteps
|
59 |
+
self.extra_one_step = extra_one_step
|
60 |
+
self.reverse_sigmas = reverse_sigmas
|
61 |
+
self.set_timesteps(num_inference_steps)
|
62 |
+
|
63 |
+
def set_timesteps(self, num_inference_steps=100, denoising_strength=1.0, training=False, shift=None):
|
64 |
+
if shift is not None:
|
65 |
+
self.shift = shift
|
66 |
+
sigma_start = self.sigma_min + (self.sigma_max - self.sigma_min) * denoising_strength
|
67 |
+
if self.extra_one_step:
|
68 |
+
self.sigmas = torch.linspace(sigma_start, self.sigma_min, num_inference_steps + 1)[:-1]
|
69 |
+
else:
|
70 |
+
self.sigmas = torch.linspace(sigma_start, self.sigma_min, num_inference_steps)
|
71 |
+
if self.inverse_timesteps:
|
72 |
+
self.sigmas = torch.flip(self.sigmas, dims=[0])
|
73 |
+
self.sigmas = self.shift * self.sigmas / (1 + (self.shift - 1) * self.sigmas)
|
74 |
+
if self.reverse_sigmas:
|
75 |
+
self.sigmas = 1 - self.sigmas
|
76 |
+
self.timesteps = self.sigmas * self.num_train_timesteps
|
77 |
+
if training:
|
78 |
+
x = self.timesteps
|
79 |
+
y = torch.exp(-2 * ((x - num_inference_steps / 2) / num_inference_steps) ** 2)
|
80 |
+
y_shifted = y - y.min()
|
81 |
+
bsmntw_weighing = y_shifted * (num_inference_steps / y_shifted.sum())
|
82 |
+
self.linear_timesteps_weights = bsmntw_weighing
|
83 |
+
|
84 |
+
def step(self, model_output, timestep, sample, to_final=False):
|
85 |
+
if isinstance(timestep, torch.Tensor):
|
86 |
+
timestep = timestep.cpu()
|
87 |
+
timestep_id = torch.argmin((self.timesteps - timestep).abs())
|
88 |
+
sigma = self.sigmas[timestep_id]
|
89 |
+
if to_final or timestep_id + 1 >= len(self.timesteps):
|
90 |
+
sigma_ = 1 if (self.inverse_timesteps or self.reverse_sigmas) else 0
|
91 |
+
else:
|
92 |
+
sigma_ = self.sigmas[timestep_id + 1]
|
93 |
+
prev_sample = sample + model_output * (sigma_ - sigma)
|
94 |
+
return prev_sample
|
95 |
+
|
96 |
+
def return_to_timestep(self, timestep, sample, sample_stablized):
|
97 |
+
if isinstance(timestep, torch.Tensor):
|
98 |
+
timestep = timestep.cpu()
|
99 |
+
timestep_id = torch.argmin((self.timesteps - timestep).abs())
|
100 |
+
sigma = self.sigmas[timestep_id]
|
101 |
+
model_output = (sample - sample_stablized) / sigma
|
102 |
+
return model_output
|
103 |
+
|
104 |
+
def add_noise(self, original_samples, noise, timestep):
|
105 |
+
if isinstance(timestep, torch.Tensor):
|
106 |
+
timestep = timestep.cpu()
|
107 |
+
timestep_id = torch.argmin((self.timesteps - timestep).abs())
|
108 |
+
sigma = self.sigmas[timestep_id]
|
109 |
+
sample = (1 - sigma) * original_samples + sigma * noise
|
110 |
+
return sample
|
111 |
+
|
112 |
+
def training_target(self, sample, noise, timestep):
|
113 |
+
target = noise - sample
|
114 |
+
return target
|
115 |
+
|
116 |
+
def training_weight(self, timestep):
|
117 |
+
timestep_id = torch.argmin((self.timesteps - timestep.to(self.timesteps.device)).abs())
|
118 |
+
weights = self.linear_timesteps_weights[timestep_id]
|
119 |
+
return weights
|
120 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
|
121 |
+
def retrieve_timesteps(
|
122 |
+
scheduler,
|
123 |
+
num_inference_steps: Optional[int] = None,
|
124 |
+
device: Optional[Union[str, torch.device]] = None,
|
125 |
+
timesteps: Optional[List[int]] = None,
|
126 |
+
sigmas: Optional[List[float]] = None,
|
127 |
+
**kwargs,
|
128 |
+
):
|
129 |
+
r"""
|
130 |
+
Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
|
131 |
+
custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
|
132 |
+
|
133 |
+
Args:
|
134 |
+
scheduler (`SchedulerMixin`):
|
135 |
+
The scheduler to get timesteps from.
|
136 |
+
num_inference_steps (`int`):
|
137 |
+
The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
|
138 |
+
must be `None`.
|
139 |
+
device (`str` or `torch.device`, *optional*):
|
140 |
+
The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
|
141 |
+
timesteps (`List[int]`, *optional*):
|
142 |
+
Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
|
143 |
+
`num_inference_steps` and `sigmas` must be `None`.
|
144 |
+
sigmas (`List[float]`, *optional*):
|
145 |
+
Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
|
146 |
+
`num_inference_steps` and `timesteps` must be `None`.
|
147 |
+
|
148 |
+
Returns:
|
149 |
+
`Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
|
150 |
+
second element is the number of inference steps.
|
151 |
+
"""
|
152 |
+
if timesteps is not None and sigmas is not None:
|
153 |
+
raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
|
154 |
+
if timesteps is not None:
|
155 |
+
accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
|
156 |
+
if not accepts_timesteps:
|
157 |
+
raise ValueError(
|
158 |
+
f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
|
159 |
+
f" timestep schedules. Please check whether you are using the correct scheduler."
|
160 |
+
)
|
161 |
+
scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
|
162 |
+
timesteps = scheduler.timesteps
|
163 |
+
num_inference_steps = len(timesteps)
|
164 |
+
elif sigmas is not None:
|
165 |
+
accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
|
166 |
+
if not accept_sigmas:
|
167 |
+
raise ValueError(
|
168 |
+
f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
|
169 |
+
f" sigmas schedules. Please check whether you are using the correct scheduler."
|
170 |
+
)
|
171 |
+
scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
|
172 |
+
timesteps = scheduler.timesteps
|
173 |
+
num_inference_steps = len(timesteps)
|
174 |
+
else:
|
175 |
+
scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
|
176 |
+
timesteps = scheduler.timesteps
|
177 |
+
return timesteps, num_inference_steps
|
178 |
+
|
179 |
+
class WanI2V:
|
180 |
+
|
181 |
+
def __init__(
|
182 |
+
self,
|
183 |
+
config,
|
184 |
+
checkpoint_dir,
|
185 |
+
device_id=0,
|
186 |
+
rank=0,
|
187 |
+
t5_fsdp=False,
|
188 |
+
dit_fsdp=False,
|
189 |
+
use_usp=False,
|
190 |
+
t5_cpu=False,
|
191 |
+
init_on_cpu=True,
|
192 |
+
):
|
193 |
+
r"""
|
194 |
+
Initializes the image-to-video generation model components.
|
195 |
+
|
196 |
+
Args:
|
197 |
+
config (EasyDict):
|
198 |
+
Object containing model parameters initialized from config.py
|
199 |
+
checkpoint_dir (`str`):
|
200 |
+
Path to directory containing model checkpoints
|
201 |
+
device_id (`int`, *optional*, defaults to 0):
|
202 |
+
Id of target GPU device
|
203 |
+
rank (`int`, *optional*, defaults to 0):
|
204 |
+
Process rank for distributed training
|
205 |
+
t5_fsdp (`bool`, *optional*, defaults to False):
|
206 |
+
Enable FSDP sharding for T5 model
|
207 |
+
dit_fsdp (`bool`, *optional*, defaults to False):
|
208 |
+
Enable FSDP sharding for DiT model
|
209 |
+
use_usp (`bool`, *optional*, defaults to False):
|
210 |
+
Enable the USP distribution strategy.
|
211 |
+
t5_cpu (`bool`, *optional*, defaults to False):
|
212 |
+
Whether to place T5 model on CPU. Only works without t5_fsdp.
|
213 |
+
init_on_cpu (`bool`, *optional*, defaults to True):
|
214 |
+
Enable initializing Transformer Model on CPU. Only works without FSDP or USP.
|
215 |
+
"""
|
216 |
+
self.device = torch.device(f"cuda:{device_id}")
|
217 |
+
self.config = config
|
218 |
+
self.rank = rank
|
219 |
+
self.use_usp = use_usp
|
220 |
+
self.t5_cpu = t5_cpu
|
221 |
+
self.scheduler =FlowMatchScheduler(shift=5, sigma_min=0.0, extra_one_step=True)
|
222 |
+
# self.scheduler =FlowMatchScheduler(shift=17, sigma_min=0.0, extra_one_step=True)
|
223 |
+
self.num_train_timesteps = config.num_train_timesteps
|
224 |
+
self.param_dtype = config.param_dtype
|
225 |
+
|
226 |
+
shard_fn = partial(shard_model, device_id=device_id)
|
227 |
+
self.text_encoder = T5EncoderModel(
|
228 |
+
text_len=config.text_len,
|
229 |
+
dtype=config.t5_dtype,
|
230 |
+
device=torch.device('cpu'),
|
231 |
+
checkpoint_path=os.path.join(checkpoint_dir, config.t5_checkpoint),
|
232 |
+
tokenizer_path=os.path.join(checkpoint_dir, config.t5_tokenizer),
|
233 |
+
shard_fn=shard_fn if t5_fsdp else None,
|
234 |
+
)
|
235 |
+
|
236 |
+
self.vae_stride = config.vae_stride
|
237 |
+
self.patch_size = config.patch_size
|
238 |
+
self.vae = WanVAE(
|
239 |
+
vae_pth=os.path.join(checkpoint_dir, config.vae_checkpoint),
|
240 |
+
device=self.device)
|
241 |
+
|
242 |
+
self.clip = CLIPModel(
|
243 |
+
dtype=config.clip_dtype,
|
244 |
+
device=self.device,
|
245 |
+
checkpoint_path=os.path.join(checkpoint_dir,config.clip_checkpoint),
|
246 |
+
tokenizer_path=os.path.join(checkpoint_dir, config.clip_tokenizer))
|
247 |
+
|
248 |
+
logging.info(f"Creating WanModel from {checkpoint_dir}")
|
249 |
+
self.model = WanModel.from_pretrained(checkpoint_dir)
|
250 |
+
self.model.eval().requires_grad_(False)
|
251 |
+
|
252 |
+
if t5_fsdp or dit_fsdp or use_usp:
|
253 |
+
init_on_cpu = False
|
254 |
+
|
255 |
+
if use_usp:
|
256 |
+
from xfuser.core.distributed import \
|
257 |
+
get_sequence_parallel_world_size
|
258 |
+
|
259 |
+
from .distributed.xdit_context_parallel import (usp_attn_forward,usp_dit_forward)
|
260 |
+
for block in self.model.blocks:
|
261 |
+
block.self_attn.forward = types.MethodType(
|
262 |
+
usp_attn_forward, block.self_attn)
|
263 |
+
self.model.forward = types.MethodType(usp_dit_forward, self.model)
|
264 |
+
self.sp_size = get_sequence_parallel_world_size()
|
265 |
+
else:
|
266 |
+
self.sp_size = 1
|
267 |
+
|
268 |
+
if dist.is_initialized():
|
269 |
+
dist.barrier()
|
270 |
+
if dit_fsdp:
|
271 |
+
self.model = shard_fn(self.model)
|
272 |
+
else:
|
273 |
+
if not init_on_cpu:
|
274 |
+
self.model=self.model.to(self.device)
|
275 |
+
|
276 |
+
self.sample_neg_prompt = config.sample_neg_prompt
|
277 |
+
|
278 |
+
|
279 |
+
def generate(self,
|
280 |
+
input_prompt,
|
281 |
+
img,
|
282 |
+
max_area=720 * 1280,
|
283 |
+
frame_num=81,
|
284 |
+
shift=5.0,
|
285 |
+
sample_solver='unipc',
|
286 |
+
sampling_steps=40,
|
287 |
+
guide_scale=5.0,
|
288 |
+
n_prompt="",
|
289 |
+
seed=-1,
|
290 |
+
offload_model=True,
|
291 |
+
|
292 |
+
student_steps=20,
|
293 |
+
norm=2,
|
294 |
+
frame_type="all",
|
295 |
+
channel_type="all",
|
296 |
+
random_channel=False,
|
297 |
+
):
|
298 |
+
r"""
|
299 |
+
Generates video frames from an input image and a text prompt using a diffusion process.
|
300 |
+
|
301 |
+
Args:
|
302 |
+
input_prompt (`str`):
|
303 |
+
Text prompt for content generation.
|
304 |
+
img (PIL.Image.Image):
|
305 |
+
Input image tensor. Shape: [3, H, W]
|
306 |
+
max_area (`int`, *optional*, defaults to 720*1280):
|
307 |
+
Maximum pixel area for latent space calculation. Controls video resolution scaling
|
308 |
+
frame_num (`int`, *optional*, defaults to 81):
|
309 |
+
How many frames to sample from a video. The number should be 4n+1
|
310 |
+
shift (`float`, *optional*, defaults to 5.0):
|
311 |
+
Noise schedule shift parameter. Affects temporal dynamics
|
312 |
+
[NOTE]: If you want to generate a 480p video, it is recommended to set the shift value to 3.0.
|
313 |
+
sample_solver (`str`, *optional*, defaults to 'unipc'):
|
314 |
+
Solver used to sample the video.
|
315 |
+
sampling_steps (`int`, *optional*, defaults to 40):
|
316 |
+
Number of diffusion sampling steps. Higher values improve quality but slow generation
|
317 |
+
guide_scale (`float`, *optional*, defaults to 5.0):
|
318 |
+
Classifier-free guidance scale. Controls prompt adherence vs. creativity
|
319 |
+
n_prompt (`str`, *optional*, defaults to ""):
|
320 |
+
Negative prompt for content exclusion. If not given, use `config.sample_neg_prompt`
|
321 |
+
seed (`int`, *optional*, defaults to -1):
|
322 |
+
Random seed for noise generation. If -1, use random seed
|
323 |
+
offload_model (`bool`, *optional*, defaults to True):
|
324 |
+
If True, offloads models to CPU during generation to save VRAM
|
325 |
+
|
326 |
+
Returns:
|
327 |
+
torch.Tensor:
|
328 |
+
Generated video frames tensor. Dimensions: (C, N, H, W) where:
|
329 |
+
- C: Color channels (3 for RGB)
|
330 |
+
- N: Number of frames (81)
|
331 |
+
- H: Frame height (from max_area)
|
332 |
+
- W: Frame width (from max_area)
|
333 |
+
"""
|
334 |
+
img = TF.to_tensor(img).sub_(0.5).div_(0.5).to(self.device)
|
335 |
+
|
336 |
+
F = frame_num
|
337 |
+
h, w = img.shape[1:]
|
338 |
+
aspect_ratio = h / w
|
339 |
+
lat_h = round(
|
340 |
+
np.sqrt(max_area * aspect_ratio) // self.vae_stride[1] //
|
341 |
+
self.patch_size[1] * self.patch_size[1])
|
342 |
+
lat_w = round(
|
343 |
+
np.sqrt(max_area / aspect_ratio) // self.vae_stride[2] //
|
344 |
+
self.patch_size[2] * self.patch_size[2])
|
345 |
+
h = lat_h * self.vae_stride[1]
|
346 |
+
w = lat_w * self.vae_stride[2]
|
347 |
+
|
348 |
+
max_seq_len = ((F - 1) // self.vae_stride[0] + 1) * lat_h * lat_w // (
|
349 |
+
self.patch_size[1] * self.patch_size[2])
|
350 |
+
max_seq_len = int(math.ceil(max_seq_len / self.sp_size)) * self.sp_size
|
351 |
+
|
352 |
+
seed = seed if seed >= 0 else random.randint(0, sys.maxsize)
|
353 |
+
if seed >= 0:
|
354 |
+
set_seed(seed)
|
355 |
+
seed_g = torch.Generator(device=self.device)
|
356 |
+
seed_g.manual_seed(seed)
|
357 |
+
noise = torch.randn(
|
358 |
+
16,
|
359 |
+
F//4+1,
|
360 |
+
lat_h,
|
361 |
+
lat_w,
|
362 |
+
dtype=torch.float32,
|
363 |
+
generator=seed_g,
|
364 |
+
device=self.device)
|
365 |
+
|
366 |
+
msk = torch.ones(1, F, lat_h, lat_w, device=self.device)
|
367 |
+
msk[:, 1:] = 0
|
368 |
+
msk = torch.concat([
|
369 |
+
torch.repeat_interleave(msk[:, 0:1], repeats=4, dim=1), msk[:, 1:]
|
370 |
+
],dim=1)
|
371 |
+
msk = msk.view(1, msk.shape[1] // 4, 4, lat_h, lat_w)
|
372 |
+
msk = msk.transpose(1, 2)[0]
|
373 |
+
|
374 |
+
if n_prompt == "":
|
375 |
+
n_prompt = self.sample_neg_prompt
|
376 |
+
|
377 |
+
# preprocess
|
378 |
+
if not self.t5_cpu:
|
379 |
+
self.text_encoder.model=self.text_encoder.model.to(self.device)
|
380 |
+
context = self.text_encoder([input_prompt], self.device)
|
381 |
+
context_null = self.text_encoder([n_prompt], self.device)
|
382 |
+
if offload_model:
|
383 |
+
self.text_encoder.model=self.text_encoder.model.cpu()
|
384 |
+
else:
|
385 |
+
context = self.text_encoder([input_prompt], torch.device('cpu'))
|
386 |
+
context_null = self.text_encoder([n_prompt], torch.device('cpu'))
|
387 |
+
context = [t.to(self.device) for t in context]
|
388 |
+
context_null = [t.to(self.device) for t in context_null]
|
389 |
+
|
390 |
+
self.clip.model=self.clip.model.to(self.device)
|
391 |
+
clip_context = self.clip.visual([img[:, None, :, :]])
|
392 |
+
if offload_model:
|
393 |
+
self.clip.model=self.clip.model.cpu()
|
394 |
+
torch.cuda.empty_cache()
|
395 |
+
y = self.vae.encode([
|
396 |
+
torch.concat([
|
397 |
+
torch.nn.functional.interpolate(
|
398 |
+
img[None].cpu(), size=(h, w), mode='bicubic').transpose(
|
399 |
+
0, 1),
|
400 |
+
torch.zeros(3, F-1, h, w)
|
401 |
+
],dim=1).to(self.device)
|
402 |
+
])[0]
|
403 |
+
y = torch.concat([msk, y])
|
404 |
+
|
405 |
+
@contextmanager
|
406 |
+
def noop_no_sync():
|
407 |
+
yield
|
408 |
+
|
409 |
+
no_sync = getattr(self.model, 'no_sync', noop_no_sync)
|
410 |
+
|
411 |
+
# sampling_steps=10
|
412 |
+
# evaluation mode
|
413 |
+
with amp.autocast(dtype=self.param_dtype), torch.no_grad(), no_sync():
|
414 |
+
device = self.device
|
415 |
+
num_inference_steps=sampling_steps
|
416 |
+
self.scheduler.set_timesteps(num_inference_steps, 1.0, shift=5.0)
|
417 |
+
|
418 |
+
# sample videos
|
419 |
+
latents = noise
|
420 |
+
if offload_model:
|
421 |
+
torch.cuda.empty_cache()
|
422 |
+
|
423 |
+
self.model=self.model.to(self.device)
|
424 |
+
|
425 |
+
# pre-process
|
426 |
+
model = _WrappedModel_Wan(self.model, self.scheduler.timesteps, self.num_train_timesteps, context_null, guide_scale)
|
427 |
+
model_kwargs = {
|
428 |
+
'seq_len': max_seq_len,
|
429 |
+
'y': [y],
|
430 |
+
'clip_fea': clip_context,
|
431 |
+
}
|
432 |
+
latents = latents.unsqueeze(0)
|
433 |
+
|
434 |
+
oss_steps = [2, 6, 14, 28, 44, 56, 66, 74, 79, 84, 87, 90, 93, 94, 95, 96]  # oss544Pmed96-16
|
435 |
+
# oss_steps=[2, 5, 11, 21, 36, 49, 63, 71, 80, 84, 87, 89, 92, 94, 95, 96]####oss544Pmed96-16
|
436 |
+
latents_oss = infer_OSS(oss_steps, model, latents, context, self.device, model_kwargs=model_kwargs)
|
437 |
+
|
438 |
+
x0_oss = latents_oss
|
439 |
+
|
440 |
+
if offload_model:
|
441 |
+
self.model.cpu()
|
442 |
+
torch.cuda.empty_cache()
|
443 |
+
if self.rank == 0:
|
444 |
+
videos_oss = self.vae.decode(x0_oss)
|
445 |
+
|
446 |
+
del noise, latents
|
447 |
+
# del self.scheduler
|
448 |
+
if offload_model:
|
449 |
+
gc.collect()
|
450 |
+
torch.cuda.synchronize()
|
451 |
+
if dist.is_initialized():
|
452 |
+
dist.barrier()
|
453 |
+
|
454 |
+
return videos_oss[0] if self.rank == 0 else None
|
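The FlowMatchScheduler defined above reduces to a shifted linear sigma schedule. A standalone sketch of just that transform, assuming the defaults used in this file (1000 training timesteps, shift=5.0, sigma_min=0.0, extra_one_step=True, denoising_strength=1.0):

import torch

def shifted_sigmas(num_steps=16, shift=5.0, sigma_max=1.0, sigma_min=0.0,
                   num_train_timesteps=1000, extra_one_step=True):
    if extra_one_step:
        sigmas = torch.linspace(sigma_max, sigma_min, num_steps + 1)[:-1]
    else:
        sigmas = torch.linspace(sigma_max, sigma_min, num_steps)
    # Flow-matching shift: sigma' = s * sigma / (1 + (s - 1) * sigma); shift > 1 keeps
    # the schedule in the high-noise region for more of the steps.
    sigmas = shift * sigmas / (1 + (shift - 1) * sigmas)
    return sigmas, sigmas * num_train_timesteps  # sigmas and the matching timesteps

sigmas, timesteps = shifted_sigmas()
print(timesteps)  # 16 timesteps in (0, 1000], front-loaded toward the high-noise end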
wan/image2video_mdinfer_oss_tea.py
ADDED
@@ -0,0 +1,513 @@
1 |
+
# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
|
2 |
+
import sys,os
|
3 |
+
sys.path.append('../OSS')
|
4 |
+
from OSS.OSS import search_OSS_video, infer_OSS
|
5 |
+
from OSS.model_wrap import _WrappedModel_Wan
|
6 |
+
import gc
|
7 |
+
import logging
|
8 |
+
import math
|
9 |
+
import os
|
10 |
+
import pdb
|
11 |
+
import random
|
12 |
+
import sys
|
13 |
+
import types
|
14 |
+
from contextlib import contextmanager
|
15 |
+
from functools import partial
|
16 |
+
|
17 |
+
import numpy as np
|
18 |
+
import torch
|
19 |
+
import torch.cuda.amp as amp
|
20 |
+
import torch.distributed as dist
|
21 |
+
import torchvision.transforms.functional as TF
|
22 |
+
from tqdm import tqdm
|
23 |
+
|
24 |
+
from .distributed.fsdp import shard_model
|
25 |
+
from .modules.clip import CLIPModel
|
26 |
+
from .modules.model_infer import WanModel
|
27 |
+
from .modules.t5 import T5EncoderModel
|
28 |
+
from .modules.vae import WanVAE
|
29 |
+
# from .utils.fm_solvers import (FlowDPMSolverMultistepScheduler,get_sampling_sigmas, retrieve_timesteps)
|
30 |
+
from .utils.fm_solvers import (FlowDPMSolverMultistepScheduler)
|
31 |
+
from .utils.fm_solvers_unipc import FlowUniPCMultistepScheduler
|
32 |
+
|
33 |
+
from diffusers import FlowMatchEulerDiscreteScheduler
|
34 |
+
|
35 |
+
import inspect
|
36 |
+
import math
|
37 |
+
from typing import Callable, Dict, List, Optional, Tuple, Union
|
38 |
+
|
39 |
+
import torch
|
40 |
+
import numpy as np
|
41 |
+
import random
|
42 |
+
def set_seed(seed):
|
43 |
+
if seed == -1:
|
44 |
+
seed = random.randint(0, 1000000)
|
45 |
+
seed = int(seed)
|
46 |
+
random.seed(seed)
|
47 |
+
os.environ["PYTHONHASHSEED"] = str(seed)
|
48 |
+
np.random.seed(seed)
|
49 |
+
torch.manual_seed(seed)
|
50 |
+
torch.cuda.manual_seed(seed)
|
51 |
+
class FlowMatchScheduler():
|
52 |
+
|
53 |
+
def __init__(self, num_inference_steps=100, num_train_timesteps=1000, shift=3.0, sigma_max=1.0, sigma_min=0.003 / 1.002, inverse_timesteps=False, extra_one_step=False, reverse_sigmas=False):
|
54 |
+
self.num_train_timesteps = num_train_timesteps
|
55 |
+
self.shift = shift
|
56 |
+
self.sigma_max = sigma_max
|
57 |
+
self.sigma_min = sigma_min
|
58 |
+
self.inverse_timesteps = inverse_timesteps
|
59 |
+
self.extra_one_step = extra_one_step
|
60 |
+
self.reverse_sigmas = reverse_sigmas
|
61 |
+
self.set_timesteps(num_inference_steps)
|
62 |
+
|
63 |
+
def set_timesteps(self, num_inference_steps=100, denoising_strength=1.0, training=False, shift=None):
|
64 |
+
if shift is not None:
|
65 |
+
self.shift = shift
|
66 |
+
sigma_start = self.sigma_min + (self.sigma_max - self.sigma_min) * denoising_strength
|
67 |
+
if self.extra_one_step:
|
68 |
+
self.sigmas = torch.linspace(sigma_start, self.sigma_min, num_inference_steps + 1)[:-1]
|
69 |
+
else:
|
70 |
+
self.sigmas = torch.linspace(sigma_start, self.sigma_min, num_inference_steps)
|
71 |
+
if self.inverse_timesteps:
|
72 |
+
self.sigmas = torch.flip(self.sigmas, dims=[0])
|
73 |
+
self.sigmas = self.shift * self.sigmas / (1 + (self.shift - 1) * self.sigmas)
|
74 |
+
if self.reverse_sigmas:
|
75 |
+
self.sigmas = 1 - self.sigmas
|
76 |
+
self.timesteps = self.sigmas * self.num_train_timesteps
|
77 |
+
if training:
|
78 |
+
x = self.timesteps
|
79 |
+
y = torch.exp(-2 * ((x - num_inference_steps / 2) / num_inference_steps) ** 2)
|
80 |
+
y_shifted = y - y.min()
|
81 |
+
bsmntw_weighing = y_shifted * (num_inference_steps / y_shifted.sum())
|
82 |
+
self.linear_timesteps_weights = bsmntw_weighing
|
83 |
+
|
84 |
+
def step(self, model_output, timestep, sample, to_final=False):
|
85 |
+
if isinstance(timestep, torch.Tensor):
|
86 |
+
timestep = timestep.cpu()
|
87 |
+
timestep_id = torch.argmin((self.timesteps - timestep).abs())
|
88 |
+
sigma = self.sigmas[timestep_id]
|
89 |
+
if to_final or timestep_id + 1 >= len(self.timesteps):
|
90 |
+
sigma_ = 1 if (self.inverse_timesteps or self.reverse_sigmas) else 0
|
91 |
+
else:
|
92 |
+
sigma_ = self.sigmas[timestep_id + 1]
|
93 |
+
prev_sample = sample + model_output * (sigma_ - sigma)
|
94 |
+
return prev_sample
|
95 |
+
|
96 |
+
def return_to_timestep(self, timestep, sample, sample_stablized):
|
97 |
+
if isinstance(timestep, torch.Tensor):
|
98 |
+
timestep = timestep.cpu()
|
99 |
+
timestep_id = torch.argmin((self.timesteps - timestep).abs())
|
100 |
+
sigma = self.sigmas[timestep_id]
|
101 |
+
model_output = (sample - sample_stablized) / sigma
|
102 |
+
return model_output
|
103 |
+
|
104 |
+
def add_noise(self, original_samples, noise, timestep):
|
105 |
+
if isinstance(timestep, torch.Tensor):
|
106 |
+
timestep = timestep.cpu()
|
107 |
+
timestep_id = torch.argmin((self.timesteps - timestep).abs())
|
108 |
+
sigma = self.sigmas[timestep_id]
|
109 |
+
sample = (1 - sigma) * original_samples + sigma * noise
|
110 |
+
return sample
|
111 |
+
|
112 |
+
def training_target(self, sample, noise, timestep):
|
113 |
+
target = noise - sample
|
114 |
+
return target
|
115 |
+
|
116 |
+
def training_weight(self, timestep):
|
117 |
+
timestep_id = torch.argmin((self.timesteps - timestep.to(self.timesteps.device)).abs())
|
118 |
+
weights = self.linear_timesteps_weights[timestep_id]
|
119 |
+
return weights
|
120 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
|
121 |
+
def retrieve_timesteps(
|
122 |
+
scheduler,
|
123 |
+
num_inference_steps: Optional[int] = None,
|
124 |
+
device: Optional[Union[str, torch.device]] = None,
|
125 |
+
timesteps: Optional[List[int]] = None,
|
126 |
+
sigmas: Optional[List[float]] = None,
|
127 |
+
**kwargs,
|
128 |
+
):
|
129 |
+
r"""
|
130 |
+
Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
|
131 |
+
custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
|
132 |
+
|
133 |
+
Args:
|
134 |
+
scheduler (`SchedulerMixin`):
|
135 |
+
The scheduler to get timesteps from.
|
136 |
+
num_inference_steps (`int`):
|
137 |
+
The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
|
138 |
+
must be `None`.
|
139 |
+
device (`str` or `torch.device`, *optional*):
|
140 |
+
The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
|
141 |
+
timesteps (`List[int]`, *optional*):
|
142 |
+
Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
|
143 |
+
`num_inference_steps` and `sigmas` must be `None`.
|
144 |
+
sigmas (`List[float]`, *optional*):
|
145 |
+
Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
|
146 |
+
`num_inference_steps` and `timesteps` must be `None`.
|
147 |
+
|
148 |
+
Returns:
|
149 |
+
`Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
|
150 |
+
second element is the number of inference steps.
|
151 |
+
"""
|
152 |
+
if timesteps is not None and sigmas is not None:
|
153 |
+
raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
|
154 |
+
if timesteps is not None:
|
155 |
+
accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
|
156 |
+
if not accepts_timesteps:
|
157 |
+
raise ValueError(
|
158 |
+
f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
|
159 |
+
f" timestep schedules. Please check whether you are using the correct scheduler."
|
160 |
+
)
|
161 |
+
scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
|
162 |
+
timesteps = scheduler.timesteps
|
163 |
+
num_inference_steps = len(timesteps)
|
164 |
+
elif sigmas is not None:
|
165 |
+
accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
|
166 |
+
if not accept_sigmas:
|
167 |
+
raise ValueError(
|
168 |
+
f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
|
169 |
+
f" sigmas schedules. Please check whether you are using the correct scheduler."
|
170 |
+
)
|
171 |
+
scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
|
172 |
+
timesteps = scheduler.timesteps
|
173 |
+
num_inference_steps = len(timesteps)
|
174 |
+
else:
|
175 |
+
scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
|
176 |
+
timesteps = scheduler.timesteps
|
177 |
+
return timesteps, num_inference_steps
|
178 |
+
|
179 |
+
class WanI2V:
|
180 |
+
|
181 |
+
def __init__(
|
182 |
+
self,
|
183 |
+
config,
|
184 |
+
checkpoint_dir,
|
185 |
+
device_id=0,
|
186 |
+
rank=0,
|
187 |
+
t5_fsdp=False,
|
188 |
+
dit_fsdp=False,
|
189 |
+
use_usp=False,
|
190 |
+
t5_cpu=False,
|
191 |
+
init_on_cpu=True,
|
192 |
+
):
|
193 |
+
r"""
|
194 |
+
Initializes the image-to-video generation model components.
|
195 |
+
|
196 |
+
Args:
|
197 |
+
config (EasyDict):
|
198 |
+
Object containing model parameters initialized from config.py
|
199 |
+
checkpoint_dir (`str`):
|
200 |
+
Path to directory containing model checkpoints
|
201 |
+
device_id (`int`, *optional*, defaults to 0):
|
202 |
+
Id of target GPU device
|
203 |
+
rank (`int`, *optional*, defaults to 0):
|
204 |
+
Process rank for distributed training
|
205 |
+
t5_fsdp (`bool`, *optional*, defaults to False):
|
206 |
+
Enable FSDP sharding for T5 model
|
207 |
+
dit_fsdp (`bool`, *optional*, defaults to False):
|
208 |
+
Enable FSDP sharding for DiT model
|
209 |
+
use_usp (`bool`, *optional*, defaults to False):
|
210 |
+
Enable the USP distribution strategy.
|
211 |
+
t5_cpu (`bool`, *optional*, defaults to False):
|
212 |
+
Whether to place T5 model on CPU. Only works without t5_fsdp.
|
213 |
+
init_on_cpu (`bool`, *optional*, defaults to True):
|
214 |
+
Enable initializing Transformer Model on CPU. Only works without FSDP or USP.
|
215 |
+
"""
|
216 |
+
self.device = torch.device(f"cuda:{device_id}")
|
217 |
+
self.config = config
|
218 |
+
self.rank = rank
|
219 |
+
self.use_usp = use_usp
|
220 |
+
self.t5_cpu = t5_cpu
|
221 |
+
self.scheduler =FlowMatchScheduler(shift=5, sigma_min=0.0, extra_one_step=True)
|
222 |
+
# self.scheduler =FlowMatchScheduler(shift=17, sigma_min=0.0, extra_one_step=True)
|
223 |
+
self.num_train_timesteps = config.num_train_timesteps
|
224 |
+
self.param_dtype = config.param_dtype
|
225 |
+
|
226 |
+
shard_fn = partial(shard_model, device_id=device_id)
|
227 |
+
self.text_encoder = T5EncoderModel(
|
228 |
+
text_len=config.text_len,
|
229 |
+
dtype=config.t5_dtype,
|
230 |
+
device=torch.device('cpu'),
|
231 |
+
checkpoint_path=os.path.join(checkpoint_dir, config.t5_checkpoint),
|
232 |
+
tokenizer_path=os.path.join(checkpoint_dir, config.t5_tokenizer),
|
233 |
+
shard_fn=shard_fn if t5_fsdp else None,
|
234 |
+
)
|
235 |
+
|
236 |
+
self.vae_stride = config.vae_stride
|
237 |
+
self.patch_size = config.patch_size
|
238 |
+
self.vae = WanVAE(
|
239 |
+
vae_pth=os.path.join(checkpoint_dir, config.vae_checkpoint),
|
240 |
+
device=self.device)
|
241 |
+
|
242 |
+
self.clip = CLIPModel(
|
243 |
+
dtype=config.clip_dtype,
|
244 |
+
device=self.device,
|
245 |
+
checkpoint_path=os.path.join(checkpoint_dir,config.clip_checkpoint),
|
246 |
+
tokenizer_path=os.path.join(checkpoint_dir, config.clip_tokenizer))
|
247 |
+
|
248 |
+
logging.info(f"Creating WanModel from {checkpoint_dir}")
|
249 |
+
self.model = WanModel.from_pretrained(checkpoint_dir)
|
250 |
+
self.model.eval().requires_grad_(False)
|
251 |
+
|
252 |
+
if t5_fsdp or dit_fsdp or use_usp:
|
253 |
+
init_on_cpu = False
|
254 |
+
|
255 |
+
if use_usp:
|
256 |
+
from xfuser.core.distributed import \
|
257 |
+
get_sequence_parallel_world_size
|
258 |
+
|
259 |
+
from .distributed.xdit_context_parallel import (usp_attn_forward,usp_dit_forward)
|
260 |
+
for block in self.model.blocks:
|
261 |
+
block.self_attn.forward = types.MethodType(
|
262 |
+
usp_attn_forward, block.self_attn)
|
263 |
+
self.model.forward = types.MethodType(usp_dit_forward, self.model)
|
264 |
+
self.sp_size = get_sequence_parallel_world_size()
|
265 |
+
else:
|
266 |
+
self.sp_size = 1
|
267 |
+
|
268 |
+
if dist.is_initialized():
|
269 |
+
dist.barrier()
|
270 |
+
if dit_fsdp:
|
271 |
+
self.model = shard_fn(self.model)
|
272 |
+
else:
|
273 |
+
if not init_on_cpu:
|
274 |
+
self.model=self.model.to(self.device)
|
275 |
+
|
276 |
+
self.sample_neg_prompt = config.sample_neg_prompt
|
277 |
+
|
278 |
+
|
279 |
+
def generate(self,
|
280 |
+
args,
|
281 |
+
input_prompt,
|
282 |
+
img,
|
283 |
+
max_area=720 * 1280,
|
284 |
+
frame_num=81,
|
285 |
+
shift=5.0,
|
286 |
+
sample_solver='unipc',
|
287 |
+
sampling_steps=40,
|
288 |
+
guide_scale=5.0,
|
289 |
+
n_prompt="",
|
290 |
+
seed=-1,
|
291 |
+
offload_model=True,
|
292 |
+
|
293 |
+
student_steps=20,
|
294 |
+
norm=2,
|
295 |
+
frame_type="all",
|
296 |
+
channel_type="all",
|
297 |
+
random_channel=False,
|
298 |
+
):
|
299 |
+
r"""
|
300 |
+
Generates video frames from an input image and a text prompt using a diffusion process.
|
301 |
+
|
302 |
+
Args:
|
303 |
+
input_prompt (`str`):
|
304 |
+
Text prompt for content generation.
|
305 |
+
img (PIL.Image.Image):
|
306 |
+
Input image tensor. Shape: [3, H, W]
|
307 |
+
max_area (`int`, *optional*, defaults to 720*1280):
|
308 |
+
Maximum pixel area for latent space calculation. Controls video resolution scaling
|
309 |
+
frame_num (`int`, *optional*, defaults to 81):
|
310 |
+
How many frames to sample from a video. The number should be 4n+1
|
311 |
+
shift (`float`, *optional*, defaults to 5.0):
|
312 |
+
Noise schedule shift parameter. Affects temporal dynamics
|
313 |
+
[NOTE]: If you want to generate a 480p video, it is recommended to set the shift value to 3.0.
|
314 |
+
sample_solver (`str`, *optional*, defaults to 'unipc'):
|
315 |
+
Solver used to sample the video.
|
316 |
+
sampling_steps (`int`, *optional*, defaults to 40):
|
317 |
+
Number of diffusion sampling steps. Higher values improve quality but slow generation
|
318 |
+
guide_scale (`float`, *optional*, defaults to 5.0):
|
319 |
+
Classifier-free guidance scale. Controls prompt adherence vs. creativity
|
320 |
+
n_prompt (`str`, *optional*, defaults to ""):
|
321 |
+
Negative prompt for content exclusion. If not given, use `config.sample_neg_prompt`
|
322 |
+
seed (`int`, *optional*, defaults to -1):
|
323 |
+
Random seed for noise generation. If -1, use random seed
|
324 |
+
offload_model (`bool`, *optional*, defaults to True):
|
325 |
+
If True, offloads models to CPU during generation to save VRAM
|
326 |
+
|
327 |
+
Returns:
|
328 |
+
torch.Tensor:
|
329 |
+
Generated video frames tensor. Dimensions: (C, N, H, W) where:
|
330 |
+
- C: Color channels (3 for RGB)
|
331 |
+
- N: Number of frames (81)
|
332 |
+
- H: Frame height (from max_area)
|
333 |
+
- W: Frame width (from max_area)
|
334 |
+
"""
|
335 |
+
img = TF.to_tensor(img).sub_(0.5).div_(0.5).to(self.device)
|
336 |
+
|
337 |
+
F = frame_num
|
338 |
+
h, w = img.shape[1:]
|
339 |
+
aspect_ratio = h / w
|
340 |
+
lat_h = round(
|
341 |
+
np.sqrt(max_area * aspect_ratio) // self.vae_stride[1] //
|
342 |
+
self.patch_size[1] * self.patch_size[1])
|
343 |
+
lat_w = round(
|
344 |
+
np.sqrt(max_area / aspect_ratio) // self.vae_stride[2] //
|
345 |
+
self.patch_size[2] * self.patch_size[2])
|
346 |
+
h = lat_h * self.vae_stride[1]
|
347 |
+
w = lat_w * self.vae_stride[2]
|
348 |
+
|
349 |
+
max_seq_len = ((F - 1) // self.vae_stride[0] + 1) * lat_h * lat_w // (
|
350 |
+
self.patch_size[1] * self.patch_size[2])
|
351 |
+
max_seq_len = int(math.ceil(max_seq_len / self.sp_size)) * self.sp_size
|
352 |
+
|
353 |
+
seed = seed if seed >= 0 else random.randint(0, sys.maxsize)
|
354 |
+
if seed >= 0:
|
355 |
+
set_seed(seed)
|
356 |
+
seed_g = torch.Generator(device=self.device)
|
357 |
+
seed_g.manual_seed(seed)
|
358 |
+
noise = torch.randn(
|
359 |
+
16,
|
360 |
+
F//4+1,
|
361 |
+
lat_h,
|
362 |
+
lat_w,
|
363 |
+
dtype=torch.float32,
|
364 |
+
generator=seed_g,
|
365 |
+
device=self.device)
|
366 |
+
|
367 |
+
msk = torch.ones(1, F, lat_h, lat_w, device=self.device)
|
368 |
+
msk[:, 1:] = 0
|
369 |
+
msk = torch.concat([
|
370 |
+
torch.repeat_interleave(msk[:, 0:1], repeats=4, dim=1), msk[:, 1:]
|
371 |
+
],dim=1)
|
372 |
+
msk = msk.view(1, msk.shape[1] // 4, 4, lat_h, lat_w)
|
373 |
+
msk = msk.transpose(1, 2)[0]
|
374 |
+
|
375 |
+
if n_prompt == "":
|
376 |
+
n_prompt = self.sample_neg_prompt
|
377 |
+
|
378 |
+
# preprocess
|
379 |
+
if not self.t5_cpu:
|
380 |
+
self.text_encoder.model=self.text_encoder.model.to(self.device)
|
381 |
+
context = self.text_encoder([input_prompt], self.device)
|
382 |
+
context_null = self.text_encoder([n_prompt], self.device)
|
383 |
+
if offload_model:
|
384 |
+
self.text_encoder.model=self.text_encoder.model.cpu()
|
385 |
+
else:
|
386 |
+
context = self.text_encoder([input_prompt], torch.device('cpu'))
|
387 |
+
context_null = self.text_encoder([n_prompt], torch.device('cpu'))
|
388 |
+
context = [t.to(self.device) for t in context]
|
389 |
+
context_null = [t.to(self.device) for t in context_null]
|
390 |
+
|
391 |
+
self.clip.model=self.clip.model.to(self.device)
|
392 |
+
clip_context = self.clip.visual([img[:, None, :, :]])
|
393 |
+
if offload_model:
|
394 |
+
self.clip.model=self.clip.model.cpu()
|
395 |
+
torch.cuda.empty_cache()
|
396 |
+
y = self.vae.encode([
|
397 |
+
torch.concat([
|
398 |
+
torch.nn.functional.interpolate(
|
399 |
+
img[None].cpu(), size=(h, w), mode='bicubic').transpose(
|
400 |
+
0, 1),
|
401 |
+
torch.zeros(3, F-1, h, w)
|
402 |
+
],dim=1).to(self.device)
|
403 |
+
])[0]
|
404 |
+
y = torch.concat([msk, y])
|
405 |
+
|
406 |
+
@contextmanager
|
407 |
+
def noop_no_sync():
|
408 |
+
yield
|
409 |
+
|
410 |
+
no_sync = getattr(self.model, 'no_sync', noop_no_sync)
|
411 |
+
|
412 |
+
# sampling_steps=10
|
413 |
+
# evaluation mode
|
414 |
+
with amp.autocast(dtype=self.param_dtype), torch.no_grad(), no_sync():
|
415 |
+
device = self.device
|
416 |
+
num_inference_steps=sampling_steps
|
417 |
+
self.scheduler.set_timesteps(num_inference_steps, 1.0, shift=5.0)
|
418 |
+
|
419 |
+
# sample videos
|
420 |
+
latents = noise
|
421 |
+
if offload_model:
|
422 |
+
torch.cuda.empty_cache()
|
423 |
+
|
424 |
+
self.model=self.model.to(self.device)
|
425 |
+
|
426 |
+
|
427 |
+
|
428 |
+
# arg_c = {
|
429 |
+
# 'context': [context[0]],
|
430 |
+
# 'clip_fea': clip_context,
|
431 |
+
# 'seq_len': max_seq_len,
|
432 |
+
# 'y': [y],
|
433 |
+
# }
|
434 |
+
#
|
435 |
+
# arg_null = {
|
436 |
+
# 'context': context_null,
|
437 |
+
# 'clip_fea': clip_context,
|
438 |
+
# 'seq_len': max_seq_len,
|
439 |
+
# 'y': [y],
|
440 |
+
# }
|
441 |
+
|
442 |
+
# pre-process
|
443 |
+
model = _WrappedModel_Wan(self.model, self.scheduler.timesteps, self.num_train_timesteps, context_null, guide_scale)
|
444 |
+
model_kwargs = {
|
445 |
+
'seq_len': max_seq_len,
|
446 |
+
'y': [y],
|
447 |
+
'clip_fea': clip_context,
|
448 |
+
}
|
449 |
+
B = 1
|
450 |
+
# latents = latents[0].unsqueeze(0)
|
451 |
+
latents = latents.unsqueeze(0)
|
452 |
+
|
453 |
+
oss_steps = search_OSS_video(model, latents, B, context, self.device, teacher_steps=sampling_steps, student_steps=student_steps, norm=norm, model_kwargs=model_kwargs, frame_type=frame_type, channel_type=channel_type, random_channel=random_channel)
|
454 |
+
latents_oss = infer_OSS(oss_steps, model, latents, context, self.device, model_kwargs=model_kwargs)
|
455 |
+
|
456 |
+
with open("%s.txt"%args.save_file,"w")as f:f.write(str(oss_steps))
|
457 |
+
os._exit(2333)
|
458 |
+
# pdb.set_trace()
|
459 |
+
# teacher video
|
460 |
+
teacher_steps = list(range(1, sampling_steps+1))
|
461 |
+
latents_tea = infer_OSS(teacher_steps, model, latents, context, self.device, model_kwargs=model_kwargs)
|
462 |
+
|
463 |
+
x0_oss = latents_oss
|
464 |
+
x0_tea = latents_tea
|
465 |
+
|
466 |
+
if offload_model:
|
467 |
+
self.model.cpu()
|
468 |
+
torch.cuda.empty_cache()
|
469 |
+
if self.rank == 0:
|
470 |
+
videos_oss = self.vae.decode(x0_oss)
|
471 |
+
videos_tea = self.vae.decode(x0_tea)
|
472 |
+
|
473 |
+
# for idx, t in enumerate(tqdm(self.scheduler.timesteps)):
|
474 |
+
# latent_model_input = [latent.to(self.device)]
|
475 |
+
# timestep = [t]
|
476 |
+
#
|
477 |
+
# timestep = torch.stack(timestep).to(self.device)
|
478 |
+
# noise_pred_cond = self.model(latent_model_input, t=timestep, **arg_c)[0].to(torch.device('cpu') if offload_model else self.device)
|
479 |
+
# if offload_model:
|
480 |
+
# torch.cuda.empty_cache()
|
481 |
+
# noise_pred_uncond = self.model(latent_model_input, t=timestep, **arg_null)[0].to(torch.device('cpu') if offload_model else self.device)
|
482 |
+
# if offload_model:
|
483 |
+
# torch.cuda.empty_cache()
|
484 |
+
# noise_pred = noise_pred_uncond + guide_scale * (noise_pred_cond - noise_pred_uncond)
|
485 |
+
# # noise_pred = noise_pred_cond
|
486 |
+
# latent = latent.to(torch.device('cpu') if offload_model else self.device)
|
487 |
+
#
|
488 |
+
# # latents = self.scheduler.step(noise_pred, self.scheduler.timesteps[progress_id], latents)
|
489 |
+
# temp_x0 = self.scheduler.step(
|
490 |
+
# noise_pred.unsqueeze(0),
|
491 |
+
# self.scheduler.timesteps[idx],
|
492 |
+
# latent.unsqueeze(0))[0]
|
493 |
+
# latent = temp_x0.squeeze(0)
|
494 |
+
#
|
495 |
+
# x0 = [latent.to(self.device)]
|
496 |
+
# del latent_model_input, timestep
|
497 |
+
#
|
498 |
+
# if offload_model:
|
499 |
+
# self.model=self.model.cpu()
|
500 |
+
# torch.cuda.empty_cache()
|
501 |
+
#
|
502 |
+
# if self.rank == 0:
|
503 |
+
# videos = self.vae.decode(x0)
|
504 |
+
|
505 |
+
del noise, latents
|
506 |
+
# del self.scheduler
|
507 |
+
if offload_model:
|
508 |
+
gc.collect()
|
509 |
+
torch.cuda.synchronize()
|
510 |
+
if dist.is_initialized():
|
511 |
+
dist.barrier()
|
512 |
+
|
513 |
+
return videos[0] if self.rank == 0 else None
|
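To make the step-schedule logic above concrete: as the parameter names suggest, the teacher trajectory walks every step index from 1 to sampling_steps, while search_OSS_video appears to return a shorter list of student_steps step indices that infer_OSS then follows. A minimal, self-contained sketch of that relationship; the concrete numbers and the example schedule are made up for illustration, since the real one comes from the search:

    # hedged illustration only: the actual schedule is produced by search_OSS_video
    sampling_steps = 40   # number of teacher steps, as passed above
    student_steps = 8     # target number of student steps

    teacher_steps = list(range(1, sampling_steps + 1))   # [1, 2, ..., 40]
    oss_steps = [3, 7, 12, 18, 24, 30, 35, 40]           # hypothetical search result

    assert len(oss_steps) == student_steps
    assert all(s in teacher_steps for s in oss_steps)
    assert oss_steps == sorted(oss_steps)
    print("teacher: %d model evaluations, student: %d" % (len(teacher_steps), len(oss_steps)))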
wan/modules/__init__.py
ADDED
@@ -0,0 +1,16 @@
+from .attention import flash_attention
+from .model import WanModel
+from .t5 import T5Decoder, T5Encoder, T5EncoderModel, T5Model
+from .tokenizers import HuggingfaceTokenizer
+from .vae import WanVAE
+
+__all__ = [
+    'WanVAE',
+    'WanModel',
+    'T5Model',
+    'T5Encoder',
+    'T5Decoder',
+    'T5EncoderModel',
+    'HuggingfaceTokenizer',
+    'flash_attention',
+]
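The re-exports above let downstream code import the main building blocks from the package root rather than from individual submodules. A short usage sketch, assuming the wan package is on the Python path:

    from wan.modules import WanModel, WanVAE, T5EncoderModel, flash_attention
    # equivalent to importing from the submodules directly, e.g.:
    # from wan.modules.model import WanModel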
wan/modules/attention.py
ADDED
@@ -0,0 +1,208 @@
+# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
+import torch
+
+try:
+    import flash_attn_interface
+    FLASH_ATTN_3_AVAILABLE = True
+except ModuleNotFoundError:
+    FLASH_ATTN_3_AVAILABLE = False
+
+try:
+    import flash_attn
+    FLASH_ATTN_2_AVAILABLE = True
+except ModuleNotFoundError:
+    FLASH_ATTN_2_AVAILABLE = False
+
+import warnings
+
+__all__ = [
+    'flash_attention',
+    'attention',
+]
+
+# try:
+#     # from sageattention import sageattn_varlen  # sage1
+#     from sageattention import sageattn  # sage2
+#
+#     print("using sageattn2")
+#
+#
+#     @torch.compiler.disable()
+#     def sageattn_wrapper(
+#         q, k, v,
+#         attention_length
+#     ):
+#         padding_length = q.shape[0] - attention_length
+#         q = q[:attention_length, :, :].unsqueeze(0)
+#         k = k[:attention_length, :, :].unsqueeze(0)
+#         v = v[:attention_length, :, :].unsqueeze(0)
+#
+#         o = sageattn(q, k, v, tensor_layout="NHD").squeeze(0)
+#         if padding_length > 0:
+#             o = torch.cat([o, torch.empty((padding_length, *o.shape[-2:]), dtype=o.dtype, device=o.device)], 0)
+#
+#         return o
+# except:
+#     sageattn_wrapper = None
+#     traceback.print_exc()
+
+def flash_attention(
+    q,
+    k,
+    v,
+    q_lens=None,
+    k_lens=None,
+    dropout_p=0.,
+    softmax_scale=None,
+    q_scale=None,
+    causal=False,
+    window_size=(-1, -1),
+    deterministic=True,  # False
+    dtype=torch.bfloat16,
+    version=None,
+):
+    """
+    q: [B, Lq, Nq, C1].
+    k: [B, Lk, Nk, C1].
+    v: [B, Lk, Nk, C2]. Nq must be divisible by Nk.
+    q_lens: [B].
+    k_lens: [B].
+    dropout_p: float. Dropout probability.
+    softmax_scale: float. The scaling of QK^T before applying softmax.
+    causal: bool. Whether to apply a causal attention mask.
+    window_size: (left, right). If not (-1, -1), apply sliding-window local attention.
+    deterministic: bool. If True, slightly slower and uses more memory.
+    dtype: torch.dtype. Applied when the dtype of q/k/v is not float16/bfloat16.
+    """
+    half_dtypes = (torch.float16, torch.bfloat16)
+    assert dtype in half_dtypes
+    assert q.device.type == 'cuda' and q.size(-1) <= 256
+
+    # params
+    b, lq, lk, out_dtype = q.size(0), q.size(1), k.size(1), q.dtype
+
+    def half(x):
+        return x if x.dtype in half_dtypes else x.to(dtype)
+
+    # preprocess query
+    if q_lens is None:
+        q = half(q.flatten(0, 1))
+        q_lens = torch.tensor(
+            [lq] * b, dtype=torch.int32).to(
+                device=q.device, non_blocking=True)
+    else:
+        q = half(torch.cat([u[:v] for u, v in zip(q, q_lens)]))
+
+    # preprocess key, value
+    if k_lens is None:
+        k = half(k.flatten(0, 1))
+        v = half(v.flatten(0, 1))
+        k_lens = torch.tensor(
+            [lk] * b, dtype=torch.int32).to(
+                device=k.device, non_blocking=True)
+    else:
+        k = half(torch.cat([u[:v] for u, v in zip(k, k_lens)]))
+        v = half(torch.cat([u[:v] for u, v in zip(v, k_lens)]))
+
+    q = q.to(v.dtype)
+    k = k.to(v.dtype)
+
+    if q_scale is not None:
+        q = q * q_scale
+
+    # if sageattn_wrapper is not None:
+    #     x = sageattn_wrapper(q, k, v, lq).unsqueeze(0)
+    #     return x.type(out_dtype)
+
+    if version is not None and version == 3 and not FLASH_ATTN_3_AVAILABLE:
+        warnings.warn(
+            'Flash attention 3 is not available, use flash attention 2 instead.'
+        )
+
+    # apply attention
+    if (version is None or version == 3) and FLASH_ATTN_3_AVAILABLE:
+        # Note: dropout_p, window_size are not supported in FA3 now.
+        x = flash_attn_interface.flash_attn_varlen_func(
+            q=q,
+            k=k,
+            v=v,
+            cu_seqlens_q=torch.cat([q_lens.new_zeros([1]), q_lens]).cumsum(
+                0, dtype=torch.int32).to(q.device, non_blocking=True),
+            cu_seqlens_k=torch.cat([k_lens.new_zeros([1]), k_lens]).cumsum(
+                0, dtype=torch.int32).to(q.device, non_blocking=True),
+            seqused_q=None,
+            seqused_k=None,
+            max_seqlen_q=lq,
+            max_seqlen_k=lk,
+            softmax_scale=softmax_scale,
+            causal=causal,
+            deterministic=deterministic)[0].unflatten(0, (b, lq))
+    else:
+        assert FLASH_ATTN_2_AVAILABLE
+        x = flash_attn.flash_attn_varlen_func(
+            q=q,
+            k=k,
+            v=v,
+            cu_seqlens_q=torch.cat([q_lens.new_zeros([1]), q_lens]).cumsum(
+                0, dtype=torch.int32).to(q.device, non_blocking=True),
+            cu_seqlens_k=torch.cat([k_lens.new_zeros([1]), k_lens]).cumsum(
+                0, dtype=torch.int32).to(q.device, non_blocking=True),
+            max_seqlen_q=lq,
+            max_seqlen_k=lk,
+            dropout_p=dropout_p,
+            softmax_scale=softmax_scale,
+            causal=causal,
+            window_size=window_size,
+            deterministic=deterministic).unflatten(0, (b, lq))
+
+    # output
+    return x.type(out_dtype)
+
+
+def attention(
+    q,
+    k,
+    v,
+    q_lens=None,
+    k_lens=None,
+    dropout_p=0.,
+    softmax_scale=None,
+    q_scale=None,
+    causal=False,
+    window_size=(-1, -1),
+    deterministic=True,  # False
+    dtype=torch.bfloat16,
+    fa_version=None,
+):
+    if FLASH_ATTN_2_AVAILABLE or FLASH_ATTN_3_AVAILABLE:
+        return flash_attention(
+            q=q,
+            k=k,
+            v=v,
+            q_lens=q_lens,
+            k_lens=k_lens,
+            dropout_p=dropout_p,
+            softmax_scale=softmax_scale,
+            q_scale=q_scale,
+            causal=causal,
+            window_size=window_size,
+            deterministic=deterministic,
+            dtype=dtype,
+            version=fa_version,
+        )
+    else:
+        if q_lens is not None or k_lens is not None:
+            warnings.warn(
+                'Padding mask is disabled when using scaled_dot_product_attention. It can have a significant impact on performance.'
+            )
+        attn_mask = None
+
+        q = q.transpose(1, 2).to(dtype)
+        k = k.transpose(1, 2).to(dtype)
+        v = v.transpose(1, 2).to(dtype)
+
+        out = torch.nn.functional.scaled_dot_product_attention(
+            q, k, v, attn_mask=attn_mask, is_causal=causal, dropout_p=dropout_p)
+
+        out = out.transpose(1, 2).contiguous()
+        return out
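As a quick orientation for the [B, L, N, C] layout these helpers expect (batch, sequence length, heads, head dim), here is a small hedged sketch of the pure-PyTorch path that attention() falls back to when neither FlashAttention 2 nor 3 is installed. The shapes and values are made up for illustration, and float32 is used here for portability, whereas the module itself casts q/k/v to bfloat16/float16 first:

    import torch
    import torch.nn.functional as F

    # query/key/value in the module's [B, L, N, C] layout
    b, lq, lk, heads, dim = 1, 16, 16, 8, 64
    q = torch.randn(b, lq, heads, dim)
    k = torch.randn(b, lk, heads, dim)
    v = torch.randn(b, lk, heads, dim)

    # the fallback transposes to [B, N, L, C] for scaled_dot_product_attention,
    # then transposes back, mirroring the else-branch above
    out = F.scaled_dot_product_attention(
        q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2),
        is_causal=False, dropout_p=0.0)
    out = out.transpose(1, 2).contiguous()
    print(out.shape)  # torch.Size([1, 16, 8, 64])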