multimodalart HF Staff committed on
Commit
abdc3ca
·
verified ·
1 Parent(s): 5aa4bd3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -5
app.py CHANGED
@@ -156,6 +156,7 @@ def generate_video(input_image, prompt, height, width,
156
  pattern, dense_layers, dense_timesteps, decay_factor,
157
  progress=gr.Progress(track_tqdm=True)):
158
 
 
159
  @torch.no_grad()
160
  def WanImageToVideoPipeline_Sparse__call__(
161
  self,
@@ -309,7 +310,7 @@ def generate_video(input_image, prompt, height, width,
309
  if not return_dict:
310
  return (video,)
311
  return WanPipelineOutput(frames=video)
312
-
313
  def replace_wan_attention_for_i2v(
314
  pipe,
315
  height,
@@ -361,7 +362,7 @@ def generate_video(input_image, prompt, height, width,
361
  if isinstance(m, Attention) and hasattr(m.processor, "layer_idx"):
362
  layer_idx = m.processor.layer_idx
363
  m.set_processor(AttnModule(layer_idx))
364
-
365
  if input_image is None:
366
  raise gr.Error("Please upload an input image.")
367
 
@@ -375,7 +376,7 @@ def generate_video(input_image, prompt, height, width,
375
  decay_factor=decay_factor,
376
  sparsity_type=pattern,
377
  )
378
-
379
  target_h = max(MOD_VALUE, (int(height) // MOD_VALUE) * MOD_VALUE)
380
  target_w = max(MOD_VALUE, (int(width) // MOD_VALUE) * MOD_VALUE)
381
 
@@ -384,7 +385,7 @@ def generate_video(input_image, prompt, height, width,
384
  current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
385
 
386
  resized_image = input_image.resize((target_w, target_h))
387
-
388
  with torch.inference_mode():
389
  output_frames_list = pipe(
390
  image=resized_image, prompt=prompt, negative_prompt=negative_prompt,
@@ -393,7 +394,7 @@ def generate_video(input_image, prompt, height, width,
393
  generator=torch.Generator(device="cuda").manual_seed(current_seed),
394
  callback_on_step_end=progress
395
  ).frames[0]
396
-
397
  with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
398
  video_path = tmpfile.name
399
  export_to_video(output_frames_list, video_path, fps=FIXED_FPS)
 
156
  pattern, dense_layers, dense_timesteps, decay_factor,
157
  progress=gr.Progress(track_tqdm=True)):
158
 
159
+ print("Test")
160
  @torch.no_grad()
161
  def WanImageToVideoPipeline_Sparse__call__(
162
  self,
 
310
  if not return_dict:
311
  return (video,)
312
  return WanPipelineOutput(frames=video)
313
+ print("Test2")
314
  def replace_wan_attention_for_i2v(
315
  pipe,
316
  height,
 
362
  if isinstance(m, Attention) and hasattr(m.processor, "layer_idx"):
363
  layer_idx = m.processor.layer_idx
364
  m.set_processor(AttnModule(layer_idx))
365
+ print("Test3")
366
  if input_image is None:
367
  raise gr.Error("Please upload an input image.")
368
 
 
376
  decay_factor=decay_factor,
377
  sparsity_type=pattern,
378
  )
379
+ print("Test4")
380
  target_h = max(MOD_VALUE, (int(height) // MOD_VALUE) * MOD_VALUE)
381
  target_w = max(MOD_VALUE, (int(width) // MOD_VALUE) * MOD_VALUE)
382
 
 
385
  current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
386
 
387
  resized_image = input_image.resize((target_w, target_h))
388
+ print("Test5")
389
  with torch.inference_mode():
390
  output_frames_list = pipe(
391
  image=resized_image, prompt=prompt, negative_prompt=negative_prompt,
 
394
  generator=torch.Generator(device="cuda").manual_seed(current_seed),
395
  callback_on_step_end=progress
396
  ).frames[0]
397
+ print("Test6")
398
  with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
399
  video_path = tmpfile.name
400
  export_to_video(output_frames_list, video_path, fps=FIXED_FPS)