ovi054 committed
Commit ffc79bb · verified · 1 Parent(s): ef22e42

Update app.py

Files changed (1)
  1. app.py +16 -4
app.py CHANGED
@@ -15,7 +15,7 @@ import spaces
 model_id = "Wan-AI/Wan2.1-T2V-1.3B-Diffusers"
 vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32)
 pipe = WanPipeline.from_pretrained(model_id, vae=vae, torch_dtype=torch.bfloat16)
-flow_shift = 5.0 #5.0 1.0 for image, 5.0 for 720P, 3.0 for 480P
+flow_shift = 1.0 #5.0 1.0 for image, 5.0 for 720P, 3.0 for 480P
 pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=flow_shift)
 
 # Configure DDIMScheduler with a beta schedule
@@ -35,11 +35,22 @@ pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow
 # flow_shift=flow_shift # Retain flow_shift for WanPipeline compatibility
 # )
 
+CAUSVID_LORA_REPO = "WanVideo_comfy"
+CAUSVID_LORA_FILENAME = "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors"
+
+try:
+    causvid_path = hf_hub_download(repo_id=CAUSVID_LORA_REPO, filename=CAUSVID_LORA_FILENAME)
+    pipe.load_lora_weights(causvid_path, adapter_name="causvid_lora")
+    print("✅ CausVid LoRA loaded (strength: 1.0)")
+except Exception as e:
+    print(f"⚠️ CausVid LoRA not loaded: {e}")
+    causvid_path = None
+
 
 @spaces.GPU()
 def generate(prompt, negative_prompt, width=1024, height=1024, num_inference_steps=30, lora_id=None, progress=gr.Progress(track_tqdm=True)):
     if lora_id and lora_id.strip() != "":
-        pipe.unload_lora_weights()
+        # pipe.unload_lora_weights()
         pipe.load_lora_weights(lora_id.strip())
     pipe.to("cuda")
     # apply_first_block_cache(pipe.transformer, FirstBlockCacheConfig(threshold=0.2))
@@ -55,14 +66,15 @@ def generate(prompt, negative_prompt, width=1024, height=1024, num_inference_ste
             width=width,
             num_frames=1,
             num_inference_steps=num_inference_steps,
-            guidance_scale=5.0, #5.0
+            guidance_scale=1.0, #5.0
         )
         image = output.frames[0][0]
         image = (image * 255).astype(np.uint8)
         return Image.fromarray(image)
     finally:
         if lora_id and lora_id.strip() != "":
-            pipe.unload_lora_weights()
+            pass
+            # pipe.unload_lora_weights()
 
 iface = gr.Interface(
     fn=generate,
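
Note on the new module-level CausVid block: it calls hf_hub_download, whose import is not visible in this diff, and it registers the LoRA under the adapter name "causvid_lora" while generate() may later load a second, user-supplied LoRA. The sketch below is illustrative only, assuming the import lives elsewhere in app.py and that diffusers' named-adapter API (set_adapters) is available on WanPipeline; the "some-user/some-lora" repo id, the "user_lora" adapter name, and the weights are placeholders, not part of this commit.

from huggingface_hub import hf_hub_download  # assumed import; not shown in the diff

# Download the CausVid LoRA once at startup and register it as a named adapter
# (the two constants come from the block added in this commit).
causvid_path = hf_hub_download(repo_id=CAUSVID_LORA_REPO, filename=CAUSVID_LORA_FILENAME)
pipe.load_lora_weights(causvid_path, adapter_name="causvid_lora")

# Hypothetical follow-up: if a user LoRA is loaded under its own adapter name,
# set_adapters() selects which adapters are active and their per-adapter strengths.
pipe.load_lora_weights("some-user/some-lora", adapter_name="user_lora")  # placeholder repo id
pipe.set_adapters(["causvid_lora", "user_lora"], adapter_weights=[1.0, 1.0])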