rahul7star committed
Commit 9cb7778 · verified · 1 parent: 5ef3204

Update app.py

Files changed (1):
  1. app.py (+38 −9)

app.py CHANGED

@@ -4,6 +4,7 @@ import subprocess
 import tempfile
 from huggingface_hub import snapshot_download
 import gradio as gr
+import spaces  # Hugging Face Spaces GPU decorator

 # ---------------- Step 1: Download Model ----------------
 repo_id = "Wan-AI/Wan2.2-TI2V-5B"
@@ -11,9 +12,26 @@ print(f"Downloading/loading checkpoints for {repo_id}...")
 ckpt_dir = snapshot_download(repo_id, local_dir_use_symlinks=False)
 print(f"Using checkpoints from {ckpt_dir}")

-# ---------------- Step 2: Generation Functions ----------------
-@spaces.GPU
-def generate_t2v(prompt, size="1280*704"):
+# ---------------- Step 2: Duration Calculation ----------------
+def get_duration(prompt, size, duration_seconds, steps):
+    """
+    Calculate GPU duration dynamically.
+    - duration_seconds: estimated video length in seconds
+    - steps: number of inference steps
+    """
+    try:
+        h, w = size.lower().replace(" ", "").split("*")
+        h, w = int(h), int(w)
+    except Exception:
+        h, w = 704, 1280  # fallback
+
+    # Simple rule: time grows with steps and video duration
+    duration = int(duration_seconds) * int(steps) * 2.25 + 5
+    return duration
+
+# ---------------- Step 3: Generation Functions ----------------
+@spaces.GPU(duration=get_duration)
+def generate_t2v(prompt, size="1280*704", duration_seconds=5, steps=25):
     """Text-to-Video generation."""
     if not prompt.strip():
         return None, "Please enter a prompt."
@@ -47,9 +65,8 @@ def generate_t2v(prompt, size="1280*704"):

     return output_path, "Text-to-Video generated successfully!"

-
-@spaces.GPU(duration=100)
-def generate_i2v(image, prompt, size="1280*704"):
+@spaces.GPU(duration=get_duration)
+def generate_i2v(image, prompt, size="1280*704", duration_seconds=5, steps=25):
     """Image-to-Video generation."""
     if image is None or not prompt.strip():
         return None, "Please upload an image and enter a prompt."
@@ -87,7 +104,7 @@ def generate_i2v(image, prompt, size="1280*704"):

     return output_path, "Image-to-Video generated successfully!"

-# ---------------- Step 3: Gradio UI ----------------
+# ---------------- Step 4: Gradio UI ----------------
 with gr.Blocks() as demo:
     gr.Markdown("## 🎥 Wan2.2-TI2V-5B Video Generator")
     gr.Markdown("Choose **Text-to-Video** or **Image-to-Video** mode below.")
@@ -98,10 +115,16 @@ with gr.Blocks() as demo:
             value="Two anthropomorphic cats in comfy boxing gear and bright gloves fight intensely on a spotlighted stage"
         )
         t2v_size = gr.Textbox(label="Video Size", value="1280*704")
+        t2v_duration = gr.Number(label="Video Length (seconds)", value=5)
+        t2v_steps = gr.Number(label="Inference Steps", value=25)
         t2v_btn = gr.Button("Generate from Text")
         t2v_video = gr.Video(label="Generated Video")
         t2v_status = gr.Textbox(label="Status")
-        t2v_btn.click(generate_t2v, [t2v_prompt, t2v_size], [t2v_video, t2v_status])
+        t2v_btn.click(
+            generate_t2v,
+            [t2v_prompt, t2v_size, t2v_duration, t2v_steps],
+            [t2v_video, t2v_status]
+        )

     with gr.Tab("Image-to-Video"):
         i2v_image = gr.Image(type="pil", label="Upload Image")
@@ -117,10 +140,16 @@ with gr.Blocks() as demo:
             )
         )
         i2v_size = gr.Textbox(label="Video Size", value="1280*704")
+        i2v_duration = gr.Number(label="Video Length (seconds)", value=5)
+        i2v_steps = gr.Number(label="Inference Steps", value=25)
         i2v_btn = gr.Button("Generate from Image")
         i2v_video = gr.Video(label="Generated Video")
         i2v_status = gr.Textbox(label="Status")
-        i2v_btn.click(generate_i2v, [i2v_image, i2v_prompt, i2v_size], [i2v_video, i2v_status])
+        i2v_btn.click(
+            generate_i2v,
+            [i2v_image, i2v_prompt, i2v_size, i2v_duration, i2v_steps],
+            [i2v_video, i2v_status]
+        )

 if __name__ == "__main__":
     demo.launch()
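
For reference, the new rule scales the requested GPU time with both the clip length and the number of inference steps. Below is a quick sanity check of that arithmetic; estimated_gpu_seconds simply mirrors get_duration rather than importing it from app.py (which would trigger the checkpoint download):

# Mirrors the arithmetic in get_duration(): clip seconds * steps * 2.25, plus 5 s of overhead.
def estimated_gpu_seconds(duration_seconds: int, steps: int) -> float:
    return int(duration_seconds) * int(steps) * 2.25 + 5

print(estimated_gpu_seconds(5, 25))   # UI defaults: 286.25 seconds
print(estimated_gpu_seconds(3, 20))   # shorter clip, fewer steps: 140.0 seconds

With the UI defaults this requests roughly five minutes of GPU time per run, versus the fixed duration=100 the previous revision used for image-to-video.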
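
A note on the dynamic-duration pattern: as I understand the ZeroGPU feature, a callable passed as duration= is evaluated with the arguments of each call to decide how long a GPU slot to request. The stand-in below is purely illustrative (gpu_stub and toy_t2v are hypothetical, not the real spaces.GPU implementation) and only shows why the helper's parameter order matters:

import functools

def gpu_stub(duration):
    """Hypothetical stand-in: resolve a callable duration per call, then run the function."""
    def wrap(fn):
        @functools.wraps(fn)
        def inner(*args, **kwargs):
            seconds = duration(*args, **kwargs) if callable(duration) else duration
            print(f"[stub] would request ~{seconds:.0f}s of GPU for {fn.__name__}")
            return fn(*args, **kwargs)
        return inner
    return wrap

def toy_duration(prompt, size, duration_seconds=5, steps=25):
    return int(duration_seconds) * int(steps) * 2.25 + 5

@gpu_stub(duration=toy_duration)
def toy_t2v(prompt, size="1280*704", duration_seconds=5, steps=25):
    return "ok"

toy_t2v("two cats boxing", "1280*704", duration_seconds=4, steps=30)  # ~275 s

If the real decorator forwards arguments positionally like this stub does, generate_i2v's extra leading image argument would shift what get_duration receives, so a dedicated helper for the image path (for example a hypothetical get_duration_i2v(image, prompt, size, duration_seconds, steps)) might be safer; worth checking against the spaces documentation.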