rahul7star committed
Commit e81aa63 · verified · 1 Parent(s): 61e4131

Update app.py

Files changed (1):
  1. app.py +52 -67
app.py CHANGED
@@ -1,10 +1,9 @@
-import spaces
 import os
 import subprocess
 import tempfile
 from huggingface_hub import snapshot_download
 import gradio as gr
-import spaces  # Hugging Face Spaces GPU decorator
+import spaces
 
 # ---------------- Step 1: Download Model ----------------
 repo_id = "Wan-AI/Wan2.2-TI2V-5B"
@@ -13,31 +12,44 @@ ckpt_dir = snapshot_download(repo_id, local_dir_use_symlinks=False)
 print(f"Using checkpoints from {ckpt_dir}")
 
 # ---------------- Step 2: Duration Calculation ----------------
-def get_duration(prompt, size, duration_seconds, steps):
-    """
-    Calculate GPU duration dynamically.
-    - duration_seconds: estimated video length in seconds
-    - steps: number of inference steps
-    """
+def get_duration(prompt, size, duration_seconds):
     try:
         h, w = size.lower().replace(" ", "").split("*")
         h, w = int(h), int(w)
     except Exception:
-        h, w = 704, 1280  # fallback
-
-    # Simple rule: time grows with steps and video duration
-    duration = int(duration_seconds) * int(steps) * 2.25 + 5
+        h, w = 704, 1280
+    duration = int(duration_seconds) * 50 * 2.25 + 5  # 50 is fixed sample_steps
     return duration
 
-# ---------------- Step 3: Generation Functions ----------------
-@spaces.GPU(duration=get_duration)
-def generate_t2v(prompt, size="1280*704", duration_seconds=5, steps=25):
-    """Text-to-Video generation."""
-    if not prompt.strip():
-        return None, "Please enter a prompt."
+# ---------------- Step 3: Helpers ----------------
+def find_generated_mp4():
+    mp4_files = [f for f in os.listdir(".") if f.lower().endswith(".mp4")]
+    if not mp4_files:
+        return None
+    mp4_files.sort(key=lambda x: os.path.getmtime(x), reverse=True)
+    return mp4_files[0]
 
+def run_generate_command(cmd):
+    print(f"[RUN] {' '.join(cmd)}")
+    try:
+        subprocess.run(cmd, check=True)
+    except subprocess.CalledProcessError as e:
+        return None, None, f"Error: {e}"
+
+    mp4_file = find_generated_mp4()
+    if not mp4_file:
+        return None, None, "No output video found."
+
     temp_dir = tempfile.mkdtemp()
-    output_path = os.path.join(temp_dir, "output.mp4")
+    output_path = os.path.join(temp_dir, mp4_file)
+    os.rename(mp4_file, output_path)
+    return output_path, output_path, "Generation successful!"
+
+# ---------------- Step 4: Generation Functions ----------------
+@spaces.GPU(duration=get_duration)
+def generate_t2v(prompt, size="1280*704", duration_seconds=5):
+    if not prompt.strip():
+        return None, None, "Please enter a prompt."
 
     cmd = [
         "python", "generate.py",
@@ -47,36 +59,21 @@ def generate_t2v(prompt, size="1280*704", duration_seconds=5, steps=25):
         "--offload_model", "True",
         "--convert_model_dtype",
         "--t5_cpu",
-        "--prompt", prompt
+        "--prompt", prompt,
+        "--steps", "50",
+        "--guidance_scale", "5.0"
     ]
-
-    print(f"[T2V] Running command: {' '.join(cmd)}")
-    try:
-        subprocess.run(cmd, check=True)
-    except subprocess.CalledProcessError as e:
-        return None, f"Error during T2V generation: {e}"
-
-    if os.path.exists("output.mp4"):
-        os.rename("output.mp4", output_path)
-    elif os.path.exists(output_path):
-        pass
-    else:
-        return None, "Generation finished but output file not found."
-
-    return output_path, "Text-to-Video generated successfully!"
+    return run_generate_command(cmd)
 
 @spaces.GPU(duration=get_duration)
-def generate_i2v(image, prompt, size="1280*704", duration_seconds=5, steps=25):
-    """Image-to-Video generation."""
+def generate_i2v(image, prompt, size="1280*704", duration_seconds=5):
     if image is None or not prompt.strip():
-        return None, "Please upload an image and enter a prompt."
+        return None, None, "Please upload an image and enter a prompt."
 
     temp_dir = tempfile.mkdtemp()
     image_path = os.path.join(temp_dir, "input.jpg")
     image.save(image_path)
 
-    output_path = os.path.join(temp_dir, "output.mp4")
-
     cmd = [
         "python", "generate.py",
         "--task", "ti2v-5B",
@@ -86,28 +83,16 @@ def generate_i2v(image, prompt, size="1280*704", duration_seconds=5, steps=25):
         "--convert_model_dtype",
         "--t5_cpu",
         "--image", image_path,
-        "--prompt", prompt
+        "--prompt", prompt,
+        "--steps", "50",
+        "--guidance_scale", "5.0"
     ]
+    return run_generate_command(cmd)
 
-    print(f"[I2V] Running command: {' '.join(cmd)}")
-    try:
-        subprocess.run(cmd, check=True)
-    except subprocess.CalledProcessError as e:
-        return None, f"Error during I2V generation: {e}"
-
-    if os.path.exists("output.mp4"):
-        os.rename("output.mp4", output_path)
-    elif os.path.exists(output_path):
-        pass
-    else:
-        return None, "Generation finished but output file not found."
-
-    return output_path, "Image-to-Video generated successfully!"
-
-# ---------------- Step 4: Gradio UI ----------------
+# ---------------- Step 5: Gradio UI ----------------
 with gr.Blocks() as demo:
     gr.Markdown("## 🎥 Wan2.2-TI2V-5B Video Generator")
-    gr.Markdown("Choose **Text-to-Video** or **Image-to-Video** mode below.")
+    gr.Markdown("Generate AI videos from text or image prompts with download option.")
 
     with gr.Tab("Text-to-Video"):
         t2v_prompt = gr.Textbox(
@@ -116,14 +101,14 @@ with gr.Blocks() as demo:
         )
         t2v_size = gr.Textbox(label="Video Size", value="1280*704")
         t2v_duration = gr.Number(label="Video Length (seconds)", value=5)
-        t2v_steps = gr.Number(label="Inference Steps", value=25)
         t2v_btn = gr.Button("Generate from Text")
-        t2v_video = gr.Video(label="Generated Video")
+        t2v_video = gr.Video(label="Generated Video", autoplay=True)
+        t2v_download = gr.File(label="Download Video")
         t2v_status = gr.Textbox(label="Status")
         t2v_btn.click(
            generate_t2v,
-            [t2v_prompt, t2v_size, t2v_duration, t2v_steps],
-            [t2v_video, t2v_status]
+            [t2v_prompt, t2v_size, t2v_duration],
+            [t2v_video, t2v_download, t2v_status]
        )
 
    with gr.Tab("Image-to-Video"):
@@ -141,15 +126,15 @@ with gr.Blocks() as demo:
        )
        i2v_size = gr.Textbox(label="Video Size", value="1280*704")
        i2v_duration = gr.Number(label="Video Length (seconds)", value=5)
-        i2v_steps = gr.Number(label="Inference Steps", value=25)
        i2v_btn = gr.Button("Generate from Image")
-        i2v_video = gr.Video(label="Generated Video")
+        i2v_video = gr.Video(label="Generated Video", autoplay=True)
+        i2v_download = gr.File(label="Download Video")
        i2v_status = gr.Textbox(label="Status")
        i2v_btn.click(
            generate_i2v,
-            [i2v_image, i2v_prompt, i2v_size, i2v_duration, i2v_steps],
-            [i2v_video, i2v_status]
+            [i2v_image, i2v_prompt, i2v_size, i2v_duration],
+            [i2v_video, i2v_download, i2v_status]
        )
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
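
For reference, the get_duration callable above is consumed by the @spaces.GPU(duration=...) decorator: on ZeroGPU Spaces the duration argument may be a function that receives the same arguments as the decorated function and returns the requested GPU time in seconds. A minimal sketch of that pattern, kept separate from the committed app (the render function, its arguments, and the sample prompt are illustrative placeholders, not part of this commit):

import spaces  # Hugging Face Spaces ZeroGPU decorator, as imported in app.py

def get_duration(prompt, size, duration_seconds):
    # Same heuristic as the commit: runtime grows with clip length at the
    # fixed 50 sampling steps, plus a small startup allowance (seconds).
    return int(duration_seconds) * 50 * 2.25 + 5

@spaces.GPU(duration=get_duration)  # the callable is invoked with render()'s arguments
def render(prompt, size="1280*704", duration_seconds=5):
    # Placeholder for the GPU-bound generation work.
    return f"budgeted ~{get_duration(prompt, size, duration_seconds):.0f} s"

print(render("a red fox in the snow"))  # ~568 s with the default 5-second clip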