Update app.py
app.py CHANGED
@@ -3,6 +3,8 @@ import sys
 import gradio as gr
 import subprocess
 import json
+import torch
+from pathlib import Path
 
 # Set environment variables for HF Spaces
 os.environ["GRADIO_SERVER_NAME"] = "0.0.0.0"
@@ -14,30 +16,42 @@ os.environ["HUGGINGFACE_HUB_CACHE"] = "/tmp/hf_cache"
 
 # Fix potential Hunyuan Video Avatar issues
 os.environ["TOKENIZERS_PARALLELISM"] = "false"
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:512"
 
 def setup_environment():
     """Setup environment for HF Spaces with WanGP v6.3"""
     # Install additional dependencies if needed
-
-
-
-
-
+    dependencies = [
+        "sageattention==1.0.6",
+        "insightface",
+        "facexlib",
+        "diffusers>=0.30.0",
+        "transformers>=4.44.0",
+        "accelerate>=0.34.0",
+        "xformers",
+        "opencv-python",
+        "imageio[ffmpeg]",
+        "moviepy",
+        "librosa",
+        "soundfile"
+    ]
 
-
-
-
-
-
-
-
+    for dep in dependencies:
+        try:
+            module_name = dep.split("==")[0].split(">=")[0]
+            __import__(module_name.replace("-", "_"))
+        except ImportError:
+            print(f"Installing {dep}...")
+            subprocess.run([sys.executable, "-m", "pip", "install", dep],
+                           check=True, capture_output=True)
 
 def download_essential_models():
     """Pre-download essential models for faster startup"""
-    from huggingface_hub import snapshot_download
-
-    print("Downloading Hunyuan Video Avatar models...")
     try:
+        from huggingface_hub import snapshot_download
+
+        print("Downloading Hunyuan Video Avatar models...")
+
         # Download Hunyuan Video Avatar base models
         snapshot_download(
             repo_id="tencent/HunyuanVideo-Avatar",
@@ -54,6 +68,8 @@ def download_essential_models():
             ignore_patterns=["*.mp4", "*.avi"]
         )
 
+        print("✅ Models downloaded successfully!")
+
     except Exception as e:
        print(f"Model download warning: {e}")
        print("Models will be downloaded on-demand during first use.")
@@ -87,30 +103,174 @@ def create_hf_config():
         }
     }
 
-
+    config_path = "/tmp/hf_config.json"
+    with open(config_path, "w") as f:
         json.dump(config, f, indent=2)
 
     return config
 
+class WanGPInterface:
+    """WanGP Interface for HF Spaces"""
+
+    def __init__(self, config):
+        self.config = config
+        self.device = "cuda" if torch.cuda.is_available() else "cpu"
+        self.models_loaded = False
+
+    def load_models(self):
+        """Load models on demand"""
+        if self.models_loaded:
+            return
+
+        try:
+            print("Loading Hunyuan Video Avatar models...")
+            # Model loading logic would go here
+            # This is a placeholder for the actual model loading
+            self.models_loaded = True
+            print("✅ Models loaded successfully!")
+        except Exception as e:
+            print(f"❌ Error loading models: {e}")
+            raise e
+
+    def generate_avatar_video(self, audio_file, avatar_image, prompt="", emotion="neutral"):
+        """Generate avatar video from audio and image"""
+        try:
+            self.load_models()
+
+            # Placeholder for actual generation logic
+            # This would call the real Hunyuan Video Avatar pipeline
+
+            return "Video generation completed! (This is a placeholder)"
+
+        except Exception as e:
+            return f"Error: {str(e)}"
+
+    def generate_video(self, prompt, duration=5, resolution="512x512"):
+        """Generate video from text prompt"""
+        try:
+            self.load_models()
+
+            # Placeholder for video generation logic
+            return f"Generated video for prompt: {prompt}"
+
+        except Exception as e:
+            return f"Error: {str(e)}"
+
+def create_gradio_interface(wangp_interface):
+    """Create Gradio interface for WanGP"""
+
+    with gr.Blocks(title="WanGP v6.3 - Hunyuan Video Avatar", theme=gr.themes.Soft()) as demo:
+        gr.HTML("""
+        <div style="text-align: center; margin-bottom: 20px;">
+            <h1>🎭 WanGP v6.3 - Hunyuan Video Avatar</h1>
+            <p>Advanced AI Video Generation with Audio-Driven Human Animation</p>
+        </div>
+        """)
+
+        with gr.Tabs():
+            # Avatar Generation Tab
+            with gr.TabItem("🎭 Avatar Generation"):
+                with gr.Row():
+                    with gr.Column():
+                        audio_input = gr.Audio(
+                            label="Audio Input",
+                            type="filepath",
+                            format="wav"
+                        )
+                        avatar_image = gr.Image(
+                            label="Avatar Image",
+                            type="filepath"
+                        )
+                        emotion_control = gr.Dropdown(
+                            choices=["neutral", "happy", "sad", "angry", "surprised"],
+                            value="neutral",
+                            label="Emotion Control"
+                        )
+                        avatar_prompt = gr.Textbox(
+                            label="Additional Prompt (Optional)",
+                            placeholder="Describe additional details..."
+                        )
+                        generate_avatar_btn = gr.Button("Generate Avatar Video", variant="primary")
+
+                    with gr.Column():
+                        avatar_output = gr.Video(label="Generated Avatar Video")
+                        avatar_status = gr.Textbox(label="Status", interactive=False)
+
+            # Text-to-Video Tab
+            with gr.TabItem("📹 Text to Video"):
+                with gr.Row():
+                    with gr.Column():
+                        video_prompt = gr.Textbox(
+                            label="Video Prompt",
+                            placeholder="Describe the video you want to generate...",
+                            lines=3
+                        )
+                        duration_slider = gr.Slider(
+                            minimum=2,
+                            maximum=10,
+                            value=5,
+                            step=1,
+                            label="Duration (seconds)"
+                        )
+                        resolution_dropdown = gr.Dropdown(
+                            choices=["512x512", "768x768", "1024x1024"],
+                            value="512x512",
+                            label="Resolution"
+                        )
+                        generate_video_btn = gr.Button("Generate Video", variant="primary")
+
+                    with gr.Column():
+                        video_output = gr.Video(label="Generated Video")
+                        video_status = gr.Textbox(label="Status", interactive=False)
+
+        # Event handlers
+        generate_avatar_btn.click(
+            fn=wangp_interface.generate_avatar_video,
+            inputs=[audio_input, avatar_image, avatar_prompt, emotion_control],
+            outputs=[avatar_status]
+        )
+
+        generate_video_btn.click(
+            fn=wangp_interface.generate_video,
+            inputs=[video_prompt, duration_slider, resolution_dropdown],
+            outputs=[video_status]
+        )
+
+        gr.HTML("""
+        <div style="text-align: center; margin-top: 20px; color: #666;">
+            <p>Powered by Hunyuan Video Avatar & WanGP v6.3</p>
+        </div>
+        """)
+
+    return demo
+
 if __name__ == "__main__":
     print("🚀 Starting WanGP v6.3 with Hunyuan Video Avatar...")
 
+    # Setup environment
     setup_environment()
+
+    # Create configuration
     config = create_hf_config()
-
+
+    # Download models in background
+    try:
+        download_essential_models()
+    except Exception as e:
+        print(f"Model download failed: {e}")
+
+    # Initialize WanGP interface
+    wangp_interface = WanGPInterface(config)
+
+    # Create and launch Gradio interface
+    demo = create_gradio_interface(wangp_interface)
 
     print("✅ Setup complete! Launching application...")
 
-
-
-    main(
-        profile=3, # Higher profile for A10G Large
-        attention="sage", # Use Sage attention for better performance
-        server_name="0.0.0.0",
+    demo.launch(
+        server_name="0.0.0.0",
         server_port=7860,
-        quantize_transformer=True,
-        teacache="2.0", # Enable TeaCache for Avatar acceleration
-        compile=False, # Disabled for HF Spaces stability
         share=False, # HF Spaces handles sharing
-
-
+        debug=False,
+        show_error=True
+    )