openfree committed
Commit 8949c1a · verified · 1 Parent(s): 23c2b11

Update app.py

Files changed (1)
  1. app.py +372 -175
app.py CHANGED
@@ -1,26 +1,55 @@
 import os
 import sys
-import gradio as gr
-import subprocess
 import json
-import torch
 from pathlib import Path

-# Set environment variables for HF Spaces
-os.environ["GRADIO_SERVER_NAME"] = "0.0.0.0"
-os.environ["GRADIO_SERVER_PORT"] = "7860"

-# Pre-download models cache
-os.environ["HF_HUB_CACHE"] = "/tmp/hf_cache"
-os.environ["HUGGINGFACE_HUB_CACHE"] = "/tmp/hf_cache"

-# Fix potential Hunyuan Video Avatar issues
 os.environ["TOKENIZERS_PARALLELISM"] = "false"
 os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:512"

 def setup_environment():
-    """Setup environment for HF Spaces with WanGP v6.3"""
-    # Install additional dependencies if needed
     dependencies = [
         "sageattention==1.0.6",
         "insightface",
@@ -38,55 +67,72 @@ def setup_environment():

     for dep in dependencies:
         try:
-            module_name = dep.split("==")[0].split(">=")[0]
-            __import__(module_name.replace("-", "_"))
-        except ImportError:
-            print(f"Installing {dep}...")
-            subprocess.run([sys.executable, "-m", "pip", "install", dep],
-                           check=True, capture_output=True)

 def download_essential_models():
-    """Pre-download essential models for faster startup"""
     try:
         from huggingface_hub import snapshot_download

-        print("Downloading Hunyuan Video Avatar models...")

-        # Download Hunyuan Video Avatar base models
-        snapshot_download(
-            repo_id="tencent/HunyuanVideo-Avatar",
-            cache_dir="/tmp/hf_cache",
-            allow_patterns=["*.safetensors", "*.json", "*.txt", "*.bin"],
-            ignore_patterns=["*.mp4", "*.avi", "*.mov"]  # Skip demo videos
-        )

-        # Download base Hunyuan Video model
-        snapshot_download(
-            repo_id="tencent/HunyuanVideo",
-            cache_dir="/tmp/hf_cache",
-            allow_patterns=["*.safetensors", "*.json", "*.txt"],
-            ignore_patterns=["*.mp4", "*.avi"]
-        )

-        print("✅ Models downloaded successfully!")

     except Exception as e:
-        print(f"Model download warning: {e}")
-        print("Models will be downloaded on-demand during first use.")

 def create_hf_config():
-    """Create optimized config for HF Spaces deployment"""
     config = {
         "model_settings": {
-            "profile": 3,  # Optimized for A10G Large
             "quantize_transformer": True,
             "attention_mode": "sage",
-            "compile": False,  # Disable for stability on HF
             "teacache": "2.0"
         },
         "avatar_settings": {
-            "max_frames": 120,  # ~5 seconds at 24fps
-            "resolution": "512x512",  # Balanced quality/performance
             "emotion_control": True,
             "multi_character": True
         },
@@ -98,182 +144,333 @@ def create_hf_config():
         },
         "audio_processing": {
             "sample_rate": 16000,
-            "max_duration": 15,  # seconds
             "supported_formats": ["wav", "mp3", "m4a"]
         }
     }

-    config_path = "/tmp/hf_config.json"
-    with open(config_path, "w") as f:
-        json.dump(config, f, indent=2)

     return config

 class WanGPInterface:
-    """WanGP Interface for HF Spaces"""

     def __init__(self, config):
-        self.config = config
-        self.device = "cuda" if torch.cuda.is_available() else "cpu"
         self.models_loaded = False

     def load_models(self):
-        """Load models on demand"""
         if self.models_loaded:
-            return

         try:
-            print("Loading Hunyuan Video Avatar models...")
-            # Model loading logic would go here
-            # This is a placeholder for the actual model loading
             self.models_loaded = True
-            print("✅ Models loaded successfully!")
         except Exception as e:
-            print(f"❌ Error loading models: {e}")
-            raise e

     def generate_avatar_video(self, audio_file, avatar_image, prompt="", emotion="neutral"):
         try:
-            self.load_models()
-
-            # Call the real model to generate a video from the image and audio
-            output_video_path = "/tmp/generated_avatar.mp4"

-            # TODO: replace this with code that actually calls the HunyuanAvatar model
-            # Assume the video has been saved to `output_video_path`
-
-            return output_video_path, "✅ Video generated successfully!"
-
         except Exception as e:
-            return None, f"❌ Error: {str(e)}"
-

     def generate_video(self, prompt, duration=5, resolution="512x512"):
-        """Generate video from text prompt"""
         try:
-            self.load_models()

-            # Placeholder for video generation logic
-            return f"Generated video for prompt: {prompt}"

         except Exception as e:
-            return f"Error: {str(e)}"

 def create_gradio_interface(wangp_interface):
-    """Create Gradio interface for WanGP"""

-    with gr.Blocks(title="WanGP v6.3 - Hunyuan Video Avatar", theme=gr.themes.Soft()) as demo:
-        gr.HTML("""
-        <div style="text-align: center; margin-bottom: 20px;">
-            <h1>🎭 WanGP v6.3 - Hunyuan Video Avatar</h1>
-            <p>Advanced AI Video Generation with Audio-Driven Human Animation</p>
-        </div>
-        """)
-
-        with gr.Tabs():
-            # Avatar Generation Tab
-            with gr.TabItem("🎭 Avatar Generation"):
-                with gr.Row():
-                    with gr.Column():
-                        audio_input = gr.Audio(
-                            label="Audio Input",
-                            type="filepath",
-                            format="wav"
-                        )
-                        avatar_image = gr.Image(
-                            label="Avatar Image",
-                            type="filepath"
-                        )
-                        emotion_control = gr.Dropdown(
-                            choices=["neutral", "happy", "sad", "angry", "surprised"],
-                            value="neutral",
-                            label="Emotion Control"
-                        )
-                        avatar_prompt = gr.Textbox(
-                            label="Additional Prompt (Optional)",
-                            placeholder="Describe additional details..."
-                        )
-                        generate_avatar_btn = gr.Button("Generate Avatar Video", variant="primary")
-
-                    with gr.Column():
-                        avatar_output = gr.Video(label="Generated Avatar Video")
-                        avatar_status = gr.Textbox(label="Status", interactive=False)

-            # Text-to-Video Tab
-            with gr.TabItem("📹 Text to Video"):
-                with gr.Row():
-                    with gr.Column():
-                        video_prompt = gr.Textbox(
-                            label="Video Prompt",
-                            placeholder="Describe the video you want to generate...",
-                            lines=3
-                        )
-                        duration_slider = gr.Slider(
-                            minimum=2,
-                            maximum=10,
-                            value=5,
-                            step=1,
-                            label="Duration (seconds)"
-                        )
-                        resolution_dropdown = gr.Dropdown(
-                            choices=["512x512", "768x768", "1024x1024"],
-                            value="512x512",
-                            label="Resolution"
-                        )
-                        generate_video_btn = gr.Button("Generate Video", variant="primary")
-
-                    with gr.Column():
-                        video_output = gr.Video(label="Generated Video")
-                        video_status = gr.Textbox(label="Status", interactive=False)

-        # Event handlers
-        generate_avatar_btn.click(
-            fn=wangp_interface.generate_avatar_video,
-            inputs=[audio_input, avatar_image, avatar_prompt, emotion_control],
-            outputs=[avatar_output, avatar_status]  # return both the video and the status
-        )

-        generate_video_btn.click(
-            fn=wangp_interface.generate_video,
-            inputs=[video_prompt, duration_slider, resolution_dropdown],
-            outputs=[video_status]
         )
-
-        gr.HTML("""
-        <div style="text-align: center; margin-top: 20px; color: #666;">
-            <p>Powered by Hunyuan Video Avatar & WanGP v6.3</p>
-        </div>
-        """)
-
-    return demo

-if __name__ == "__main__":
     print("🚀 Starting WanGP v6.3 with Hunyuan Video Avatar...")

-    # Setup environment
-    setup_environment()

-    # Create configuration
-    config = create_hf_config()

-    # Download models in background
     try:
         download_essential_models()
     except Exception as e:
-        print(f"Model download failed: {e}")
-
-    # Initialize WanGP interface
-    wangp_interface = WanGPInterface(config)

-    # Create and launch Gradio interface
-    demo = create_gradio_interface(wangp_interface)
-
-    print("✅ Setup complete! Launching application...")

-    demo.launch(
-        server_name="0.0.0.0",
-        server_port=7860,
-        share=False,  # HF Spaces handles sharing
-        debug=False,
-        show_error=True
-    )
@@ -1,26 +1,55 @@
 import os
 import sys
 import json
 from pathlib import Path
+import tempfile
+import shutil

+# Safe imports with fallbacks
+try:
+    import gradio as gr
+except ImportError:
+    print("Installing gradio...")
+    os.system(f"{sys.executable} -m pip install gradio")
+    import gradio as gr
+
+try:
+    import subprocess
+except ImportError:
+    subprocess = None
+
+try:
+    import torch
+    HAS_TORCH = True
+except ImportError:
+    HAS_TORCH = False
+    print("PyTorch not available, using CPU mode")

+# Ensure temp directories exist
+TEMP_DIR = tempfile.gettempdir()
+HF_CACHE_DIR = os.path.join(TEMP_DIR, "hf_cache")
+os.makedirs(HF_CACHE_DIR, exist_ok=True)

+# Set environment variables safely
+os.environ["GRADIO_SERVER_NAME"] = "0.0.0.0"
+os.environ["GRADIO_SERVER_PORT"] = "7860"
+os.environ["HF_HUB_CACHE"] = HF_CACHE_DIR
+os.environ["HUGGINGFACE_HUB_CACHE"] = HF_CACHE_DIR
 os.environ["TOKENIZERS_PARALLELISM"] = "false"
 os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:512"

+def safe_import(module_name, package_name=None):
+    """Safely import a module with fallback"""
+    if package_name is None:
+        package_name = module_name
+    try:
+        return __import__(module_name.replace("-", "_"))
+    except ImportError:
+        print(f"Module {module_name} not found, functionality limited")
+        return None
+
 def setup_environment():
+    """Setup environment with error handling"""
     dependencies = [
         "sageattention==1.0.6",
         "insightface",
@@ -38,55 +67,72 @@ def setup_environment():

     for dep in dependencies:
         try:
+            module_name = dep.split("==")[0].split(">=")[0].split("[")[0]
+            safe_import(module_name)
+        except Exception as e:
+            print(f"Could not process {dep}: {e}")
+            if subprocess:
+                try:
+                    subprocess.run(
+                        [sys.executable, "-m", "pip", "install", dep],
+                        check=False,
+                        capture_output=True,
+                        timeout=30
+                    )
+                except Exception as install_error:
+                    print(f"Failed to install {dep}: {install_error}")

 def download_essential_models():
+    """Pre-download models with full error handling"""
     try:
         from huggingface_hub import snapshot_download

+        print("Attempting to download Hunyuan Video Avatar models...")

+        try:
+            snapshot_download(
+                repo_id="tencent/HunyuanVideo-Avatar",
+                cache_dir=HF_CACHE_DIR,
+                allow_patterns=["*.safetensors", "*.json", "*.txt", "*.bin"],
+                ignore_patterns=["*.mp4", "*.avi", "*.mov"],
+                resume_download=True,
+                max_workers=2
+            )
+        except Exception as e:
+            print(f"Could not download HunyuanVideo-Avatar: {e}")

+        try:
+            snapshot_download(
+                repo_id="tencent/HunyuanVideo",
+                cache_dir=HF_CACHE_DIR,
+                allow_patterns=["*.safetensors", "*.json", "*.txt"],
+                ignore_patterns=["*.mp4", "*.avi"],
+                resume_download=True,
+                max_workers=2
+            )
+        except Exception as e:
+            print(f"Could not download HunyuanVideo: {e}")

+        print("Model download attempt completed")

+    except ImportError:
+        print("huggingface_hub not available, skipping model download")
     except Exception as e:
+        print(f"Model download error: {e}")

 def create_hf_config():
+    """Create config with error handling"""
     config = {
         "model_settings": {
+            "profile": 3,
             "quantize_transformer": True,
             "attention_mode": "sage",
+            "compile": False,
             "teacache": "2.0"
         },
         "avatar_settings": {
+            "max_frames": 120,
+            "resolution": "512x512",
             "emotion_control": True,
             "multi_character": True
         },
@@ -98,182 +144,333 @@ def create_hf_config():
         },
         "audio_processing": {
             "sample_rate": 16000,
+            "max_duration": 15,
             "supported_formats": ["wav", "mp3", "m4a"]
         }
     }

+    config_path = os.path.join(TEMP_DIR, "hf_config.json")
+    try:
+        with open(config_path, "w", encoding='utf-8') as f:
+            json.dump(config, f, indent=2)
+    except Exception as e:
+        print(f"Could not save config: {e}")
+        config_path = None

     return config

+def create_dummy_video(output_path, duration=5, fps=24, width=512, height=512):
+    """Create a dummy video file for testing"""
+    try:
+        import numpy as np
+        import cv2
+
+        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+        out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
+
+        for i in range(duration * fps):
+            # Create gradient frame
+            frame = np.ones((height, width, 3), dtype=np.uint8) * 50
+            text = f"Frame {i+1}"
+            cv2.putText(frame, text, (width//4, height//2),
+                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
+            out.write(frame)
+
+        out.release()
+        return True
+    except Exception as e:
+        print(f"Could not create video with OpenCV: {e}")
+        # Create empty file as fallback
+        try:
+            with open(output_path, 'wb') as f:
+                f.write(b'dummy video content')
+            return True
+        except:
+            return False
+
 class WanGPInterface:
+    """WanGP Interface with full error handling"""

     def __init__(self, config):
+        self.config = config or {}
+        self.device = "cpu"
+        if HAS_TORCH:
+            try:
+                self.device = "cuda" if torch.cuda.is_available() else "cpu"
+            except:
+                self.device = "cpu"
         self.models_loaded = False

     def load_models(self):
+        """Load models with error handling"""
         if self.models_loaded:
+            return True

         try:
+            print("Loading Hunyuan Video Avatar models (placeholder)...")
+            # Placeholder for actual model loading
+            import time
+            time.sleep(0.5)  # Simulate loading
             self.models_loaded = True
+            print("✅ Models loaded successfully (simulated)!")
+            return True
         except Exception as e:
+            print(f"Error in model loading: {e}")
+            self.models_loaded = False
+            return False

     def generate_avatar_video(self, audio_file, avatar_image, prompt="", emotion="neutral"):
+        """Generate avatar video with comprehensive error handling"""
         try:
+            # Validate inputs
+            if audio_file is None:
+                return None, "❌ Error: No audio file provided"
+
+            if avatar_image is None:
+                return None, "❌ Error: No avatar image provided"
+
+            # Ensure model is loaded
+            if not self.load_models():
+                print("Models not loaded, using dummy generation")
+
+            # Create output path
+            output_filename = f"avatar_{os.getpid()}_{id(self)}.mp4"
+            output_path = os.path.join(TEMP_DIR, output_filename)
+
+            # Create dummy video
+            if create_dummy_video(output_path, duration=5):
+                if os.path.exists(output_path):
+                    return output_path, "✅ Video generated successfully (demo mode)!"
+                else:
+                    return None, "❌ Error: Failed to create output file"
+            else:
+                return None, "❌ Error: Video generation failed"

         except Exception as e:
+            error_msg = f"❌ Error in avatar generation: {str(e)}"
+            print(error_msg)
+            return None, error_msg

     def generate_video(self, prompt, duration=5, resolution="512x512"):
+        """Generate video from text with error handling"""
         try:
+            if not prompt:
+                return "❌ Error: No prompt provided"
+
+            # Parse resolution
+            try:
+                width, height = map(int, resolution.split('x'))
+            except:
+                width, height = 512, 512

+            # Ensure model is loaded
+            if not self.load_models():
+                print("Models not loaded, using dummy generation")

+            # Create output path
+            output_filename = f"video_{os.getpid()}_{id(self)}.mp4"
+            output_path = os.path.join(TEMP_DIR, output_filename)
+
+            # Create dummy video
+            if create_dummy_video(output_path, duration=int(duration), width=width, height=height):
+                if os.path.exists(output_path):
+                    return output_path, f"✅ Generated video for prompt: {prompt[:50]}..."
+                else:
+                    return None, "❌ Error: Failed to create output file"
+            else:
+                return None, "❌ Error: Video generation failed"
+
         except Exception as e:
+            error_msg = f"❌ Error in video generation: {str(e)}"
+            print(error_msg)
+            return None, error_msg

 def create_gradio_interface(wangp_interface):
+    """Create Gradio interface with error handling"""

+    try:
+        with gr.Blocks(title="WanGP v6.3 - Hunyuan Video Avatar", theme=gr.themes.Soft()) as demo:
+            gr.HTML("""
+            <div style="text-align: center; margin-bottom: 20px;">
+                <h1>🎭 WanGP v6.3 - Hunyuan Video Avatar</h1>
+                <p>Advanced AI Video Generation with Audio-Driven Human Animation</p>
+                <p style="color: orange;">⚠️ Running in Demo Mode - Using placeholder outputs</p>
+            </div>
+            """)
+
+            with gr.Tabs():
+                # Avatar Generation Tab
+                with gr.TabItem("🎭 Avatar Generation"):
+                    with gr.Row():
+                        with gr.Column():
+                            audio_input = gr.Audio(
+                                label="Audio Input",
+                                type="filepath"
+                            )
+                            avatar_image = gr.Image(
+                                label="Avatar Image",
+                                type="filepath"
+                            )
+                            emotion_control = gr.Dropdown(
+                                choices=["neutral", "happy", "sad", "angry", "surprised"],
+                                value="neutral",
+                                label="Emotion Control"
+                            )
+                            avatar_prompt = gr.Textbox(
+                                label="Additional Prompt (Optional)",
+                                placeholder="Describe additional details...",
+                                value=""
+                            )
+                            generate_avatar_btn = gr.Button("Generate Avatar Video", variant="primary")
+
+                        with gr.Column():
+                            avatar_output = gr.Video(label="Generated Avatar Video")
+                            avatar_status = gr.Textbox(label="Status", interactive=False, value="Ready")
+
+                # Text-to-Video Tab
+                with gr.TabItem("📹 Text to Video"):
+                    with gr.Row():
+                        with gr.Column():
+                            video_prompt = gr.Textbox(
+                                label="Video Prompt",
+                                placeholder="Describe the video you want to generate...",
+                                lines=3,
+                                value=""
+                            )
+                            duration_slider = gr.Slider(
+                                minimum=2,
+                                maximum=10,
+                                value=5,
+                                step=1,
+                                label="Duration (seconds)"
+                            )
+                            resolution_dropdown = gr.Dropdown(
+                                choices=["512x512", "768x768", "1024x1024"],
+                                value="512x512",
+                                label="Resolution"
+                            )
+                            generate_video_btn = gr.Button("Generate Video", variant="primary")
+
+                        with gr.Column():
+                            video_output = gr.Video(label="Generated Video")
+                            video_status = gr.Textbox(label="Status", interactive=False, value="Ready")
+
+            # Event handlers with error handling
+            def safe_avatar_generation(*args):
+                try:
+                    return wangp_interface.generate_avatar_video(*args)
+                except Exception as e:
+                    return None, f"❌ Unexpected error: {str(e)}"
+
+            def safe_video_generation(*args):
+                try:
+                    result = wangp_interface.generate_video(*args)
+                    if isinstance(result, tuple):
+                        return result
+                    else:
+                        return None, result
+                except Exception as e:
+                    return None, f"❌ Unexpected error: {str(e)}"

+            generate_avatar_btn.click(
+                fn=safe_avatar_generation,
+                inputs=[audio_input, avatar_image, avatar_prompt, emotion_control],
+                outputs=[avatar_output, avatar_status]
+            )
+
+            generate_video_btn.click(
+                fn=safe_video_generation,
+                inputs=[video_prompt, duration_slider, resolution_dropdown],
+                outputs=[video_output, video_status]
+            )
+
+            gr.HTML("""
+            <div style="text-align: center; margin-top: 20px; color: #666;">
+                <p>Powered by Hunyuan Video Avatar & WanGP v6.3</p>
+                <p style="font-size: 12px;">Note: This is a demonstration interface with placeholder outputs</p>
+            </div>
+            """)

+        return demo

+    except Exception as e:
+        print(f"Error creating Gradio interface: {e}")
+        # Return minimal interface
+        demo = gr.Interface(
+            fn=lambda x: f"Error: {str(e)}",
+            inputs="text",
+            outputs="text",
+            title="WanGP v6.3 - Error State"
         )
+        return demo

+def main():
+    """Main function with comprehensive error handling"""
     print("🚀 Starting WanGP v6.3 with Hunyuan Video Avatar...")

+    try:
+        # Setup environment
+        setup_environment()
+    except Exception as e:
+        print(f"Environment setup warning: {e}")

+    try:
+        # Create configuration
+        config = create_hf_config()
+    except Exception as e:
+        print(f"Config creation warning: {e}")
+        config = {}

     try:
+        # Download models in background
         download_essential_models()
     except Exception as e:
+        print(f"Model download skipped: {e}")

+    try:
+        # Initialize WanGP interface
+        wangp_interface = WanGPInterface(config)
+    except Exception as e:
+        print(f"Interface initialization error: {e}")
+        # Create minimal interface
+        class MinimalInterface:
+            def __init__(self):
+                self.config = {}
+            def generate_avatar_video(self, *args):
+                return None, "Service temporarily unavailable"
+            def generate_video(self, *args):
+                return None, "Service temporarily unavailable"
+        wangp_interface = MinimalInterface()

+    try:
+        # Create and launch Gradio interface
+        demo = create_gradio_interface(wangp_interface)
+
+        print("✅ Setup complete! Launching application...")
+
+        # Launch with error handling
+        demo.launch(
+            server_name="0.0.0.0",
+            server_port=int(os.environ.get("GRADIO_SERVER_PORT", 7860)),
+            share=False,
+            debug=False,
+            show_error=True,
+            prevent_thread_lock=False
+        )
+    except Exception as e:
+        print(f"❌ Failed to launch Gradio: {e}")
+        print("Attempting fallback launch...")
+        try:
+            # Minimal fallback
+            import gradio as gr
+            gr.Interface(
+                fn=lambda x: "System Error - Please restart",
+                inputs="text",
+                outputs="text"
+            ).launch()
+        except:
+            print("❌ Complete failure. Exiting.")
+            sys.exit(1)
+
+if __name__ == "__main__":
+    main()