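"""HF Spaces entrypoint for WanGP v6.3 with Hunyuan Video Avatar.

Sets up the runtime environment (server and cache variables, optional
dependency installs), pre-downloads the Hunyuan Video models, writes an
optimized configuration to /tmp/hf_config.json, and launches the WanGP
application.
"""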
import os
import sys
import gradio as gr
import subprocess
import json

# Set environment variables for HF Spaces
os.environ["GRADIO_SERVER_NAME"] = "0.0.0.0"
os.environ["GRADIO_SERVER_PORT"] = "7860"

# Use /tmp as the Hugging Face model cache (writable on HF Spaces)
os.environ["HF_HUB_CACHE"] = "/tmp/hf_cache"
os.environ["HUGGINGFACE_HUB_CACHE"] = "/tmp/hf_cache"

# Disable tokenizer parallelism to avoid fork-related warnings/deadlocks with Hunyuan Video Avatar
os.environ["TOKENIZERS_PARALLELISM"] = "false"

def setup_environment():
    """Setup environment for HF Spaces with WanGP v6.3"""
    # Install additional dependencies if needed
    try:
        import sageattention
    except ImportError:
        print("Installing SageAttention...")
        subprocess.run([sys.executable, "-m", "pip", "install", "sageattention==1.0.6"], check=True)
    
    # Ensure face detection libs are available
    try:
        import insightface
        import facexlib
    except ImportError:
        print("Installing face processing libraries...")
        subprocess.run([sys.executable, "-m", "pip", "install", "insightface", "facexlib"], check=True)

def download_essential_models():
    """Pre-download essential models for faster startup"""
    from huggingface_hub import snapshot_download
    
    print("Downloading Hunyuan Video Avatar models...")
    try:
        # Download Hunyuan Video Avatar base models
        snapshot_download(
            repo_id="tencent/HunyuanVideo-Avatar", 
            cache_dir="/tmp/hf_cache",
            allow_patterns=["*.safetensors", "*.json", "*.txt", "*.bin"],
            ignore_patterns=["*.mp4", "*.avi", "*.mov"]  # Skip demo videos
        )
        
        # Download base Hunyuan Video model
        snapshot_download(
            repo_id="tencent/HunyuanVideo",
            cache_dir="/tmp/hf_cache", 
            allow_patterns=["*.safetensors", "*.json", "*.txt"],
            ignore_patterns=["*.mp4", "*.avi"]
        )
        
    except Exception as e:
        print(f"Model download warning: {e}")
        print("Models will be downloaded on-demand during first use.")

def create_hf_config():
    """Create optimized config for HF Spaces deployment"""
    config = {
        "model_settings": {
            "profile": 3,  # Optimized for A10G Large
            "quantize_transformer": True,
            "attention_mode": "sage",
            "compile": False,  # Disable for stability on HF
            "teacache": "2.0"
        },
        "avatar_settings": {
            "max_frames": 120,  # ~5 seconds at 24fps
            "resolution": "512x512",  # Balanced quality/performance
            "emotion_control": True,
            "multi_character": True
        },
        "memory_optimization": {
            "enable_vae_tiling": True,
            "enable_cpu_offload": True,
            "max_batch_size": 1,
            "gradient_checkpointing": True
        },
        "audio_processing": {
            "sample_rate": 16000,
            "max_duration": 15,  # seconds
            "supported_formats": ["wav", "mp3", "m4a"]
        }
    }
    
    with open("/tmp/hf_config.json", "w") as f:
        json.dump(config, f, indent=2)
    
    return config

if __name__ == "__main__":
    print("🚀 Starting WanGP v6.3 with Hunyuan Video Avatar...")
    
    setup_environment()
    config = create_hf_config()
    download_essential_models()
    
    print("✅ Setup complete! Launching application...")
    
    # Import and run the main application
    from wgp import main
    main(
        profile=3,  # Higher profile for A10G Large
        attention="sage",  # Use Sage attention for better performance
        server_name="0.0.0.0", 
        server_port=7860,
        quantize_transformer=True,
        teacache="2.0",  # Enable TeaCache for Avatar acceleration
        compile=False,  # Disabled for HF Spaces stability
        share=False,  # HF Spaces handles sharing
        config_file="/tmp/hf_config.json"
    )