File size: 3,387 Bytes
797f6a7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
#!/usr/bin/env python3
"""
Optimized App Launcher for Hugging Face Spaces
==============================================

Pre-loads models and optimizes the environment for fastest startup.
"""

import os
import sys
import logging
import warnings

# Silence noisy-but-harmless warning categories so Spaces logs stay readable.
warnings.filterwarnings("ignore", category=UserWarning)
warnings.filterwarnings("ignore", category=FutureWarning)

# Environment knobs read by downstream libraries at import time — must be
# set before transformers/tokenizers are imported anywhere:
#   TRANSFORMERS_VERBOSITY: keep HF logging to errors only.
#   TOKENIZERS_PARALLELISM: avoid the fork-after-parallelism warning/deadlock.
#   PYTHONUNBUFFERED: flush stdout/stderr immediately so logs stream live.
os.environ['TRANSFORMERS_VERBOSITY'] = 'error'
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
os.environ['PYTHONUNBUFFERED'] = '1'

# Root logging config: timestamped INFO-level messages for the launcher.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)

# Module-level logger used by all functions below.
logger = logging.getLogger(__name__)

def optimize_environment():
    """Tune the torch runtime for inference-only serving.

    Best-effort: any failure (torch absent, CUDA probe error, ...) is
    logged as a warning and swallowed so startup can continue.
    """
    try:
        import torch

        # Cap CPU threads to avoid oversubscription on shared Spaces hosts.
        if hasattr(torch, 'set_num_threads'):
            torch.set_num_threads(4)

        # This launcher never trains, so autograd bookkeeping is pure overhead.
        torch.set_grad_enabled(False)

        if not torch.cuda.is_available():
            logger.info("πŸ’» Running on CPU")
        else:
            # Let cuDNN benchmark and pick the fastest kernels; we do not
            # need deterministic results for TTS serving.
            torch.backends.cudnn.benchmark = True
            torch.backends.cudnn.deterministic = False
            logger.info("πŸš€ GPU optimization enabled")

    except Exception as e:
        logger.warning(f"Environment optimization failed: {e}")

def preload_models():
    """Load the TTS pipeline and run one warm-up inference.

    Returns:
        The initialized ``TTSPipeline`` instance, or ``None`` if loading
        failed (the error is logged; callers must handle ``None``).
    """
    logger.info("πŸ”„ Preloading models...")

    try:
        base_dir = os.path.dirname(os.path.abspath(__file__))
        # BUG FIX: `from src.pipeline import ...` resolves the `src`
        # package against its *parent* directory, so the script directory
        # itself must be on sys.path — the original appended only
        # `<base>/src`, which cannot make `src.pipeline` importable.
        # Keep the `src` entry too, in case pipeline modules import
        # siblings as top-level names. Dedupe to avoid growing sys.path
        # on repeated calls.
        for path in (base_dir, os.path.join(base_dir, 'src')):
            if path not in sys.path:
                sys.path.append(path)

        # Importing the pipeline pulls in (and loads) all model weights.
        from src.pipeline import TTSPipeline

        pipeline = TTSPipeline()

        # One warm-up synthesis ("Hello" in Armenian) so the first real
        # user request does not pay lazy-initialization costs.
        _ = pipeline.synthesize("Τ²Υ‘Φ€Φ‡", log_performance=False)

        logger.info("βœ… Models preloaded successfully")
        return pipeline

    except Exception as e:
        logger.error(f"Model preloading failed: {e}")
        return None

def main():
    """Main application entry point.

    Optimizes the runtime, preloads the TTS pipeline, then launches the
    Gradio interface. Exits with status 1 if model loading or interface
    startup fails.
    """
    logger.info("πŸš€ Starting Optimized Armenian TTS")

    # Torch tuning (best-effort; failures are logged and ignored).
    optimize_environment()

    # Load models up front so the first request is fast.
    pipeline = preload_models()

    if pipeline is None:
        logger.error("❌ Failed to initialize pipeline")
        sys.exit(1)

    try:
        logger.info("🌐 Starting Gradio interface...")

        import app_optimized

        # BUG FIX: the original did `globals()['tts_pipeline'] = pipeline`,
        # which only binds a name in *this* launcher module's namespace —
        # app_optimized's own `tts_pipeline` global was never touched, so
        # create_interface() could not see the preloaded pipeline. Assign
        # the attribute on the imported module object instead.
        app_optimized.tts_pipeline = pipeline

        interface = app_optimized.create_interface()

        # Launch settings tuned for Hugging Face Spaces: bind all
        # interfaces on the conventional port 7860; Spaces provides the
        # public URL, so no share tunnel is needed.
        interface.launch(
            server_name="0.0.0.0",
            server_port=7860,
            share=False,  # Spaces handles sharing
            max_threads=10,
            show_error=True,
            quiet=False
        )

    except Exception as e:
        logger.error(f"Application startup failed: {e}")
        sys.exit(1)

if __name__ == "__main__":
    main()