# Source: Hugging Face Spaces file viewer — Tranception app ("Running on Zero" GPU),
# commit 7a6c881, file size 1,872 bytes (56 lines).
#!/usr/bin/env python3
"""
Health check script for Tranception app on Hugging Face Spaces
"""
import os
import sys
import torch
def check_environment():
    """Print a diagnostic report for running Tranception on Hugging Face Spaces.

    Reports, in order:
      - Python and PyTorch versions, CUDA availability (plus CUDA version and
        GPU name when CUDA is present)
      - the DISABLE_ZERO_GPU and SPACE_ID environment variables
      - whether the ``spaces`` module imports and a ``spaces.GPU()`` decorator
        can be constructed (only attempted when SPACE_ID is set, i.e. when
        actually running on Spaces)
      - the list of expected Tranception model repos

    Purely informational: prints to stdout and returns None; never raises on
    a missing ``spaces`` module or a failing decorator.
    """
    print("=== Tranception Health Check ===")

    # Runtime versions.
    print(f"Python version: {sys.version}")
    print(f"PyTorch version: {torch.__version__}")
    print(f"CUDA available: {torch.cuda.is_available()}")
    if torch.cuda.is_available():
        print(f"CUDA version: {torch.version.cuda}")
        print(f"GPU: {torch.cuda.get_device_name(0)}")

    # Environment variables relevant to Zero GPU scheduling on Spaces.
    print("\nEnvironment variables:")
    print(f"DISABLE_ZERO_GPU: {os.environ.get('DISABLE_ZERO_GPU', 'not set')}")
    print(f"SPACE_ID: {os.environ.get('SPACE_ID', 'not set')}")

    # SPACE_ID is set by the Spaces runtime; use it to decide whether the
    # `spaces` (Zero GPU) module should be importable.
    if os.environ.get('SPACE_ID'):
        print("\nRunning on Hugging Face Spaces")
        try:
            import spaces
            print("✓ spaces module available")
            try:
                # spaces.GPU() returns a decorator; constructing it verifies
                # the Zero GPU integration without allocating a GPU.
                test_decorator = spaces.GPU()
                print("✓ Zero GPU decorator can be created")
            except Exception as e:
                print(f"✗ Zero GPU decorator error: {e}")
        except ImportError:
            print("✗ spaces module not available")
    else:
        print("\nNot running on Hugging Face Spaces")

    # NOTE(review): this does not actually query the Hub — it only lists the
    # repos the app expects. An online check would need huggingface_hub.
    print("\nChecking model availability on Hugging Face Hub:")
    models = ["Tranception_Small", "Tranception_Medium", "Tranception_Large"]
    for model in models:
        print(f"- PascalNotin/{model}: Available on HF Hub")

    print("\n=== Health check complete ===")
# Script entry point; stray " |" scrape artifact removed (was a syntax error).
if __name__ == "__main__":
    check_environment()