# auto-diffuser-config/hardware_detector.py
import logging
import os
import platform
import subprocess
from typing import Dict, List, Optional

# Configure module-level logging
logger = logging.getLogger(__name__)
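
# Note: this module only creates a logger; it does not configure handlers.
# Callers that want to see the log output should configure logging themselves,
# e.g. with the standard library call:
#
#     import logging
#     logging.basicConfig(level=logging.DEBUG)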


class HardwareDetector:
    def __init__(self):
        logger.info("Initializing HardwareDetector")
        try:
            self.specs = self._detect_system_specs()
            logger.info("Hardware detection completed successfully")
            logger.debug(f"Detected specs: {self.specs}")
        except Exception as e:
            logger.error(f"Failed to detect hardware specs: {e}")
            raise
    def _detect_system_specs(self) -> Dict:
        """Detect system hardware specifications automatically."""
        logger.debug("Starting system hardware detection")
        platform_info = platform.system()
        architecture = platform.machine()
        cpu_count = os.cpu_count()
        python_version = platform.python_version()
        logger.debug(f"Platform: {platform_info}, Architecture: {architecture}")
        logger.debug(f"CPU cores: {cpu_count}, Python: {python_version}")
        gpu_info = self._detect_gpu()

        specs = {
            'platform': platform_info,
            'architecture': architecture,
            'cpu_count': cpu_count,
            'python_version': python_version,
            'gpu_info': gpu_info,
            'cuda_available': False,
            'mps_available': False
        }

        # Check for PyTorch and device availability
        logger.debug("Checking PyTorch availability")
        try:
            import torch
            torch_version = torch.__version__
            cuda_available = torch.cuda.is_available()
            # torch.backends.mps only exists on PyTorch >= 1.12; guard for older builds
            mps_backend = getattr(torch.backends, 'mps', None)
            mps_available = mps_backend is not None and mps_backend.is_available()
            logger.info(f"PyTorch {torch_version} detected")
            logger.debug(f"CUDA available: {cuda_available}, MPS available: {mps_available}")
            specs['torch_version'] = torch_version
            specs['cuda_available'] = cuda_available
            specs['mps_available'] = mps_available

            if cuda_available:
                device_count = torch.cuda.device_count()
                device_name = torch.cuda.get_device_name(0)
                # total_memory is reported in bytes; convert to whole GiB
                device_memory = torch.cuda.get_device_properties(0).total_memory // (1024**3)
                logger.info(f"CUDA devices: {device_count}, Primary: {device_name} ({device_memory}GB)")
                specs['cuda_device_count'] = device_count
                specs['cuda_device_name'] = device_name
                specs['cuda_memory'] = device_memory
        except ImportError as e:
            logger.warning(f"PyTorch not installed: {e}")
            specs['torch_version'] = 'Not installed'

        return specs
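
    # Illustrative shape of the dict returned above on a CUDA machine. The
    # values are hypothetical, and the cuda_* keys are only present when a
    # CUDA device is actually found:
    #
    #     {'platform': 'Linux', 'architecture': 'x86_64', 'cpu_count': 16,
    #      'python_version': '3.11.4',
    #      'gpu_info': [{'name': 'NVIDIA GeForce RTX 4090', 'memory_mb': 24564}],
    #      'cuda_available': True, 'mps_available': False,
    #      'torch_version': '2.3.0', 'cuda_device_count': 1,
    #      'cuda_device_name': 'NVIDIA GeForce RTX 4090', 'cuda_memory': 23}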
    def _detect_gpu(self) -> Optional[List[Dict]]:
        """Attempt to detect GPU information using nvidia-smi."""
        logger.debug("Attempting GPU detection via nvidia-smi")
        try:
            result = subprocess.run([
                'nvidia-smi',
                '--query-gpu=name,memory.total',
                '--format=csv,noheader,nounits'
            ], capture_output=True, text=True, check=True)
            logger.debug(f"nvidia-smi output: {result.stdout}")

            lines = result.stdout.strip().split('\n')
            gpus = []
            logger.debug(f"Found {len(lines)} GPU entries")

            for line in lines:
                if line.strip():
                    try:
                        # Split on the last comma so a GPU name that itself
                        # contains a comma does not break parsing
                        name, memory = line.rsplit(', ', 1)
                        gpu_entry = {'name': name.strip(), 'memory_mb': int(memory)}
                        gpus.append(gpu_entry)
                        logger.debug(f"Parsed GPU: {gpu_entry}")
                    except ValueError as e:
                        logger.warning(f"Failed to parse GPU line '{line}': {e}")

            logger.info(f"Successfully detected {len(gpus)} GPUs")
            return gpus
        except subprocess.CalledProcessError as e:
            logger.warning(f"nvidia-smi command failed: {e}")
            return None
        except FileNotFoundError:
            logger.debug("nvidia-smi not found, no NVIDIA GPU detected")
            return None
        except Exception as e:
            logger.error(f"Unexpected error during GPU detection: {e}")
            return None
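
    # With the query flags used above, nvidia-smi prints one CSV line per GPU.
    # A hypothetical two-GPU machine would produce output like the following
    # (memory.total is in MiB because of the 'nounits' flag):
    #
    #     NVIDIA GeForce RTX 4090, 24564
    #     NVIDIA GeForce RTX 4090, 24564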
    def get_manual_input(self) -> Dict:
        """Get hardware specifications via manual user input."""
        logger.info("Starting manual hardware input")
        print("Enter your hardware specifications manually:")

        gpu_name = input("GPU Name (e.g., RTX 4090, A100, leave empty if none): ").strip()
        logger.debug(f"User input GPU name: '{gpu_name}'")

        if gpu_name:
            try:
                vram_gb = int(input("VRAM in GB (e.g., 24): "))
                gpu_info = [{'name': gpu_name, 'memory_mb': vram_gb * 1024}]
                logger.info(f"Manual GPU configured: {gpu_name} with {vram_gb}GB VRAM")
            except ValueError as e:
                logger.warning(f"Invalid VRAM input: {e}")
                gpu_info = None
        else:
            gpu_info = None
            logger.info("No GPU specified in manual input")

        try:
            ram_gb = int(input("System RAM in GB (e.g., 32): "))
            logger.debug(f"User input RAM: {ram_gb}GB")
        except ValueError as e:
            logger.warning(f"Invalid RAM input: {e}, using default 16GB")
            ram_gb = 16  # Default

        specs = self.specs.copy()
        specs['gpu_info'] = gpu_info
        specs['ram_gb'] = ram_gb
        specs['manual_input'] = True
        logger.info(f"Manual hardware specs configured: {specs}")
        return specs
    def get_optimization_profile(self) -> str:
        """Determine the best optimization profile based on hardware."""
        logger.debug("Determining optimization profile")

        if self.specs['cuda_available']:
            cuda_memory = self.specs.get('cuda_memory', 0)
            logger.debug(f"CUDA available with {cuda_memory}GB memory")
            if cuda_memory >= 20:
                profile = 'high_end_gpu'
            elif cuda_memory >= 8:
                profile = 'mid_range_gpu'
            else:
                profile = 'low_vram_gpu'
        elif self.specs['mps_available']:
            logger.debug("MPS available, using Apple Silicon profile")
            profile = 'apple_silicon'
        else:
            logger.debug("No GPU acceleration available, using CPU-only profile")
            profile = 'cpu_only'

        logger.info(f"Selected optimization profile: {profile}")
        return profile
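
    # The profile string is intended to drive downstream pipeline settings.
    # A hypothetical mapping (not part of this module) might look like:
    #
    #     PROFILE_HINTS = {
    #         'high_end_gpu': 'float16, no memory tricks needed',
    #         'mid_range_gpu': 'float16',
    #         'low_vram_gpu': 'float16 plus attention slicing / CPU offload',
    #         'apple_silicon': 'float16 on the mps device',
    #         'cpu_only': 'float32 on CPU',
    #     }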
    def print_specs(self):
        """Print detected hardware specifications."""
        logger.info("Printing hardware specifications")
        print(f"Platform: {self.specs['platform']} ({self.specs['architecture']})")
        print(f"CPU Cores: {self.specs['cpu_count']}")
        print(f"Python: {self.specs['python_version']}")
        print(f"PyTorch: {self.specs.get('torch_version', 'Not detected')}")
        print(f"CUDA Available: {self.specs['cuda_available']}")
        print(f"MPS Available: {self.specs['mps_available']}")
        logger.debug("Hardware specs display completed")

        if self.specs['gpu_info']:
            print("GPU Information:")
            for i, gpu in enumerate(self.specs['gpu_info']):
                vram_gb = gpu['memory_mb'] / 1024
                print(f"  GPU {i}: {gpu['name']} ({vram_gb:.1f} GB VRAM)")
        else:
            print("No GPU detected")


if __name__ == "__main__":
    detector = HardwareDetector()
    print("=== Auto-detected Hardware ===")
    detector.print_specs()

    choice = input("\nUse auto-detected specs? (y/n): ").lower()
    if choice != 'y':
        specs = detector.get_manual_input()
        detector.specs = specs

    print("\n=== Final Hardware Specs ===")
    detector.print_specs()
    print(f"\nRecommended optimization profile: {detector.get_optimization_profile()}")