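"""Hardware detection utilities for choosing an optimization profile.

Auto-detects platform, CPU, Python, and GPU capabilities (CUDA via PyTorch
and nvidia-smi, MPS on Apple Silicon), offers a manual-input fallback, and
maps the result to a coarse optimization profile string.
"""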
import platform
import subprocess
import os
from typing import Dict, List, Optional


class HardwareDetector:
    def __init__(self):
        self.specs = self._detect_system_specs()
    
    def _detect_system_specs(self) -> Dict:
        """Detect system hardware specifications automatically."""
        specs = {
            'platform': platform.system(),
            'architecture': platform.machine(),
            'cpu_count': os.cpu_count(),
            'python_version': platform.python_version(),
            'gpu_info': self._detect_gpu(),
            'cuda_available': False,
            'mps_available': False
        }
        
        # Check for PyTorch and device availability
        try:
            import torch
            specs['torch_version'] = torch.__version__
            specs['cuda_available'] = torch.cuda.is_available()
            # torch.backends.mps was added in PyTorch 1.12; guard for older builds
            mps_backend = getattr(torch.backends, 'mps', None)
            specs['mps_available'] = mps_backend is not None and mps_backend.is_available()
            
            if specs['cuda_available']:
                specs['cuda_device_count'] = torch.cuda.device_count()
                specs['cuda_device_name'] = torch.cuda.get_device_name(0)
                specs['cuda_memory'] = torch.cuda.get_device_properties(0).total_memory // (1024**3)
                
        except ImportError:
            specs['torch_version'] = 'Not installed'
            
        return specs
    
    def _detect_gpu(self) -> Optional[List[Dict]]:
        """Attempt to detect GPU information using nvidia-smi."""
        try:
            result = subprocess.run([
                'nvidia-smi',
                '--query-gpu=name,memory.total',
                '--format=csv,noheader,nounits'
            ], capture_output=True, text=True, check=True, timeout=10)

            gpus = []
            for line in result.stdout.strip().split('\n'):
                if line.strip():
                    # Split on the last comma so a comma in the device name
                    # cannot break parsing; with nounits, memory.total is an
                    # integer number of MiB.
                    name, memory = line.rsplit(',', 1)
                    gpus.append({'name': name.strip(), 'memory_mb': int(memory)})
            return gpus

        except (subprocess.CalledProcessError, FileNotFoundError,
                subprocess.TimeoutExpired):
            return None
    
    def get_manual_input(self) -> Dict:
        """Get hardware specifications via manual user input."""
        print("Enter your hardware specifications manually:")
        
        gpu_name = input("GPU Name (e.g., RTX 4090, A100, leave empty if none): ").strip()
        if gpu_name:
            try:
                vram_gb = int(input("VRAM in GB (e.g., 24): "))
                gpu_info = [{'name': gpu_name, 'memory_mb': vram_gb * 1024}]
            except ValueError:
                gpu_info = None
        else:
            gpu_info = None
            
        try:
            ram_gb = int(input("System RAM in GB (e.g., 32): "))
        except ValueError:
            ram_gb = 16  # Default
            
        specs = self.specs.copy()
        specs['gpu_info'] = gpu_info
        specs['ram_gb'] = ram_gb
        specs['manual_input'] = True
        
        return specs
    
    def get_optimization_profile(self) -> str:
        """Determine the best optimization profile based on hardware."""
        # Prefer the CUDA memory reported by PyTorch; fall back to GPU info
        # from nvidia-smi or manual input so those paths also affect the profile.
        vram_gb = self.specs.get('cuda_memory', 0)
        if not vram_gb and self.specs.get('gpu_info'):
            vram_gb = self.specs['gpu_info'][0]['memory_mb'] / 1024

        if self.specs['cuda_available'] or vram_gb:
            if vram_gb >= 20:
                return 'high_end_gpu'
            elif vram_gb >= 8:
                return 'mid_range_gpu'
            else:
                return 'low_vram_gpu'
        elif self.specs['mps_available']:
            return 'apple_silicon'
        else:
            return 'cpu_only'
    
    def print_specs(self):
        """Print detected hardware specifications."""
        print(f"Platform: {self.specs['platform']} ({self.specs['architecture']})")
        print(f"CPU Cores: {self.specs['cpu_count']}")
        print(f"Python: {self.specs['python_version']}")
        print(f"PyTorch: {self.specs.get('torch_version', 'Not detected')}")
        print(f"CUDA Available: {self.specs['cuda_available']}")
        print(f"MPS Available: {self.specs['mps_available']}")
        
        if self.specs['gpu_info']:
            print("GPU Information:")
            for i, gpu in enumerate(self.specs['gpu_info']):
                vram_gb = gpu['memory_mb'] / 1024
                print(f"  GPU {i}: {gpu['name']} ({vram_gb:.1f} GB VRAM)")
        else:
            print("No GPU detected")


if __name__ == "__main__":
    detector = HardwareDetector()
    
    print("=== Auto-detected Hardware ===")
    detector.print_specs()
    
    choice = input("\nUse auto-detected specs? (y/n): ").strip().lower()
    if choice not in ('y', 'yes'):
        specs = detector.get_manual_input()
        detector.specs = specs
        print("\n=== Final Hardware Specs ===")
        detector.print_specs()
    
    print(f"\nRecommended optimization profile: {detector.get_optimization_profile()}")