"""
Dynamic Performance Prediction Module for CanRun.

Advanced tiered performance predictions (S-A-B-C-D-F) with dynamic hardware
detection and real-time benchmarking, focused on NVIDIA RTX/GTX systems with
comprehensive laptop support.
"""

import logging
import re
import requests
import platform
import psutil

from typing import Dict, List, Optional, Tuple
from dataclasses import dataclass
from enum import Enum

# Optional dependencies: GPUtil for NVIDIA GPU enumeration, wmi for Windows WMI queries.
try:
    import GPUtil
except ImportError:
    GPUtil = None

try:
    import wmi
except ImportError:
    wmi = None


class PerformanceTier(Enum):
    """Performance tier classifications as (min_score, max_score, description)."""
    S = (90, 100, "Exceptional - Ultra settings, 4K@60fps+")
    A = (80, 89, "Excellent - High settings, 1440p@60fps")
    B = (70, 79, "Good - High settings, 1080p@60fps")
    C = (60, 69, "Adequate - Medium settings, 1080p@30fps")
    D = (50, 59, "Minimum - Low settings, 720p@30fps")
    F = (0, 49, "Below Minimum - Unable to run acceptably")


@dataclass
class PerformanceAssessment:
    """Complete performance assessment result with S-A-B-C-D-F tier system."""
    score: int
    tier: PerformanceTier
    tier_description: str
    expected_fps: int
    recommended_settings: str
    recommended_resolution: str
    bottlenecks: List[str]
    upgrade_suggestions: List[str]


class HardwareDetector:
    """Dynamic hardware detection with cross-platform support"""

    def __init__(self):
        self.logger = logging.getLogger(__name__)
        self.system_info = {}

    def detect_all(self) -> Dict:
        """Detect all hardware components"""
        self.system_info = {
            'cpu': self._detect_cpu(),
            'gpu': self._detect_gpu(),
            'ram': self._detect_ram(),
            'os': self._detect_os()
        }
        return self.system_info
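
    # Illustrative shape of the dictionary returned by detect_all() (the values
    # below are examples only, not guaranteed outputs):
    #
    #   {
    #       'cpu': {'name': 'Intel(R) Core(TM) i7-12700H', 'cores': 14, 'threads': 20, 'frequency': 4700},
    #       'gpu': {'cards': [{'name': 'NVIDIA GeForce RTX 3070 Laptop GPU', 'memory': 8, 'driver': '546.33'}]},
    #       'ram': {'total': 32.0, 'available': 21.4, 'used_percent': 33.1},
    #       'os': {'system': 'Windows', 'release': '11', 'version': '10.0.22631', 'architecture': 'AMD64'}
    #   }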

    def _detect_cpu(self) -> Dict:
        """Detect CPU information"""
        freq = psutil.cpu_freq()
        cpu_info = {
            'name': platform.processor(),
            'cores': psutil.cpu_count(logical=False),
            'threads': psutil.cpu_count(logical=True),
            'frequency': freq.max if freq else 0
        }

        # On Windows, WMI usually yields a friendlier CPU name than platform.processor().
        if platform.system() == 'Windows' and wmi:
            try:
                c = wmi.WMI()
                for processor in c.Win32_Processor():
                    cpu_info['name'] = processor.Name
                    cpu_info['max_clock_speed'] = processor.MaxClockSpeed
            except Exception as e:
                self.logger.debug(f"WMI CPU detection failed: {e}")

        return cpu_info

    def _detect_gpu(self) -> Dict:
        """Detect GPU information - NVIDIA focus"""
        gpu_info = {'cards': []}

        nvidia_keywords = ('nvidia', 'geforce', 'rtx', 'gtx')

        if GPUtil:
            try:
                gpus = GPUtil.getGPUs()
                for gpu in gpus:
                    if any(keyword in gpu.name.lower() for keyword in nvidia_keywords):
                        gpu_info['cards'].append({
                            'name': gpu.name,
                            # GPUtil reports memoryTotal in MB; store GB for consistency with the WMI path.
                            'memory': gpu.memoryTotal / 1024,
                            'driver': gpu.driver
                        })
            except Exception as e:
                self.logger.debug(f"GPUtil detection failed: {e}")

        # Fall back to WMI on Windows if GPUtil found nothing.
        if not gpu_info['cards'] and platform.system() == 'Windows' and wmi:
            try:
                c = wmi.WMI()
                for gpu in c.Win32_VideoController():
                    if gpu.Name and gpu.AdapterRAM:
                        gpu_name = gpu.Name.lower()
                        if any(keyword in gpu_name for keyword in nvidia_keywords):
                            gpu_info['cards'].append({
                                'name': gpu.Name,
                                # AdapterRAM is reported in bytes (a 32-bit field, so it caps near 4 GB).
                                'memory': gpu.AdapterRAM / (1024**3),
                                'driver': gpu.DriverVersion
                            })
            except Exception as e:
                self.logger.debug(f"WMI GPU detection failed: {e}")

        return gpu_info

    def _detect_ram(self) -> Dict:
        """Detect RAM information (values in GB)"""
        memory = psutil.virtual_memory()
        return {
            'total': memory.total / (1024**3),
            'available': memory.available / (1024**3),
            'used_percent': memory.percent
        }

    def _detect_os(self) -> Dict:
        """Detect OS information"""
        return {
            'system': platform.system(),
            'release': platform.release(),
            'version': platform.version(),
            'architecture': platform.machine()
        }


class PerformanceCalculator:
    """Dynamic performance calculation with pattern matching for NVIDIA GPUs"""

    def __init__(self):
        self.logger = logging.getLogger(__name__)
        self.cpu_benchmarks = self._load_cpu_benchmarks()
        self.gpu_benchmarks = self._load_gpu_benchmarks()

    def _load_cpu_benchmarks(self) -> Dict[str, int]:
        """Load CPU benchmark data as regex pattern -> approximate benchmark score.

        Patterns are scanned in insertion order and the first match wins, so the
        more specific patterns must appear before the broader ones.
        """
        return {
            # Intel 13th/14th gen
            r'i9-1[3-4]\d{3}[A-Z]*': 35000,
            r'i7-1[3-4]\d{3}[A-Z]*': 25000,
            r'i5-1[3-4]\d{3}[A-Z]*': 15000,
            r'i3-1[3-4]\d{3}[A-Z]*': 8000,

            # Intel 12th gen
            r'i9-12\d{3}[A-Z]*': 32000,
            r'i7-12\d{3}[A-Z]*': 22000,
            r'i5-12\d{3}[A-Z]*': 13000,
            r'i3-12\d{3}[A-Z]*': 7000,

            # AMD Ryzen 7000 series
            r'Ryzen 9 7\d{3}[A-Z]*': 35000,
            r'Ryzen 7 7800X3D': 38000,
            r'Ryzen 7 7\d{3}[A-Z]*': 25000,
            r'Ryzen 5 7\d{3}[A-Z]*': 15000,
            r'Ryzen 3 7\d{3}[A-Z]*': 8000,

            # AMD Ryzen 5000 series
            r'Ryzen 9 5\d{3}[A-Z]*': 30000,
            r'Ryzen 7 5800X3D': 32000,
            r'Ryzen 7 5\d{3}[A-Z]*': 22000,
            r'Ryzen 5 5\d{3}[A-Z]*': 13000,
            r'Ryzen 3 5\d{3}[A-Z]*': 7000,

            # Apple Silicon (Max/Pro listed before the base pattern so they match first)
            r'Apple M[1-4] Max': 40000,
            r'Apple M[1-4] Pro': 30000,
            r'Apple M[1-4]': 20000,
        }

    def _load_gpu_benchmarks(self) -> Dict[str, int]:
        """Load NVIDIA GPU benchmark data, including RTX 5000 series and laptop variants.

        Patterns are scanned in insertion order and the first match wins, so the
        more specific laptop/mobile patterns are listed before their desktop
        counterparts within each series.
        """
        return {
            # RTX 50 series
            r'RTX 5090': 55000,
            r'RTX 5080': 48000,
            r'RTX 507[0-9]': 40000,
            r'RTX 506[0-9]': 32000,

            # RTX 40 series (laptop)
            r'RTX 4090 Laptop': 38000,
            r'RTX 4080 Laptop': 32000,
            r'RTX 4070 Laptop': 24000,
            r'RTX 4060 Laptop': 16000,
            r'RTX 4050 Laptop': 12000,

            # RTX 40 series (desktop)
            r'RTX 4090': 45000,
            r'RTX 4080 Super': 40000,
            r'RTX 4080': 38000,
            r'RTX 4070 Ti Super': 35000,
            r'RTX 4070 Ti': 32000,
            r'RTX 4070 Super': 30000,
            r'RTX 4070': 28000,
            r'RTX 4060 Ti': 22000,
            r'RTX 4060': 18000,

            # RTX 30 series (laptop)
            r'RTX 3080 Ti Laptop': 26000,
            r'RTX 3080 Laptop': 22000,
            r'RTX 3070 Ti Laptop': 20000,
            r'RTX 3070 Laptop': 18000,
            r'RTX 3060 Laptop': 13000,
            r'RTX 3050 Ti Laptop': 9000,
            r'RTX 3050 Laptop': 8000,

            # RTX 30 series (desktop)
            r'RTX 3090 Ti': 39000,
            r'RTX 3090': 35000,
            r'RTX 3080 Ti': 32000,
            r'RTX 3080': 28000,
            r'RTX 3070 Ti': 25000,
            r'RTX 3070': 22000,
            r'RTX 3060 Ti': 18000,
            r'RTX 3060': 15000,
            r'RTX 3050': 10000,

            # RTX 20 series (laptop)
            r'RTX 2080 Laptop': 14000,
            r'RTX 2070 Laptop': 11000,
            r'RTX 2060 Laptop': 9000,

            # RTX 20 series (desktop)
            r'RTX 2080 Ti': 20000,
            r'RTX 2080 Super': 18000,
            r'RTX 2080': 16000,
            r'RTX 2070 Super': 15000,
            r'RTX 2070': 13000,
            r'RTX 2060 Super': 12000,
            r'RTX 2060': 10000,

            # GTX 16 series (laptop/mobile)
            r'GTX 1660 Ti Laptop': 7500,
            r'GTX 1660 Ti Mobile': 7500,
            r'GTX 1650 Ti Laptop': 5500,
            r'GTX 1650 Laptop': 5000,

            # GTX 16 series (desktop)
            r'GTX 1660 Ti': 9000,
            r'GTX 1660 Super': 8500,
            r'GTX 1660': 8000,
            r'GTX 1650 Super': 7000,
            r'GTX 1650': 6000,

            # GTX 10 series (laptop)
            r'GTX 1080 Laptop': 8500,
            r'GTX 1070 Laptop': 7000,
            r'GTX 1060 Laptop': 5000,
            r'GTX 1050 Ti Laptop': 3200,
            r'GTX 1050 Laptop': 2500,

            # GTX 10 series (desktop)
            r'GTX 1080 Ti': 12000,
            r'GTX 1080': 10000,
            r'GTX 1070 Ti': 9000,
            r'GTX 1070': 8000,
            r'GTX 1060 6GB': 6500,
            r'GTX 1060 3GB': 5500,
            r'GTX 1060': 6000,
            r'GTX 1050 Ti': 4000,
            r'GTX 1050': 3000,

            # GTX 900 series
            r'GTX 980 Ti': 7000,
            r'GTX 980': 6000,
            r'GTX 970': 5000,
            r'GTX 960': 3500,
            r'GTX 950': 2500,

            # Generic fallbacks
            r'GeForce.*RTX': 15000,
            r'GeForce.*GTX': 5000,
            r'NVIDIA.*RTX': 15000,
            r'NVIDIA.*GTX': 5000,
        }
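
    # Illustrative example of how the table above resolves a detected GPU name
    # (given the laptop-before-desktop ordering): a card reported as
    # "NVIDIA GeForce RTX 3060 Laptop GPU" matches r'RTX 3060 Laptop' (13000)
    # before the broader r'RTX 3060' (15000), so the laptop score is used.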

    def calculate_cpu_score(self, cpu_info: Dict, requirements: Dict) -> float:
        """Calculate CPU performance score (0-100) relative to the game's requirements."""
        cpu_name = cpu_info.get('name', '')
        cpu_score = 0

        self.logger.debug(f"Calculating CPU score for: {cpu_name}")

        # First try to match the CPU name against the known benchmark patterns.
        for pattern, benchmark in self.cpu_benchmarks.items():
            if re.search(pattern, cpu_name, re.IGNORECASE):
                cpu_score = benchmark
                self.logger.debug(f"CPU matched pattern '{pattern}' with score {benchmark}")
                break

        # Fallback: rough estimate from core count and clock speed.
        if cpu_score == 0:
            cores = cpu_info.get('cores', 1)
            freq = cpu_info.get('frequency', 2000)
            cpu_score = cores * freq * 2.5
            self.logger.debug(f"CPU fallback estimation: {cores} cores * {freq} MHz * 2.5 = {cpu_score}")

        # Normalize against the score implied by the recommended CPU requirement.
        req_cpu = requirements.get('recommended', {}).get('processor', '')
        req_score = self._estimate_required_cpu_score(req_cpu)

        final_score = min(100, (cpu_score / req_score) * 100) if req_score > 0 else 75
        self.logger.debug(f"Final CPU score: {final_score}")
        return final_score

    def calculate_gpu_score(self, gpu_info: Dict, requirements: Dict) -> float:
        """Calculate NVIDIA GPU performance score (0-100) relative to the game's requirements."""
        if not gpu_info.get('cards'):
            self.logger.warning("No NVIDIA GPU detected")
            return 0

        gpu_name = gpu_info['cards'][0].get('name', '')
        gpu_score = 0

        self.logger.debug(f"Calculating GPU score for: {gpu_name}")

        # First try to match the GPU name against the known benchmark patterns.
        for pattern, benchmark in self.gpu_benchmarks.items():
            if re.search(pattern, gpu_name, re.IGNORECASE):
                gpu_score = benchmark
                self.logger.debug(f"GPU matched pattern '{pattern}' with score {benchmark}")
                break

        # Fallback: rough estimate from VRAM size (GB) scaled by architecture generation.
        if gpu_score == 0:
            memory = gpu_info['cards'][0].get('memory', 0)

            if 'rtx' in gpu_name.lower():
                if '40' in gpu_name:
                    gpu_score = memory * 3000
                elif '30' in gpu_name:
                    gpu_score = memory * 2500
                elif '20' in gpu_name:
                    gpu_score = memory * 2000
                else:
                    gpu_score = memory * 2200
            elif 'gtx' in gpu_name.lower():
                if '16' in gpu_name:
                    gpu_score = memory * 1500
                elif '10' in gpu_name:
                    gpu_score = memory * 1200
                else:
                    gpu_score = memory * 1000
            else:
                gpu_score = memory * 1500

            self.logger.debug(f"GPU fallback estimation: {memory}GB * generation multiplier = {gpu_score}")

        # Normalize against the score implied by the recommended GPU requirement.
        req_gpu = requirements.get('recommended', {}).get('graphics', '')
        req_score = self._estimate_required_gpu_score(req_gpu)

        final_score = min(100, (gpu_score / req_score) * 100) if req_score > 0 else 75
        self.logger.debug(f"Final GPU score: {final_score}")
        return final_score

    def calculate_ram_score(self, ram_info: Dict, requirements: Dict) -> float:
        """Calculate RAM performance score (0-100)"""
        available_ram = ram_info.get('total', 0)
        required_ram = requirements.get('recommended', {}).get('memory', 8)

        if required_ram == 0:
            required_ram = requirements.get('minimum', {}).get('memory', 4)
        if not required_ram:
            # Guard against a zero requirement to avoid division by zero.
            required_ram = 4

        score = min(100, (available_ram / required_ram) * 100)
        self.logger.debug(f"RAM score: {available_ram}GB / {required_ram}GB = {score}")
        return score

    def _estimate_required_cpu_score(self, cpu_string: str) -> int:
        """Estimate required CPU score from string"""
        patterns = {
            r'i9|Ryzen 9': 30000,
            r'i7|Ryzen 7': 20000,
            r'i5|Ryzen 5': 12000,
            r'i3|Ryzen 3': 6000,
            r'Core 2 Duo|Dual.?Core': 2000,
            r'Quad.?Core': 4000,
        }

        for pattern, score in patterns.items():
            if re.search(pattern, cpu_string, re.IGNORECASE):
                return score

        return 8000

    def _estimate_required_gpu_score(self, gpu_string: str) -> int:
        """Estimate required NVIDIA GPU score from string"""
        patterns = {
            r'RTX 50\d{2}': 40000,
            r'RTX 40\d{2}': 30000,
            r'RTX 30\d{2}': 20000,
            r'RTX 20\d{2}': 12000,
            r'GTX 16\d{2}': 8000,
            r'GTX 10\d{2}': 6000,
            r'GTX 9\d{2}': 4000,
        }

        for pattern, score in patterns.items():
            if re.search(pattern, gpu_string, re.IGNORECASE):
                return score

        return 8000


class DynamicPerformancePredictor:
    """Dynamic performance predictor with real-time hardware detection for NVIDIA systems"""

    def __init__(self):
        self.logger = logging.getLogger(__name__)
        self.hardware_detector = HardwareDetector()
        self.calculator = PerformanceCalculator()

        # Component weights for the overall score: deliberately GPU-bound.
        self.weights = {
            'gpu': 0.60,
            'cpu': 0.25,
            'ram': 0.15
        }

        self.logger.info("Dynamic performance predictor initialized for NVIDIA RTX/GTX systems")
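
    # Worked example of the weighted base score computed in assess_performance()
    # below (component scores are illustrative): with gpu=80, cpu=70, ram=90,
    # base_score = 0.60*80 + 0.25*70 + 0.15*90 = 48 + 17.5 + 13.5 = 79 -> tier B.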

    def assess_performance(self, hardware_specs: Dict = None, game_requirements: Dict = None) -> PerformanceAssessment:
        """
        Generate an advanced tiered performance assessment using dynamic detection.

        Args:
            hardware_specs: Optional pre-detected hardware specs
            game_requirements: Optional game requirements from Steam API

        Returns:
            PerformanceAssessment with tier, score, FPS, and recommendations
        """

        self.logger.info("Generating dynamic performance assessment")

        # Use live hardware detection unless pre-detected specs were supplied.
        if hardware_specs is None:
            hardware = self.hardware_detector.detect_all()
        else:
            # Normalize the supplied specs into the same structure detect_all() produces.
            hardware = {
                'cpu': {
                    'name': hardware_specs.get('cpu_model', ''),
                    'cores': hardware_specs.get('cpu_cores', 4),
                    'threads': hardware_specs.get('cpu_threads', 8),
                    'frequency': hardware_specs.get('cpu_frequency', 3000)
                },
                'gpu': {
                    'cards': [{
                        'name': hardware_specs.get('gpu_model', ''),
                        'memory': hardware_specs.get('gpu_vram_gb', 4),
                        'driver': 'Unknown'
                    }]
                },
                'ram': {
                    'total': hardware_specs.get('ram_total_gb', 8),
                    'available': hardware_specs.get('ram_total_gb', 8) * 0.7,
                    'used_percent': 30
                }
            }

        # Score each component (0-100) against the game requirements.
        scores = {
            'cpu': self.calculator.calculate_cpu_score(hardware['cpu'], game_requirements or {}),
            'gpu': self.calculator.calculate_gpu_score(hardware['gpu'], game_requirements or {}),
            'ram': self.calculator.calculate_ram_score(hardware['ram'], game_requirements or {})
        }

        self.logger.debug(f"Component scores: {scores}")

        # Weighted base score (GPU 60%, CPU 25%, RAM 15%).
        base_score = int(
            scores['gpu'] * self.weights['gpu'] +
            scores['cpu'] * self.weights['cpu'] +
            scores['ram'] * self.weights['ram']
        )

        total_score = base_score

        # Adjust the weighted score based on how the detected GPU compares with the
        # game's minimum and recommended GPU requirements (raw benchmark values).
        if game_requirements:
            min_gpu = game_requirements.get('minimum_gpu', '')
            rec_gpu = game_requirements.get('recommended_gpu', '')
            min_cpu = game_requirements.get('minimum_cpu', '')
            rec_cpu = game_requirements.get('recommended_cpu', '')
            min_ram = game_requirements.get('minimum_ram_gb', 8)
            rec_ram = game_requirements.get('recommended_ram_gb', 16)

            min_exceeded_factor = 0
            rec_approach_factor = 0

            # Compare raw GPU benchmark values, not the normalized 0-100 GPU score.
            gpu_model = hardware['gpu']['cards'][0]['name'] if hardware['gpu']['cards'] else ''
            user_gpu_benchmark = 0
            for pattern, benchmark in self.calculator.gpu_benchmarks.items():
                if re.search(pattern, gpu_model, re.IGNORECASE):
                    user_gpu_benchmark = benchmark
                    break

            min_gpu_benchmark = self._estimate_gpu_benchmark(min_gpu)
            rec_gpu_benchmark = self._estimate_gpu_benchmark(rec_gpu)

            if min_gpu_benchmark > 0 and rec_gpu_benchmark > 0 and user_gpu_benchmark > 0:
                # How far the user's GPU exceeds the minimum requirement.
                if user_gpu_benchmark > min_gpu_benchmark:
                    min_exceeded_factor += (user_gpu_benchmark - min_gpu_benchmark) / min_gpu_benchmark

                # How far the user's GPU sits between minimum and recommended.
                if rec_gpu_benchmark > min_gpu_benchmark:
                    rec_range = rec_gpu_benchmark - min_gpu_benchmark
                    user_in_range = user_gpu_benchmark - min_gpu_benchmark
                    if user_in_range > 0:
                        rec_approach_factor += min(1.0, user_in_range / rec_range)

            # Boost for comfortably exceeding the minimum requirement.
            if min_exceeded_factor > 1.5:
                total_score = min(100, int(total_score * 1.1))
            elif min_exceeded_factor > 0.5:
                total_score = min(100, int(total_score * 1.05))

            # Boost for approaching or reaching the recommended requirement.
            if rec_approach_factor > 0.8:
                total_score = min(100, int(total_score * 1.1))
            elif rec_approach_factor > 0.5:
                total_score = min(100, int(total_score * 1.05))

            # Penalty for barely clearing (or falling short of) the minimum requirement.
            if min_exceeded_factor < 0.2:
                total_score = max(0, int(total_score * 0.9))

        # Game-specific adjustments when the title can be identified.
        game_name = None
        if game_requirements:
            game_name = (
                game_requirements.get('game_name', '') or
                game_requirements.get('minimum_game', '') or
                game_requirements.get('recommended_game', '') or
                game_requirements.get('name', '')
            )

        if game_name:
            self.logger.info(f"Applying game-specific adjustments for {game_name}")

            min_gpu = game_requirements.get('minimum_gpu', '')
            rec_gpu = game_requirements.get('recommended_gpu', '')
            gpu_model = hardware['gpu']['cards'][0]['name'] if hardware['gpu']['cards'] else ''

            min_gpu_score = self._estimate_gpu_benchmark(min_gpu)
            rec_gpu_score = self._estimate_gpu_benchmark(rec_gpu)
            user_gpu_score = 0

            # Raw benchmark value for the user's GPU.
            for pattern, benchmark in self.calculator.gpu_benchmarks.items():
                if re.search(pattern, gpu_model, re.IGNORECASE):
                    user_gpu_score = benchmark
                    break

            self.logger.info(f"Game: {game_name}, Min GPU Score: {min_gpu_score}, Rec GPU Score: {rec_gpu_score}, User GPU Score: {user_gpu_score}")

            if min_gpu_score > 0 and user_gpu_score > 0:
                if user_gpu_score < min_gpu_score:
                    # Below minimum requirements: cap the score well down.
                    total_score = max(30, int(total_score * 0.65))
                    self.logger.info(f"Hardware below minimum requirements, reducing score to {total_score}")

                elif rec_gpu_score > min_gpu_score and user_gpu_score < rec_gpu_score:
                    # Between minimum and recommended: interpolate into the C-B range.
                    position = (user_gpu_score - min_gpu_score) / (rec_gpu_score - min_gpu_score)
                    tier_score = 60 + int(position * 20)
                    total_score = min(tier_score, total_score)
                    self.logger.info(f"Hardware between min and rec, setting score to {total_score}")

                elif user_gpu_score >= rec_gpu_score:
                    # At or above recommended: guarantee at least a B/A/S floor.
                    exceed_factor = user_gpu_score / rec_gpu_score
                    if exceed_factor >= 1.5:
                        total_score = max(total_score, 90)
                        self.logger.info(f"Hardware well above rec, setting to S tier ({total_score})")
                    elif exceed_factor >= 1.2:
                        total_score = max(total_score, 80)
                        self.logger.info(f"Hardware above rec, setting to A tier ({total_score})")
                    else:
                        total_score = max(total_score, 70)
                        self.logger.info(f"Hardware meets rec, setting to B tier ({total_score})")

        # Map the final score to a tier and derive the user-facing recommendations.
        tier = self._get_tier(total_score)
        self.logger.info(f"Final performance tier: {tier.name} with score {total_score}")

        expected_fps = self._calculate_expected_fps(tier, scores['gpu'], scores['cpu'], game_requirements, hardware)

        recommended_settings, recommended_resolution = self._determine_recommendations(tier, total_score)

        bottlenecks = self._identify_bottlenecks(scores)

        upgrade_suggestions = self._generate_upgrade_suggestions(hardware, scores, tier)

        assessment = PerformanceAssessment(
            score=total_score,
            tier=tier,
            tier_description=tier.value[2],
            expected_fps=expected_fps,
            recommended_settings=recommended_settings,
            recommended_resolution=recommended_resolution,
            bottlenecks=bottlenecks,
            upgrade_suggestions=upgrade_suggestions
        )

        self.logger.info(f"Dynamic performance assessment: Score {assessment.score}, Tier {assessment.tier.name}")

        return assessment

    def _get_tier(self, score: float) -> PerformanceTier:
        """Convert score to tier"""
        for tier in PerformanceTier:
            min_score, max_score, _ = tier.value
            if min_score <= score <= max_score:
                return tier
        return PerformanceTier.F
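
    # Boundary behavior (illustrative): _get_tier(90) -> S, _get_tier(89) -> A,
    # _get_tier(70) -> B, _get_tier(49) -> F; both ends of each range are inclusive.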

    def _calculate_expected_fps(self, tier: PerformanceTier, gpu_score: float, cpu_score: float,
                                game_requirements: Dict = None, hardware: Dict = None) -> int:
        """
        Calculate expected FPS based on tier, component scores, and game-specific requirements.

        Args:
            tier: Performance tier classification
            gpu_score: GPU score (0-100)
            cpu_score: CPU score (0-100)
            game_requirements: Optional game requirements from Steam API
            hardware: Optional detected hardware dict, used to look up the user's GPU model

        Returns:
            Expected FPS value
        """
        # Baseline FPS per tier.
        base_fps = {
            PerformanceTier.S: 90,
            PerformanceTier.A: 75,
            PerformanceTier.B: 60,
            PerformanceTier.C: 40,
            PerformanceTier.D: 30,
            PerformanceTier.F: 20
        }

        fps = base_fps.get(tier, 30)

        if game_requirements:
            game_name = (
                game_requirements.get('game_name', '') or
                game_requirements.get('minimum_game', '') or
                game_requirements.get('recommended_game', '') or
                game_requirements.get('name', '')
            )

            if game_name:
                self.logger.info(f"Calculating game-specific FPS for {game_name}")

                fps_modifier = 1.0

                # Titles that typically run well above their stated requirements.
                well_optimized_games = [
                    'fortnite', 'valorant', 'apex legends', 'rocket league',
                    'league of legends', 'counter-strike', 'counter strike', 'cs2',
                    'overwatch', 'minecraft', 'dota 2', 'rainbow six siege'
                ]

                # Titles that are notoriously demanding.
                demanding_games = [
                    'cyberpunk 2077', 'cyberpunk', 'red dead redemption 2', 'red dead redemption',
                    'assassin\'s creed valhalla', 'assassin\'s creed', 'flight simulator',
                    'control', 'metro exodus', 'crysis', 'star citizen', 'elden ring'
                ]

                game_lower = game_name.lower()

                if any(optimized_game in game_lower for optimized_game in well_optimized_games):
                    fps_modifier = 1.2
                    self.logger.info("Well-optimized game detected, applying 20% FPS boost")
                elif any(demanding_game in game_lower for demanding_game in demanding_games):
                    fps_modifier = 0.8
                    self.logger.info("Demanding game detected, reducing FPS by 20%")

                fps = int(fps * fps_modifier)

                # Engine-specific tweaks when the engine name appears in the title.
                if 'unreal engine' in game_lower or 'unreal' in game_lower:
                    if gpu_score < 60:
                        fps = int(fps * 0.9)
                    elif gpu_score > 85:
                        fps = int(fps * 1.1)
                elif 'unity' in game_lower:
                    if min(cpu_score, gpu_score) < 60:
                        fps = int(fps * 0.9)

                # Compare the user's GPU against the game's GPU requirements.
                min_gpu = game_requirements.get('minimum_gpu', '')
                rec_gpu = game_requirements.get('recommended_gpu', '')

                min_gpu_score = self._estimate_gpu_benchmark(min_gpu)
                rec_gpu_score = self._estimate_gpu_benchmark(rec_gpu)

                # Resolve the user's GPU model from the requirements override or the detected hardware.
                if 'user_gpu_model' in game_requirements:
                    gpu_model = game_requirements.get('user_gpu_model', '')
                elif hardware and hardware.get('gpu', {}).get('cards'):
                    gpu_model = hardware['gpu']['cards'][0]['name']
                else:
                    gpu_model = ''

                user_gpu_benchmark = 0
                for pattern, benchmark in self.calculator.gpu_benchmarks.items():
                    if re.search(pattern, gpu_model, re.IGNORECASE):
                        user_gpu_benchmark = benchmark
                        break

                if min_gpu_score > 0 and rec_gpu_score > 0 and user_gpu_benchmark > 0:
                    # Ratio of the user's GPU to the minimum and recommended requirements.
                    min_ratio = user_gpu_benchmark / min_gpu_score
                    rec_ratio = user_gpu_benchmark / rec_gpu_score

                    if min_ratio < 1.0:
                        # Below minimum requirements.
                        fps = int(fps * min_ratio * 0.8)
                        self.logger.info(f"Below minimum requirements, reducing FPS to {fps}")
                    elif rec_ratio >= 1.5:
                        # Far exceeds recommended requirements.
                        fps = int(fps * 1.3)
                        self.logger.info(f"Far exceeds recommended requirements, boosting FPS to {fps}")
                    elif rec_ratio >= 1.0:
                        # Exceeds recommended requirements.
                        fps = int(fps * 1.15)
                        self.logger.info(f"Exceeds recommended requirements, boosting FPS to {fps}")
                    else:
                        # Between minimum and recommended: interpolate the boost up to 15%.
                        if rec_gpu_score > min_gpu_score:
                            position = (user_gpu_benchmark - min_gpu_score) / (rec_gpu_score - min_gpu_score)
                        else:
                            position = 0.0
                        fps_factor = 1.0 + (min(1.0, max(0.0, position)) * 0.15)
                        fps = int(fps * fps_factor)
                        self.logger.info(f"Between min and rec requirements, adjusted FPS to {fps}")

        # Component-level adjustments on top of the game-aware estimate.
        if gpu_score >= 90:
            fps += 20
        elif gpu_score >= 80:
            fps += 10
        elif gpu_score <= 50:
            fps -= 10

        if cpu_score >= 90:
            fps += 5
        elif cpu_score <= 50:
            fps -= 5

        return max(15, fps)

    def _determine_recommendations(self, tier: PerformanceTier, score: int) -> Tuple[str, str]:
        """Determine recommended settings and resolution"""
        if tier == PerformanceTier.S:
            return "Ultra/Maximum", "4K (3840x2160)"
        elif tier == PerformanceTier.A:
            return "High", "1440p (2560x1440)"
        elif tier == PerformanceTier.B:
            return "High", "1080p (1920x1080)"
        elif tier == PerformanceTier.C:
            return "Medium", "1080p (1920x1080)"
        elif tier == PerformanceTier.D:
            return "Low", "720p (1280x720)"
        else:
            return "Minimum", "720p (1280x720)"

    def _identify_bottlenecks(self, scores: Dict) -> List[str]:
        """Identify system bottlenecks"""
        bottlenecks = []

        # A component is a bottleneck if it is (near) the lowest score and clearly below average.
        min_score = min(scores.values())
        avg_score = sum(scores.values()) / len(scores)

        for component, score in scores.items():
            if score <= min_score + 5 and score < avg_score - 10:
                bottlenecks.append(component.upper())

        return bottlenecks

    def _generate_upgrade_suggestions(self, hardware: Dict, scores: Dict, tier: PerformanceTier) -> List[str]:
        """Generate hardware upgrade suggestions for NVIDIA systems"""
        suggestions = []

        # GPU suggestions
        if scores['gpu'] < 70:
            if tier == PerformanceTier.F or tier == PerformanceTier.D:
                suggestions.append("GPU upgrade essential - Consider RTX 3060 or RTX 4060")
            elif tier == PerformanceTier.C:
                suggestions.append("GPU upgrade recommended - Consider RTX 3070 or RTX 4070")

        # CPU suggestions
        if scores['cpu'] < 65:
            suggestions.append("CPU upgrade recommended for better performance")

        # RAM suggestions
        ram_gb = hardware['ram']['total']
        if ram_gb < 16:
            suggestions.append("Upgrade to 16GB+ RAM for optimal performance")
        elif ram_gb < 32 and tier == PerformanceTier.S:
            suggestions.append("Consider 32GB RAM for maximum performance")

        # NVIDIA feature suggestions
        gpu_name = hardware['gpu']['cards'][0]['name'] if hardware['gpu']['cards'] else ''
        if 'rtx' in gpu_name.lower():
            suggestions.append("Enable DLSS for better performance with RTX cards")
            if any(series in gpu_name.lower() for series in ['rtx 20', 'rtx 30', 'rtx 40']):
                suggestions.append("Consider enabling RTX ray tracing for enhanced visuals")

        return suggestions

    def _estimate_gpu_benchmark(self, gpu_name: str) -> int:
        """
        Estimate a GPU benchmark score from a name string using pattern matching.

        Args:
            gpu_name: Name of the GPU from the game requirements

        Returns:
            Estimated benchmark score (0 if the name is missing or not a string)
        """
        if not gpu_name or not isinstance(gpu_name, str):
            return 0

        gpu_name = gpu_name.lower()

        # Exact pattern match against the benchmark table first.
        for pattern, benchmark in self.calculator.gpu_benchmarks.items():
            if re.search(pattern, gpu_name, re.IGNORECASE):
                self.logger.debug(f"GPU requirement '{gpu_name}' matched pattern '{pattern}' with score {benchmark}")
                return benchmark

        # Coarse family-based fallbacks when no pattern matches.
        if 'rtx' in gpu_name:
            if '4090' in gpu_name or '4080' in gpu_name:
                return 40000
            elif '40' in gpu_name:
                return 30000
            elif '3090' in gpu_name or '3080' in gpu_name:
                return 35000
            elif '30' in gpu_name:
                return 25000
            elif '20' in gpu_name:
                return 18000
            else:
                return 20000
        elif 'gtx' in gpu_name:
            if '16' in gpu_name:
                return 8000
            elif '1080' in gpu_name or '1070' in gpu_name:
                return 10000
            elif '10' in gpu_name:
                return 6000
            else:
                return 5000
        elif 'nvidia' in gpu_name:
            return 10000
        elif 'amd' in gpu_name or 'radeon' in gpu_name:
            if 'rx 7' in gpu_name:
                return 30000
            elif 'rx 6' in gpu_name:
                return 20000
            elif 'rx' in gpu_name:
                return 10000
            else:
                return 8000

        # Unknown GPU string: assume a modest mid-range card.
        return 5000
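

if __name__ == "__main__":
    # Illustrative smoke test only; the hardware and requirement values below are
    # made-up examples, not real benchmark data or a supported CLI entry point.
    logging.basicConfig(level=logging.INFO)

    predictor = DynamicPerformancePredictor()
    result = predictor.assess_performance(
        hardware_specs={
            'cpu_model': 'Intel Core i7-12700H',
            'cpu_cores': 14,
            'cpu_threads': 20,
            'cpu_frequency': 4700,
            'gpu_model': 'RTX 3070 Laptop',
            'gpu_vram_gb': 8,
            'ram_total_gb': 32,
        },
        game_requirements={
            'game_name': 'Cyberpunk 2077',
            'minimum_gpu': 'GTX 1060 6GB',
            'recommended_gpu': 'RTX 2060 Super',
            'minimum_ram_gb': 8,
            'recommended_ram_gb': 16,
        },
    )
    print(f"Tier {result.tier.name} ({result.score}): ~{result.expected_fps} FPS, "
          f"{result.recommended_settings} @ {result.recommended_resolution}")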