"""
Dynamic Performance Prediction Module for CanRun
Tiered performance predictions (S-A-B-C-D-F) with dynamic hardware detection and benchmark-based scoring.
Focus on NVIDIA RTX/GTX systems with comprehensive laptop support.
"""
import logging
import re
import requests
import platform
import psutil
from typing import Dict, List, Optional, Tuple
from dataclasses import dataclass
from enum import Enum
try:
import GPUtil
except ImportError:
GPUtil = None
try:
import wmi # Windows only
except ImportError:
wmi = None
class PerformanceTier(Enum):
"""Performance tier classifications"""
S = (90, 100, "Exceptional - Ultra settings, 4K@60fps+")
A = (80, 89, "Excellent - High settings, 1440p@60fps")
B = (70, 79, "Good - High settings, 1080p@60fps")
C = (60, 69, "Adequate - Medium settings, 1080p@30fps")
D = (50, 59, "Minimum - Low settings, 720p@30fps")
F = (0, 49, "Below Minimum - Unable to run acceptably")
@dataclass
class PerformanceAssessment:
"""Complete performance assessment result with S-A-B-C-D-F tier system"""
score: int
tier: PerformanceTier
tier_description: str
expected_fps: int
recommended_settings: str
recommended_resolution: str
    bottlenecks: List[str]
    upgrade_suggestions: List[str]
class HardwareDetector:
"""Dynamic hardware detection with cross-platform support"""
def __init__(self):
self.logger = logging.getLogger(__name__)
self.system_info = {}
def detect_all(self) -> Dict:
"""Detect all hardware components"""
self.system_info = {
'cpu': self._detect_cpu(),
'gpu': self._detect_gpu(),
'ram': self._detect_ram(),
'os': self._detect_os()
}
return self.system_info
def _detect_cpu(self) -> Dict:
"""Detect CPU information"""
cpu_info = {
'name': platform.processor(),
'cores': psutil.cpu_count(logical=False),
'threads': psutil.cpu_count(logical=True),
'frequency': psutil.cpu_freq().max if psutil.cpu_freq() else 0
}
# Windows-specific detailed CPU info
if platform.system() == 'Windows' and wmi:
try:
c = wmi.WMI()
for processor in c.Win32_Processor():
cpu_info['name'] = processor.Name
cpu_info['max_clock_speed'] = processor.MaxClockSpeed
except Exception as e:
self.logger.debug(f"WMI CPU detection failed: {e}")
return cpu_info
def _detect_gpu(self) -> Dict:
"""Detect GPU information - NVIDIA focus"""
gpu_info = {'cards': []}
# Try GPUtil first
if GPUtil:
try:
gpus = GPUtil.getGPUs()
for gpu in gpus:
# Filter for NVIDIA only
                    if any(keyword in gpu.name.lower() for keyword in ('nvidia', 'geforce', 'rtx', 'gtx')):
                        gpu_info['cards'].append({
                            'name': gpu.name,
                            'memory': gpu.memoryTotal / 1024,  # GPUtil reports MB; store GB to match the WMI path
'driver': gpu.driver
})
except Exception as e:
self.logger.debug(f"GPUtil detection failed: {e}")
# Windows WMI fallback
if not gpu_info['cards'] and platform.system() == 'Windows' and wmi:
try:
c = wmi.WMI()
for gpu in c.Win32_VideoController():
if gpu.Name and gpu.AdapterRAM:
gpu_name = gpu.Name.lower()
# Filter for NVIDIA only
if any(keyword in gpu_name for keyword in ['nvidia', 'geforce', 'rtx', 'gtx']):
gpu_info['cards'].append({
'name': gpu.Name,
'memory': gpu.AdapterRAM / (1024**3) if gpu.AdapterRAM else 0,
'driver': gpu.DriverVersion
})
except Exception as e:
self.logger.debug(f"WMI GPU detection failed: {e}")
return gpu_info
def _detect_ram(self) -> Dict:
"""Detect RAM information"""
memory = psutil.virtual_memory()
return {
'total': memory.total / (1024**3), # Convert to GB
'available': memory.available / (1024**3),
'used_percent': memory.percent
}
def _detect_os(self) -> Dict:
"""Detect OS information"""
return {
'system': platform.system(),
'release': platform.release(),
'version': platform.version(),
'architecture': platform.machine()
}
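
# Minimal usage sketch for the detector (illustrative only; assumes it runs on the
# target machine, with GPUtil/wmi optional and the psutil-based paths covering the rest):
#
#     detector = HardwareDetector()
#     info = detector.detect_all()
#     print(info['cpu']['name'], info['gpu']['cards'], info['ram']['total'])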
class PerformanceCalculator:
"""Dynamic performance calculation with pattern matching for NVIDIA GPUs"""
def __init__(self):
self.logger = logging.getLogger(__name__)
# CPU benchmark scores (approximate PassMark scores)
self.cpu_benchmarks = self._load_cpu_benchmarks()
# GPU benchmark scores (approximate PassMark G3D scores) - NVIDIA only
self.gpu_benchmarks = self._load_gpu_benchmarks()
def _load_cpu_benchmarks(self) -> Dict[str, int]:
"""Load CPU benchmark data dynamically using pattern matching"""
return {
            # Intel 13th/14th Gen
r'i9-1[3-4]\d{3}[A-Z]*': 35000,
r'i7-1[3-4]\d{3}[A-Z]*': 25000,
r'i5-1[3-4]\d{3}[A-Z]*': 15000,
r'i3-1[3-4]\d{3}[A-Z]*': 8000,
# Intel 12th Gen
r'i9-12\d{3}[A-Z]*': 32000,
r'i7-12\d{3}[A-Z]*': 22000,
r'i5-12\d{3}[A-Z]*': 13000,
r'i3-12\d{3}[A-Z]*': 7000,
# AMD Ryzen 7000
r'Ryzen 9 7\d{3}[A-Z]*': 35000,
r'Ryzen 7 7800X3D': 38000, # Special case for 3D V-Cache
r'Ryzen 7 7\d{3}[A-Z]*': 25000,
r'Ryzen 5 7\d{3}[A-Z]*': 15000,
r'Ryzen 3 7\d{3}[A-Z]*': 8000,
# AMD Ryzen 5000
r'Ryzen 9 5\d{3}[A-Z]*': 30000,
r'Ryzen 7 5800X3D': 32000, # Special case for 3D V-Cache
r'Ryzen 7 5\d{3}[A-Z]*': 22000,
r'Ryzen 5 5\d{3}[A-Z]*': 13000,
r'Ryzen 3 5\d{3}[A-Z]*': 7000,
            # Apple Silicon (Max/Pro listed first so the plain pattern does not shadow them)
            r'Apple M[1-4] Max': 40000,
            r'Apple M[1-4] Pro': 30000,
            r'Apple M[1-4]': 20000,
}
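    # Example: a detected name like "Intel(R) Core(TM) i7-13700K" matches the
    # r'i7-1[3-4]\d{3}[A-Z]*' pattern above and is scored at roughly 25000.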
def _load_gpu_benchmarks(self) -> Dict[str, int]:
"""Load NVIDIA GPU benchmark data with RTX 5000 series and laptop variants"""
return {
            # Laptop variants are listed before their desktop counterparts within each
            # series: the matching loops take the first pattern that hits, so the more
            # specific laptop entry must be seen before the plain desktop name.
            # NVIDIA RTX 50 Series (Future-proofing)
            r'RTX 5090': 55000,
            r'RTX 5080': 48000,
            r'RTX 507[0-9]': 40000,
            r'RTX 506[0-9]': 32000,
            # NVIDIA RTX 40 Series Laptop
            r'RTX 4090 Laptop': 38000,
            r'RTX 4080 Laptop': 32000,
            r'RTX 4070 Laptop': 24000,
            r'RTX 4060 Laptop': 16000,
            r'RTX 4050 Laptop': 12000,
            # NVIDIA RTX 40 Series Desktop
            r'RTX 4090': 45000,
            r'RTX 4080 Super': 40000,
            r'RTX 4080': 38000,
            r'RTX 4070 Ti Super': 35000,
            r'RTX 4070 Ti': 32000,
            r'RTX 4070 Super': 30000,
            r'RTX 4070': 28000,
            r'RTX 4060 Ti': 22000,
            r'RTX 4060': 18000,
            # NVIDIA RTX 30 Series Laptop
            r'RTX 3080 Ti Laptop': 26000,
            r'RTX 3080 Laptop': 22000,
            r'RTX 3070 Ti Laptop': 20000,
            r'RTX 3070 Laptop': 18000,
            r'RTX 3060 Laptop': 13000,
            r'RTX 3050 Ti Laptop': 9000,
            r'RTX 3050 Laptop': 8000,
            # NVIDIA RTX 30 Series Desktop
            r'RTX 3090 Ti': 39000,
            r'RTX 3090': 35000,
            r'RTX 3080 Ti': 32000,
            r'RTX 3080': 28000,
            r'RTX 3070 Ti': 25000,
            r'RTX 3070': 22000,
            r'RTX 3060 Ti': 18000,
            r'RTX 3060': 15000,
            r'RTX 3050': 10000,
            # NVIDIA RTX 20 Series Laptop
            r'RTX 2080 Laptop': 14000,
            r'RTX 2070 Laptop': 11000,
            r'RTX 2060 Laptop': 9000,
            # NVIDIA RTX 20 Series Desktop
            r'RTX 2080 Ti': 20000,
            r'RTX 2080 Super': 18000,
            r'RTX 2080': 16000,
            r'RTX 2070 Super': 15000,
            r'RTX 2070': 13000,
            r'RTX 2060 Super': 12000,
            r'RTX 2060': 10000,
            # NVIDIA GTX 16 Series Laptop
            r'GTX 1660 Ti Laptop': 7500,
            r'GTX 1660 Ti Mobile': 7500,
            r'GTX 1650 Ti Laptop': 5500,
            r'GTX 1650 Laptop': 5000,
            # NVIDIA GTX 16 Series Desktop
            r'GTX 1660 Ti': 9000,
            r'GTX 1660 Super': 8500,
            r'GTX 1660': 8000,
            r'GTX 1650 Super': 7000,
            r'GTX 1650': 6000,
            # NVIDIA GTX 10 Series Laptop
            r'GTX 1080 Laptop': 8500,
            r'GTX 1070 Laptop': 7000,
            r'GTX 1060 Laptop': 5000,
            r'GTX 1050 Ti Laptop': 3200,
            r'GTX 1050 Laptop': 2500,
            # NVIDIA GTX 10 Series Desktop
            r'GTX 1080 Ti': 12000,
            r'GTX 1080': 10000,
            r'GTX 1070 Ti': 9000,
            r'GTX 1070': 8000,
            r'GTX 1060 6GB': 6500,
            r'GTX 1060 3GB': 5500,
            r'GTX 1060': 6000,
            r'GTX 1050 Ti': 4000,
            r'GTX 1050': 3000,
            # NVIDIA GTX 9 Series
            r'GTX 980 Ti': 7000,
            r'GTX 980': 6000,
            r'GTX 970': 5000,
            r'GTX 960': 3500,
            r'GTX 950': 2500,
            # Generic patterns for unmatched cards
            r'GeForce.*RTX': 15000,  # Generic RTX fallback
            r'GeForce.*GTX': 5000,   # Generic GTX fallback
            r'NVIDIA.*RTX': 15000,   # Generic NVIDIA RTX
            r'NVIDIA.*GTX': 5000,    # Generic NVIDIA GTX
}
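    # Example: "NVIDIA GeForce RTX 3060 Laptop GPU" resolves to the r'RTX 3060 Laptop'
    # entry (13000) rather than the desktop r'RTX 3060' entry, which is why the laptop
    # patterns are listed ahead of their desktop counterparts above.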
def calculate_cpu_score(self, cpu_info: Dict, requirements: Dict) -> float:
"""Calculate CPU performance score (0-100)"""
cpu_name = cpu_info.get('name', '')
cpu_score = 0
self.logger.debug(f"Calculating CPU score for: {cpu_name}")
# Find matching benchmark using pattern matching
for pattern, benchmark in self.cpu_benchmarks.items():
if re.search(pattern, cpu_name, re.IGNORECASE):
cpu_score = benchmark
self.logger.debug(f"CPU matched pattern '{pattern}' with score {benchmark}")
break
# Fallback: estimate based on cores and frequency
if cpu_score == 0:
cores = cpu_info.get('cores', 1)
freq = cpu_info.get('frequency', 2000)
cpu_score = cores * freq * 2.5 # Rough estimation
self.logger.debug(f"CPU fallback estimation: {cores} cores * {freq} MHz = {cpu_score}")
# Normalize against requirements
req_cpu = requirements.get('recommended', {}).get('processor', '')
req_score = self._estimate_required_cpu_score(req_cpu)
final_score = min(100, (cpu_score / req_score) * 100) if req_score > 0 else 75
self.logger.debug(f"Final CPU score: {final_score}")
return final_score
def calculate_gpu_score(self, gpu_info: Dict, requirements: Dict) -> float:
"""Calculate NVIDIA GPU performance score (0-100)"""
if not gpu_info.get('cards'):
self.logger.warning("No NVIDIA GPU detected")
return 0
gpu_name = gpu_info['cards'][0].get('name', '')
gpu_score = 0
self.logger.debug(f"Calculating GPU score for: {gpu_name}")
# Find matching benchmark using pattern matching
for pattern, benchmark in self.gpu_benchmarks.items():
if re.search(pattern, gpu_name, re.IGNORECASE):
gpu_score = benchmark
self.logger.debug(f"GPU matched pattern '{pattern}' with score {benchmark}")
break
# Fallback: estimate based on memory and generation
if gpu_score == 0:
memory = gpu_info['cards'][0].get('memory', 0)
# Estimate based on VRAM and naming patterns
if 'rtx' in gpu_name.lower():
if '40' in gpu_name: # RTX 40 series
gpu_score = memory * 3000
elif '30' in gpu_name: # RTX 30 series
gpu_score = memory * 2500
elif '20' in gpu_name: # RTX 20 series
gpu_score = memory * 2000
else:
gpu_score = memory * 2200 # Generic RTX
elif 'gtx' in gpu_name.lower():
if '16' in gpu_name: # GTX 16 series
gpu_score = memory * 1500
elif '10' in gpu_name: # GTX 10 series
gpu_score = memory * 1200
else:
gpu_score = memory * 1000 # Older GTX
else:
gpu_score = memory * 1500 # Generic NVIDIA
self.logger.debug(f"GPU fallback estimation: {memory}GB * multiplier = {gpu_score}")
# Normalize against requirements
req_gpu = requirements.get('recommended', {}).get('graphics', '')
req_score = self._estimate_required_gpu_score(req_gpu)
final_score = min(100, (gpu_score / req_score) * 100) if req_score > 0 else 75
self.logger.debug(f"Final GPU score: {final_score}")
return final_score
def calculate_ram_score(self, ram_info: Dict, requirements: Dict) -> float:
"""Calculate RAM performance score (0-100)"""
available_ram = ram_info.get('total', 0)
        # Prefer the recommended RAM figure; fall back to the minimum, then to 8 GB
        required_ram = requirements.get('recommended', {}).get('memory', 0)
        if not required_ram:
            required_ram = requirements.get('minimum', {}).get('memory', 0) or 8
score = min(100, (available_ram / required_ram) * 100)
self.logger.debug(f"RAM score: {available_ram}GB / {required_ram}GB = {score}")
return score
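    # Example: 16 GB installed against a 16 GB recommendation scores 100;
    # 8 GB against the same recommendation scores 50.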
def _estimate_required_cpu_score(self, cpu_string: str) -> int:
"""Estimate required CPU score from string"""
patterns = {
r'i9|Ryzen 9': 30000,
r'i7|Ryzen 7': 20000,
r'i5|Ryzen 5': 12000,
r'i3|Ryzen 3': 6000,
r'Core 2 Duo|Dual.?Core': 2000,
r'Quad.?Core': 4000,
}
for pattern, score in patterns.items():
if re.search(pattern, cpu_string, re.IGNORECASE):
return score
return 8000 # Default middle-range requirement
def _estimate_required_gpu_score(self, gpu_string: str) -> int:
"""Estimate required NVIDIA GPU score from string"""
patterns = {
r'RTX 50\d{2}': 40000,
r'RTX 40\d{2}': 30000,
r'RTX 30\d{2}': 20000,
r'RTX 20\d{2}': 12000,
r'GTX 16\d{2}': 8000,
r'GTX 10\d{2}': 6000,
r'GTX 9\d{2}': 4000,
}
for pattern, score in patterns.items():
if re.search(pattern, gpu_string, re.IGNORECASE):
return score
return 8000 # Default middle-range requirement
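    # Example: a requirement string such as "NVIDIA GeForce RTX 2060 or equivalent"
    # matches r'RTX 20\d{2}' above and is treated as a 12000-point requirement.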
class DynamicPerformancePredictor:
"""Dynamic performance predictor with real-time hardware detection for NVIDIA systems"""
def __init__(self):
self.logger = logging.getLogger(__name__)
self.hardware_detector = HardwareDetector()
self.calculator = PerformanceCalculator()
# Component weights as per CanRun spec
self.weights = {
'gpu': 0.60,
'cpu': 0.25,
'ram': 0.15
}
self.logger.info("Dynamic performance predictor initialized for NVIDIA RTX/GTX systems")
    def assess_performance(self, hardware_specs: Optional[Dict] = None, game_requirements: Optional[Dict] = None) -> PerformanceAssessment:
"""
Generate advanced tiered performance assessment using dynamic detection.
Args:
hardware_specs: Optional pre-detected hardware specs
game_requirements: Optional game requirements from Steam API
Returns:
PerformanceAssessment with tier, score, FPS, and recommendations
"""
self.logger.info("Generating dynamic performance assessment")
# Detect hardware if not provided
if hardware_specs is None:
hardware = self.hardware_detector.detect_all()
else:
# Convert from CanRun format to dynamic format
hardware = {
'cpu': {
'name': hardware_specs.get('cpu_model', ''),
'cores': hardware_specs.get('cpu_cores', 4),
'threads': hardware_specs.get('cpu_threads', 8),
'frequency': hardware_specs.get('cpu_frequency', 3000)
},
'gpu': {
'cards': [{
'name': hardware_specs.get('gpu_model', ''),
'memory': hardware_specs.get('gpu_vram_gb', 4),
'driver': 'Unknown'
}]
},
'ram': {
'total': hardware_specs.get('ram_total_gb', 8),
'available': hardware_specs.get('ram_total_gb', 8) * 0.7,
'used_percent': 30
}
}
# Calculate individual component scores
scores = {
'cpu': self.calculator.calculate_cpu_score(hardware['cpu'], game_requirements or {}),
'gpu': self.calculator.calculate_gpu_score(hardware['gpu'], game_requirements or {}),
'ram': self.calculator.calculate_ram_score(hardware['ram'], game_requirements or {})
}
self.logger.debug(f"Component scores: {scores}")
# Calculate base weighted total score
base_score = int(
scores['gpu'] * self.weights['gpu'] +
scores['cpu'] * self.weights['cpu'] +
scores['ram'] * self.weights['ram']
)
# Apply adjustments based on minimum vs recommended specs comparison
total_score = base_score
if game_requirements:
# Extract minimum and recommended specs
min_gpu = game_requirements.get('minimum_gpu', '')
rec_gpu = game_requirements.get('recommended_gpu', '')
min_cpu = game_requirements.get('minimum_cpu', '')
rec_cpu = game_requirements.get('recommended_cpu', '')
min_ram = game_requirements.get('minimum_ram_gb', 8)
rec_ram = game_requirements.get('recommended_ram_gb', 16)
# Calculate how much user's hardware exceeds minimum and approaches recommended specs
min_exceeded_factor = 0
rec_approach_factor = 0
            # GPU comparison - compare raw benchmark values; the normalized 0-100
            # component score is not comparable to the PassMark-style requirement benchmarks
            gpu_model = hardware['gpu']['cards'][0]['name'] if hardware['gpu']['cards'] else ''
            user_gpu_score = self._estimate_gpu_benchmark(gpu_model)
            min_gpu_benchmark = self._estimate_gpu_benchmark(min_gpu)
            rec_gpu_benchmark = self._estimate_gpu_benchmark(rec_gpu)
            if min_gpu_benchmark > 0 and rec_gpu_benchmark > 0 and user_gpu_score > 0:
# Calculate factors based on how much user's GPU exceeds minimum and approaches recommended
if user_gpu_score > min_gpu_benchmark:
min_exceeded_factor += (user_gpu_score - min_gpu_benchmark) / min_gpu_benchmark
if rec_gpu_benchmark > min_gpu_benchmark:
rec_range = rec_gpu_benchmark - min_gpu_benchmark
user_in_range = user_gpu_score - min_gpu_benchmark
if user_in_range > 0:
rec_approach_factor += min(1.0, user_in_range / rec_range)
# Adjust total score based on how hardware compares to game-specific requirements
# If exceeding minimum by a lot, boost score
if min_exceeded_factor > 1.5:
total_score = min(100, int(total_score * 1.1))
elif min_exceeded_factor > 0.5:
total_score = min(100, int(total_score * 1.05))
# If approaching or exceeding recommended specs, boost score further
if rec_approach_factor > 0.8:
total_score = min(100, int(total_score * 1.1))
elif rec_approach_factor > 0.5:
total_score = min(100, int(total_score * 1.05))
# If barely meeting minimum, reduce score
if min_exceeded_factor < 0.2:
total_score = max(0, int(total_score * 0.9))
# Apply more game-specific adjustments based on the actual game requirements
game_name = None
if game_requirements:
game_name = (
game_requirements.get('game_name', '') or
game_requirements.get('minimum_game', '') or
game_requirements.get('recommended_game', '') or
game_requirements.get('name', '')
)
# Analyze game requirements vs hardware for dynamic scoring
if game_name:
self.logger.info(f"Applying game-specific adjustments for {game_name}")
# Get specs for calculations
min_gpu = game_requirements.get('minimum_gpu', '')
rec_gpu = game_requirements.get('recommended_gpu', '')
gpu_model = hardware['gpu']['cards'][0]['name'] if hardware['gpu']['cards'] else ''
# Calculate more precise hardware vs requirements comparison
min_gpu_score = self._estimate_gpu_benchmark(min_gpu)
rec_gpu_score = self._estimate_gpu_benchmark(rec_gpu)
user_gpu_score = 0
# Find benchmark of user's GPU
for pattern, benchmark in self.calculator.gpu_benchmarks.items():
if re.search(pattern, gpu_model, re.IGNORECASE):
user_gpu_score = benchmark
break
# Log the scores for transparency
self.logger.info(f"Game: {game_name}, Min GPU Score: {min_gpu_score}, Rec GPU Score: {rec_gpu_score}, User GPU Score: {user_gpu_score}")
# Apply sophisticated tiering based on real hardware comparison
if min_gpu_score > 0 and user_gpu_score > 0:
# If below minimum requirements
if user_gpu_score < min_gpu_score:
# Set to F tier for below minimum
total_score = max(30, int(total_score * 0.65))
self.logger.info(f"Hardware below minimum requirements, reducing score to {total_score}")
# If between minimum and recommended
elif rec_gpu_score > min_gpu_score and user_gpu_score < rec_gpu_score:
# Set to C-B tier based on where in the range they fall
position = (user_gpu_score - min_gpu_score) / (rec_gpu_score - min_gpu_score)
tier_score = 60 + int(position * 20) # C to B range (60-80)
total_score = min(tier_score, total_score)
self.logger.info(f"Hardware between min and rec, setting score to {total_score}")
# If above recommended
elif user_gpu_score >= rec_gpu_score:
# How much above recommended?
exceed_factor = user_gpu_score / rec_gpu_score
if exceed_factor >= 1.5:
# Significantly above recommended - S tier
total_score = max(total_score, 90)
self.logger.info(f"Hardware well above rec, setting to S tier ({total_score})")
elif exceed_factor >= 1.2:
# Moderately above recommended - A tier
total_score = max(total_score, 80)
self.logger.info(f"Hardware above rec, setting to A tier ({total_score})")
else:
# Just above recommended - B tier
total_score = max(total_score, 70)
self.logger.info(f"Hardware meets rec, setting to B tier ({total_score})")
# Determine tier
tier = self._get_tier(total_score)
self.logger.info(f"Final performance tier: {tier.name} with score {total_score}")
# Calculate expected FPS with game-specific adjustments
expected_fps = self._calculate_expected_fps(tier, scores['gpu'], scores['cpu'], game_requirements)
# Determine settings and resolution
recommended_settings, recommended_resolution = self._determine_recommendations(tier, total_score)
# Identify bottlenecks
bottlenecks = self._identify_bottlenecks(scores)
# Generate upgrade suggestions
upgrade_suggestions = self._generate_upgrade_suggestions(hardware, scores, tier)
assessment = PerformanceAssessment(
score=total_score,
tier=tier,
tier_description=tier.value[2],
expected_fps=expected_fps,
recommended_settings=recommended_settings,
recommended_resolution=recommended_resolution,
bottlenecks=bottlenecks,
upgrade_suggestions=upgrade_suggestions
)
self.logger.info(f"Dynamic performance assessment: Score {assessment.score}, Tier {assessment.tier.name}")
return assessment
def _get_tier(self, score: float) -> PerformanceTier:
"""Convert score to tier"""
for tier in PerformanceTier:
min_score, max_score, _ = tier.value
if min_score <= score <= max_score:
return tier
return PerformanceTier.F
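    # Example: a score of 84 falls in the 80-89 band and maps to PerformanceTier.A.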
    def _calculate_expected_fps(self, tier: PerformanceTier, gpu_score: float, cpu_score: float, game_requirements: Optional[Dict] = None) -> int:
"""
Calculate expected FPS based on tier, component scores, and game-specific requirements
Args:
tier: Performance tier classification
gpu_score: GPU score (0-100)
cpu_score: CPU score (0-100)
game_requirements: Optional game requirements from Steam API
Returns:
Expected FPS value
"""
# Base FPS by tier - starting point
base_fps = {
PerformanceTier.S: 90,
PerformanceTier.A: 75,
PerformanceTier.B: 60,
PerformanceTier.C: 40,
PerformanceTier.D: 30,
PerformanceTier.F: 20
}
fps = base_fps.get(tier, 30)
# Game-specific adjustments if available
if game_requirements:
game_name = (
game_requirements.get('game_name', '') or
game_requirements.get('minimum_game', '') or
game_requirements.get('recommended_game', '') or
game_requirements.get('name', '')
)
if game_name:
self.logger.info(f"Calculating game-specific FPS for {game_name}")
# Check if the game is known to be well-optimized or demanding
fps_modifier = 1.0 # Default modifier
# List of known well-optimized games
well_optimized_games = [
'fortnite', 'valorant', 'apex legends', 'rocket league',
'league of legends', 'counter-strike', 'counter strike', 'cs2',
'overwatch', 'minecraft', 'dota 2', 'rainbow six siege'
]
# List of known demanding games
demanding_games = [
'cyberpunk 2077', 'cyberpunk', 'red dead redemption 2', 'red dead redemption',
'assassin\'s creed valhalla', 'assassin\'s creed', 'flight simulator',
'control', 'metro exodus', 'crysis', 'star citizen', 'elden ring'
]
# Apply game-specific adjustments
game_lower = game_name.lower()
if any(optimized_game in game_lower for optimized_game in well_optimized_games):
fps_modifier = 1.2 # 20% FPS boost for well-optimized games
self.logger.info(f"Well-optimized game detected, applying 20% FPS boost")
elif any(demanding_game in game_lower for demanding_game in demanding_games):
fps_modifier = 0.8 # 20% FPS reduction for demanding games
self.logger.info(f"Demanding game detected, reducing FPS by 20%")
# Modify the base FPS by game optimization factor
fps = int(fps * fps_modifier)
# Check for specific game engines
if 'unreal engine' in game_lower or 'unreal' in game_lower:
# Unreal Engine games tend to be more GPU-bound
if gpu_score < 60:
fps = int(fps * 0.9) # Further reduce for low-end GPUs
elif gpu_score > 85:
fps = int(fps * 1.1) # Boost for high-end GPUs
elif 'unity' in game_lower:
# Unity games are often more balanced between CPU and GPU
if min(cpu_score, gpu_score) < 60:
fps = int(fps * 0.9) # Reduce for balanced bottleneck
# Compare user's hardware to game requirements
min_gpu = game_requirements.get('minimum_gpu', '')
rec_gpu = game_requirements.get('recommended_gpu', '')
# Get benchmark scores
min_gpu_score = self._estimate_gpu_benchmark(min_gpu)
rec_gpu_score = self._estimate_gpu_benchmark(rec_gpu)
# Find user's GPU benchmark - use the actual hardware info, not from game_requirements
gpu_model = ""
if 'user_gpu_model' in game_requirements:
gpu_model = game_requirements.get('user_gpu_model', '')
else:
# Get from hardware data structure
gpu_model = hardware['gpu']['cards'][0]['name'] if hardware['gpu']['cards'] else ''
user_gpu_benchmark = 0
for pattern, benchmark in self.calculator.gpu_benchmarks.items():
if re.search(pattern, gpu_model, re.IGNORECASE):
user_gpu_benchmark = benchmark
break
# Calculate performance ratio if we have valid benchmarks
if min_gpu_score > 0 and rec_gpu_score > 0 and user_gpu_benchmark > 0:
# How much the user exceeds minimum requirements
min_ratio = user_gpu_benchmark / min_gpu_score if min_gpu_score > 0 else 1.0
# How close the user is to recommended requirements
rec_ratio = user_gpu_benchmark / rec_gpu_score if rec_gpu_score > 0 else 0.5
# Apply precise adjustments based on hardware vs requirements
if min_ratio < 1.0:
# Below minimum requirements - significant FPS reduction
fps = int(fps * min_ratio * 0.8)
self.logger.info(f"Below minimum requirements, reducing FPS to {fps}")
elif rec_ratio >= 1.5:
# Far exceeds recommended - significant FPS boost
fps = int(fps * 1.3)
self.logger.info(f"Far exceeds recommended requirements, boosting FPS to {fps}")
elif rec_ratio >= 1.0:
# Meets or exceeds recommended - moderate FPS boost
fps = int(fps * 1.15)
self.logger.info(f"Exceeds recommended requirements, boosting FPS to {fps}")
                else:
                    # Between minimum and recommended - proportional adjustment
                    position = (user_gpu_benchmark - min_gpu_score) / (rec_gpu_score - min_gpu_score)
                    position = max(0.0, min(1.0, position))  # clamp to the 0-1 range
                    fps_factor = 1.0 + (position * 0.15)  # 0-15% boost
                    fps = int(fps * fps_factor)
                    self.logger.info(f"Between min and rec requirements, adjusted FPS to {fps}")
# Standard adjustments based on component scores
# Adjust based on GPU performance
if gpu_score >= 90:
fps += 20
elif gpu_score >= 80:
fps += 10
elif gpu_score <= 50:
fps -= 10
# Slight CPU adjustment
if cpu_score >= 90:
fps += 5
elif cpu_score <= 50:
fps -= 5
# Return with reasonable lower bound
return max(15, fps)
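    # Worked example (assuming no minimum/recommended GPU strings are supplied):
    # tier B starts at 60 FPS, a well-optimized title applies the 1.2 modifier (72),
    # and a GPU score of 92 adds 20, giving an estimate of 92 FPS.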
def _determine_recommendations(self, tier: PerformanceTier, score: int) -> Tuple[str, str]:
"""Determine recommended settings and resolution"""
if tier == PerformanceTier.S:
return "Ultra/Maximum", "4K (3840x2160)"
elif tier == PerformanceTier.A:
return "High", "1440p (2560x1440)"
elif tier == PerformanceTier.B:
return "High", "1080p (1920x1080)"
elif tier == PerformanceTier.C:
return "Medium", "1080p (1920x1080)"
elif tier == PerformanceTier.D:
return "Low", "720p (1280x720)"
else:
return "Minimum", "720p (1280x720)"
def _identify_bottlenecks(self, scores: Dict) -> List[str]:
"""Identify system bottlenecks"""
bottlenecks = []
# Find the lowest scoring component(s)
min_score = min(scores.values())
avg_score = sum(scores.values()) / len(scores)
for component, score in scores.items():
if score <= min_score + 5 and score < avg_score - 10:
bottlenecks.append(component.upper())
return bottlenecks
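    # Example: scores of {'gpu': 55, 'cpu': 85, 'ram': 90} give a minimum of 55 and an
    # average of ~76.7; only the GPU is within 5 points of the minimum and more than
    # 10 below the average, so ['GPU'] is returned.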
def _generate_upgrade_suggestions(self, hardware: Dict, scores: Dict, tier: PerformanceTier) -> List[str]:
"""Generate hardware upgrade suggestions for NVIDIA systems"""
suggestions = []
# GPU upgrades
if scores['gpu'] < 70:
if tier == PerformanceTier.F or tier == PerformanceTier.D:
suggestions.append("GPU upgrade essential - Consider RTX 3060 or RTX 4060")
elif tier == PerformanceTier.C:
suggestions.append("GPU upgrade recommended - Consider RTX 3070 or RTX 4070")
# CPU upgrades
if scores['cpu'] < 65:
suggestions.append("CPU upgrade recommended for better performance")
# RAM upgrades
ram_gb = hardware['ram']['total']
if ram_gb < 16:
suggestions.append("Upgrade to 16GB+ RAM for optimal performance")
elif ram_gb < 32 and tier == PerformanceTier.S:
suggestions.append("Consider 32GB RAM for maximum performance")
# DLSS/RTX suggestions
gpu_name = hardware['gpu']['cards'][0]['name'] if hardware['gpu']['cards'] else ''
if 'rtx' in gpu_name.lower():
suggestions.append("Enable DLSS for better performance with RTX cards")
if any(series in gpu_name.lower() for series in ['rtx 20', 'rtx 30', 'rtx 40']):
suggestions.append("Consider enabling RTX ray tracing for enhanced visuals")
return suggestions
def _estimate_gpu_benchmark(self, gpu_name: str) -> int:
"""
Estimate GPU benchmark score from name string using pattern matching.
Args:
gpu_name: Name of the GPU from requirements
Returns:
Estimated benchmark score (0 if can't estimate)
"""
if not gpu_name or not isinstance(gpu_name, str):
return 0
gpu_name = gpu_name.lower()
# First try exact pattern matching using the calculator's benchmarks
for pattern, benchmark in self.calculator.gpu_benchmarks.items():
if re.search(pattern, gpu_name, re.IGNORECASE):
self.logger.debug(f"GPU requirement '{gpu_name}' matched pattern '{pattern}' with score {benchmark}")
return benchmark
# If no exact match, use simplified estimation based on series detection
if 'rtx' in gpu_name:
if '4090' in gpu_name or '4080' in gpu_name:
return 40000 # High-end RTX 40 series
elif '40' in gpu_name:
return 30000 # Mid-range RTX 40 series
elif '3090' in gpu_name or '3080' in gpu_name:
return 35000 # High-end RTX 30 series
elif '30' in gpu_name:
return 25000 # Mid-range RTX 30 series
elif '20' in gpu_name:
return 18000 # RTX 20 series
else:
return 20000 # Generic RTX
elif 'gtx' in gpu_name:
if '16' in gpu_name:
return 8000 # GTX 16 series
elif '1080' in gpu_name or '1070' in gpu_name:
return 10000 # High-end GTX 10 series
elif '10' in gpu_name:
return 6000 # Mid-range GTX 10 series
else:
return 5000 # Generic GTX
elif 'nvidia' in gpu_name:
return 10000 # Generic NVIDIA
elif 'amd' in gpu_name or 'radeon' in gpu_name:
if 'rx 7' in gpu_name:
return 30000 # High-end AMD RX 7000
elif 'rx 6' in gpu_name:
return 20000 # AMD RX 6000
elif 'rx' in gpu_name:
return 10000 # Generic AMD RX
else:
return 8000 # Generic AMD
# Fallback for unknown GPU
return 5000 |
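
# ---------------------------------------------------------------------------
# Illustrative usage sketch. The requirements dict below is a hypothetical example
# built only from keys this module actually reads; in normal operation the
# requirements come from the Steam API layer and hardware is auto-detected.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    predictor = DynamicPerformancePredictor()
    example_requirements = {
        'game_name': 'Cyberpunk 2077',
        'minimum_gpu': 'GTX 1060 6GB',
        'recommended_gpu': 'RTX 3070',
        'minimum_ram_gb': 12,
        'recommended_ram_gb': 16,
        'minimum': {'memory': 12, 'processor': 'i5', 'graphics': 'GTX 1060'},
        'recommended': {'memory': 16, 'processor': 'i7', 'graphics': 'RTX 3070'},
    }

    # Hardware is detected on this machine because hardware_specs is omitted
    result = predictor.assess_performance(game_requirements=example_requirements)
    print(f"Tier {result.tier.name} ({result.score}/100): {result.tier_description}")
    print(f"Expected FPS: {result.expected_fps} at {result.recommended_resolution}, "
          f"{result.recommended_settings} settings")
    if result.bottlenecks:
        print(f"Bottlenecks: {', '.join(result.bottlenecks)}")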