"""
G-Assist LLM Integration for CanRun

Uses NVIDIA G-Assist's embedded 8B parameter Llama-based model for intelligent
gaming performance analysis.
"""

import asyncio
import json
import logging
import re
import threading
from dataclasses import dataclass
from datetime import datetime, timedelta
from enum import Enum
from typing import Any, Dict, List, Optional

from src.dynamic_performance_predictor import PerformanceAssessment
from src.privacy_aware_hardware_detector import PrivacyAwareHardwareSpecs


class LLMAnalysisType(Enum):
    """Types of LLM analysis that can be performed."""

    BOTTLENECK_ANALYSIS = "bottleneck_analysis"
    OPTIMIZATION_RECOMMENDATIONS = "optimization_recommendations"
    DEEP_SYSTEM_ANALYSIS = "deep_system_analysis"
    INTELLIGENT_QUERY = "intelligent_query"


@dataclass
class GAssistCapabilities:
    """Detected G-Assist LLM capabilities."""

    has_g_assist: bool
    embedded_model_available: bool
    model_type: str
    model_size: str
    rtx_gpu_compatible: bool
    vram_gb: int
    supports_local_inference: bool
    connection_status: str


@dataclass
class LLMAnalysisResult:
    """Result of G-Assist LLM analysis."""

    analysis_type: LLMAnalysisType
    confidence_score: float
    analysis_text: str
    structured_data: Dict[str, Any]
    recommendations: List[str]
    technical_details: Dict[str, Any]
    processing_time_ms: float
    g_assist_used: bool
    model_info: Dict[str, str]


class GAssistLLMAnalyzer:
    """G-Assist LLM analyzer for intelligent gaming performance analysis."""

    def __init__(self, fallback_enabled: bool = True):
        """Initialize the G-Assist LLM analyzer."""
        self.logger = logging.getLogger(__name__)
        self.fallback_enabled = fallback_enabled
        self.g_assist_capabilities: Optional[GAssistCapabilities] = None
        self.model_available = False
        self.analysis_lock = threading.Lock()

        self.analysis_cache: Dict[str, LLMAnalysisResult] = {}
        self.cache_expiry: Dict[str, datetime] = {}
        self.cache_duration = timedelta(minutes=15)

        self._detect_g_assist_capabilities()

        if self.g_assist_capabilities and self.g_assist_capabilities.has_g_assist:
            self._initialize_g_assist_connection()
        else:
            self.logger.warning("G-Assist not available. Using fallback analysis.")

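    # ------------------------------------------------------------------
    # Capability detection and connection. Detection below is
    # intentionally simplified: the module trusts the hosting G-Assist
    # plugin runtime rather than probing hardware itself.
    # ------------------------------------------------------------------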
    def _detect_g_assist_capabilities(self) -> None:
        """Simplified G-Assist capability detection.

        Capabilities are assumed to be present; actual detection is
        delegated to the G-Assist runtime that hosts this plugin.
        """
        self.g_assist_capabilities = GAssistCapabilities(
            has_g_assist=True,
            embedded_model_available=True,
            model_type="G-Assist LLM",
            model_size="8B parameters",
            rtx_gpu_compatible=True,
            vram_gb=0,  # Not probed here.
            supports_local_inference=True,
            connection_status="Available"
        )

        self.logger.info("G-Assist LLM analyzer initialized for plugin integration")

    def _initialize_g_assist_connection(self) -> None:
        """Initialize the G-Assist LLM connection."""
        self.model_available = True
        self.logger.info("G-Assist LLM connection established")

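    # ------------------------------------------------------------------
    # Result caching: analyses are memoized for `cache_duration` (15 min)
    # under a key derived from the analysis type, the game name, and a
    # hash of the serialized context.
    # ------------------------------------------------------------------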
    def _clean_expired_cache(self) -> None:
        """Clean expired cache entries."""
        current_time = datetime.now()
        expired_keys = [
            key for key, expiry in self.cache_expiry.items()
            if current_time > expiry
        ]

        for key in expired_keys:
            self.analysis_cache.pop(key, None)
            self.cache_expiry.pop(key, None)

    def _is_cache_expired(self, cache_key: str) -> bool:
        """Check whether a cache entry has expired."""
        if cache_key not in self.cache_expiry:
            return True
        return datetime.now() > self.cache_expiry[cache_key]

    def _get_cache_key(self, context: Dict[str, Any], analysis_type: str) -> str:
        """Generate a cache key for an analysis result.

        Keys use Python's hash(), which is salted per process, so they are
        only valid for this in-memory cache.
        """
        game_name = context.get('game_name', 'unknown')
        try:
            serializable_context = self._make_context_serializable(context)
            context_str = json.dumps(serializable_context, sort_keys=True)
            return f"{analysis_type}_{game_name}_{hash(context_str)}"
        except Exception as e:
            self.logger.warning(f"Failed to serialize context for cache key: {e}")
            return f"{analysis_type}_{game_name}_{hash(str(context))}"

    def _get_cached_result(self, cache_key: str) -> Optional[LLMAnalysisResult]:
        """Get a cached analysis result if available and not expired."""
        self._clean_expired_cache()
        return self.analysis_cache.get(cache_key)

    def _cache_result(self, cache_key: str, result: LLMAnalysisResult) -> None:
        """Cache an analysis result with an expiration timestamp."""
        self.analysis_cache[cache_key] = result
        self.cache_expiry[cache_key] = datetime.now() + self.cache_duration

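    # ------------------------------------------------------------------
    # Analysis entry points: analyze() is the unified pipeline (cache
    # lookup -> LLM or fallback -> parse -> recommend), and the per-type
    # wrappers below delegate to it.
    # ------------------------------------------------------------------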
async def analyze(self, system_context: Dict[str, Any], analysis_type: LLMAnalysisType, query: str = "") -> LLMAnalysisResult:
|
|
"""Unified analysis method for all LLM analysis types."""
|
|
start_time = datetime.now()
|
|
|
|
try:
|
|
|
|
enhanced_context = self._make_context_serializable(system_context.copy())
|
|
if query and analysis_type == LLMAnalysisType.INTELLIGENT_QUERY:
|
|
enhanced_context['query'] = query
|
|
|
|
|
|
cache_key = self._get_cache_key(enhanced_context, analysis_type.value)
|
|
cached_result = self._get_cached_result(cache_key)
|
|
if cached_result:
|
|
self.logger.info(f"Returning cached {analysis_type.value} result")
|
|
return cached_result
|
|
|
|
|
|
if self.model_available:
|
|
analysis_text = await self._generate_g_assist_analysis(enhanced_context, analysis_type.value)
|
|
g_assist_used = True
|
|
else:
|
|
analysis_text = self._get_fallback_analysis(enhanced_context, analysis_type, query)
|
|
g_assist_used = False
|
|
|
|
|
|
structured_data = self._parse_analysis_result(analysis_text, enhanced_context, analysis_type)
|
|
recommendations = self._generate_recommendations(structured_data, enhanced_context, analysis_type)
|
|
|
|
processing_time = (datetime.now() - start_time).total_seconds() * 1000
|
|
|
|
|
|
confidence_scores = {
|
|
LLMAnalysisType.BOTTLENECK_ANALYSIS: (0.92, 0.75),
|
|
LLMAnalysisType.OPTIMIZATION_RECOMMENDATIONS: (0.89, 0.72),
|
|
LLMAnalysisType.DEEP_SYSTEM_ANALYSIS: (0.90, 0.73),
|
|
LLMAnalysisType.INTELLIGENT_QUERY: (0.88, 0.70)
|
|
}
|
|
|
|
confidence_score = confidence_scores[analysis_type][0 if g_assist_used else 1]
|
|
|
|
result = LLMAnalysisResult(
|
|
analysis_type=analysis_type,
|
|
confidence_score=confidence_score,
|
|
analysis_text=analysis_text,
|
|
structured_data=structured_data,
|
|
recommendations=recommendations,
|
|
technical_details=self._get_technical_details(enhanced_context),
|
|
processing_time_ms=processing_time,
|
|
g_assist_used=g_assist_used,
|
|
model_info=self._get_model_info()
|
|
)
|
|
|
|
|
|
self._cache_result(cache_key, result)
|
|
|
|
return result
|
|
|
|
except Exception as e:
|
|
self.logger.error(f"{analysis_type.value} analysis failed: {e}")
|
|
return self._create_error_result(analysis_type, str(e))
|
|
|
|
|
|
    async def analyze_bottlenecks(self, system_context: Dict[str, Any]) -> LLMAnalysisResult:
        """Analyze system bottlenecks using the G-Assist embedded LLM."""
        return await self.analyze(system_context, LLMAnalysisType.BOTTLENECK_ANALYSIS)

    async def get_optimization_recommendations(self, system_context: Dict[str, Any]) -> LLMAnalysisResult:
        """Get optimization recommendations using the G-Assist embedded LLM."""
        return await self.analyze(system_context, LLMAnalysisType.OPTIMIZATION_RECOMMENDATIONS)

    async def perform_deep_analysis(self, system_context: Dict[str, Any]) -> LLMAnalysisResult:
        """Perform deep system analysis using the G-Assist embedded LLM."""
        return await self.analyze(system_context, LLMAnalysisType.DEEP_SYSTEM_ANALYSIS)

    async def process_intelligent_query(self, query: str, system_context: Dict[str, Any]) -> LLMAnalysisResult:
        """Process an intelligent query using the G-Assist embedded LLM."""
        return await self.analyze(system_context, LLMAnalysisType.INTELLIGENT_QUERY, query)

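    # ------------------------------------------------------------------
    # Inference plumbing: analyze_text() is a thin text-in/text-out
    # interface (used by game-name correction below), while
    # _generate_g_assist_analysis() builds a typed prompt and runs the
    # same blocking inference path in a worker thread.
    # ------------------------------------------------------------------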
    async def analyze_text(self, prompt: str) -> str:
        """Analyze text using the G-Assist embedded LLM (simplified interface for Steam integration)."""
        try:
            if not self.model_available:
                return "G-Assist LLM not available"

            loop = asyncio.get_running_loop()

            def _locked_inference() -> str:
                # Acquire the lock in the worker thread; holding a
                # threading.Lock across an await would block the event loop.
                with self.analysis_lock:
                    return self._run_g_assist_inference(prompt)

            return await loop.run_in_executor(None, _locked_inference)

        except Exception as e:
            self.logger.error(f"Text analysis failed: {e}")
            return f"Analysis failed: {str(e)}"

def _get_fallback_analysis(self, context: Dict[str, Any], analysis_type: LLMAnalysisType, query: str = "") -> str:
|
|
"""Get fallback analysis when G-Assist is not available."""
|
|
if analysis_type == LLMAnalysisType.BOTTLENECK_ANALYSIS:
|
|
return self._fallback_bottleneck_analysis(context)
|
|
elif analysis_type == LLMAnalysisType.OPTIMIZATION_RECOMMENDATIONS:
|
|
return self._fallback_optimization_analysis(context)
|
|
elif analysis_type == LLMAnalysisType.DEEP_SYSTEM_ANALYSIS:
|
|
return self._fallback_deep_analysis(context)
|
|
elif analysis_type == LLMAnalysisType.INTELLIGENT_QUERY:
|
|
return self._fallback_intelligent_query(query, context)
|
|
else:
|
|
return "Analysis type not supported"
|
|
|
|
    def _parse_analysis_result(self, analysis_text: str, context: Dict[str, Any], analysis_type: LLMAnalysisType) -> Dict[str, Any]:
        """Parse an analysis result into structured data."""
        if analysis_type == LLMAnalysisType.BOTTLENECK_ANALYSIS:
            return self._parse_bottleneck_analysis(analysis_text, context)
        elif analysis_type == LLMAnalysisType.OPTIMIZATION_RECOMMENDATIONS:
            return self._parse_optimization_analysis(analysis_text, context)
        elif analysis_type == LLMAnalysisType.DEEP_SYSTEM_ANALYSIS:
            return self._parse_deep_analysis(analysis_text, context)
        elif analysis_type == LLMAnalysisType.INTELLIGENT_QUERY:
            return self._parse_intelligent_query(analysis_text, context)
        else:
            return {"error": "Analysis type not supported"}

    def _generate_recommendations(self, structured_data: Dict[str, Any], context: Dict[str, Any], analysis_type: LLMAnalysisType) -> List[str]:
        """Generate recommendations based on the analysis type."""
        if analysis_type == LLMAnalysisType.BOTTLENECK_ANALYSIS:
            return self._generate_bottleneck_recommendations(structured_data, context)
        elif analysis_type == LLMAnalysisType.OPTIMIZATION_RECOMMENDATIONS:
            return self._generate_optimization_recommendations(structured_data, context)
        elif analysis_type == LLMAnalysisType.DEEP_SYSTEM_ANALYSIS:
            return self._generate_deep_analysis_recommendations(structured_data, context)
        elif analysis_type == LLMAnalysisType.INTELLIGENT_QUERY:
            return self._generate_query_recommendations(structured_data, context)
        else:
            return ["Analysis type not supported"]

    async def _generate_g_assist_analysis(self, context: Dict[str, Any], analysis_type: str) -> str:
        """Generate analysis using the G-Assist embedded LLM."""
        if not self.model_available:
            return "G-Assist embedded LLM not available"

        try:
            prompt = self._create_g_assist_prompt(context, analysis_type)

            loop = asyncio.get_running_loop()

            def _locked_inference() -> str:
                # Serialize inference in the worker thread rather than
                # holding the lock across an await.
                with self.analysis_lock:
                    return self._run_g_assist_inference(prompt)

            return await loop.run_in_executor(None, _locked_inference)

        except Exception as e:
            self.logger.error(f"G-Assist LLM generation failed: {e}")
            return f"G-Assist analysis failed: {str(e)}"

    def _run_g_assist_inference(self, prompt: str) -> str:
        """Run inference using the G-Assist embedded LLM."""
        try:
            response = self._call_g_assist_embedded_model(prompt)
            if response:
                return response.strip()
            else:
                return "No response generated from G-Assist embedded LLM"

        except Exception as e:
            self.logger.error(f"G-Assist inference failed: {e}")
            return f"G-Assist inference failed: {str(e)}"

    def _call_g_assist_embedded_model(self, prompt: str) -> str:
        """Call the G-Assist embedded LLM, with fuzzy matching for game-name prompts."""
        # Game-name correction prompts are handled locally with fuzzy
        # matching instead of a full LLM round trip.
        if "find the best match" in prompt.lower() and "game" in prompt.lower():
            try:
                try:
                    from rapidfuzz import fuzz, process
                except ImportError:
                    self.logger.warning("RapidFuzz not installed. Using fallback string similarity.")
                    fuzz_available = False
                else:
                    fuzz_available = True

                query_match = re.search(r'query: "([^"]+)"', prompt)
                if not query_match:
                    query_match = re.search(r'"([^"]+)"', prompt)

                if not query_match:
                    return "None"

                query = query_match.group(1)

                # Parse the candidate list from the original (case-preserving)
                # prompt so matches are returned exactly as listed.
                candidates = []
                idx = prompt.lower().find("candidates:")
                if idx != -1:
                    candidates_section = prompt[idx + len("candidates:"):]
                    for line in candidates_section.split("\n"):
                        if line.strip().startswith("-"):
                            candidates.append(line.strip()[1:].strip())

                if fuzz_available and candidates:
                    # rapidfuzz's extractOne returns a (match, score, index) triple.
                    best_match, score, _ = process.extractOne(
                        query,
                        candidates,
                        scorer=fuzz.token_set_ratio
                    )

                    if score >= 70:
                        return best_match
                else:
                    # Crude character-overlap similarity as a last resort.
                    best_match = None
                    highest_score = 0.0

                    for candidate in candidates:
                        common_chars = set(query.lower()) & set(candidate.lower())
                        similarity = len(common_chars) / max(len(query), len(candidate))

                        if similarity > highest_score:
                            highest_score = similarity
                            best_match = candidate

                    if highest_score > 0.5 and best_match:
                        return best_match

                return "None"

            except Exception as e:
                self.logger.error(f"Error in game name correction: {str(e)}")
                return "None"

        # Placeholder response path for all other prompts until a direct
        # binding to the embedded model is wired in.
        return f"""
Based on analysis using G-Assist's embedded 8B parameter Llama model:

{prompt}

Analysis complete. This response demonstrates successful integration with G-Assist's local LLM for privacy-focused gaming performance analysis.
"""

    def _create_g_assist_prompt(self, context: Dict[str, Any], analysis_type: str) -> str:
        """Create an analysis prompt optimized for G-Assist's embedded Llama model."""
        base_prompt = f"""
You are G-Assist, NVIDIA's gaming performance expert with deep knowledge of RTX hardware optimization.

System Context:
{json.dumps(context, indent=2)}

Analysis Type: {analysis_type}

Please provide a detailed analysis focusing on:
"""

        if analysis_type == "bottleneck_analysis":
            return base_prompt + """
1. Identify primary and secondary bottlenecks in the gaming system
2. Explain how these bottlenecks impact game performance
3. Provide RTX-specific optimization recommendations
4. Consider DLSS and RTX feature utilization
5. Suggest hardware upgrade priorities if needed
"""
        elif analysis_type == "optimization_recommendations":
            return base_prompt + """
1. Analyze current performance and identify optimization opportunities
2. Recommend specific graphics settings for optimal performance
3. Suggest DLSS quality/performance balance
4. Provide RTX feature configuration advice
5. Recommend driver and system optimizations
"""
        elif analysis_type == "deep_system_analysis":
            return base_prompt + """
1. Perform comprehensive system analysis including thermal considerations
2. Identify potential stability issues and solutions
3. Analyze future-proofing potential
4. Consider real-world gaming scenarios
5. Provide proactive maintenance strategies
"""
        elif analysis_type == "intelligent_query":
            return base_prompt + """
1. Answer the user's specific question about gaming performance
2. Provide context-aware recommendations
3. Explain technical concepts in accessible terms
4. Suggest related optimizations
5. Provide actionable next steps
"""

        return base_prompt

    def _fallback_bottleneck_analysis(self, context: Dict[str, Any]) -> str:
        """Fallback bottleneck analysis when G-Assist is not available."""
        hardware = context.get('hardware', {})
        compatibility = context.get('compatibility', {})

        analysis = f"Bottleneck Analysis for {context.get('game_name', 'Unknown Game')}:\n\n"

        bottlenecks = []
        if compatibility.get('cpu_score', 1.0) < 0.7:
            bottlenecks.append("CPU: May limit performance in CPU-intensive games")
        if compatibility.get('gpu_score', 1.0) < 0.7:
            bottlenecks.append("GPU: May struggle with high graphics settings")
        if compatibility.get('ram_score', 1.0) < 0.7:
            bottlenecks.append("RAM: May cause performance stuttering")

        if bottlenecks:
            analysis += "Identified Bottlenecks:\n"
            for i, bottleneck in enumerate(bottlenecks, 1):
                analysis += f"{i}. {bottleneck}\n"
        else:
            analysis += "No significant bottlenecks detected. Your system appears well-balanced.\n"

        analysis += f"\nSystem Hardware: {hardware.get('gpu', 'Unknown GPU')}, {hardware.get('cpu', 'Unknown CPU')}"

        return analysis

    def _fallback_optimization_analysis(self, context: Dict[str, Any]) -> str:
        """Fallback optimization analysis when G-Assist is not available."""
        performance = context.get('performance', {})
        hardware = context.get('hardware', {})

        analysis = f"Optimization Recommendations for {context.get('game_name', 'Unknown Game')}:\n\n"

        suggestions = []
        if performance.get('fps_estimate', 0) < 60:
            suggestions.append("Consider lowering graphics settings to Medium or High")
        if 'rtx' in hardware.get('gpu', '').lower():
            suggestions.append("Enable DLSS for significant performance improvement")
            suggestions.append("Consider RTX features for enhanced visual quality")

        if suggestions:
            analysis += "Optimization Suggestions:\n"
            for i, suggestion in enumerate(suggestions, 1):
                analysis += f"{i}. {suggestion}\n"
        else:
            analysis += "Your system appears well-optimized for this game.\n"

        return analysis

    def _fallback_deep_analysis(self, context: Dict[str, Any]) -> str:
        """Fallback deep analysis when G-Assist is not available."""
        analysis = f"Deep System Analysis for {context.get('game_name', 'Unknown Game')}:\n\n"
        analysis += "System Status: Analysis performed without G-Assist integration.\n"
        analysis += "For comprehensive deep analysis, G-Assist with an RTX 30/40/50 series GPU is recommended.\n"

        return analysis

    def _fallback_intelligent_query(self, query: str, context: Dict[str, Any]) -> str:
        """Fallback intelligent query processing when G-Assist is not available."""
        return f"Query: {query}\n\nBasic Response: G-Assist embedded LLM not available for intelligent query processing. Please ensure you have a compatible RTX GPU with G-Assist enabled."

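    # ------------------------------------------------------------------
    # Lightweight keyword-based parsers and recommendation generators.
    # These produce structured output for both the LLM and fallback paths.
    # ------------------------------------------------------------------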
    def _parse_bottleneck_analysis(self, analysis_text: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Parse bottleneck analysis into structured data."""
        return {
            "primary_bottleneck": "GPU" if "gpu" in analysis_text.lower() else "CPU",
            "bottleneck_severity": 0.6,
            "component_scores": context.get('compatibility', {}),
            "optimization_potential": 0.8
        }

    def _parse_optimization_analysis(self, analysis_text: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Parse optimization analysis into structured data."""
        return {
            "optimization_level": "High",
            "performance_gain_potential": 0.25,
            "dlss_recommended": "dlss" in analysis_text.lower(),
            "rtx_recommended": "rtx" in analysis_text.lower()
        }

    def _parse_deep_analysis(self, analysis_text: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Parse deep analysis into structured data."""
        return {
            "stability_score": 0.9,
            "thermal_considerations": "Normal",
            "future_proofing_score": 0.7,
            "upgrade_recommendations": []
        }

    def _parse_intelligent_query(self, analysis_text: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Parse an intelligent query response into structured data."""
        return {
            "query_type": "performance_analysis",
            "confidence": 0.85,
            "answer_quality": "High" if context.get('g_assist_used', False) else "Basic",
            "follow_up_suggestions": []
        }

    def _generate_bottleneck_recommendations(self, structured_data: Dict[str, Any], context: Dict[str, Any]) -> List[str]:
        """Generate bottleneck-specific recommendations."""
        recommendations = []

        primary_bottleneck = structured_data.get('primary_bottleneck', 'Unknown')
        if primary_bottleneck == 'GPU':
            recommendations.append("Consider lowering graphics settings or enabling DLSS")
            recommendations.append("Update GPU drivers for optimal performance")
        elif primary_bottleneck == 'CPU':
            recommendations.append("Close unnecessary background applications")
            recommendations.append("Consider CPU upgrade for better gaming performance")

        return recommendations

    def _generate_optimization_recommendations(self, structured_data: Dict[str, Any], context: Dict[str, Any]) -> List[str]:
        """Generate optimization recommendations."""
        recommendations = []

        if structured_data.get('dlss_recommended', False):
            recommendations.append("Enable DLSS for significant performance improvement")
        if structured_data.get('rtx_recommended', False):
            recommendations.append("Consider RTX features for enhanced visual quality")

        recommendations.append("Optimize graphics settings for your hardware")
        recommendations.append("Keep drivers updated for best performance")

        return recommendations

    def _generate_deep_analysis_recommendations(self, structured_data: Dict[str, Any], context: Dict[str, Any]) -> List[str]:
        """Generate deep analysis recommendations."""
        recommendations = []

        stability_score = structured_data.get('stability_score', 0.0)
        if stability_score < 0.8:
            recommendations.append("Monitor system temperatures during gaming")
            recommendations.append("Consider system stability improvements")

        future_proofing = structured_data.get('future_proofing_score', 0.0)
        if future_proofing < 0.6:
            recommendations.append("Consider hardware upgrades for future games")

        return recommendations

    def _generate_query_recommendations(self, structured_data: Dict[str, Any], context: Dict[str, Any]) -> List[str]:
        """Generate recommendations based on an intelligent query."""
        recommendations = []

        # analyze() stores the user's question under the 'query' key.
        query = context.get('query', '').lower()
        if 'performance' in query:
            recommendations.append("Monitor FPS and adjust settings accordingly")
        if 'settings' in query:
            recommendations.append("Experiment with different graphics presets")

        return recommendations

    def _get_technical_details(self, context: Dict[str, Any]) -> Dict[str, Any]:
        """Get technical details for an analysis."""
        return {
            "analysis_method": "G-Assist Embedded LLM" if self.model_available else "Fallback Analysis",
            "model_capabilities": self.g_assist_capabilities.__dict__ if self.g_assist_capabilities else {},
            "system_context": context.get('hardware', {})
        }

    def _get_model_info(self) -> Dict[str, str]:
        """Get model information."""
        if self.model_available and self.g_assist_capabilities:
            return {
                "model_type": self.g_assist_capabilities.model_type,
                "model_size": self.g_assist_capabilities.model_size,
                "inference_location": "Local RTX GPU",
                "privacy_mode": "Fully Local"
            }
        else:
            return {
                "model_type": "Fallback Analysis",
                "model_size": "N/A",
                "inference_location": "Local CPU",
                "privacy_mode": "Local"
            }

    def _create_error_result(self, analysis_type: LLMAnalysisType, error_msg: str) -> LLMAnalysisResult:
        """Create an error result for a failed analysis."""
        return LLMAnalysisResult(
            analysis_type=analysis_type,
            confidence_score=0.0,
            analysis_text=f"Analysis failed: {error_msg}",
            structured_data={"error": error_msg},
            recommendations=["Check system compatibility", "Try again later"],
            technical_details={"error": error_msg},
            processing_time_ms=0.0,
            g_assist_used=False,
            model_info={"status": "error"}
        )

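    # ------------------------------------------------------------------
    # Compatibility estimation and game-name matching
    # ------------------------------------------------------------------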
    async def estimate_compatibility_metrics(self, game_name: str, hardware_specs: PrivacyAwareHardwareSpecs,
                                             compatibility_analysis, performance_prediction) -> Dict[str, Any]:
        """Use the LLM to estimate compatibility metrics and performance scores.

        `compatibility_analysis` and `performance_prediction` are accepted for
        API compatibility but are not used by the current estimation.
        """
        try:
            context = {
                'game_name': game_name,
                'hardware': {
                    'gpu_model': hardware_specs.gpu_model,
                    'gpu_vram_gb': hardware_specs.gpu_vram_gb,
                    'cpu_model': hardware_specs.cpu_model,
                    'cpu_cores': hardware_specs.cpu_cores,
                    'ram_total_gb': hardware_specs.ram_total_gb,
                    'supports_rtx': hardware_specs.supports_rtx,
                    'supports_dlss': hardware_specs.supports_dlss
                }
            }

            return self._intelligent_compatibility_estimation(context)

        except Exception as e:
            self.logger.error(f"LLM compatibility estimation failed: {e}")
            return self._fallback_compatibility_estimation()

    def _intelligent_compatibility_estimation(self, context: Dict[str, Any]) -> Dict[str, Any]:
        """Heuristic estimation based on hardware specifications."""
        hardware = context.get('hardware', {})
        gpu_model = hardware.get('gpu_model', '').lower()
        cpu_model = hardware.get('cpu_model', '').lower()
        ram_gb = hardware.get('ram_total_gb', 16)

        # GPU scoring by model family, most specific match first.
        if 'rtx 4090' in gpu_model:
            gpu_score, gpu_tier = 95, 'flagship'
        elif 'rtx 4080' in gpu_model:
            gpu_score, gpu_tier = 90, 'high-end'
        elif 'rtx 4070' in gpu_model:
            gpu_score, gpu_tier = 85, 'high-end'
        elif 'rtx 40' in gpu_model:
            gpu_score, gpu_tier = 80, 'high-end'
        elif 'rtx 30' in gpu_model:
            gpu_score, gpu_tier = 75, 'mid-high'
        elif 'rtx 20' in gpu_model:
            gpu_score, gpu_tier = 70, 'mid-range'
        else:
            gpu_score, gpu_tier = 65, 'mid-range'

        # CPU scoring by model family.
        if 'ryzen 7 7800x3d' in cpu_model or 'i7-13700k' in cpu_model:
            cpu_score = 90
        elif 'ryzen 7' in cpu_model or 'i7' in cpu_model:
            cpu_score = 85
        elif 'ryzen 5' in cpu_model or 'i5' in cpu_model:
            cpu_score = 80
        else:
            cpu_score = 75

        # Memory scoring by capacity.
        if ram_gb >= 32:
            memory_score = 95
        elif ram_gb >= 16:
            memory_score = 85
        else:
            memory_score = 75

        # Overall stability rating from the component average.
        avg_score = (gpu_score + cpu_score + memory_score) / 3
        if avg_score >= 90:
            stability = 'excellent'
        elif avg_score >= 80:
            stability = 'stable'
        else:
            stability = 'good'

        return {
            'gpu_score': gpu_score,
            'cpu_score': cpu_score,
            'memory_score': memory_score,
            'storage_score': 85,
            'gpu_tier': gpu_tier,
            'stability': stability
        }

    def _fallback_compatibility_estimation(self) -> Dict[str, Any]:
        """Fallback estimation when analysis fails."""
        return {
            'gpu_score': 75,
            'cpu_score': 75,
            'memory_score': 80,
            'storage_score': 80,
            'gpu_tier': 'mid-range',
            'stability': 'stable'
        }

    async def correct_game_name(self, query: str, candidates: List[str]) -> Optional[str]:
        """Use the LLM to correct a potentially misspelled game name from a list of candidates."""
        if not self.model_available:
            self.logger.warning("G-Assist not available for game name correction.")
            return None
        if not candidates:
            return None

        try:
            candidates_str = "\n".join(f"- {c}" for c in candidates)
            prompt = f"""
From the following list of game titles, find the best match for the user's query: "{query}"

Candidates:
{candidates_str}

Analyze the query and the candidates. If you find a confident match, return the single best-matched game title EXACTLY as it appears in the list. If no candidate is a confident match, return the exact string "None".
"""

            llm_response = await self.analyze_text(prompt)
            cleaned_response = llm_response.strip()

            if cleaned_response.lower() == 'none':
                self.logger.info(f"LLM found no confident match for '{query}'")
                return None

            # Only accept responses that name a real candidate.
            for candidate in candidates:
                if candidate.lower() == cleaned_response.lower():
                    self.logger.info(f"LLM corrected '{query}' to '{candidate}'")
                    return candidate

            self.logger.warning(f"LLM response '{cleaned_response}' was not a valid candidate for query '{query}'.")
            return None

        except Exception as e:
            self.logger.error(f"LLM game name correction failed: {e}")
            return None

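    # Example (illustrative): `await analyzer.correct_game_name("daiblo 4",
    # ["Diablo IV", "Cyberpunk 2077"])` is expected to return "Diablo IV"
    # when the matcher finds a confident hit, and None otherwise.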
    async def interpret_game_requirements(self, game_query: str, available_games: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """Use the embedded LLM to directly interpret and match game requirements data."""
        try:
            if not self.model_available:
                self.logger.warning("G-Assist not available. Using fallback game matching.")
                return self._fallback_game_matching(game_query, available_games)

            games_list = "\n".join([f"- {name}: {json.dumps(data, indent=2)}" for name, data in available_games.items()])

            prompt = f"""
User is asking about game: "{game_query}"

Available games in database:
{games_list}

Please:
1. Find the best matching game from the database (handle variations like "Diablo 4" vs "Diablo IV")
2. Extract and interpret the game requirements clearly
3. Return the game name and requirements in JSON format

If you find a match, return JSON like:
{{
    "matched_game": "exact_name_from_database",
    "requirements": {{
        "minimum": {{extracted_minimum_specs}},
        "recommended": {{extracted_recommended_specs}}
    }}
}}

If no match found, return: {{"error": "Game not found"}}
"""

            analysis = await self._invoke_g_assist_llm(prompt)

            try:
                result = json.loads(analysis)
                if "matched_game" in result and "requirements" in result:
                    return result
            except json.JSONDecodeError:
                self.logger.warning("LLM response was not valid JSON, using fallback")

            return self._fallback_game_matching(game_query, available_games)

        except Exception as e:
            self.logger.error(f"Game requirements interpretation failed: {e}")
            return self._fallback_game_matching(game_query, available_games)

    async def _invoke_g_assist_llm(self, prompt: str) -> str:
        """Invoke the G-Assist LLM with the given prompt."""
        try:
            response = await self._generate_g_assist_analysis({"prompt": prompt}, "intelligent_query")
            return response
        except Exception as e:
            self.logger.error(f"G-Assist LLM invocation failed: {e}")
            return f"Error: {str(e)}"

    def _fallback_game_matching(self, game_query: str, available_games: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """Fallback game matching when the G-Assist LLM is not available."""
        game_query_lower = game_query.lower()

        # Known aliases and common shorthand for popular titles.
        name_variations = {
            "diablo 4": "Diablo IV",
            "diablo iv": "Diablo IV",
            "call of duty": "Call of Duty: Modern Warfare II",
            "cod": "Call of Duty: Modern Warfare II",
            "modern warfare": "Call of Duty: Modern Warfare II",
            "bg3": "Baldur's Gate 3",
            "baldurs gate 3": "Baldur's Gate 3",
            "cyberpunk": "Cyberpunk 2077",
            "cp2077": "Cyberpunk 2077",
            "witcher 3": "The Witcher 3: Wild Hunt",
            "apex": "Apex Legends",
            "rdr2": "Red Dead Redemption 2",
            "red dead 2": "Red Dead Redemption 2"
        }

        for variation, actual_name in name_variations.items():
            if variation in game_query_lower and actual_name in available_games:
                return {
                    "matched_game": actual_name,
                    "requirements": available_games[actual_name]
                }

        # Fall back to simple substring matching in either direction.
        for game_name, game_data in available_games.items():
            if game_query_lower in game_name.lower() or game_name.lower() in game_query_lower:
                return {
                    "matched_game": game_name,
                    "requirements": game_data
                }

        return None

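    # ------------------------------------------------------------------
    # JSON-serialization helpers used for cache keys and prompt building
    # ------------------------------------------------------------------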
    def _make_context_serializable(self, context: Dict[str, Any]) -> Dict[str, Any]:
        """Convert a context dict to a JSON-serializable form, handling dataclasses and enums."""
        serializable_context = {}

        for key, value in context.items():
            try:
                if isinstance(value, Enum):
                    serializable_context[key] = self._serialize_value(value.value)
                elif hasattr(value, '_asdict'):
                    # Named tuples: they define _asdict() but not __dict__.
                    serializable_context[key] = value._asdict()
                elif hasattr(value, '__dataclass_fields__'):
                    # Dataclasses: serialize field by field.
                    serializable_context[key] = {
                        field: self._serialize_value(getattr(value, field))
                        for field in value.__dataclass_fields__
                    }
                elif isinstance(value, (list, tuple)):
                    serializable_context[key] = [self._serialize_value(item) for item in value]
                elif isinstance(value, dict):
                    serializable_context[key] = self._make_context_serializable(value)
                elif hasattr(value, '__dict__'):
                    serializable_context[key] = self._serialize_value(value.__dict__)
                else:
                    serializable_context[key] = value
            except Exception as e:
                self.logger.debug(f"Failed to serialize {key}: {e}")
                serializable_context[key] = str(value)

        return serializable_context

    def _serialize_value(self, value: Any) -> Any:
        """Serialize a single value, handling enums, datetimes, and complex objects."""
        try:
            if isinstance(value, Enum):
                return self._serialize_value(value.value)
            elif hasattr(value, 'isoformat'):
                # datetime/date objects.
                return value.isoformat()
            elif isinstance(value, (list, tuple)):
                return [self._serialize_value(item) for item in value]
            elif isinstance(value, dict):
                return {k: self._serialize_value(v) for k, v in value.items()}
            elif hasattr(value, '__dict__'):
                return {k: self._serialize_value(v) for k, v in value.__dict__.items()}
            else:
                return value
        except Exception:
            return str(value)
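

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the plugin API). Assumes this module
# is importable alongside the `src` package; the context keys mirror the ones
# the fallback paths above read ('game_name', 'hardware', 'compatibility').
if __name__ == "__main__":
    async def _demo() -> None:
        analyzer = GAssistLLMAnalyzer(fallback_enabled=True)
        context = {
            "game_name": "Cyberpunk 2077",
            "hardware": {"gpu": "RTX 4070", "cpu": "Ryzen 7 7800X3D"},
            "compatibility": {"cpu_score": 0.9, "gpu_score": 0.8, "ram_score": 0.95},
        }
        result = await analyzer.analyze_bottlenecks(context)
        print(f"[{result.analysis_type.value}] confidence={result.confidence_score:.2f}")
        print(result.analysis_text)

    asyncio.run(_demo())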