import asyncio
import aiohttp
import json
import logging
import torch
import faiss
import numpy as np
from transformers import AutoModelForCausalLM, AutoTokenizer
from typing import List, Dict, Any
from cryptography.fernet import Fernet
from jwt import encode, decode, ExpiredSignatureError
from datetime import datetime, timedelta
import pyttsx3
import os

from components.multi_model_analyzer import MultiAgentSystem
from components.neuro_symbolic_engine import NeuroSymbolicEngine
from components.self_improving_ai import SelfImprovingAI
from modules.secure_memory_loader import load_secure_memory_module
from ethical_filter import EthicalFilter
from codette_openai_fallback import query_codette_with_fallback
from CodriaoCore.federated_learning import FederatedAI
from utils.database import Database
from utils.logger import logger
from codriao_tb_module import CodriaoHealthModule
from fail_safe import AIFailsafeSystem
from quarantine_engine import QuarantineEngine
from anomaly_score import AnomalyScorer

class AICoreAGIX:
    def __init__(self, config_path: str = "config.json"):
        self.ethical_filter = EthicalFilter()
        self.config = self._load_config(config_path)
        self.tokenizer = AutoTokenizer.from_pretrained(self.config["model_name"])
        self.model = AutoModelForCausalLM.from_pretrained(self.config["model_name"])
        self.context_memory = self._initialize_vector_memory()
        self.http_session = aiohttp.ClientSession()
        self.database = Database()
        self.multi_agent_system = MultiAgentSystem()
        self.self_improving_ai = SelfImprovingAI()
        self.neural_symbolic_engine = NeuroSymbolicEngine()
        self.federated_ai = FederatedAI()
        self.failsafe_system = AIFailsafeSystem()

        # Secure memory setup
        self._encryption_key = Fernet.generate_key()
        secure_memory_module = load_secure_memory_module()
        SecureMemorySession = secure_memory_module.SecureMemorySession
        self.secure_memory_loader = SecureMemorySession(self._encryption_key)

        self.speech_engine = pyttsx3.init()
        self.health_module = CodriaoHealthModule(ai_core=self)
        self.quarantine_engine = QuarantineEngine()
        self.anomaly_scorer = AnomalyScorer()
        self.lockdown_engaged = False

    def engage_lockdown_mode(self, reason="Unspecified anomaly"):
        timestamp = datetime.utcnow().isoformat()
        self.lockdown_engaged = True

        # Disable external systems
        try:
            self.http_session = None
            if hasattr(self.federated_ai, "network_enabled"):
                self.federated_ai.network_enabled = False
            if hasattr(self.self_improving_ai, "enable_learning"):
                self.self_improving_ai.enable_learning = False
        except Exception as e:
            logger.error(f"Lockdown component shutdown failed: {e}")

        # Log the event
        lockdown_event = {
            "event": "Lockdown Mode Activated",
            "reason": reason,
            "timestamp": timestamp
        }
        logger.warning(f"[LOCKDOWN MODE] - Reason: {reason} | Time: {timestamp}")
        self.failsafe_system.trigger_failsafe("Lockdown initiated", str(lockdown_event))

        # Return confirmation
        return {
            "status": "Lockdown Engaged",
            "reason": reason,
            "timestamp": timestamp
        }

    def analyze_event_for_anomalies(self, event_type: str, data: dict):
        score = self.anomaly_scorer.score_event(event_type, data)
        if score["score"] >= 70:
            # Defensive, not destructive
            self.quarantine_engine.quarantine(data.get("module", "unknown"), reason=score["notes"])
            logger.warning(f"[Codriao]: Suspicious activity quarantined. Module: {data.get('module')}")
        return score

    def _load_config(self, config_path: str) -> dict:
        """Loads the configuration file."""
        try:
            with open(config_path, 'r') as file:
                return json.load(file)
        except FileNotFoundError:
            logger.error(f"Configuration file not found: {config_path}")
            raise
        except json.JSONDecodeError as e:
            logger.error(f"Error decoding JSON in config file: {config_path}, Error: {e}")
            raise

    def _initialize_vector_memory(self):
        """Initializes FAISS vector memory."""
        return faiss.IndexFlatL2(768)

    def _vectorize_query(self, query: str):
        """Vectorizes user query using tokenizer."""
        tokenized = self.tokenizer(query, return_tensors="pt")
        return tokenized["input_ids"].detach().numpy()
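
    # NOTE (illustrative sketch, not part of the original file): _vectorize_query returns
    # raw token IDs, whose length varies with the query and does not match the 768-dim
    # FAISS index created in _initialize_vector_memory. One way to obtain a fixed-size
    # vector, assuming the model's hidden size is 768; the method name _embed_query is
    # hypothetical.
    def _embed_query(self, query: str) -> np.ndarray:
        """Mean-pools the model's last hidden state into a single float32 vector."""
        inputs = self.tokenizer(query, return_tensors="pt")
        with torch.no_grad():
            outputs = self.model(**inputs, output_hidden_states=True)
        embedding = outputs.hidden_states[-1].mean(dim=1)  # shape: (1, hidden_size)
        return embedding.squeeze(0).numpy().astype(np.float32)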

    async def generate_response(self, query: str, user_id: int) -> Dict[str, Any]:
        try:
            # Validate query input
            if not isinstance(query, str) or len(query.strip()) == 0:
                raise ValueError("Invalid query input.")

            # Ethical filter
            result = self.ethical_filter.analyze_query(query)
            if result["status"] == "blocked":
                return {"error": result["reason"]}
            if result["status"] == "flagged":
                logger.warning(result["warning"])

            # Special diagnostics trigger
            if any(phrase in query.lower() for phrase in ["tb check", "analyze my tb", "run tb diagnostics", "tb test"]):
                return await self.run_tb_diagnostics("tb_image.jpg", "tb_cough.wav", user_id)

            # Vector memory and responses
            vectorized_query = self._vectorize_query(query)
            self.secure_memory_loader.encrypt_vector(user_id, vectorized_query)
            responses = await asyncio.gather(
                self._generate_local_model_response(query),
                self.multi_agent_system.delegate_task(query),
                self.self_improving_ai.evaluate_response(query),
                self.neural_symbolic_engine.integrate_reasoning(query)
            )
            final_response = "\n\n".join(responses)

            # Verify response safety
            safe = self.failsafe_system.verify_response_safety(final_response)
            if not safe:
                return {"error": "Failsafe triggered due to unsafe response content."}

            self.database.log_interaction(user_id, query, final_response)
            self._log_to_blockchain(user_id, query, final_response)
            self._speak_response(final_response)

            return {
                "response": final_response,
                "real_time_data": self.federated_ai.get_latest_data(),
                "context_enhanced": True,
                "security_status": "Fully Secure"
            }
        except Exception as e:
            logger.error(f"Response generation failed: {e}")
            return {"error": "Processing failed - safety protocols engaged"}

    async def _generate_local_model_response(self, query: str) -> str:
        """Generates a response using the local model."""
        inputs = self.tokenizer(query, return_tensors="pt")
        outputs = self.model.generate(**inputs)
        return self.tokenizer.decode(outputs[0], skip_special_tokens=True)

    async def run_tb_diagnostics(self, image_path: str, audio_path: str, user_id: int) -> Dict[str, Any]:
        """Runs TB diagnostics with AI modules."""
        try:
            result = await self.health_module.evaluate_tb_risk(image_path, audio_path, user_id)
            logger.info(f"TB Diagnostic Result: {result}")
            return result
        except Exception as e:
            logger.error(f"TB diagnostics failed: {e}")
            return {"tb_risk": "ERROR", "error": str(e)}

    def _log_to_blockchain(self, user_id: int, query: str, final_response: str):
        """Logs interaction to blockchain with retries."""
        retries = 3
        for attempt in range(retries):
            try:
                logger.info(f"Logging interaction to blockchain: Attempt {attempt + 1}")
                # Placeholder: the actual blockchain write is not implemented here.
                break
            except Exception as e:
                logger.warning(f"Blockchain logging failed: {e}")
                continue

    def _speak_response(self, response: str):
        """Speaks out the generated response."""
        try:
            self.speech_engine.say(response)
            self.speech_engine.runAndWait()
        except Exception as e:
            logger.error(f"Speech synthesis failed: {e}")

    async def shutdown(self):
        """Closes asynchronous resources."""
        # The HTTP session may already have been dropped by engage_lockdown_mode().
        if self.http_session is not None:
            await self.http_session.close()
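

# Illustrative usage sketch (not part of the original file). It assumes a valid
# config.json with a "model_name" entry and that all local component modules are
# importable; the query text and event payload below are made up for the example.
if __name__ == "__main__":
    async def _demo():
        core = AICoreAGIX(config_path="config.json")
        try:
            reply = await core.generate_response("Hello, Codriao!", user_id=1)
            print(reply)
            # A high-scoring anomaly event would quarantine the offending module.
            core.analyze_event_for_anomalies("module_behavior", {"module": "example_module"})
        finally:
            await core.shutdown()

    asyncio.run(_demo())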