"""
brain_speed.py
Speed-optimized interface for multimodular_modul_v7
- Makes user replies faster after runtime is up
- Uses caching + warm-up
"""
import os
import time
import threading
import functools
from collections import OrderedDict
import importlib

# ------------------------------
# Cache Setup
# ------------------------------
CACHE_LIMIT = 50  # max cached responses before least-recently-used eviction

_response_cache = OrderedDict()   # key -> cached result, ordered oldest -> newest
# The preload daemon thread and user-facing callers can touch the cache
# concurrently, so every mutation of the OrderedDict is guarded by this lock.
_cache_lock = threading.Lock()


def _cache_result(func):
    """Decorator: memoize *func* in a bounded, thread-safe LRU cache.

    The cache key is built from the str() of the positional args and the
    sorted keyword args, so arguments must have a stable string form.
    On a hit the entry is promoted to most-recently-used; on insert the
    oldest entry is evicted once CACHE_LIMIT is exceeded.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        key = str((args, tuple(sorted(kwargs.items()))))
        with _cache_lock:
            if key in _response_cache:
                _response_cache.move_to_end(key)  # promote to MRU
                return _response_cache[key]
        # Compute outside the lock so slow brain calls don't serialize readers.
        result = func(*args, **kwargs)
        with _cache_lock:
            _response_cache[key] = result
            _response_cache.move_to_end(key)  # in case another thread raced us
            if len(_response_cache) > CACHE_LIMIT:
                _response_cache.popitem(last=False)  # evict LRU entry
        return result
    return wrapper


# ------------------------------
# Load + Warm-up
# ------------------------------
_brain = None      # lazily imported multimodular_modul_v7 module
_is_ready = False  # flipped once the import has completed
# Without this lock the import-time preload thread and the first caller
# could both pass the `_brain is None` check and warm up twice.
_load_lock = threading.Lock()


def _load_brain():
    """Import the heavy brain module once (thread-safe) and warm it up.

    Returns the imported module. Uses double-checked locking: the unlocked
    fast path avoids lock overhead on every proxied call after startup.
    """
    global _brain, _is_ready
    if _brain is None:  # fast path: already loaded, skip the lock
        with _load_lock:
            if _brain is None:  # re-check: another thread may have won the race
                print("⚡ Loading brain (speed mode)...")
                start = time.time()
                _brain = importlib.import_module("multimodular_modul_v7")
                _is_ready = True
                print(f"✅ Brain ready in {time.time() - start:.2f}s")
                _warm_up()
    return _brain


def _warm_up():
    """Push one dummy request through the brain so real queries start fast.

    Best-effort: a warm-up failure is reported but never propagated, since
    the brain itself loaded successfully.
    """
    try:
        print("🔥 Warming up models...")
        _brain.process_input("Hello")
        print("✅ Warm-up complete")
    except Exception as e:
        print(f"⚠ Warm-up failed: {e}")


def preload_in_background():
    """Kick off brain loading on a daemon thread so import returns instantly."""
    threading.Thread(target=_load_brain, daemon=True).start()


# Start loading as soon as this module is imported (original behavior).
preload_in_background()


# ------------------------------
# Speed-optimized Proxies
# ------------------------------
@_cache_result
def process_input(text):
    """Answer *text* via the brain; identical repeated inputs hit the cache."""
    return _load_brain().process_input(text)


@_cache_result
def search_kb(query):
    """Search the knowledge base; identical repeated queries hit the cache."""
    return _load_brain().search_kb(query)


def upload_media(path):
    """Forward a media upload to the brain (never cached: side-effecting)."""
    return _load_brain().upload_media(path)


def backup_brain():
    """Forward a backup request to the brain."""
    return _load_brain().backup_brain()


def restore_brain():
    """Forward a restore request to the brain."""
    return _load_brain().restore_brain()


def show_creative_skills():
    """Forward a creative-skills listing request to the brain."""
    return _load_brain().show_creative_skills()


def sync_status():
    """Forward a sync-status query to the brain."""
    return _load_brain().sync_status()


def is_ready():
    """Return True once the brain module has finished importing."""
    return _is_ready