File size: 2,712 Bytes
e35cb49
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
"""
brain_fast_lazy.py
Combines:
- Lazy loading (bypass Hugging Face timeout)
- Speed optimizations (fast replies after load)
"""

import os
import time
import threading
import functools
from collections import OrderedDict
import importlib

# ------------------------------
# Persistent cache setup
# ------------------------------
# Hugging Face downloads land here so they persist across restarts.
# The path was previously repeated three times; keep it in one constant.
_CACHE_DIR = "/home/user/app/cache"
os.environ["TRANSFORMERS_CACHE"] = _CACHE_DIR  # legacy transformers-only var (pre HF_HOME)
os.environ["HF_HOME"] = _CACHE_DIR             # umbrella var: hub, datasets, transformers
os.makedirs(_CACHE_DIR, exist_ok=True)

# ------------------------------
# Cache for replies
# ------------------------------
CACHE_LIMIT = 50
_response_cache = OrderedDict()

def _cache_result(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        key = str((args, tuple(sorted(kwargs.items()))))
        if key in _response_cache:
            _response_cache.move_to_end(key)
            return _response_cache[key]
        result = func(*args, **kwargs)
        _response_cache[key] = result
        if len(_response_cache) > CACHE_LIMIT:
            _response_cache.popitem(last=False)
        return result
    return wrapper

# ------------------------------
# Brain loader + warm-up
# ------------------------------
_brain = None        # the imported brain module once loaded, else None
_is_loading = False  # True only while the import is in progress
_is_ready = False    # True once the brain module has been imported
_lock = threading.Lock()  # serializes the lazy import across threads

def _load_brain():
    """Import the heavy brain module on first use (thread-safe) and warm it up.

    Returns the imported module; later calls return the cached module
    without re-importing. Raises whatever the underlying import raises.
    """
    global _brain, _is_ready, _is_loading
    with _lock:
        if _brain is None:
            _is_loading = True
            print("⏳ Loading multimodular brain (fast-lazy mode)...")
            start_time = time.time()
            try:
                _brain = importlib.import_module("multimodular_modul_v7")
            finally:
                # Previously a failed import left _is_loading stuck at True
                # with no way to observe the failure state; always clear the
                # flag so a later call can retry cleanly.
                _is_loading = False
            _is_ready = True
            print(f"βœ… Brain loaded in {time.time() - start_time:.2f}s")
            _warm_up()
    return _brain

def _warm_up():
    try:
        print("πŸ”₯ Warming up models...")
        _brain.process_input("Hello")
        print("βœ… Warm-up complete")
    except Exception as e:
        print(f"⚠ Warm-up failed: {e}")

def preload_in_background():
    """Kick off the brain import on a daemon thread so import-time stays fast."""
    loader = threading.Thread(target=_load_brain, daemon=True)
    loader.start()

# Start background preload at import
preload_in_background()

# ------------------------------
# Proxies with caching
# ------------------------------
@_cache_result
def process_input(text):
    """Delegate to the loaded brain's ``process_input`` (reply is memoized)."""
    return _load_brain().process_input(text)

@_cache_result
def search_kb(query):
    """Delegate to the loaded brain's ``search_kb`` (result is memoized)."""
    return _load_brain().search_kb(query)

def upload_media(path):
    """Delegate to the loaded brain's ``upload_media``."""
    return _load_brain().upload_media(path)

def backup_brain():
    """Delegate to the loaded brain's ``backup_brain``."""
    return _load_brain().backup_brain()

def restore_brain():
    """Delegate to the loaded brain's ``restore_brain``."""
    return _load_brain().restore_brain()

def show_creative_skills():
    """Delegate to the loaded brain's ``show_creative_skills``."""
    return _load_brain().show_creative_skills()

def sync_status():
    """Delegate to the loaded brain's ``sync_status``."""
    return _load_brain().sync_status()

def is_ready():
    """Return True once the brain module has finished loading."""
    return _is_ready