Princeaka committed · verified
Commit e35cb49 · 1 Parent(s): dedd33b

Create brain_fast_lazy.py

Files changed (1)
  1. brain_fast_lazy.py +90 -0
brain_fast_lazy.py ADDED
@@ -0,0 +1,90 @@
+"""
+brain_fast_lazy.py
+Combines:
+- Lazy loading (bypass Hugging Face timeout)
+- Speed optimizations (fast replies after load)
+"""
+
+import os
+import time
+import threading
+import functools
+from collections import OrderedDict
+import importlib
+
+# ------------------------------
+# Persistent cache setup
+# ------------------------------
+os.environ["TRANSFORMERS_CACHE"] = "/home/user/app/cache"
+os.environ["HF_HOME"] = "/home/user/app/cache"
+os.makedirs("/home/user/app/cache", exist_ok=True)
+
+# ------------------------------
+# Cache for replies
+# ------------------------------
+CACHE_LIMIT = 50
+_response_cache = OrderedDict()
+
+def _cache_result(func):
+    @functools.wraps(func)
+    def wrapper(*args, **kwargs):
+        key = str((args, tuple(sorted(kwargs.items()))))
+        if key in _response_cache:
+            _response_cache.move_to_end(key)
+            return _response_cache[key]
+        result = func(*args, **kwargs)
+        _response_cache[key] = result
+        if len(_response_cache) > CACHE_LIMIT:
+            _response_cache.popitem(last=False)
+        return result
+    return wrapper
+
+# ------------------------------
+# Brain loader + warm-up
+# ------------------------------
+_brain = None
+_is_loading = False
+_is_ready = False
+_lock = threading.Lock()
+
+def _load_brain():
+    global _brain, _is_ready, _is_loading
+    with _lock:
+        if _brain is None:
+            _is_loading = True
+            print("⏳ Loading multimodular brain (fast-lazy mode)...")
+            start_time = time.time()
+            _brain = importlib.import_module("multimodular_modul_v7")
+            _is_ready = True
+            _is_loading = False
+            print(f"✅ Brain loaded in {time.time() - start_time:.2f}s")
+            _warm_up()
+    return _brain
+
+def _warm_up():
+    try:
+        print("🔥 Warming up models...")
+        _brain.process_input("Hello")
+        print("✅ Warm-up complete")
+    except Exception as e:
+        print(f"⚠ Warm-up failed: {e}")
+
+def preload_in_background():
+    threading.Thread(target=_load_brain, daemon=True).start()
+
+# Start background preload at import
+preload_in_background()
+
+# ------------------------------
+# Proxies with caching
+# ------------------------------
+@_cache_result
+def process_input(text): return _load_brain().process_input(text)
+@_cache_result
+def search_kb(query): return _load_brain().search_kb(query)
+def upload_media(path): return _load_brain().upload_media(path)
+def backup_brain(): return _load_brain().backup_brain()
+def restore_brain(): return _load_brain().restore_brain()
+def show_creative_skills(): return _load_brain().show_creative_skills()
+def sync_status(): return _load_brain().sync_status()
+def is_ready(): return _is_ready