import os
import torch
import torch.nn.functional as F
from transformers import AutoTokenizer
from evo_model import EvoTransformerV22
from search_utils import web_search
import openai
import time
import psutil
import platform

openai.api_key = os.getenv("OPENAI_API_KEY")
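# Note: the GPT fallback below needs OPENAI_API_KEY set in the environment;
# without it, get_gpt_response will return an error string instead of raising.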

MODEL_PATH = "evo_hellaswag.pt"
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = None
last_mod_time = 0

# 🔄 Load Evo model with auto-reload
def load_model():
    global model, last_mod_time
    try:
        current_mod_time = os.path.getmtime(MODEL_PATH)
        if model is None or current_mod_time > last_mod_time:
            model = EvoTransformerV22()
            model.load_state_dict(torch.load(MODEL_PATH, map_location="cpu"))
            model.eval()
            last_mod_time = current_mod_time
            print("✅ Evo model loaded.")
    except Exception as e:
        print(f"❌ Error loading Evo model: {e}")
        model = None
    return model

# 🔮 Evo inference core logic
def evo_infer(query, options, user_context=""):
    model = load_model()
    if model is None:
        return "Model Error", 0.0, "Model not available", ""

    def is_fact_or_math(q):
        q_lower = q.lower()
        return any(char.isdigit() for char in q_lower) or any(op in q_lower for op in ["+", "-", "*", "/", "=", "what is", "solve", "calculate"])

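    # Heuristic: arithmetic/factual queries skip web search and use only the
    # caller-supplied context; everything else pulls web results for grounding.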
    if is_fact_or_math(query):
        context_str = user_context or ""
    else:
        search_results = web_search(query)
        context_str = "\n".join(search_results + ([user_context] if user_context else []))

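    # Each option is scored independently: the model sees one string per option,
    # "query [SEP] option [CTX] context", and emits a single logit for it.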
    input_pairs = [f"{query} [SEP] {opt} [CTX] {context_str}" for opt in options]

    scores = []
    for pair in input_pairs:
        encoded = tokenizer(pair, return_tensors="pt", padding="max_length", truncation=True, max_length=128)
        with torch.no_grad():
            logits = model(encoded["input_ids"])
            score = torch.sigmoid(logits).item()
            scores.append(score)

    # Pick the highest-scoring option (generalizes to any number of options,
    # matching the loop above instead of hardcoding two choices).
    best_idx = scores.index(max(scores))
    return (
        options[best_idx],
        scores[best_idx],
        " vs ".join(f"{opt}: {score:.3f}" for opt, score in zip(options, scores)),
        context_str
    )

# 🤖 GPT fallback (for comparison)
def get_gpt_response(query, user_context=""):
    try:
        context_block = f"\n\nContext:\n{user_context}" if user_context else ""
        response = openai.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": query + context_block}],
            temperature=0.7,
        )
        return response.choices[0].message.content.strip()
    except Exception as e:
        return f"⚠️ GPT error:\n{str(e)}"

# 🧠 Live Evo prediction logic
def evo_chat_predict(history, query, options):
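    # History may be a plain list of strings or a pandas Series-like object
    # (anything with .empty/.tail); on any failure the context degrades to "".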
    try:
        if isinstance(history, list):
            context = "\n".join(history[-6:])
        elif hasattr(history, "empty") and not history.empty:
            context = "\n".join(history.tail(6).astype(str).tolist())
        else:
            context = ""
    except Exception:
        context = ""

    evo_ans, evo_score, evo_reason, evo_ctx = evo_infer(query, options, context)
    return {
        "answer": evo_ans,
        "confidence": round(evo_score, 3),
        "reasoning": evo_reason,
        "context_used": evo_ctx
    }

# 📊 Evo model config metadata
def get_model_config():
    return {
        "num_layers": 6,
        "num_heads": 8,
        "ffn_dim": 1024,
        "memory_enabled": True,
        "phase": "v2.2",
        "accuracy": "~64.5%"
    }

# 🖥️ Runtime stats
def get_system_stats():
    gpu_info = torch.cuda.get_device_properties(0) if torch.cuda.is_available() else None
    memory = psutil.virtual_memory()
    return {
        "device": "GPU" if torch.cuda.is_available() else "CPU",
        "cpu_usage_percent": psutil.cpu_percent(),
        "memory_used_gb": round(memory.used / (1024 ** 3), 2),
        "memory_total_gb": round(memory.total / (1024 ** 3), 2),
        "gpu_name": gpu_info.name if gpu_info else "N/A",
        "gpu_memory_total_gb": round(gpu_info.total_memory / (1024 ** 3), 2) if gpu_info else "N/A",
        "gpu_memory_used_gb": round(torch.cuda.memory_allocated() / (1024 ** 3), 2) if gpu_info else "N/A",
        "platform": platform.platform()
    }

# 🔁 Retrain from in-memory feedback_log
def retrain_from_feedback(feedback_log):
    if not feedback_log:
        return "⚠️ No feedback data to retrain from."

    model = load_model()
    if model is None:
        return "❌ Evo model not available."

    model.train()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)

    for row in feedback_log:
        question, opt1, opt2, answer, *_ = row

        # Train both option pairs, mirroring how evo_infer scores each pair
        # independently: the chosen answer is trained toward 1.0, the other
        # option toward 0.0 (feedback rows carry no [CTX] context block).
        for opt in (opt1, opt2):
            label = torch.tensor(1.0 if answer.strip() == opt.strip() else 0.0)
            input_text = f"{question} [SEP] {opt}"
            encoded = tokenizer(input_text, return_tensors="pt", padding="max_length", truncation=True, max_length=128)

            optimizer.zero_grad()
            logits = model(encoded["input_ids"])
            loss = F.binary_cross_entropy_with_logits(logits.squeeze(), label)
            loss.backward()
            optimizer.step()

    torch.save(model.state_dict(), MODEL_PATH)
    return "✅ Evo retrained from feedback and saved; it will auto-reload on next use."
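
# 🧪 Minimal usage sketch (hypothetical inputs; assumes evo_hellaswag.pt exists
# alongside this file — otherwise evo_infer returns "Model Error").
if __name__ == "__main__":
    result = evo_chat_predict(
        history=["Q: What is the capital of France?", "A: Paris"],
        query="What is 12 * 12?",
        options=["144", "124"],
    )
    print(result)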