File size: 4,073 Bytes
1622676
b0ba5ba
1dec93e
5340d71
b0ba5ba
5340d71
1622676
da4e68d
b0ba5ba
1dec93e
1622676
da4e68d
1622676
b0ba5ba
3eaea0f
1622676
1dec93e
 
3ec70fc
ac8c35d
b0ba5ba
 
 
 
 
 
 
ac8c35d
1dec93e
b0ba5ba
1dec93e
 
b0ba5ba
1dec93e
 
 
 
b0ba5ba
 
1dec93e
b0ba5ba
1dec93e
 
 
b0ba5ba
 
 
 
 
 
 
 
1dec93e
b0ba5ba
 
 
 
 
 
 
 
 
 
 
 
1622676
 
 
b0ba5ba
3eaea0f
b0ba5ba
1622676
3eaea0f
b0ba5ba
1622676
b0ba5ba
 
c67fe91
b0ba5ba
 
 
 
 
3eaea0f
1dec93e
1622676
1dec93e
b0ba5ba
3eaea0f
 
1622676
 
 
 
b0ba5ba
1622676
 
 
b0ba5ba
3eaea0f
1622676
 
 
b0ba5ba
 
 
 
 
 
 
 
3eaea0f
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
import gradio as gr
from inference import evo_chat_predict, get_gpt_response, get_model_config
from logger import log_feedback
import subprocess
import os

chat_history = []

# πŸ” Handle chat logic
def chat_fn(user_input, option1, option2, user_vote=None):
    """Run one Evo-vs-GPT comparison round.

    Args:
        user_input: The user's question.
        option1: First candidate answer.
        option2: Second candidate answer.
        user_vote: Optional "Evo"/"GPT" preference used for feedback logging.

    Returns:
        Tuple of (markdown reply, chat history list, model-config markdown,
        genome-log summary string) — the four UI outputs wired in the Blocks UI.
    """
    global chat_history

    # Normalize BEFORE validating: the original checked truthiness first,
    # so whitespace-only input like "   " slipped past the guard.
    user_input = (user_input or "").strip()
    options = [(option1 or "").strip(), (option2 or "").strip()]

    if not user_input or not all(options):
        return "❗ Please provide a question and two options.", chat_history, "", ""

    evo_result = evo_chat_predict(chat_history, user_input, options)
    gpt_response = get_gpt_response(user_input)

    evo_msg = (
        f"### 🤖 Evo\n"
        f"**Answer:** {evo_result['answer']}  \n"
        f"**Reasoning:** {evo_result['reasoning']}\n\n"
        f"---\n"
        f"### 🧠 GPT-3.5\n"
        f"{gpt_response}"
    )

    chat_history.append(f"👤 User: {user_input}")
    chat_history.append(f"🤖 Evo: {evo_result['answer']}")
    chat_history.append(f"🧠 GPT: {gpt_response}")

    # Log feedback (stripped options, so the log matches what Evo saw)
    log_feedback(
        question=user_input,
        option1=options[0],
        option2=options[1],
        context=evo_result["context_used"],
        evo_output=evo_result["answer"],
        gpt_output=gpt_response,
        evo_reasoning=evo_result["reasoning"],
        user_preference=user_vote
    )

    # Show genome config
    config = get_model_config()
    config_str = (
        f"**Layers:** {config['num_layers']}  \n"
        f"**Heads:** {config['num_heads']}  \n"
        f"**FFN Dim:** {config['ffn_dim']}  \n"
        f"**Memory:** {'Enabled' if config['memory_enabled'] else 'Disabled'}"
    )

    # Load latest genome score from the CSV tail (header on line 1).
    log_txt = "No genome log yet."
    if os.path.exists("genome_log.csv"):
        with open("genome_log.csv", "r", encoding="utf-8") as f:
            lines = f.readlines()
        if len(lines) > 1:
            last = lines[-1].strip().split(",")
            # Guard against a trailing blank/partial line in the log.
            if last and last[0]:
                log_txt = f"🧬 Genome ID: {last[0]} | Accuracy: {last[-1]}"

    return evo_msg, chat_history, config_str, log_txt

# πŸ” Clear everything
def clear_fn():
    global chat_history
    chat_history = []
    return "", "", "", None, [], "", ""

# πŸ“ˆ Retrain
def retrain_model():
    try:
        subprocess.run(["python", "retrain_from_feedback.py"], check=True)
        return "βœ… Evo retrained successfully."
    except Exception as e:
        return f"❌ Retraining failed: {str(e)}"

# ⬇️ Download feedback
def export_feedback():
    """Return the feedback CSV path for download, or None if no log exists yet."""
    path = "feedback_log.csv"
    return path if os.path.exists(path) else None

# 🌐 Gradio UI
with gr.Blocks(title="EvoRAG – Real-Time Adaptive Reasoning AI") as demo:
    gr.Markdown("## 🧬 EvoRAG – Real-Time Adaptive Reasoning AI")
    gr.Markdown("Ask Evo a question and give two options. Evo chooses, explains, and evolves. Compare with GPT-3.5.")

    with gr.Row():
        with gr.Column(scale=4):
            user_input = gr.Textbox(label="Your Question", lines=2)
            option1 = gr.Textbox(label="Option 1")
            option2 = gr.Textbox(label="Option 2")
            user_vote = gr.Radio(["Evo", "GPT"], label="🗳️ Who gave the better answer?", info="Optional – helps Evo learn.")
            submit = gr.Button("🧠 Ask Evo")
            clear = gr.Button("🔁 Clear")
            retrain = gr.Button("📈 Retrain Evo from Feedback")
            export = gr.Button("⬇️ Export Feedback CSV")
            # File component to receive the exported CSV path; without it the
            # export button's return value had nowhere to go.
            export_file = gr.File(label="⬇️ Feedback CSV")

        with gr.Column(scale=6):
            evo_reply = gr.Markdown()
            # NOTE(review): chat_fn returns a list of plain strings here, but
            # HighlightedText conventionally expects (text, label) pairs —
            # confirm rendering, or consider gr.Textbox/gr.JSON instead.
            chat_display = gr.HighlightedText(label="Conversation History")
            model_info = gr.Markdown(label="🧠 Evo Architecture")
            genome_log = gr.Markdown(label="📊 Evolution Log")

    submit.click(chat_fn, inputs=[user_input, option1, option2, user_vote],
                 outputs=[evo_reply, chat_display, model_info, genome_log])
    clear.click(clear_fn, outputs=[user_input, option1, option2, user_vote, chat_display, model_info, genome_log])
    retrain.click(retrain_model, outputs=evo_reply)
    # FIX: was outputs=[], which silently discarded the returned CSV path and
    # made the export button a no-op. Route it into the File component.
    export.click(export_feedback, outputs=export_file)

demo.launch()