import os

# πŸ›‘οΈ Ensure model is saved before importing anything that loads it
if not os.path.exists("trained_model/config.json"):
    print("βš™οΈ No model found. Initializing and saving EvoTransformer...")
    from init_model import initialize_and_save_model
    initialize_and_save_model()
else:
    print("βœ… EvoTransformer already initialized.")

import gradio as gr
import random
from inference import generate_response
from logger import log_user_feedback
from dashboard import update_dashboard_plot
from watchdog import retrain_model
from init_model import load_model

# === Load the model so its architecture can be summarized in the UI ===
model = load_model()

def get_architecture_summary(model):
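    """Return a short human-readable summary of the model's architecture,
    falling back to "N/A" for any attribute the model does not expose."""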
    summary = {
        "Layers": getattr(model, "num_layers", "N/A"),
        "Attention Heads": getattr(model, "num_heads", "N/A"),
        "FFN Dim": getattr(model, "ffn_dim", "N/A"),
        "Memory Enabled": getattr(model, "use_memory", "N/A"),
    }
    return "\n".join(f"{k}: {v}" for k, v in summary.items())

# 🎲 Random examples
examples = [
    {
        "goal": "Escape from a burning house",
        "option1": "Run out through the front door",
        "option2": "Hide in the bathroom"
    },
    {
        "goal": "Improve sleep quality",
        "option1": "Use phone in bed",
        "option2": "Turn off screens 1 hour before bed"
    },
    {
        "goal": "Increase productivity at work",
        "option1": "Multitask all day",
        "option2": "Use Pomodoro technique"
    },
    {
        "goal": "Lose weight safely",
        "option1": "Skip meals",
        "option2": "Exercise regularly and eat balanced meals"
    },
    {
        "goal": "Ace an exam",
        "option1": "Cram the night before",
        "option2": "Study consistently for 2 weeks"
    },
]

def load_random_example():
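    """Pick one of the canned examples and return (goal, option1, option2)."""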
    example = random.choice(examples)
    return example["goal"], example["option1"], example["option2"]

# 🧠 Model suggestion UI
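# These output components are created before the Blocks layout so the event
# handlers below can reference them; they are attached to the page via .render().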
evo_output = gr.Textbox(label="🧠 EvoTransformer Suggestion")
gpt_output = gr.Textbox(label="πŸ’¬ GPT-3.5 Suggestion")
feedback_output = gr.Textbox(visible=False)

def evo_chat(goal, sol1, sol2):
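    """Query the backend for both suggestions.

    generate_response() is expected to return a dict containing
    'evo_suggestion' and 'gpt_suggestion' keys (see the .get() calls below).
    """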
    response = generate_response(goal, sol1, sol2)
    evo = response.get("evo_suggestion", "Error")
    gpt = response.get("gpt_suggestion", "Error")
    return evo, gpt

def handle_feedback(goal, sol1, sol2, winner):
    """Log the user's preferred option and surface the result in the status box."""
    try:
        log_user_feedback(goal, sol1, sol2, winner)
        # The status box starts hidden (visible=False), so reveal it along with the message.
        return gr.update(value="βœ… Feedback logged. Thank you!", visible=True)
    except Exception as e:
        return gr.update(value=f"❌ Failed to log: {e}", visible=True)

with gr.Blocks(title="EvoTransformer v2.1 – Compare Options and Learn") as demo:
    gr.Markdown("## 🧠 EvoTransformer v2.1 – Compare Options and Learn")

    with gr.Row():
        goal_input = gr.Textbox(label="Goal", placeholder="e.g. Escape from house on fire")
    with gr.Row():
        option1_input = gr.Textbox(label="Option 1", placeholder="e.g. Exit house through main door")
        option2_input = gr.Textbox(label="Option 2", placeholder="e.g. Hide under bed")

    with gr.Row():
        compare_btn = gr.Button("πŸ” Compare")
        random_btn = gr.Button("🎲 Load Random Example")

    with gr.Row():
        evo_output.render()
        gpt_output.render()

    with gr.Row():
        winner_dropdown = gr.Radio(["Solution 1", "Solution 2"], label="Which was better?")
        feedback_btn = gr.Button("βœ… Log Feedback")

    feedback_output.render()

    compare_btn.click(
        fn=evo_chat,
        inputs=[goal_input, option1_input, option2_input],
        outputs=[evo_output, gpt_output]
    )

    random_btn.click(
        fn=load_random_example,
        inputs=[],
        outputs=[goal_input, option1_input, option2_input]
    )

    feedback_btn.click(
        fn=handle_feedback,
        inputs=[goal_input, option1_input, option2_input, winner_dropdown],
        outputs=[feedback_output]
    )

    with gr.Row():
        gr.Markdown("### πŸ“Š Dashboard")
        # Assumes update_dashboard_plot() returns a figure; pass it as the Plot's
        # initial value instead of overwriting the component reference.
        dashboard_plot = gr.Plot(value=update_dashboard_plot())

    with gr.Row():
        retrain_button = gr.Button("♻️ Retrain Evo")
        retrain_status = gr.Textbox(label="Retrain Status")

    retrain_button.click(fn=retrain_model, inputs=[], outputs=[retrain_status])

    with gr.Accordion("🧬 EvoTransformer Architecture", open=False):
        arch_box = gr.Textbox(
            label="Model Configuration",
            value=get_architecture_summary(model),
            lines=5,
            interactive=False,
        )

if __name__ == "__main__":
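    # share=True requests a temporary public Gradio link when run locally;
    # hosted platforms typically ignore this flag.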
    demo.launch(share=True)