HemanM committed on
Commit ec9b863 · verified
1 Parent(s): d9a17fb

Update app.py

Files changed (1):
  app.py +80 -124
app.py CHANGED
@@ -1,134 +1,90 @@
 import gradio as gr
-from inference import evo_chat_predict, get_gpt_response, get_model_config
-from logger import log_feedback
-import subprocess
 import os
+from inference import evo_infer, gpt_infer
+from retrain_from_feedback import train_evo
+from logger import log_feedback
 
-chat_history = []
-
-# 🔁 Handle chat logic
-def chat_fn(user_input, option1, option2, user_vote=None):
-    global chat_history
-
-    if not user_input or not option1 or not option2:
-        return "❗ Please provide a question and two options.", chat_history, "", "", ""
-
-    options = [option1.strip(), option2.strip()]
-    evo_result = evo_chat_predict(chat_history, user_input, options)
-    gpt_response = get_gpt_response(user_input)
-
-    evo_msg = (
-        f"### 🤖 Evo\n"
-        f"**Answer:** {evo_result['answer']} \n"
-        f"**Reasoning:** {evo_result['reasoning']}\n\n"
-        f"---\n"
-        f"### 🧠 GPT-3.5\n"
-        f"{gpt_response}"
-    )
-
-    chat_history.append(f"👤 User: {user_input}")
-    chat_history.append(f"🤖 Evo: {evo_result['answer']}")
-    chat_history.append(f"🧠 GPT: {gpt_response}")
-
-    # Log feedback
-    log_feedback(
-        question=user_input,
-        option1=option1,
-        option2=option2,
-        context=evo_result["context_used"],
-        evo_output=evo_result["answer"],
-        gpt_output=gpt_response,
-        evo_reasoning=evo_result["reasoning"],
-        user_preference=user_vote
-    )
-
-    # Show genome config
-    config = get_model_config()
-    config_str = (
-        f"**Layers:** {config['num_layers']} \n"
-        f"**Heads:** {config['num_heads']} \n"
-        f"**FFN Dim:** {config['ffn_dim']} \n"
-        f"**Memory:** {'Enabled' if config['memory_enabled'] else 'Disabled'}"
-    )
-
-    # Load latest genome score
-    log_txt = "No genome log yet."
-    if os.path.exists("genome_log.csv"):
-        with open("genome_log.csv", "r", encoding="utf-8") as f:
-            lines = f.readlines()
-        if len(lines) > 1:
-            last = lines[-1].strip().split(",")
-            log_txt = f"🧬 Genome ID: {last[0]} | Accuracy: {last[-1]}"
-
-    return evo_msg, chat_history, config_str, log_txt, why_evo_panel()
-
-# 🧠 Static "Why Evo?" panel
-def why_evo_panel():
-    return (
-        "### 🚀 Why Evo?\n"
-        "- Learns from your input — evolves in real time\n"
-        "- Adaptive architecture (changes #layers, memory, etc.)\n"
-        "- Tiny model (~13M–28M params) vs GPT-3.5 (175B)\n"
-        "- Runs on CPU or low-end GPUs\n"
-        "- Transparent architecture: shows how it thinks\n"
-        "- Can be deployed, fine-tuned, and evolved per user/domain"
-    )
-
-# 🔁 Clear everything
-def clear_fn():
-    global chat_history
-    chat_history = []
-    return "", "", "", None, [], "", ""
-
-# 📈 Retrain
-def retrain_model():
-    try:
-        subprocess.run(["python", "retrain_from_feedback.py"], check=True)
-        return "✅ Evo retrained successfully."
-    except Exception as e:
-        return f"❌ Retraining failed: {str(e)}"
-
-# ⬇️ Download feedback
-def export_feedback():
-    if os.path.exists("feedback_log.csv"):
-        return "feedback_log.csv"
-    return None
+question = gr.Textbox(label="🧠 Your Question", placeholder="e.g. What should you do if there's a fire?", lines=1)
+option1 = gr.Textbox(label="🅰️ Option 1", placeholder="Enter the first option")
+option2 = gr.Textbox(label="🅱️ Option 2", placeholder="Enter the second option")
+choice = gr.Radio(["Evo", "GPT"], label="🗳️ Who was better?", info="Optional – fuels evolution", type="value")
+
+evo_out = gr.Textbox(label="🤖 Evo", interactive=False)
+gpt_out = gr.Textbox(label="🧠 GPT-3.5", interactive=False)
+history = gr.Textbox(label="📜 Conversation History", interactive=False)
+evo_stats = gr.Textbox(label="📊 Evo Stats", value="Layers: 6 | Heads: 8 | FFN: 1024 | Memory: ✅ | Accuracy: ~64.5% | Phase: v2.2", interactive=False)
+
+def run_inference(q, o1, o2, winner):
+    evo_answer, evo_reasoning = evo_infer(q, o1, o2)
+    gpt_answer = gpt_infer(q, o1, o2)
+    context = f"Question: {q}\nOptions: {o1}, {o2}"
+    log_feedback(q, o1, o2, context, evo_answer, gpt_answer, evo_reasoning, winner)
+    conv_log = f"👤 {q}\n🅰️ {o1} | 🅱️ {o2}\n🤖 Evo: {evo_answer} ({evo_reasoning})\n🧠 GPT: {gpt_answer}"
+    return evo_answer, gpt_answer, conv_log
+
+def clear():
+    return "", "", "", None, "", "", ""
+
+def export_csv():
+    return gr.File("feedback_log.csv")
+
+def retrain():
+    train_evo()
+    return "🔁 Evo model reloaded."
+
+with gr.Blocks(theme=gr.themes.Soft(), css="""
+body { background-color: #f3f6fb; font-family: 'Segoe UI', sans-serif; }
+.gradio-container { max-width: 1024px; margin: auto; }
+.gr-box { box-shadow: 0 4px 16px rgba(0,0,0,0.1); border-radius: 12px; padding: 16px; transition: all 0.3s ease-in-out; }
+.gr-button { border-radius: 8px; font-weight: 600; transition: all 0.2s ease-in-out; }
+.gr-button:hover { transform: scale(1.03); background-color: #e6f2ff; }
+.gr-textbox, .gr-radio { border-radius: 8px; }
+""") as demo:
+
+    gr.Markdown("""
+    <h1 style="font-size: 2.2em;">🧠 EvoRAG – Real-Time Reasoning AI</h1>
+    <p><b>Built Different. Learns Live. Evolves from You.</b></p>
+    <div style="margin-top: 10px; font-size: 0.9em;">
+    <ul>
+        <li>🚀 <b>Why Evo?</b></li>
+        <li>✔️ Learns from your input — evolves in real time</li>
+        <li>✔️ Adaptive architecture (changes #layers, memory, etc.)</li>
+        <li>✔️ Tiny model (~13M–28M params) vs GPT-3.5 (175B)</li>
+        <li>✔️ Runs on CPU or low-end GPUs</li>
+        <li>✔️ Transparent architecture: shows how it thinks</li>
+        <li>✔️ Can be deployed, fine-tuned, and evolved per user/domain</li>
+    </ul>
+    </div>
+    """)
 
-# 🌐 Gradio UI
-with gr.Blocks(title="EvoRAG – Real-Time Adaptive Reasoning AI", css="body { font-family: 'Segoe UI', sans-serif; background-color: #f8f9fa; }") as demo:
     with gr.Row():
-        with gr.Column(scale=2):
-            gr.Markdown("""
-            # 🧠 EvoRAG
-            ### Built Different. Learns Live. Evolves from You.
-            """)
-            gr.Markdown(why_evo_panel(), elem_id="why-evo")
+        with gr.Column():
+            question.render()
+            option1.render()
+            option2.render()
+            choice.render()
+
+        with gr.Column():
+            ask_btn = gr.Button("💡 Ask Evo", variant="primary")
+            retrain_btn = gr.Button("🔁 Retrain Evo")
+            clear_btn = gr.Button("🧹 Clear")
+            export_btn = gr.Button("⬇️ Export Feedback CSV")
+            evo_stats.render()
 
-        with gr.Column(scale=5):
-            user_input = gr.Textbox(label="Your Question", lines=2, placeholder="e.g. What should you do if there's a fire?")
-            option1 = gr.Textbox(label="Option 1")
-            option2 = gr.Textbox(label="Option 2")
-            user_vote = gr.Radio(["Evo", "GPT"], label="🗳️ Who was better?", info="Optional – fuels evolution")
-            with gr.Row():
-                submit = gr.Button("🧠 Ask Evo")
-                clear = gr.Button("🔁 Clear")
-            with gr.Row():
-                retrain = gr.Button("📈 Retrain Evo")
-                export = gr.Button("⬇️ Export Feedback CSV")
+    with gr.Row():
+        evo_out.render()
+        gpt_out.render()
 
     with gr.Row():
-        with gr.Column(scale=6):
-            evo_reply = gr.Markdown()
-            chat_display = gr.HighlightedText(label="Conversation History")
-        with gr.Column(scale=4):
-            model_info = gr.Markdown(label="🧠 Evo Architecture")
-            genome_log = gr.Markdown(label="📊 Evolution Log")
-            evo_why = gr.Markdown(label="🔬 Why Evo?")
+        retrain_status = gr.Textbox(label="🔁 Status", interactive=False)
+
+    with gr.Accordion("📚 Conversation History", open=True):
+        history.render()
 
-    submit.click(chat_fn, inputs=[user_input, option1, option2, user_vote],
-                 outputs=[evo_reply, chat_display, model_info, genome_log, evo_why])
-    clear.click(clear_fn, outputs=[user_input, option1, option2, user_vote, chat_display, model_info, genome_log])
-    retrain.click(retrain_model, outputs=evo_reply)
-    export.click(export_feedback, outputs=[])
+    ask_btn.click(fn=run_inference, inputs=[question, option1, option2, choice], outputs=[evo_out, gpt_out, history])
+    clear_btn.click(fn=clear, outputs=[question, option1, option2, choice, evo_out, gpt_out, history])
+    export_btn.click(fn=export_csv, outputs=[])
+    retrain_btn.click(fn=retrain, outputs=[retrain_status])
 
-demo.launch()
+if __name__ == "__main__":
+    demo.launch(server_name="0.0.0.0", server_port=int(os.environ.get("PORT", 7860)))
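
Note: the new app.py depends on three helper modules whose interfaces are only implied by the call sites in the diff above. A minimal sketch of stubs consistent with those calls (signatures inferred from how they are used; the bodies are placeholders, not the repository's actual implementations):

```python
# Sketch only: signatures inferred from the new app.py's call sites;
# bodies are placeholders, not EvoRAG's real code.

# inference.py: evo_infer is unpacked into (answer, reasoning); gpt_infer returns a string.
def evo_infer(question: str, option1: str, option2: str) -> tuple[str, str]:
    return option1, "placeholder reasoning"

def gpt_infer(question: str, option1: str, option2: str) -> str:
    return option2

# logger.py: positional order matches the log_feedback(...) call in run_inference.
def log_feedback(question, option1, option2, context,
                 evo_output, gpt_output, evo_reasoning, user_preference):
    pass  # e.g. append one row to feedback_log.csv

# retrain_from_feedback.py: invoked by the "Retrain Evo" button handler.
def train_evo():
    pass  # retrain/reload Evo from the accumulated feedback
```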