HemanM committed on
Commit
d9a17fb
·
verified ·
1 Parent(s): cf51319

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +59 -35
app.py CHANGED
@@ -1,17 +1,17 @@
1
  import gradio as gr
2
  from inference import evo_chat_predict, get_gpt_response, get_model_config
3
  from logger import log_feedback
4
- import subprocess, os
 
5
 
6
  chat_history = []
7
 
8
- # πŸ” Core logic
9
-
10
  def chat_fn(user_input, option1, option2, user_vote=None):
11
  global chat_history
12
 
13
  if not user_input or not option1 or not option2:
14
- return "❗ Provide a question and two options.", chat_history, "", ""
15
 
16
  options = [option1.strip(), option2.strip()]
17
  evo_result = evo_chat_predict(chat_history, user_input, options)
@@ -19,9 +19,8 @@ def chat_fn(user_input, option1, option2, user_vote=None):
19
 
20
  evo_msg = (
21
  f"### πŸ€– Evo\n"
22
- f"**Answer:** {evo_result['answer']}\n"
23
- f"**Reasoning:** {evo_result['reasoning']}\n"
24
- f"**Confidence:** {evo_result['confidence']}\n"
25
  f"---\n"
26
  f"### 🧠 GPT-3.5\n"
27
  f"{gpt_response}"
@@ -31,6 +30,7 @@ def chat_fn(user_input, option1, option2, user_vote=None):
31
  chat_history.append(f"πŸ€– Evo: {evo_result['answer']}")
32
  chat_history.append(f"🧠 GPT: {gpt_response}")
33
 
 
34
  log_feedback(
35
  question=user_input,
36
  option1=option1,
@@ -42,13 +42,17 @@ def chat_fn(user_input, option1, option2, user_vote=None):
42
  user_preference=user_vote
43
  )
44
 
 
45
  config = get_model_config()
46
  config_str = (
47
- f"**Layers:** {config['num_layers']} | **Heads:** {config['num_heads']} | "
48
- f"**FFN:** {config['ffn_dim']} | **Memory:** {'βœ…' if config['memory_enabled'] else '❌'}"
 
 
49
  )
50
 
51
- log_txt = "Genome log not found."
 
52
  if os.path.exists("genome_log.csv"):
53
  with open("genome_log.csv", "r", encoding="utf-8") as f:
54
  lines = f.readlines()
@@ -56,55 +60,75 @@ def chat_fn(user_input, option1, option2, user_vote=None):
56
  last = lines[-1].strip().split(",")
57
  log_txt = f"🧬 Genome ID: {last[0]} | Accuracy: {last[-1]}"
58
 
59
- return evo_msg, chat_history, config_str, log_txt
60
-
 
 
 
 
 
 
 
 
 
 
 
61
 
 
62
  def clear_fn():
63
  global chat_history
64
  chat_history = []
65
  return "", "", "", None, [], "", ""
66
 
67
-
68
  def retrain_model():
69
  try:
70
  subprocess.run(["python", "retrain_from_feedback.py"], check=True)
71
- return "βœ… Evo retrained."
72
  except Exception as e:
73
- return f"❌ Retrain error: {str(e)}"
74
-
75
 
 
76
  def export_feedback():
77
- return "feedback_log.csv" if os.path.exists("feedback_log.csv") else None
78
-
79
-
80
- # πŸš€ UI Launch
81
- with gr.Blocks(title="EvoRAG – Real-Time Reasoning AI") as demo:
82
- gr.Markdown("## 🧠 EvoRAG – Real-Time Reasoning AI")
83
- gr.Markdown("Built Different. Learns Live. Evolves from You.")
84
 
 
 
85
  with gr.Row():
86
- with gr.Column(scale=4):
87
- user_input = gr.Textbox(label="Your Question", lines=2)
 
 
 
 
 
 
 
88
  option1 = gr.Textbox(label="Option 1")
89
  option2 = gr.Textbox(label="Option 2")
90
  user_vote = gr.Radio(["Evo", "GPT"], label="πŸ—³οΈ Who was better?", info="Optional – fuels evolution")
91
- submit = gr.Button("Ask Evo")
92
- clear = gr.Button("Clear")
93
- retrain = gr.Button("Retrain Evo")
94
- export = gr.Button("Export Feedback")
 
 
95
 
 
96
  with gr.Column(scale=6):
97
  evo_reply = gr.Markdown()
98
  chat_display = gr.HighlightedText(label="Conversation History")
 
99
  model_info = gr.Markdown(label="🧠 Evo Architecture")
100
- genome_log = gr.Markdown(label="πŸ“Š Last Evolution")
 
101
 
102
  submit.click(chat_fn, inputs=[user_input, option1, option2, user_vote],
103
- outputs=[evo_reply, chat_display, model_info, genome_log])
104
  clear.click(clear_fn, outputs=[user_input, option1, option2, user_vote, chat_display, model_info, genome_log])
105
  retrain.click(retrain_model, outputs=evo_reply)
106
- export.click(export_feedback, outputs=[gr.File(label="πŸ“₯ Download Feedback CSV")])
107
-
108
 
109
- if __name__ == "__main__":
110
- demo.launch()
 
1
  import gradio as gr
2
  from inference import evo_chat_predict, get_gpt_response, get_model_config
3
  from logger import log_feedback
4
+ import subprocess
5
+ import os
6
 
7
  chat_history = []
8
 
9
+ # πŸ” Handle chat logic
 
10
  def chat_fn(user_input, option1, option2, user_vote=None):
11
  global chat_history
12
 
13
  if not user_input or not option1 or not option2:
14
+ return "❗ Please provide a question and two options.", chat_history, "", "", ""
15
 
16
  options = [option1.strip(), option2.strip()]
17
  evo_result = evo_chat_predict(chat_history, user_input, options)
 
19
 
20
  evo_msg = (
21
  f"### πŸ€– Evo\n"
22
+ f"**Answer:** {evo_result['answer']} \n"
23
+ f"**Reasoning:** {evo_result['reasoning']}\n\n"
 
24
  f"---\n"
25
  f"### 🧠 GPT-3.5\n"
26
  f"{gpt_response}"
 
30
  chat_history.append(f"πŸ€– Evo: {evo_result['answer']}")
31
  chat_history.append(f"🧠 GPT: {gpt_response}")
32
 
33
+ # Log feedback
34
  log_feedback(
35
  question=user_input,
36
  option1=option1,
 
42
  user_preference=user_vote
43
  )
44
 
45
+ # Show genome config
46
  config = get_model_config()
47
  config_str = (
48
+ f"**Layers:** {config['num_layers']} \n"
49
+ f"**Heads:** {config['num_heads']} \n"
50
+ f"**FFN Dim:** {config['ffn_dim']} \n"
51
+ f"**Memory:** {'Enabled' if config['memory_enabled'] else 'Disabled'}"
52
  )
53
 
54
+ # Load latest genome score
55
+ log_txt = "No genome log yet."
56
  if os.path.exists("genome_log.csv"):
57
  with open("genome_log.csv", "r", encoding="utf-8") as f:
58
  lines = f.readlines()
 
60
  last = lines[-1].strip().split(",")
61
  log_txt = f"🧬 Genome ID: {last[0]} | Accuracy: {last[-1]}"
62
 
63
+ return evo_msg, chat_history, config_str, log_txt, why_evo_panel()
64
+
65
# 🧠 Static "Why Evo?" panel
def why_evo_panel():
    """Return the static Markdown blurb that pitches Evo's advantages.

    The text is constant; it is rendered both in the left branding column
    and refreshed into the "Why Evo?" panel after each chat turn.
    """
    bullets = [
        "- Learns from your input — evolves in real time",
        "- Adaptive architecture (changes #layers, memory, etc.)",
        "- Tiny model (~13M–28M params) vs GPT-3.5 (175B)",
        "- Runs on CPU or low-end GPUs",
        "- Transparent architecture: shows how it thinks",
        "- Can be deployed, fine-tuned, and evolved per user/domain",
    ]
    return "### 🚀 Why Evo?\n" + "\n".join(bullets)
76
 
77
+ # πŸ” Clear everything
78
  def clear_fn():
79
  global chat_history
80
  chat_history = []
81
  return "", "", "", None, [], "", ""
82
 
83
# 📈 Retrain
def retrain_model():
    """Run the feedback-based retraining script as a subprocess.

    Returns a user-facing status string and never raises: any failure is
    folded into the "❌" message so the Gradio callback always gets text.
    """
    import sys  # local import keeps this fix self-contained

    try:
        # FIX: use sys.executable instead of a bare "python" so retraining
        # runs under the same interpreter/venv as the app ("python" may be
        # absent or resolve to a different environment on the host).
        subprocess.run([sys.executable, "retrain_from_feedback.py"], check=True)
        return "✅ Evo retrained successfully."
    except Exception as e:
        return f"❌ Retraining failed: {str(e)}"
 
90
 
91
# ⬇️ Download feedback
def export_feedback():
    """Return the feedback CSV path for download, or None when it is absent."""
    feedback_path = "feedback_log.csv"
    return feedback_path if os.path.exists(feedback_path) else None
 
 
 
 
96
 
97
# 🌐 Gradio UI
with gr.Blocks(
    title="EvoRAG – Real-Time Adaptive Reasoning AI",
    css="body { font-family: 'Segoe UI', sans-serif; background-color: #f8f9fa; }",
) as demo:
    with gr.Row():
        # Left column: branding plus the static "Why Evo?" pitch.
        with gr.Column(scale=2):
            gr.Markdown("""
            # 🧠 EvoRAG
            ### Built Different. Learns Live. Evolves from You.
            """)
            gr.Markdown(why_evo_panel(), elem_id="why-evo")

        # Right column: the question form and action buttons.
        with gr.Column(scale=5):
            user_input = gr.Textbox(label="Your Question", lines=2,
                                    placeholder="e.g. What should you do if there's a fire?")
            option1 = gr.Textbox(label="Option 1")
            option2 = gr.Textbox(label="Option 2")
            user_vote = gr.Radio(["Evo", "GPT"], label="🗳️ Who was better?",
                                 info="Optional – fuels evolution")
            with gr.Row():
                submit = gr.Button("🧠 Ask Evo")
                clear = gr.Button("🔁 Clear")
            with gr.Row():
                retrain = gr.Button("📈 Retrain Evo")
                export = gr.Button("⬇️ Export Feedback CSV")

    with gr.Row():
        with gr.Column(scale=6):
            evo_reply = gr.Markdown()
            chat_display = gr.HighlightedText(label="Conversation History")
        with gr.Column(scale=4):
            model_info = gr.Markdown(label="🧠 Evo Architecture")
            genome_log = gr.Markdown(label="📊 Evolution Log")
            evo_why = gr.Markdown(label="🔬 Why Evo?")
            # FIX: give the export button a real target component. The
            # previous wiring used `outputs=[]`, which silently discarded
            # the path returned by export_feedback(), so the CSV download
            # never appeared.
            export_file = gr.File(label="📥 Download Feedback CSV")

    # chat_fn returns (answer md, history, config md, genome md, why-evo md).
    submit.click(chat_fn, inputs=[user_input, option1, option2, user_vote],
                 outputs=[evo_reply, chat_display, model_info, genome_log, evo_why])
    clear.click(clear_fn, outputs=[user_input, option1, option2, user_vote,
                                   chat_display, model_info, genome_log])
    retrain.click(retrain_model, outputs=evo_reply)
    export.click(export_feedback, outputs=export_file)


# Guard the launch so importing this module does not start a server —
# restores the `__main__` guard the previous revision had.
if __name__ == "__main__":
    demo.launch()