HemanM committed on
Commit
22d947b
·
verified ·
1 Parent(s): 3e6ca34

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -48
app.py CHANGED
@@ -1,17 +1,17 @@
1
  import gradio as gr
2
- from inference import evo_chat_predict, get_gpt_response, get_model_config, get_system_stats
3
  from logger import log_feedback
4
- import subprocess
5
- import os
6
 
7
  chat_history = []
8
 
9
- # πŸ” Handle chat logic
 
10
  def chat_fn(user_input, option1, option2, user_vote=None):
11
  global chat_history
12
 
13
  if not user_input or not option1 or not option2:
14
- return "❗ Please provide a question and two options.", chat_history, "", "", ""
15
 
16
  options = [option1.strip(), option2.strip()]
17
  evo_result = evo_chat_predict(chat_history, user_input, options)
@@ -19,8 +19,9 @@ def chat_fn(user_input, option1, option2, user_vote=None):
19
 
20
  evo_msg = (
21
  f"### πŸ€– Evo\n"
22
- f"**Answer:** {evo_result['answer']} \n"
23
- f"**Reasoning:** {evo_result['reasoning']}\n\n"
 
24
  f"---\n"
25
  f"### 🧠 GPT-3.5\n"
26
  f"{gpt_response}"
@@ -30,7 +31,6 @@ def chat_fn(user_input, option1, option2, user_vote=None):
30
  chat_history.append(f"πŸ€– Evo: {evo_result['answer']}")
31
  chat_history.append(f"🧠 GPT: {gpt_response}")
32
 
33
- # Log feedback
34
  log_feedback(
35
  question=user_input,
36
  option1=option1,
@@ -42,17 +42,13 @@ def chat_fn(user_input, option1, option2, user_vote=None):
42
  user_preference=user_vote
43
  )
44
 
45
- # Show genome config
46
  config = get_model_config()
47
  config_str = (
48
- f"**Layers:** {config['num_layers']} \n"
49
- f"**Heads:** {config['num_heads']} \n"
50
- f"**FFN Dim:** {config['ffn_dim']} \n"
51
- f"**Memory:** {'Enabled' if config['memory_enabled'] else 'Disabled'}"
52
  )
53
 
54
- # Load latest genome score
55
- log_txt = "No genome log yet."
56
  if os.path.exists("genome_log.csv"):
57
  with open("genome_log.csv", "r", encoding="utf-8") as f:
58
  lines = f.readlines()
@@ -60,65 +56,54 @@ def chat_fn(user_input, option1, option2, user_vote=None):
60
  last = lines[-1].strip().split(",")
61
  log_txt = f"🧬 Genome ID: {last[0]} | Accuracy: {last[-1]}"
62
 
63
- # System stats
64
- sys = get_system_stats()
65
- sys_str = (
66
- f"**Device:** {sys['device']} ({sys['platform']}) \n"
67
- f"**CPU Usage:** {sys['cpu_usage_percent']}% \n"
68
- f"**RAM:** {sys['memory_used_gb']} GB / {sys['memory_total_gb']} GB \n"
69
- f"**GPU:** {sys['gpu_name']} \n"
70
- f"**GPU Memory:** {sys['gpu_memory_used_gb']} GB / {sys['gpu_memory_total_gb']} GB"
71
- )
72
 
73
- return evo_msg, chat_history, config_str, log_txt, sys_str
74
 
75
- # πŸ” Clear everything
76
  def clear_fn():
77
  global chat_history
78
  chat_history = []
79
- return "", "", "", None, [], "", "", ""
 
80
 
81
- # πŸ“ˆ Retrain
82
  def retrain_model():
83
  try:
84
  subprocess.run(["python", "retrain_from_feedback.py"], check=True)
85
- return "βœ… Evo retrained successfully."
86
  except Exception as e:
87
- return f"❌ Retraining failed: {str(e)}"
 
88
 
89
- # ⬇️ Download feedback
90
  def export_feedback():
91
- if os.path.exists("feedback_log.csv"):
92
- return "feedback_log.csv"
93
- return None
94
 
95
- # 🌐 Gradio UI
96
- with gr.Blocks(title="EvoRAG – Real-Time Adaptive Reasoning AI") as demo:
97
- gr.Markdown("## 🧬 EvoRAG – Real-Time Adaptive Reasoning AI")
98
- gr.Markdown("Ask Evo a question and give two options. Evo chooses, explains, and evolves. Compare with GPT-3.5.")
99
 
100
  with gr.Row():
101
  with gr.Column(scale=4):
102
  user_input = gr.Textbox(label="Your Question", lines=2)
103
  option1 = gr.Textbox(label="Option 1")
104
  option2 = gr.Textbox(label="Option 2")
105
- user_vote = gr.Radio(["Evo", "GPT"], label="πŸ—³οΈ Who gave the better answer?", info="Optional – helps Evo learn.")
106
- submit = gr.Button("🧠 Ask Evo")
107
- clear = gr.Button("πŸ” Clear")
108
- retrain = gr.Button("πŸ“ˆ Retrain Evo from Feedback")
109
- export = gr.Button("⬇️ Export Feedback CSV")
110
 
111
  with gr.Column(scale=6):
112
  evo_reply = gr.Markdown()
113
  chat_display = gr.HighlightedText(label="Conversation History")
114
  model_info = gr.Markdown(label="🧠 Evo Architecture")
115
- genome_log = gr.Markdown(label="πŸ“Š Evolution Log")
116
- sys_stats = gr.Markdown(label="πŸ“Ÿ System Stats")
117
 
118
  submit.click(chat_fn, inputs=[user_input, option1, option2, user_vote],
119
- outputs=[evo_reply, chat_display, model_info, genome_log, sys_stats])
120
- clear.click(clear_fn, outputs=[user_input, option1, option2, user_vote, chat_display, model_info, genome_log, sys_stats])
121
  retrain.click(retrain_model, outputs=evo_reply)
122
  export.click(export_feedback, outputs=[])
123
 
124
- demo.launch()
 
 
1
import os
import subprocess
import sys

import gradio as gr

from inference import evo_chat_predict, get_gpt_response, get_model_config
from logger import log_feedback
 
5
 
6
  chat_history = []
7
 
8
+ # πŸ” Core logic
9
+
10
  def chat_fn(user_input, option1, option2, user_vote=None):
11
  global chat_history
12
 
13
  if not user_input or not option1 or not option2:
14
+ return "❗ Provide a question and two options.", chat_history, "", ""
15
 
16
  options = [option1.strip(), option2.strip()]
17
  evo_result = evo_chat_predict(chat_history, user_input, options)
 
19
 
20
  evo_msg = (
21
  f"### πŸ€– Evo\n"
22
+ f"**Answer:** {evo_result['answer']}\n"
23
+ f"**Reasoning:** {evo_result['reasoning']}\n"
24
+ f"**Confidence:** {evo_result['confidence']}\n"
25
  f"---\n"
26
  f"### 🧠 GPT-3.5\n"
27
  f"{gpt_response}"
 
31
  chat_history.append(f"πŸ€– Evo: {evo_result['answer']}")
32
  chat_history.append(f"🧠 GPT: {gpt_response}")
33
 
 
34
  log_feedback(
35
  question=user_input,
36
  option1=option1,
 
42
  user_preference=user_vote
43
  )
44
 
 
45
  config = get_model_config()
46
  config_str = (
47
+ f"**Layers:** {config['num_layers']} | **Heads:** {config['num_heads']} | "
48
+ f"**FFN:** {config['ffn_dim']} | **Memory:** {'βœ…' if config['memory_enabled'] else '❌'}"
 
 
49
  )
50
 
51
+ log_txt = "Genome log not found."
 
52
  if os.path.exists("genome_log.csv"):
53
  with open("genome_log.csv", "r", encoding="utf-8") as f:
54
  lines = f.readlines()
 
56
  last = lines[-1].strip().split(",")
57
  log_txt = f"🧬 Genome ID: {last[0]} | Accuracy: {last[-1]}"
58
 
59
+ return evo_msg, chat_history, config_str, log_txt
 
 
 
 
 
 
 
 
60
 
 
61
 
 
62
  def clear_fn():
63
  global chat_history
64
  chat_history = []
65
+ return "", "", "", None, [], "", ""
66
+
67
 
 
68
  def retrain_model():
69
  try:
70
  subprocess.run(["python", "retrain_from_feedback.py"], check=True)
71
+ return "βœ… Evo retrained."
72
  except Exception as e:
73
+ return f"❌ Retrain error: {str(e)}"
74
+
75
 
 
76
  def export_feedback():
77
+ return "feedback_log.csv" if os.path.exists("feedback_log.csv") else None
78
+
 
79
 
80
+ # πŸš€ UI Launch
81
+ with gr.Blocks(title="EvoRAG – Real-Time Reasoning AI") as demo:
82
+ gr.Markdown("## 🧠 EvoRAG – Real-Time Reasoning AI")
83
+ gr.Markdown("Built Different. Learns Live. Evolves from You.")
84
 
85
  with gr.Row():
86
  with gr.Column(scale=4):
87
  user_input = gr.Textbox(label="Your Question", lines=2)
88
  option1 = gr.Textbox(label="Option 1")
89
  option2 = gr.Textbox(label="Option 2")
90
+ user_vote = gr.Radio(["Evo", "GPT"], label="πŸ—³οΈ Who was better?", info="Optional – fuels evolution")
91
+ submit = gr.Button("Ask Evo")
92
+ clear = gr.Button("Clear")
93
+ retrain = gr.Button("Retrain Evo")
94
+ export = gr.Button("Export Feedback")
95
 
96
  with gr.Column(scale=6):
97
  evo_reply = gr.Markdown()
98
  chat_display = gr.HighlightedText(label="Conversation History")
99
  model_info = gr.Markdown(label="🧠 Evo Architecture")
100
+ genome_log = gr.Markdown(label="πŸ“Š Last Evolution")
 
101
 
102
  submit.click(chat_fn, inputs=[user_input, option1, option2, user_vote],
103
+ outputs=[evo_reply, chat_display, model_info, genome_log])
104
+ clear.click(clear_fn, outputs=[user_input, option1, option2, user_vote, chat_display, model_info, genome_log])
105
  retrain.click(retrain_model, outputs=evo_reply)
106
  export.click(export_feedback, outputs=[])
107
 
108
+ if __name__ == "__main__":
109
+ demo.launch()