HemanM committed on
Commit
b0ba5ba
Β·
verified Β·
1 Parent(s): 1b55012

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +57 -44
app.py CHANGED
@@ -1,100 +1,113 @@
1
  import gradio as gr
2
- from inference import evo_chat_predict, get_gpt_response
3
  from logger import log_feedback
4
  import subprocess
 
5
 
6
- # Global chat history
7
  chat_history = []
8
 
9
- # 🧠 Handle main chat logic
10
  def chat_fn(user_input, option1, option2, user_vote=None):
11
  global chat_history
12
 
13
- # Validate
14
  if not user_input or not option1 or not option2:
15
- return "❗ Please enter your question and both options.", chat_history
16
 
17
  options = [option1.strip(), option2.strip()]
18
-
19
- # Evo prediction
20
  evo_result = evo_chat_predict(chat_history, user_input, options)
21
-
22
- # GPT fallback (background comparison)
23
  gpt_response = get_gpt_response(user_input)
24
 
25
- # Format response
26
  evo_msg = (
27
- f"### πŸ€– Evo\n"
28
- f"**Answer:** {evo_result['answer']} \n"
29
- f"**Reasoning:** {evo_result['reasoning']}\n\n"
30
- f"---\n"
31
- f"### 🧠 GPT-3.5\n"
32
- f"{gpt_response}"
33
- )
34
 
35
  chat_history.append(f"πŸ‘€ User: {user_input}")
36
- chat_history.append(f"πŸ€– Evo: {evo_msg}")
37
  chat_history.append(f"🧠 GPT: {gpt_response}")
38
 
39
- # Logging for Evo retraining
40
  log_feedback(
41
  question=user_input,
42
  option1=option1,
43
  option2=option2,
44
- context=evo_result['context_used'],
45
- evo_output=evo_result['answer'],
46
  gpt_output=gpt_response,
47
- evo_reasoning=evo_result['reasoning'],
48
  user_preference=user_vote
49
  )
50
 
51
- return evo_msg, chat_history
 
 
 
 
 
 
 
52
 
53
- # πŸ” Clear chat state
 
 
 
 
 
 
 
 
 
 
 
54
  def clear_fn():
55
  global chat_history
56
  chat_history = []
57
- return "", "", "", None, []
58
 
59
- # πŸ“ˆ Live retrain
60
  def retrain_model():
61
  try:
62
- result = subprocess.run(
63
- ["python", "retrain_from_feedback.py"],
64
- check=True,
65
- capture_output=True,
66
- text=True
67
- )
68
- print(result.stdout) # Log to terminal
69
  return "βœ… Evo retrained successfully."
70
- except subprocess.CalledProcessError as e:
71
- print("STDOUT:", e.stdout)
72
- print("STDERR:", e.stderr)
73
- return f"❌ Retraining failed:\n{e.stderr}"
74
 
 
 
 
 
 
75
 
76
  # 🌐 Gradio UI
77
  with gr.Blocks(title="EvoRAG – Real-Time Adaptive Reasoning AI") as demo:
78
  gr.Markdown("## 🧬 EvoRAG – Real-Time Adaptive Reasoning AI")
79
- gr.Markdown("Ask Evo a question, give two options. Evo chooses with reasoning. Compare with GPT. Feedback fuels evolution.")
80
 
81
  with gr.Row():
82
  with gr.Column(scale=4):
83
  user_input = gr.Textbox(label="Your Question", lines=2)
84
  option1 = gr.Textbox(label="Option 1")
85
  option2 = gr.Textbox(label="Option 2")
86
- user_vote = gr.Radio(["Evo", "GPT"], label="πŸ—³οΈ Who gave the better answer?", info="Optional – improves Evo.")
87
  submit = gr.Button("🧠 Ask Evo")
88
  clear = gr.Button("πŸ” Clear")
89
  retrain = gr.Button("πŸ“ˆ Retrain Evo from Feedback")
 
90
 
91
  with gr.Column(scale=6):
92
  evo_reply = gr.Markdown()
93
  chat_display = gr.HighlightedText(label="Conversation History")
94
-
95
- submit.click(fn=chat_fn, inputs=[user_input, option1, option2, user_vote],
96
- outputs=[evo_reply, chat_display])
97
- clear.click(fn=clear_fn, outputs=[user_input, option1, option2, user_vote, chat_display])
98
- retrain.click(fn=retrain_model, outputs=evo_reply)
 
 
 
99
 
100
  demo.launch()
 
1
  import gradio as gr
2
+ from inference import evo_chat_predict, get_gpt_response, get_model_config
3
  from logger import log_feedback
4
  import subprocess
5
+ import os
6
 
 
7
  chat_history = []
8
 
9
+ # πŸ” Handle chat logic
10
def chat_fn(user_input, option1, option2, user_vote=None):
    """Handle one chat turn: ask Evo, compare with GPT, log feedback, report genome state.

    Parameters
    ----------
    user_input : str
        The user's question.
    option1, option2 : str
        The two candidate answers Evo must choose between.
    user_vote : str | None
        Optional "Evo"/"GPT" preference from the UI radio (improves retraining).

    Returns
    -------
    tuple
        (evo_msg, chat_history, config_str, log_txt): markdown reply, running
        history list, model-architecture summary, latest genome-log line.
    """
    global chat_history

    # Guard clause: all three text fields are required.
    if not user_input or not option1 or not option2:
        return "❗ Please provide a question and two options.", chat_history, "", ""

    options = [option1.strip(), option2.strip()]

    evo_result = evo_chat_predict(chat_history, user_input, options)
    gpt_response = get_gpt_response(user_input)

    evo_msg = (
        f"### πŸ€– Evo\n"
        f"**Answer:** {evo_result['answer']} \n"
        f"**Reasoning:** {evo_result['reasoning']}\n\n"
        f"---\n"
        f"### 🧠 GPT-3.5\n"
        f"{gpt_response}"
    )

    chat_history.append(f"πŸ‘€ User: {user_input}")
    chat_history.append(f"πŸ€– Evo: {evo_result['answer']}")
    chat_history.append(f"🧠 GPT: {gpt_response}")

    # Log feedback so Evo can be retrained from this exchange later.
    log_feedback(
        question=user_input,
        option1=option1,
        option2=option2,
        context=evo_result["context_used"],
        evo_output=evo_result["answer"],
        gpt_output=gpt_response,
        evo_reasoning=evo_result["reasoning"],
        user_preference=user_vote
    )

    # Show genome config (current model architecture) for the diagnostics panel.
    config = get_model_config()
    config_str = (
        f"**Layers:** {config['num_layers']} \n"
        f"**Heads:** {config['num_heads']} \n"
        f"**FFN Dim:** {config['ffn_dim']} \n"
        f"**Memory:** {'Enabled' if config['memory_enabled'] else 'Disabled'}"
    )

    # Load latest genome score. Skip blank trailing rows: a file ending in a
    # newline makes readlines()'s last element empty, and the original
    # lines[-1] access would then yield an empty row (IndexError on last[0]).
    log_txt = "No genome log yet."
    if os.path.exists("genome_log.csv"):
        with open("genome_log.csv", "r", encoding="utf-8") as f:
            lines = f.readlines()
        if len(lines) > 1:
            data_rows = [ln for ln in lines[1:] if ln.strip()]
            if data_rows:
                last = data_rows[-1].strip().split(",")
                log_txt = f"🧬 Genome ID: {last[0]} | Accuracy: {last[-1]}"

    return evo_msg, chat_history, config_str, log_txt
64
+
65
+ # πŸ” Clear everything
66
def clear_fn():
    """Reset the conversation and blank out every bound UI component."""
    global chat_history
    chat_history = []
    blank = ""
    # (question, option1, option2, vote, chat display, model info, genome log)
    return blank, blank, blank, None, [], blank, blank
70
 
71
+ # πŸ“ˆ Retrain
72
def retrain_model():
    """Run the feedback-retraining script and report the outcome.

    Returns a status string for the UI; never raises. Captures the script's
    stdout/stderr so failures can be diagnosed from the server terminal —
    the bare `check=True` call discarded that output entirely.
    """
    try:
        result = subprocess.run(
            ["python", "retrain_from_feedback.py"],
            check=True,
            capture_output=True,
            text=True,
        )
        print(result.stdout)  # surface the training log in the terminal
        return "βœ… Evo retrained successfully."
    except subprocess.CalledProcessError as e:
        # Script ran but exited non-zero: log its output for debugging.
        print("STDOUT:", e.stdout)
        print("STDERR:", e.stderr)
        return f"❌ Retraining failed: {str(e)}"
    except Exception as e:
        # Interpreter missing, OS error, etc.
        return f"❌ Retraining failed: {str(e)}"
 
 
78
 
79
+ # ⬇️ Download feedback
80
def export_feedback():
    """Return the feedback CSV path when it exists, else None (nothing to export)."""
    path = "feedback_log.csv"
    return path if os.path.exists(path) else None
84
 
85
# 🌐 Gradio UI — wires the chat/clear/retrain/export handlers to components.
with gr.Blocks(title="EvoRAG – Real-Time Adaptive Reasoning AI") as demo:
    gr.Markdown("## 🧬 EvoRAG – Real-Time Adaptive Reasoning AI")
    gr.Markdown("Ask Evo a question and give two options. Evo chooses, explains, and evolves. Compare with GPT-3.5.")

    with gr.Row():
        # Left column: question/option inputs and action buttons.
        with gr.Column(scale=4):
            user_input = gr.Textbox(label="Your Question", lines=2)
            option1 = gr.Textbox(label="Option 1")
            option2 = gr.Textbox(label="Option 2")
            user_vote = gr.Radio(["Evo", "GPT"], label="πŸ—³οΈ Who gave the better answer?", info="Optional – helps Evo learn.")
            submit = gr.Button("🧠 Ask Evo")
            clear = gr.Button("πŸ” Clear")
            retrain = gr.Button("πŸ“ˆ Retrain Evo from Feedback")
            export = gr.Button("⬇️ Export Feedback CSV")

        # Right column: Evo's reply plus diagnostics panels.
        with gr.Column(scale=6):
            evo_reply = gr.Markdown()
            # NOTE(review): chat_fn feeds this plain strings, but
            # HighlightedText normally expects (text, label) pairs — confirm
            # rendering against the installed gradio version.
            chat_display = gr.HighlightedText(label="Conversation History")
            model_info = gr.Markdown(label="🧠 Evo Architecture")
            genome_log = gr.Markdown(label="πŸ“Š Evolution Log")

    # chat_fn returns (evo_msg, chat_history, config_str, log_txt) — one per output.
    submit.click(chat_fn, inputs=[user_input, option1, option2, user_vote],
                 outputs=[evo_reply, chat_display, model_info, genome_log])
    # clear_fn returns seven blanks matching these seven components in order.
    clear.click(clear_fn, outputs=[user_input, option1, option2, user_vote, chat_display, model_info, genome_log])
    retrain.click(retrain_model, outputs=evo_reply)
    # NOTE(review): export_feedback returns a file path, but outputs=[] discards
    # it — likely needs a gr.File output component to actually serve the CSV.
    export.click(export_feedback, outputs=[])

demo.launch()