HemanM commited on
Commit
1622676
Β·
verified Β·
1 Parent(s): e54d95b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +44 -221
app.py CHANGED
@@ -1,240 +1,63 @@
1
- '''import gradio as gr
2
- import os
3
- from inference import get_evo_response, get_gpt_response
4
- from logger import log_feedback
5
- import csv
6
  import subprocess
7
 
8
- # Helper to load Hall of Fame
9
- def load_hall_of_fame():
10
- entries = []
11
- if os.path.exists("feedback_log.csv"):
12
- with open("feedback_log.csv", newline='', encoding='utf-8') as f:
13
- reader = csv.DictReader(f)
14
- for row in reader:
15
- try:
16
- score = float(row.get("evo_was_correct", "0") == "yes")
17
- if "πŸ‘" in row.get("feedback", "") or score > 0.85:
18
- entries.append(row)
19
- except:
20
- continue
21
- return entries[-10:][::-1] # last 10, reverse order
22
-
23
- def handle_query(question, option1, option2, context):
24
- options = [option1, option2]
25
- evo_answer, evo_reasoning, evo_score, evo_context = get_evo_response(question, options, context)
26
- gpt_answer = get_gpt_response(question, context)
27
- return (
28
- f"Answer: {evo_answer} (Confidence: {evo_score:.2f})\n\nReasoning: {evo_reasoning}\n\nContext used: {evo_context[:400]}...",
29
- gpt_answer,
30
- f"{question} | {context} | {evo_answer}"
31
- )
32
-
33
- def handle_feedback(feedback_text, question, option1, option2, context, evo_output):
34
- evo_was_correct = "πŸ‘" in feedback_text
35
- log_feedback(question, option1, option2, context, evo_output, evo_was_correct)
36
- return "βœ… Feedback logged and Evo will improve."
37
-
38
- def trigger_retrain():
39
- try:
40
- subprocess.run(["python", "retrain_from_feedback.py"], check=True)
41
- return "πŸ” Evo retraining completed."
42
- except subprocess.CalledProcessError:
43
- return "❌ Retraining failed. Check logs."
44
-
45
- def render_hof():
46
- entries = load_hall_of_fame()
47
- if not entries:
48
- return "No Hall of Fame entries yet. Submit feedback!"
49
- result = "\n\n".join(
50
- [
51
- f"πŸ† **Q:** {e['question']}\n**A:** {e['evo_output']}\n**Feedback:** {e.get('feedback', 'N/A')}\n**Context:** {e['context'][:200]}..."
52
- for e in entries
53
- ]
54
- )
55
- return result
56
-
57
- description = """
58
- # 🧠 EvoRAG – Adaptive Reasoning AI
59
-
60
- **What is Evo?**
61
- EvoTransformer is a lightweight, evolving neural network with ~28M parameters.
62
- It learns from feedback, adapts over time, and reasons using both web and context data.
63
-
64
- **Why Evo?**
65
- βœ… Evolves from human input
66
- βœ… Architecturally updatable
67
- βœ… Transparent and fine-tunable
68
- βœ… Efficient on modest hardware
69
-
70
- **Hardware**: Trained on Google Colab CPU/GPU
71
- **Token limit**: 128
72
- **Benchmark**: PIQA, HellaSwag, ARC
73
- **Version**: Evo v2.2 (Memory + Web Retrieval + Feedback Learning)
74
- """
75
-
76
- with gr.Blocks(title="EvoRAG") as demo:
77
- gr.Markdown(description)
78
- with gr.Row():
79
- question = gr.Textbox(label="πŸ“ Ask anything", placeholder="e.g., What’s the best way to escape a house fire?")
80
- with gr.Row():
81
- option1 = gr.Textbox(label="Option A", placeholder="e.g., Run outside")
82
- option2 = gr.Textbox(label="Option B", placeholder="e.g., Hide under bed")
83
- context = gr.Textbox(label="πŸ“‚ Optional Context", placeholder="Paste any extra background info here", lines=3)
84
-
85
- submit_btn = gr.Button("πŸ” Run Comparison")
86
- with gr.Row():
87
- evo_output = gr.Textbox(label="🧠 EvoRAG's Reasoned Answer", lines=6)
88
- gpt_output = gr.Textbox(label="πŸ€– GPT-3.5's Suggestion", lines=6)
89
-
90
- feedback = gr.Radio(["πŸ‘ Evo was correct. Retrain from this.", "πŸ‘Ž Evo was wrong. Don't retrain."], label="Was Evo’s answer useful?", value=None)
91
- submit_feedback = gr.Button("πŸ“¬ Submit Feedback")
92
- feedback_status = gr.Textbox(label="Feedback Status", interactive=False)
93
-
94
- retrain_button = gr.Button("πŸ”„ Retrain Evo Now")
95
- retrain_status = gr.Textbox(label="Retraining Status", interactive=False)
96
-
97
- with gr.Accordion("πŸ† Evo Hall of Fame (Top Reasoning Entries)", open=False):
98
- hof_display = gr.Markdown(render_hof())
99
 
100
- submit_btn.click(fn=handle_query, inputs=[question, option1, option2, context], outputs=[evo_output, gpt_output, feedback_status])
101
- submit_feedback.click(
102
- fn=lambda fb, q, o1, o2, ctx, eo: handle_feedback(fb, q, o1, o2, ctx, eo),
103
- inputs=[feedback, question, option1, option2, context, feedback_status],
104
- outputs=[feedback_status]
105
- )
106
- retrain_button.click(fn=trigger_retrain, inputs=[], outputs=[retrain_status])
107
 
108
- demo.launch(server_name="0.0.0.0", server_port=7860, share=True)'''
 
 
109
 
110
- import gradio as gr
111
- import torch
112
- import time
113
- import os
114
- from inference import load_model_and_tokenizer, infer
115
- from logger import log_feedback
116
- from retrain_from_feedback import train_evo
117
- from datetime import datetime
118
- from inference import get_gpt_response
119
-
120
- # Globals
121
- model, tokenizer = load_model_and_tokenizer()
122
-
123
- # Helper to reload model
124
- def reload_model():
125
- global model, tokenizer
126
- model, tokenizer = load_model_and_tokenizer()
127
 
128
- # Get last update time
129
- def get_last_update():
130
- if os.path.exists("last_updated.txt"):
131
- with open("last_updated.txt") as f:
132
- return f.read().strip()
133
- return "Never"
134
 
135
- # Summarize Evo architecture
136
- def get_model_summary():
137
- num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
138
- summary = f"""
139
- β€’ πŸ”’ Parameters: {num_params:,}
140
- β€’ 🧱 Layers: 6 TransformerEncoder
141
- β€’ 🎯 Attention Heads: 8
142
- β€’ 🧠 FFN Dim: 1024
143
- β€’ 🧬 Memory Module: Enabled
144
- β€’ βš™οΈ Pooling: AdaptiveAvgPool1d
145
- β€’ 🧾 Classifier: Linear(512 β†’ 1)
146
- """
147
- return summary.strip()
148
 
149
- # Handle inference
150
- def compare(question, option1, option2, context):
151
- evo_ans, evo_score, evo_reason, evo_ctx = infer(question, [option1, option2], context)
152
- gpt_ans = get_gpt_response(question, context)
153
- return (
154
- f"Answer: {evo_ans} (Confidence: {evo_score:.2f})\n\nReasoning: {evo_reason}\n\nContext used: {evo_ctx}",
155
- gpt_ans
156
- )
157
 
158
- # Handle feedback
159
- def handle_feedback(feedback_text, question, option1, option2, context, evo_output):
160
- evo_was_correct = feedback_text.strip().lower() == "πŸ‘ evo was correct. retrain from this."
161
- log_feedback(question, option1, option2, context, evo_output, evo_was_correct)
162
- return "βœ… Feedback logged and Evo will improve."
163
-
164
- # Manual retrain
165
- def manual_retrain():
166
  try:
167
- train_evo()
168
- reload_model()
169
- ts = datetime.utcnow().strftime("%Y-%m-%d %H:%M GMT")
170
- with open("last_updated.txt", "w") as f:
171
- f.write(ts)
172
- return f"βœ… Evo successfully evolved! Reloaded at {ts}"
173
  except Exception as e:
174
  return f"❌ Retraining failed: {str(e)}"
175
 
176
- with gr.Blocks(title="EvoRAG – Adaptive Reasoning AI", theme=gr.themes.Soft()) as demo:
177
- gr.Markdown(f"""
178
- # EvoRAG – Adaptive Reasoning AI
179
- **What is Evo?**
180
- EvoTransformer is a lightweight, evolving neural network with ~28M parameters.
181
- It learns from feedback, adapts over time, and reasons using both web and context data.
182
-
183
- **Why Evo?**
184
- βœ… Evolves from human input
185
- βœ… Architecturally updatable
186
- βœ… Transparent and fine-tunable
187
- βœ… Efficient on modest hardware
188
-
189
- **Hardware:** Trained on Google Colab CPU/GPU
190
- **Token limit:** 128
191
- **Benchmark:** PIQA, HellaSwag, ARC
192
- **Version:** Evo v2.2 (Memory + Web Retrieval + Feedback Learning)
193
- **πŸ•’ Last Evolution:** {get_last_update()}
194
- """)
195
-
196
- gr.Markdown(f"""
197
- ## 🧠 EvoTransformer Architecture Summary
198
- {get_model_summary()}
199
- """)
200
-
201
- with gr.Row():
202
- question = gr.Textbox(label="Ask anything", placeholder="e.g. What’s the best way to boil water?")
203
-
204
- with gr.Row():
205
- option1 = gr.Textbox(label="Option A")
206
- option2 = gr.Textbox(label="Option B")
207
-
208
- context = gr.Textbox(label="πŸ“‚ Optional Context", lines=2, placeholder="Paste any extra background info here")
209
-
210
- run_btn = gr.Button("πŸ” Run Comparison")
211
-
212
- with gr.Row():
213
- evo_out = gr.Textbox(label="🧠 EvoRAG's Reasoned Answer")
214
- gpt_out = gr.Textbox(label="πŸ€– GPT-3.5's Suggestion")
215
-
216
- with gr.Row():
217
- feedback_dropdown = gr.Dropdown([
218
- "πŸ‘ Evo was correct. Retrain from this.",
219
- "πŸ‘Ž Evo was wrong. Don't retrain."
220
- ], label="Was Evo’s answer useful?")
221
- submit_btn = gr.Button("πŸ“¬ Submit Feedback")
222
-
223
- feedback_status = gr.Textbox(label="Feedback Status")
224
 
225
  with gr.Row():
226
- retrain_btn = gr.Button("πŸ”„ Retrain Evo Now")
227
- retrain_status = gr.Textbox(label="Retraining Status")
 
 
 
 
 
228
 
229
- hall = gr.Markdown("""
230
- ## πŸ† Evo Hall of Fame (Top Reasoning Entries)
231
- *(Coming soon)*
232
- """)
233
 
234
- run_btn.click(fn=compare, inputs=[question, option1, option2, context], outputs=[evo_out, gpt_out])
235
- submit_btn.click(fn=lambda fb, q, o1, o2, ctx, eo: handle_feedback(fb, q, o1, o2, ctx, eo),
236
- inputs=[feedback_dropdown, question, option1, option2, context, evo_out],
237
- outputs=feedback_status)
238
- retrain_btn.click(fn=manual_retrain, outputs=retrain_status)
239
 
240
  demo.launch()
 
1
+ import gradio as gr
2
+ from inference import evo_chat_predict
 
 
 
3
  import subprocess
4
 
5
+ # Global chat history buffer
6
+ chat_history = []
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
 
8
# 🧠 Main chat handler
def chat_fn(user_input, option1, option2):
    """Run one Evo chat turn and append it to the global history.

    Args:
        user_input: The user's free-text question.
        option1, option2: The two candidate answers Evo decides between.

    Returns:
        (evo_response, chat_history): a markdown-formatted reply string and
        the updated global history (a flat list of "User: ..."/"Evo: ..."
        strings).
    """
    global chat_history

    # Strip BEFORE validating so whitespace-only fields are rejected too
    # (the original check let "   " through and then stripped it to "").
    user_input = (user_input or "").strip()
    options = [(option1 or "").strip(), (option2 or "").strip()]
    if not user_input or not all(options):
        return "Please enter a message and both options.", chat_history

    # NOTE(review): assumes evo_chat_predict returns a dict with 'answer'
    # and 'reasoning' keys — confirm against inference.py.
    result = evo_chat_predict(chat_history, user_input, options)

    # Format Evo reply; the trailing space before \n is a markdown line break.
    evo_response = f"**Answer:** {result['answer']} \n**Reasoning:** {result['reasoning']}"
    chat_history.append(f"User: {user_input}")
    chat_history.append(f"Evo: {evo_response}")

    return evo_response, chat_history
 
 
 
 
 
 
 
 
 
 
 
 
25
 
26
+ # πŸ” Reset chat history
27
+ def clear_fn():
28
+ global chat_history
29
+ chat_history = []
30
+ return "", "", "", []
 
 
 
31
 
32
+ # πŸ” Trigger Evo retraining
33
+ def retrain_model():
 
 
 
 
 
 
34
  try:
35
+ subprocess.run(["python", "retrain_from_feedback.py"], check=True)
36
+ return "βœ… Evo retrained successfully."
 
 
 
 
37
  except Exception as e:
38
  return f"❌ Retraining failed: {str(e)}"
39
 
40
# 🧠 Gradio UI layout
with gr.Blocks(title="EvoRAG – Real-Time Adaptive Reasoning AI") as demo:
    gr.Markdown("## 🧬 EvoRAG – The Evolving Reasoning AI")
    gr.Markdown("Ask a question, give two options, and Evo will decide with confidence. Then, retrain it live.")

    with gr.Row():
        # Left column: inputs and action buttons.
        with gr.Column(scale=4):
            user_input = gr.Textbox(label="Your Question", lines=2)
            option1 = gr.Textbox(label="Option 1")
            option2 = gr.Textbox(label="Option 2")
            submit = gr.Button("🧠 Ask Evo")
            clear = gr.Button("🔁 Clear")
            retrain = gr.Button("📈 Retrain Evo from Feedback")

        # Right column: Evo's reply and the running conversation.
        with gr.Column(scale=6):
            evo_reply = gr.Markdown()
            # chat_fn's second output is a plain list of "User: ..."/"Evo: ..."
            # strings. gr.HighlightedText expects (text, label) pairs and would
            # reject that payload at render time, so show the history as JSON.
            chat_display = gr.JSON(label="Conversation History")

    # Wire callbacks: ask, reset, retrain.
    submit.click(fn=chat_fn, inputs=[user_input, option1, option2],
                 outputs=[evo_reply, chat_display])
    clear.click(fn=clear_fn, inputs=[], outputs=[user_input, option1, option2, chat_display])
    retrain.click(fn=retrain_model, inputs=[], outputs=evo_reply)

demo.launch()