Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,4 +1,4 @@
|
|
1 |
-
import gradio as gr
|
2 |
import os
|
3 |
from inference import get_evo_response, get_gpt_response
|
4 |
from logger import log_feedback
|
@@ -105,4 +105,117 @@ with gr.Blocks(title="EvoRAG") as demo:
|
|
105 |
)
|
106 |
retrain_button.click(fn=trigger_retrain, inputs=[], outputs=[retrain_status])
|
107 |
|
108 |
-
demo.launch(server_name="0.0.0.0", server_port=7860, share=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
'''import gradio as gr
|
2 |
import os
|
3 |
from inference import get_evo_response, get_gpt_response
|
4 |
from logger import log_feedback
|
|
|
105 |
)
|
106 |
retrain_button.click(fn=trigger_retrain, inputs=[], outputs=[retrain_status])
|
107 |
|
108 |
+
demo.launch(server_name="0.0.0.0", server_port=7860, share=True)'''
|
109 |
+
|
110 |
+
import gradio as gr
|
111 |
+
import torch
|
112 |
+
import time
|
113 |
+
import os
|
114 |
+
from inference import load_model_and_tokenizer, infer
|
115 |
+
from logger import log_feedback
|
116 |
+
from retrain_from_feedback import train_evo
|
117 |
+
from datetime import datetime
|
118 |
+
|
# Location of the retrained Evo checkpoint, used at startup and on every reload.
_CHECKPOINT_PATH = "trained_model/evo_retrained.pt"

# Globals
model, tokenizer = load_model_and_tokenizer(_CHECKPOINT_PATH)

# Helper to reload model
def reload_model():
    """Re-read the checkpoint and swap the module-level model/tokenizer in place."""
    global model, tokenizer
    model, tokenizer = load_model_and_tokenizer(_CHECKPOINT_PATH)
126 |
+
|
# Get last update time
def get_last_update():
    """Return the retrain timestamp stored in last_updated.txt, or "Never" if absent."""
    if not os.path.exists("last_updated.txt"):
        return "Never"
    with open("last_updated.txt") as fh:
        return fh.read().strip()
133 |
+
|
# Handle inference
def compare(question, option1, option2, context):
    """Run Evo on both options and return (evo_report, gpt_placeholder) for the UI."""
    answer, confidence, reasoning, used_ctx = infer(
        model, tokenizer, question, option1, option2, context
    )
    evo_report = (
        f"Answer: {answer} (Confidence: {confidence:.2f})\n\n"
        f"Reasoning: {reasoning}\n\n"
        f"Context used: {used_ctx}"
    )
    # GPT comparison is not wired up yet; a fixed placeholder is shown instead.
    return evo_report, "Coming soon via API"
142 |
+
|
# Handle feedback
def handle_feedback(feedback_text, question, option1, option2, context, evo_output):
    """Log the user's verdict on Evo's answer; the "correct" choice flags it for retraining."""
    # Must match the dropdown's positive option after strip()/lower().
    positive_choice = "π evo was correct. retrain from this."
    evo_was_correct = feedback_text.strip().lower() == positive_choice
    log_feedback(question, option1, option2, context, evo_output, evo_was_correct)
    return "β… Feedback logged and Evo will improve."
148 |
+
|
# Manual retrain
def manual_retrain():
    """Retrain Evo from logged feedback, hot-reload the model, and stamp the time.

    Returns a human-readable status string for the UI; any failure is caught
    and reported rather than crashing the app.
    """
    try:
        train_evo()
        reload_model()
        ts = datetime.utcnow().strftime("%Y-%m-%d %H:%M GMT")
        with open("last_updated.txt", "w") as fh:
            fh.write(ts)
        return f"β… Evo successfully evolved! Reloaded at {ts}"
    except Exception as exc:  # surfaced to the UI as a status message
        return f"β Retraining failed: {str(exc)}"
160 |
+
|
# UI layout: question + two options in, Evo vs GPT answers out, plus feedback
# logging and a manual-retrain control.
with gr.Blocks(title="EvoRAG β Adaptive Reasoning AI", theme=gr.themes.Soft()) as demo:
    # BUG FIX: this header was a plain triple-quoted string, so
    # "{get_last_update()}" was displayed literally; it must be an
    # f-string for the last-evolution timestamp to interpolate.
    gr.Markdown(f"""
    # EvoRAG β Adaptive Reasoning AI
    **What is Evo?**
    EvoTransformer is a lightweight, evolving neural network with ~28M parameters.
    It learns from feedback, adapts over time, and reasons using both web and context data.

    **Why Evo?**
    β… Evolves from human input
    β… Architecturally updatable
    β… Transparent and fine-tunable
    β… Efficient on modest hardware

    **Hardware:** Trained on Google Colab CPU/GPU
    **Token limit:** 128
    **Benchmark:** PIQA, HellaSwag, ARC
    **Version:** Evo v2.2 (Memory + Web Retrieval + Feedback Learning)
    **π Last Evolution:** {get_last_update()}
    """)

    with gr.Row():
        question = gr.Textbox(label="Ask anything", placeholder="e.g. Whatβs the best way to boil water?")

    with gr.Row():
        option1 = gr.Textbox(label="Option A")
        option2 = gr.Textbox(label="Option B")

    context = gr.Textbox(label="π Optional Context", lines=2, placeholder="Paste any extra background info here")

    run_btn = gr.Button("π Run Comparison")

    with gr.Row():
        evo_out = gr.Textbox(label="π§ EvoRAG's Reasoned Answer")
        gpt_out = gr.Textbox(label="π€ GPT-3.5's Suggestion")

    with gr.Row():
        # NOTE: the positive option's text is what handle_feedback matches on
        # (case-insensitively) — keep the two in sync.
        feedback_dropdown = gr.Dropdown([
            "π Evo was correct. Retrain from this.",
            "π Evo was wrong. Don't retrain."
        ], label="Was Evoβs answer useful?")
        submit_btn = gr.Button("π¬ Submit Feedback")

    feedback_status = gr.Textbox(label="Feedback Status")

    with gr.Row():
        retrain_btn = gr.Button("π Retrain Evo Now")
        retrain_status = gr.Textbox(label="Retraining Status")

    hall = gr.Markdown("""
    ## π Evo Hall of Fame (Top Reasoning Entries)
    *(Coming soon)*
    """)

    run_btn.click(fn=compare, inputs=[question, option1, option2, context], outputs=[evo_out, gpt_out])
    # Cleanup: the original wrapped handle_feedback in an identity lambda that
    # just forwarded all six arguments; pass the function directly instead.
    submit_btn.click(fn=handle_feedback,
                     inputs=[feedback_dropdown, question, option1, option2, context, evo_out],
                     outputs=feedback_status)
    retrain_btn.click(fn=manual_retrain, outputs=retrain_status)

demo.launch()