Update app.py
app.py CHANGED
@@ -109,43 +109,58 @@ demo.launch(server_name="0.0.0.0", server_port=7860, share=True)'''

 import gradio as gr
 import torch
+import time
 import os
 from inference import load_model_and_tokenizer, infer
 from logger import log_feedback
 from retrain_from_feedback import train_evo
 from datetime import datetime

-#
-model, tokenizer = load_model_and_tokenizer(
+# Globals
+model, tokenizer = load_model_and_tokenizer()

-#
+# Helper to reload model
 def reload_model():
     global model, tokenizer
-    model, tokenizer = load_model_and_tokenizer(
+    model, tokenizer = load_model_and_tokenizer()

-# Get last
+# Get last update time
 def get_last_update():
     if os.path.exists("last_updated.txt"):
         with open("last_updated.txt") as f:
             return f.read().strip()
     return "Never"

-#
+# Summarize Evo architecture
+def get_model_summary():
+    num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
+    summary = f"""
+    • 🔢 Parameters: {num_params:,}
+    • 🧱 Layers: 6 TransformerEncoder
+    • 🎯 Attention Heads: 8
+    • 🧠 FFN Dim: 1024
+    • 🧬 Memory Module: Enabled
+    • ⚙️ Pooling: AdaptiveAvgPool1d
+    • 🧾 Classifier: Linear(512 → 1)
+    """
+    return summary.strip()
+
+# Handle inference
 def compare(question, option1, option2, context):
-    evo_ans, evo_score, evo_reason, evo_ctx = infer(
+    evo_ans, evo_score, evo_reason, evo_ctx = infer(question, [option1, option2], context)
     gpt_ans = "Coming soon via API"
     return (
         f"Answer: {evo_ans} (Confidence: {evo_score:.2f})\n\nReasoning: {evo_reason}\n\nContext used: {evo_ctx}",
         gpt_ans
     )

-#
+# Handle feedback
 def handle_feedback(feedback_text, question, option1, option2, context, evo_output):
     evo_was_correct = feedback_text.strip().lower() == "👍 evo was correct. retrain from this."
     log_feedback(question, option1, option2, context, evo_output, evo_was_correct)
     return "✅ Feedback logged and Evo will improve."

-#
+# Manual retrain
 def manual_retrain():
     try:
         train_evo()
@@ -153,13 +168,12 @@ def manual_retrain():
         ts = datetime.utcnow().strftime("%Y-%m-%d %H:%M GMT")
         with open("last_updated.txt", "w") as f:
             f.write(ts)
-        return f"✅ Evo successfully evolved
+        return f"✅ Evo successfully evolved! Reloaded at {ts}"
     except Exception as e:
-        return f"❌ Retraining failed: {str(e)}"
+        return f"❌ Retraining failed: {str(e)}"

-# Gradio UI
 with gr.Blocks(title="EvoRAG – Adaptive Reasoning AI", theme=gr.themes.Soft()) as demo:
-    gr.Markdown("""
+    gr.Markdown(f"""
     # EvoRAG – Adaptive Reasoning AI
     **What is Evo?**
     EvoTransformer is a lightweight, evolving neural network with ~28M parameters.
@@ -175,6 +189,12 @@ with gr.Blocks(title="EvoRAG – Adaptive Reasoning AI", theme=gr.themes.Soft())
     **Token limit:** 128
     **Benchmark:** PIQA, HellaSwag, ARC
     **Version:** Evo v2.2 (Memory + Web Retrieval + Feedback Learning)
+    **📅 Last Evolution:** {get_last_update()}
+    """)
+
+    gr.Markdown(f"""
+    ## 🧠 EvoTransformer Architecture Summary
+    {get_model_summary()}
     """)

     with gr.Row():
@@ -205,16 +225,15 @@ with gr.Blocks(title="EvoRAG – Adaptive Reasoning AI", theme=gr.themes.Soft())
         retrain_btn = gr.Button("🔁 Retrain Evo Now")
         retrain_status = gr.Textbox(label="Retraining Status")

-        last_update = gr.Textbox(label="📅 Last Evolution Timestamp", value=get_last_update(), interactive=False)
-
     hall = gr.Markdown("""
     ## 🏆 Evo Hall of Fame (Top Reasoning Entries)
     *(Coming soon)*
     """)

     run_btn.click(fn=compare, inputs=[question, option1, option2, context], outputs=[evo_out, gpt_out])
-    submit_btn.click(fn=
-
+    submit_btn.click(fn=lambda fb, q, o1, o2, ctx, eo: handle_feedback(fb, q, o1, o2, ctx, eo),
+                     inputs=[feedback_dropdown, question, option1, option2, context, evo_out],
+                     outputs=feedback_status)
+    retrain_btn.click(fn=manual_retrain, outputs=retrain_status)

 demo.launch()
-
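
The updated app.py leans on three local modules the diff does not show: inference.py (load_model_and_tokenizer, infer), logger.py (log_feedback), and retrain_from_feedback.py (train_evo). For smoke-testing the Gradio UI without the trained EvoTransformer checkpoint, here is a minimal sketch of stand-ins; their signatures and return shapes are only inferred from the calls in the diff above, not taken from the real modules.

import json
import torch.nn as nn

def load_model_and_tokenizer():
    # The real function returns the trained EvoTransformer and its tokenizer.
    # A tiny torch module keeps the sum(p.numel() ...) call in get_model_summary() working.
    return nn.Linear(512, 1), None

def infer(question, options, context=""):
    # app.py unpacks four values: answer, confidence, reasoning, and the context used.
    answer = options[0] if options else ""
    return answer, 0.50, "stub reasoning (no trained model loaded)", context or "none"

def log_feedback(question, option1, option2, context, evo_output, evo_was_correct):
    # Append one JSON line per feedback event; the real logger may record more fields.
    with open("feedback_log.jsonl", "a") as f:
        f.write(json.dumps({
            "question": question, "option1": option1, "option2": option2,
            "context": context, "evo_output": evo_output,
            "evo_was_correct": evo_was_correct,
        }) + "\n")

def train_evo():
    # Placeholder for retraining Evo from the logged feedback.
    pass

Splitting these stand-ins across placeholder inference.py, logger.py, and retrain_from_feedback.py files (matching the import names) should be enough for demo.launch() to come up and for the run, feedback, and retrain buttons to respond end to end.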
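
The new get_model_summary() computes the parameter count dynamically but hard-codes the architecture description (6 TransformerEncoder layers, 8 attention heads, FFN dim 1024, a Linear(512 → 1) head). As a rough cross-check using the same counting pattern, the sketch below builds a plain PyTorch encoder to those numbers. It is only an approximation: EvoTransformer's embeddings, memory module, and pooling are not reproduced here, and those presumably account for much of the ~28M total quoted in the app description.

import torch.nn as nn

# Stand-in built from the numbers listed in get_model_summary(); this is not
# EvoTransformer's actual definition, just the encoder stack it describes.
encoder = nn.TransformerEncoder(
    nn.TransformerEncoderLayer(d_model=512, nhead=8, dim_feedforward=1024),
    num_layers=6,
)
classifier = nn.Linear(512, 1)  # the Linear(512 -> 1) head from the summary

# Same counting pattern as get_model_summary() in app.py.
num_params = sum(p.numel() for p in encoder.parameters() if p.requires_grad)
num_params += sum(p.numel() for p in classifier.parameters() if p.requires_grad)
print(f"Encoder + classifier parameters: {num_params:,}")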