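"""EvoRAG demo app.

Gradio UI that sends the same two-option question to the Evo model and to GPT-3.5,
shows both answers side by side, logs which one the user preferred, and can retrain
Evo from that accumulated feedback.
"""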
import gradio as gr
import os
from inference import evo_infer, gpt_infer
from retrain_from_feedback import train_evo
from logger import log_feedback
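
# Local helper modules (not included in this file). Signatures below are inferred
# from how they are called in this app and should be treated as assumptions:
#   evo_infer(q, o1, o2)  -> (answer, reasoning) from the Evo model
#   gpt_infer(q, o1, o2)  -> answer string from GPT-3.5
#   log_feedback(q, o1, o2, context, evo_answer, gpt_answer, evo_reasoning, winner)
#                         -> records one comparison (assumed to back feedback_log.csv)
#   train_evo()           -> retrains/reloads Evo from the logged feedback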
question = gr.Textbox(label="🧠 Your Question", placeholder="e.g. What should you do if there's a fire?", lines=1)
option1 = gr.Textbox(label="🅰️ Option 1", placeholder="Enter the first option")
option2 = gr.Textbox(label="🅱️ Option 2", placeholder="Enter the second option")
choice = gr.Radio(["Evo", "GPT"], label="🗳️ Who was better?", info="Optional – fuels evolution", type="value")
evo_out = gr.Textbox(label="🤖 Evo", interactive=False)
gpt_out = gr.Textbox(label="🧠 GPT-3.5", interactive=False)
history = gr.Textbox(label="📜 Conversation History", interactive=False)
evo_stats = gr.Textbox(label="📊 Evo Stats", value="Layers: 6 | Heads: 8 | FFN: 1024 | Memory: ✅ | Accuracy: ~64.5% | Phase: v2.2", interactive=False)


def run_inference(q, o1, o2, winner):
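    """Query both models, log the comparison for later retraining, and return the strings to display."""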
    evo_answer, evo_reasoning = evo_infer(q, o1, o2)
    gpt_answer = gpt_infer(q, o1, o2)
    context = f"Question: {q}\nOptions: {o1}, {o2}"
    # Persist every comparison (including the optional winner vote) so Evo can be retrained later.
    log_feedback(q, o1, o2, context, evo_answer, gpt_answer, evo_reasoning, winner)
    conv_log = f"❓ {q}\n🅰️ {o1} | 🅱️ {o2}\n🤖 Evo: {evo_answer} ({evo_reasoning})\n🧠 GPT: {gpt_answer}"
    return evo_answer, gpt_answer, conv_log


def clear():
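    """Return blank values for the seven components wired to clear_btn (the vote resets to None)."""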
return "", "", "", None, "", "", "" | |


def export_csv():
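    """Expose the feedback CSV (feedback_log.csv) for download."""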
    # Path of the accumulated feedback CSV; routed to the gr.File output wired below.
    return "feedback_log.csv"


def retrain():
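    """Retrain Evo from the logged feedback, then report status to the UI."""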
    train_evo()
    return "🔁 Evo model reloaded."


with gr.Blocks(theme=gr.themes.Soft(), css="""
body { background-color: #f3f6fb; font-family: 'Segoe UI', sans-serif; }
.gradio-container { max-width: 1024px; margin: auto; }
.gr-box { box-shadow: 0 4px 16px rgba(0,0,0,0.1); border-radius: 12px; padding: 16px; transition: all 0.3s ease-in-out; }
.gr-button { border-radius: 8px; font-weight: 600; transition: all 0.2s ease-in-out; }
.gr-button:hover { transform: scale(1.03); background-color: #e6f2ff; }
.gr-textbox, .gr-radio { border-radius: 8px; }
""") as demo:
    gr.Markdown("""
    <h1 style="font-size: 2.2em;">🧠 EvoRAG – Real-Time Reasoning AI</h1>
    <p><b>Built Different. Learns Live. Evolves from You.</b></p>
    <div style="margin-top: 10px; font-size: 0.9em;">
    <ul>
    <li>🚀 <b>Why Evo?</b></li>
    <li>✔️ Learns from your input – evolves in real time</li>
    <li>✔️ Adaptive architecture (changes #layers, memory, etc.)</li>
    <li>✔️ Tiny model (~13M–28M params) vs GPT-3.5 (175B)</li>
    <li>✔️ Runs on CPU or low-end GPUs</li>
    <li>✔️ Transparent architecture: shows how it thinks</li>
    <li>✔️ Can be deployed, fine-tuned, and evolved per user/domain</li>
    </ul>
    </div>
    """)
    with gr.Row():
        with gr.Column():
            question.render()
            option1.render()
            option2.render()
            choice.render()
        with gr.Column():
            ask_btn = gr.Button("💡 Ask Evo", variant="primary")
            retrain_btn = gr.Button("🔁 Retrain Evo")
            clear_btn = gr.Button("🧹 Clear")
            export_btn = gr.Button("⬇️ Export Feedback CSV")
            evo_stats.render()
    with gr.Row():
        evo_out.render()
        gpt_out.render()
    with gr.Row():
        retrain_status = gr.Textbox(label="🔁 Status", interactive=False)
        # Download target for the exported feedback CSV.
        export_file = gr.File(label="⬇️ Feedback CSV", interactive=False)
    with gr.Accordion("📜 Conversation History", open=True):
        history.render()
    ask_btn.click(fn=run_inference, inputs=[question, option1, option2, choice], outputs=[evo_out, gpt_out, history])
    clear_btn.click(fn=clear, outputs=[question, option1, option2, choice, evo_out, gpt_out, history])
    export_btn.click(fn=export_csv, outputs=[export_file])
    retrain_btn.click(fn=retrain, outputs=[retrain_status])

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=int(os.environ.get("PORT", 7860)))