# EvoPlatformV3 / app.py
import gradio as gr
import os
from inference import evo_infer, gpt_infer
from retrain_from_feedback import train_evo
from logger import log_feedback
question = gr.Textbox(label="🧠 Your Question", placeholder="e.g. What should you do if there's a fire?", lines=1)
option1 = gr.Textbox(label="πŸ…°οΈ Option 1", placeholder="Enter the first option")
option2 = gr.Textbox(label="πŸ…±οΈ Option 2", placeholder="Enter the second option")
choice = gr.Radio(["Evo", "GPT"], label="πŸ—³οΈ Who was better?", info="Optional – fuels evolution", type="value")
evo_out = gr.Textbox(label="πŸ€– Evo", interactive=False)
gpt_out = gr.Textbox(label="🧠 GPT-3.5", interactive=False)
history = gr.Textbox(label="πŸ“œ Conversation History", interactive=False)
evo_stats = gr.Textbox(label="πŸ“Š Evo Stats", value="Layers: 6 | Heads: 8 | FFN: 1024 | Memory: βœ… | Accuracy: ~64.5% | Phase: v2.2", interactive=False)
def run_inference(q, o1, o2, winner):
evo_answer, evo_reasoning = evo_infer(q, o1, o2)
gpt_answer = gpt_infer(q, o1, o2)
context = f"Question: {q}\nOptions: {o1}, {o2}"
log_feedback(q, o1, o2, context, evo_answer, gpt_answer, evo_reasoning, winner)
conv_log = f"πŸ‘€ {q}\nπŸ…°οΈ {o1} | πŸ…±οΈ {o2}\nπŸ€– Evo: {evo_answer} ({evo_reasoning})\n🧠 GPT: {gpt_answer}"
return evo_answer, gpt_answer, conv_log
def clear():
return "", "", "", None, "", "", ""
def export_csv():
return gr.File("feedback_log.csv")
def retrain():
train_evo()
return "πŸ” Evo model reloaded."
with gr.Blocks(theme=gr.themes.Soft(), css="""
body { background-color: #f3f6fb; font-family: 'Segoe UI', sans-serif; }
.gradio-container { max-width: 1024px; margin: auto; }
.gr-box { box-shadow: 0 4px 16px rgba(0,0,0,0.1); border-radius: 12px; padding: 16px; transition: all 0.3s ease-in-out; }
.gr-button { border-radius: 8px; font-weight: 600; transition: all 0.2s ease-in-out; }
.gr-button:hover { transform: scale(1.03); background-color: #e6f2ff; }
.gr-textbox, .gr-radio { border-radius: 8px; }
""") as demo:
gr.Markdown("""
<h1 style="font-size: 2.2em;">🧠 EvoRAG – Real-Time Reasoning AI</h1>
<p><b>Built Different. Learns Live. Evolves from You.</b></p>
<div style="margin-top: 10px; font-size: 0.9em;">
<ul>
<li>πŸš€ <b>Why Evo?</b></li>
<li>βœ”οΈ Learns from your input β€” evolves in real time</li>
<li>βœ”οΈ Adaptive architecture (changes #layers, memory, etc.)</li>
<li>βœ”οΈ Tiny model (~13M–28M params) vs GPT-3.5 (175B)</li>
<li>βœ”οΈ Runs on CPU or low-end GPUs</li>
<li>βœ”οΈ Transparent architecture: shows how it thinks</li>
<li>βœ”οΈ Can be deployed, fine-tuned, and evolved per user/domain</li>
</ul>
</div>
""")
with gr.Row():
with gr.Column():
question.render()
option1.render()
option2.render()
choice.render()
with gr.Column():
ask_btn = gr.Button("πŸ’‘ Ask Evo", variant="primary")
retrain_btn = gr.Button("πŸ” Retrain Evo")
clear_btn = gr.Button("🧹 Clear")
export_btn = gr.Button("⬇️ Export Feedback CSV")
evo_stats.render()
with gr.Row():
evo_out.render()
gpt_out.render()
with gr.Row():
retrain_status = gr.Textbox(label="πŸ” Status", interactive=False)
with gr.Accordion("πŸ“š Conversation History", open=True):
history.render()
ask_btn.click(fn=run_inference, inputs=[question, option1, option2, choice], outputs=[evo_out, gpt_out, history])
clear_btn.click(fn=clear, outputs=[question, option1, option2, choice, evo_out, gpt_out, history])
export_btn.click(fn=export_csv, outputs=[])
retrain_btn.click(fn=retrain, outputs=[retrain_status])
if __name__ == "__main__":
demo.launch(server_name="0.0.0.0", server_port=int(os.environ.get("PORT", 7860)))