# NOTE: removed Hugging Face Spaces file-viewer scrape residue (page chrome,
# git blame hashes, and a line-number gutter) that was not part of the source.
import gradio as gr
import os
from inference import evo_infer, gpt_infer
from retrain_from_feedback import train_evo
from logger import log_feedback
# --- UI components (constructed here, rendered inside the Blocks layout below) ---
question = gr.Textbox(label="π§ Your Question", placeholder="e.g. What should you do if there's a fire?", lines=1)
# BUGFIX: the option labels' emoji were mojibake that split these string
# literals across two physical lines (a syntax error); rejoined as 🅰️ / 🅱️.
option1 = gr.Textbox(label="🅰️ Option 1", placeholder="Enter the first option")
option2 = gr.Textbox(label="🅱️ Option 2", placeholder="Enter the second option")
choice = gr.Radio(["Evo", "GPT"], label="π³οΈ Who was better?", info="Optional β fuels evolution", type="value")
evo_out = gr.Textbox(label="π€ Evo", interactive=False)
gpt_out = gr.Textbox(label="π§ GPT-3.5", interactive=False)
history = gr.Textbox(label="π Conversation History", interactive=False)
# BUGFIX: same mojibake line-split in the stats value string; rejoined (✅).
evo_stats = gr.Textbox(
    label="π Evo Stats",
    value="Layers: 6 | Heads: 8 | FFN: 1024 | Memory: ✅ | Accuracy: ~64.5% | Phase: v2.2",
    interactive=False,
)
def run_inference(q, o1, o2, winner):
    """Run Evo and GPT on one question, log the round, and build a history line.

    Args:
        q: The user's question text.
        o1, o2: The two candidate answer options.
        winner: Optional "Evo"/"GPT" vote from the radio control (may be None).

    Returns:
        Tuple of (evo_answer, gpt_answer, conversation_log) matching the
        ``outputs=[evo_out, gpt_out, history]`` wiring of the Ask button.
    """
    evo_answer, evo_reasoning = evo_infer(q, o1, o2)
    gpt_answer = gpt_infer(q, o1, o2)
    context = f"Question: {q}\nOptions: {o1}, {o2}"
    # Persist the full round (including the optional human vote) for retraining.
    log_feedback(q, o1, o2, context, evo_answer, gpt_answer, evo_reasoning, winner)
    # BUGFIX: the option-marker emoji were mojibake that split this f-string
    # across physical lines (a syntax error); rejoined with 🅰️ / 🅱️ markers.
    conv_log = f"π€ {q}\n🅰️ {o1} | 🅱️ {o2}\nπ€ Evo: {evo_answer} ({evo_reasoning})\nπ§ GPT: {gpt_answer}"
    return evo_answer, gpt_answer, conv_log
def clear():
    """Reset every widget to its empty state.

    Order matches the ``clear_btn.click`` outputs list:
    question, option1, option2, choice, evo_out, gpt_out, history.
    """
    blank = ""
    return blank, blank, blank, None, blank, blank, blank
def export_csv():
    """Expose the accumulated feedback log as a downloadable file component."""
    log_path = "feedback_log.csv"
    return gr.File(log_path)
def retrain():
    """Run a retraining pass over the logged feedback, then report completion."""
    status = "π Evo model reloaded."
    train_evo()
    return status
# Page layout and event wiring. Components defined above are .render()ed here.
with gr.Blocks(theme=gr.themes.Soft(), css="""
body { background-color: #f3f6fb; font-family: 'Segoe UI', sans-serif; }
.gradio-container { max-width: 1024px; margin: auto; }
.gr-box { box-shadow: 0 4px 16px rgba(0,0,0,0.1); border-radius: 12px; padding: 16px; transition: all 0.3s ease-in-out; }
.gr-button { border-radius: 8px; font-weight: 600; transition: all 0.2s ease-in-out; }
.gr-button:hover { transform: scale(1.03); background-color: #e6f2ff; }
.gr-textbox, .gr-radio { border-radius: 8px; }
""") as demo:
    # Header / pitch copy.
    gr.Markdown("""
<h1 style="font-size: 2.2em;">π§ EvoRAG β Real-Time Reasoning AI</h1>
<p><b>Built Different. Learns Live. Evolves from You.</b></p>
<div style="margin-top: 10px; font-size: 0.9em;">
<ul>
<li>π <b>Why Evo?</b></li>
<li>βοΈ Learns from your input β evolves in real time</li>
<li>βοΈ Adaptive architecture (changes #layers, memory, etc.)</li>
<li>βοΈ Tiny model (~13Mβ28M params) vs GPT-3.5 (175B)</li>
<li>βοΈ Runs on CPU or low-end GPUs</li>
<li>βοΈ Transparent architecture: shows how it thinks</li>
<li>βοΈ Can be deployed, fine-tuned, and evolved per user/domain</li>
</ul>
</div>
""")
    # Inputs (question, options, optional vote) beside the action buttons.
    with gr.Row():
        with gr.Column():
            question.render()
            option1.render()
            option2.render()
            choice.render()
        with gr.Column():
            ask_btn = gr.Button("π‘ Ask Evo", variant="primary")
            retrain_btn = gr.Button("π Retrain Evo")
            clear_btn = gr.Button("π§Ή Clear")
            export_btn = gr.Button("β¬οΈ Export Feedback CSV")
            evo_stats.render()
    # Side-by-side model answers.
    with gr.Row():
        evo_out.render()
        gpt_out.render()
    with gr.Row():
        retrain_status = gr.Textbox(label="π Status", interactive=False)
        # BUGFIX: export was wired with outputs=[], which discarded the
        # gr.File returned by export_csv — the download never appeared.
        # Give the export a visible file target.
        export_file = gr.File(label="Feedback CSV", interactive=False)
    with gr.Accordion("π Conversation History", open=True):
        history.render()
    # Event wiring.
    ask_btn.click(fn=run_inference, inputs=[question, option1, option2, choice], outputs=[evo_out, gpt_out, history])
    clear_btn.click(fn=clear, outputs=[question, option1, option2, choice, evo_out, gpt_out, history])
    export_btn.click(fn=export_csv, outputs=[export_file])
    retrain_btn.click(fn=retrain, outputs=[retrain_status])
if __name__ == "__main__":
    # Bind on all interfaces; honor the platform-supplied PORT (default 7860).
    port = int(os.environ.get("PORT", 7860))
    demo.launch(server_name="0.0.0.0", server_port=port)