EvoPlatformV3 / app.py
HemanM's picture
Update app.py
b0ba5ba verified
raw
history blame
4.07 kB
import gradio as gr
from inference import evo_chat_predict, get_gpt_response, get_model_config
from logger import log_feedback
import subprocess
import os
# Module-level conversation history shared across chat turns.
# Mutated in place by chat_fn and reset by clear_fn.
chat_history = []
# πŸ” Handle chat logic
def chat_fn(user_input, option1, option2, user_vote=None):
    """Run one Evo-vs-GPT round and report model state.

    Args:
        user_input: The user's question text.
        option1, option2: The two candidate answers Evo chooses between.
        user_vote: Optional "Evo"/"GPT" preference, forwarded to the feedback log.

    Returns:
        Tuple of (combined markdown reply, chat history list,
        model-config markdown, latest genome-log summary string).
    """
    global chat_history

    # Guard clause: all three text inputs are required.
    if not user_input or not option1 or not option2:
        return "❗ Please provide a question and two options.", chat_history, "", ""

    options = [option1.strip(), option2.strip()]
    evo_result = evo_chat_predict(chat_history, user_input, options)
    gpt_response = get_gpt_response(user_input)

    # Side-by-side markdown: Evo's pick + reasoning above, GPT's answer below.
    evo_msg = (
        f"### πŸ€– Evo\n"
        f"**Answer:** {evo_result['answer']} \n"
        f"**Reasoning:** {evo_result['reasoning']}\n\n"
        f"---\n"
        f"### 🧠 GPT-3.5\n"
        f"{gpt_response}"
    )

    chat_history.append(f"πŸ‘€ User: {user_input}")
    chat_history.append(f"πŸ€– Evo: {evo_result['answer']}")
    chat_history.append(f"🧠 GPT: {gpt_response}")

    # Log feedback for later retraining.
    log_feedback(
        question=user_input,
        option1=option1,
        option2=option2,
        context=evo_result["context_used"],
        evo_output=evo_result["answer"],
        gpt_output=gpt_response,
        evo_reasoning=evo_result["reasoning"],
        user_preference=user_vote
    )

    # Show genome config.
    config = get_model_config()
    config_str = (
        f"**Layers:** {config['num_layers']} \n"
        f"**Heads:** {config['num_heads']} \n"
        f"**FFN Dim:** {config['ffn_dim']} \n"
        f"**Memory:** {'Enabled' if config['memory_enabled'] else 'Disabled'}"
    )

    # Load latest genome score from the last data row of the log, if any.
    log_txt = "No genome log yet."
    if os.path.exists("genome_log.csv"):
        import csv  # local import: only needed when the log file exists
        with open("genome_log.csv", "r", encoding="utf-8") as f:
            # csv.reader handles quoted fields containing commas, which a
            # naive str.split(",") on the raw line would break on.
            rows = [row for row in csv.reader(f) if row]
        if len(rows) > 1:
            last = rows[-1]
            log_txt = f"🧬 Genome ID: {last[0]} | Accuracy: {last[-1]}"

    return evo_msg, chat_history, config_str, log_txt
# πŸ” Clear everything
def clear_fn():
global chat_history
chat_history = []
return "", "", "", None, [], "", ""
# πŸ“ˆ Retrain
def retrain_model():
    """Retrain Evo from logged feedback by running the retraining script.

    Returns:
        A status string: success confirmation, or a failure message that
        includes the script's stderr (when available) for diagnosability.
    """
    import sys  # local import: resolve the interpreter currently running the app
    try:
        # Use the running interpreter instead of whatever "python" is on PATH,
        # so the script executes in the same environment as this app.
        subprocess.run(
            [sys.executable, "retrain_from_feedback.py"],
            check=True,
            capture_output=True,
            text=True,
        )
        return "βœ… Evo retrained successfully."
    except subprocess.CalledProcessError as e:
        # Surface the script's own error output so failures are debuggable
        # from the UI instead of an opaque "exit status 1".
        detail = (e.stderr or "").strip() or str(e)
        return f"❌ Retraining failed: {detail}"
    except Exception as e:
        return f"❌ Retraining failed: {str(e)}"
# ⬇️ Download feedback
def export_feedback():
    """Return the feedback CSV path for download, or None when no log exists yet."""
    path = "feedback_log.csv"
    return path if os.path.exists(path) else None
# 🌐 Gradio UI
# 🌐 Gradio UI: input column (question + two options + controls) on the left,
# Evo/GPT responses and model diagnostics on the right.
with gr.Blocks(title="EvoRAG – Real-Time Adaptive Reasoning AI") as demo:
    gr.Markdown("## 🧬 EvoRAG – Real-Time Adaptive Reasoning AI")
    gr.Markdown("Ask Evo a question and give two options. Evo chooses, explains, and evolves. Compare with GPT-3.5.")
    with gr.Row():
        # Left column: user inputs and action buttons.
        with gr.Column(scale=4):
            user_input = gr.Textbox(label="Your Question", lines=2)
            option1 = gr.Textbox(label="Option 1")
            option2 = gr.Textbox(label="Option 2")
            user_vote = gr.Radio(["Evo", "GPT"], label="πŸ—³οΈ Who gave the better answer?", info="Optional – helps Evo learn.")
            submit = gr.Button("🧠 Ask Evo")
            clear = gr.Button("πŸ” Clear")
            retrain = gr.Button("πŸ“ˆ Retrain Evo from Feedback")
            export = gr.Button("⬇️ Export Feedback CSV")
        # Right column: combined Evo/GPT reply, running history, and model info.
        with gr.Column(scale=6):
            evo_reply = gr.Markdown()
            chat_display = gr.HighlightedText(label="Conversation History")
            model_info = gr.Markdown(label="🧠 Evo Architecture")
            genome_log = gr.Markdown(label="πŸ“Š Evolution Log")

    # Event wiring: chat_fn drives the main interaction; clear_fn resets every
    # component it lists as an output.
    submit.click(chat_fn, inputs=[user_input, option1, option2, user_vote],
                 outputs=[evo_reply, chat_display, model_info, genome_log])
    clear.click(clear_fn, outputs=[user_input, option1, option2, user_vote, chat_display, model_info, genome_log])
    retrain.click(retrain_model, outputs=evo_reply)
    # NOTE(review): export_feedback returns a file path, but outputs=[] discards
    # it — presumably this was meant to feed a gr.File download component; confirm.
    export.click(export_feedback, outputs=[])

demo.launch()