# app.py (EvoPlatformV3)
import gradio as gr
import pandas as pd
from inference import (
    evo_chat_predict,
    get_gpt_response,
    get_model_config,
    get_system_stats,
    retrain_from_feedback_csv,
    load_model,
)
import os
import csv

FEEDBACK_LOG = "feedback_log.csv"
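
# Expected behaviour of the inference helpers, inferred from how they are used
# in this file (the inference module is the source of truth, not these notes):
#   evo_chat_predict(history, question, options) -> dict with keys
#       "answer", "confidence", "reasoning", "context_used"
#   get_gpt_response(question)    -> GPT-3.5 baseline answer for comparison
#   get_model_config()            -> dict with "num_layers", "num_heads",
#       "ffn_dim", "memory_enabled", "accuracy"
#   get_system_stats()            -> dict with "device", "cpu_usage_percent",
#       "memory_used_gb", "memory_total_gb", "gpu_name",
#       "gpu_memory_used_gb", "gpu_memory_total_gb"
#   retrain_from_feedback_csv()   -> retrains from FEEDBACK_LOG (uses the
#       "vote" column) and returns a status message
#   load_model(force_reload=True) -> reloads the current model weights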

# 🧠 Ask Evo
def ask_evo(question, option1, option2, history, user_vote):
    options = [option1.strip(), option2.strip()]
    result = evo_chat_predict(history, question.strip(), options)

    # Create feedback_log.csv with headers if it doesn't exist
    if not os.path.exists(FEEDBACK_LOG):
        with open(FEEDBACK_LOG, "w", encoding="utf-8", newline="") as f:
            writer = csv.writer(f)
            writer.writerow(["question", "option1", "option2", "evo_answer", "confidence", "reasoning", "context", "vote"])

    row = {
        "question": question.strip(),
        "option1": option1.strip(),
        "option2": option2.strip(),
        "evo_answer": result["answer"],
        "confidence": result["confidence"],
        "reasoning": result["reasoning"],
        "context": result["context_used"],
        "vote": user_vote or "",  # ✅ Must be named 'vote' for retraining
    }

    # Log feedback (column order matches the header written above)
    with open(FEEDBACK_LOG, "a", newline="", encoding="utf-8") as f:
        writer = csv.DictWriter(f, fieldnames=row.keys())
        writer.writerow(row)

    # Prepare outputs
    evo_output = f"Answer: {row['evo_answer']} (Confidence: {row['confidence']})\n\nReasoning: {row['reasoning']}\n\nContext used: {row['context']}"
    gpt_output = get_gpt_response(question)
    history.append(row)

    stats = get_model_config()
    sys_stats = get_system_stats()
    stats_text = f"Layers: {stats.get('num_layers', '?')} | Heads: {stats.get('num_heads', '?')} | FFN: {stats.get('ffn_dim', '?')} | Memory: {stats.get('memory_enabled', '?')} | Accuracy: {stats.get('accuracy', '?')}"
    sys_text = f"Device: {sys_stats['device']} | CPU: {sys_stats['cpu_usage_percent']}% | RAM: {sys_stats['memory_used_gb']}GB / {sys_stats['memory_total_gb']}GB | GPU: {sys_stats['gpu_name']} ({sys_stats['gpu_memory_used_gb']}GB / {sys_stats['gpu_memory_total_gb']}GB)"

    return evo_output, gpt_output, stats_text, sys_text, history

# 🔁 Manual retrain button
def retrain_evo():
    msg = retrain_from_feedback_csv()
    load_model(force_reload=True)  # pick up the freshly retrained weights
    return msg

# 📤 Export feedback
def export_feedback():
    if not os.path.exists(FEEDBACK_LOG):
        return pd.DataFrame()
    return pd.read_csv(FEEDBACK_LOG)

# 🧹 Clear
def clear_all():
    # Must return one value per output wired to clear_btn:
    # question, option1, option2, evo_ans, gpt_ans, stats, system, history, evo_radio
    return "", "", "", "", "", "", "", [], None

# 🖼️ UI
with gr.Blocks(title="🧠 Evo – Reasoning AI") as demo:
    gr.Markdown(
        "## Why Evo? 🚀\n\n"
        "Evo is not just another AI. It evolves. It learns from you. "
        "It adapts its architecture live based on feedback.\n\n"
        "No retraining labs, no frozen weights. This is live reasoning meets evolution. "
        "Built to outperform, built to survive."
    )

    with gr.Row():
        question = gr.Textbox(label="🧠 Your Question", placeholder="e.g. Why is the sky blue?")
    with gr.Row():
        option1 = gr.Textbox(label="❌ Option 1")
        option2 = gr.Textbox(label="❌ Option 2")
    with gr.Row():
        with gr.Column():
            evo_ans = gr.Textbox(label="🧠 Evo", lines=6)
        with gr.Column():
            gpt_ans = gr.Textbox(label="🤖 GPT-3.5", lines=6)
    with gr.Row():
        stats = gr.Textbox(label="📊 Evo Stats")
        system = gr.Textbox(label="🔵 Status")

    evo_radio = gr.Radio(["Evo", "GPT"], label="🧠 Who was better?", info="Optional – fuels evolution")
    history = gr.State([])

    with gr.Row():
        ask_btn = gr.Button("⚡ Ask Evo")
        retrain_btn = gr.Button("🔁 Retrain Evo")
        clear_btn = gr.Button("🧹 Clear")
        export_btn = gr.Button("📤 Export Feedback CSV")

    export_table = gr.Dataframe(label="📜 Conversation History")

    ask_btn.click(fn=ask_evo, inputs=[question, option1, option2, history, evo_radio], outputs=[evo_ans, gpt_ans, stats, system, history])
    retrain_btn.click(fn=retrain_evo, inputs=[], outputs=[stats])
    clear_btn.click(fn=clear_all, inputs=[], outputs=[question, option1, option2, evo_ans, gpt_ans, stats, system, history, evo_radio])
    export_btn.click(fn=export_feedback, inputs=[], outputs=[export_table])


if __name__ == "__main__":
    demo.launch()
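    # Note: when running this file locally, demo.launch(share=True) can be used
    # to get a temporary public link; on Hugging Face Spaces the default
    # launch() above is sufficient.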