# NOTE: "Spaces: Sleeping" is Hugging Face Spaces status-banner residue from a
# page scrape, not part of the program — kept here as a comment only.
import os
import subprocess
import sys

import gradio as gr

from inference import evo_chat_predict, get_gpt_response, get_model_config
from logger import log_feedback
chat_history = [] | |
# π Handle chat logic
def chat_fn(user_input, option1, option2, user_vote=None):
    """Answer a two-option question with Evo, compare with GPT-3.5, log feedback.

    Parameters:
        user_input: the question text.
        option1, option2: the two candidate answers Evo must choose between.
        user_vote: optional "Evo"/"GPT" preference from the UI radio (may be None).

    Returns:
        (evo_markdown, chat_history, config_markdown, genome_log_text)
    """
    global chat_history

    # Validate after stripping so whitespace-only fields also count as missing
    # (the original check let "   " through to the model).
    if not (user_input or "").strip() or not (option1 or "").strip() or not (option2 or "").strip():
        return "β Please provide a question and two options.", chat_history, "", ""

    options = [option1.strip(), option2.strip()]
    evo_result = evo_chat_predict(chat_history, user_input, options)
    gpt_response = get_gpt_response(user_input)

    evo_msg = (
        f"### π€ Evo\n"
        f"**Answer:** {evo_result['answer']} \n"
        f"**Reasoning:** {evo_result['reasoning']}\n\n"
        f"---\n"
        f"### π§ GPT-3.5\n"
        f"{gpt_response}"
    )

    chat_history.append(f"π€ User: {user_input}")
    chat_history.append(f"π€ Evo: {evo_result['answer']}")
    chat_history.append(f"π§ GPT: {gpt_response}")

    # Log feedback so retrain_from_feedback.py can learn from this exchange.
    log_feedback(
        question=user_input,
        option1=option1,
        option2=option2,
        context=evo_result["context_used"],
        evo_output=evo_result["answer"],
        gpt_output=gpt_response,
        evo_reasoning=evo_result["reasoning"],
        user_preference=user_vote,
    )

    return evo_msg, chat_history, _format_config(), _latest_genome_entry()


def _format_config():
    """Render the current genome/model architecture as a markdown snippet."""
    config = get_model_config()
    return (
        f"**Layers:** {config['num_layers']} \n"
        f"**Heads:** {config['num_heads']} \n"
        f"**FFN Dim:** {config['ffn_dim']} \n"
        f"**Memory:** {'Enabled' if config['memory_enabled'] else 'Disabled'}"
    )


def _latest_genome_entry():
    """Summarize the newest row of genome_log.csv (header row is skipped).

    Blank lines are ignored, so a trailing newline can no longer make the
    "latest" row an empty string (the original split "" into [""]).
    """
    log_txt = "No genome log yet."
    if os.path.exists("genome_log.csv"):
        with open("genome_log.csv", "r", encoding="utf-8") as f:
            lines = [ln for ln in f.read().splitlines() if ln.strip()]
        if len(lines) > 1:
            last = lines[-1].split(",")
            log_txt = f"𧬠Genome ID: {last[0]} | Accuracy: {last[-1]}"
    return log_txt
# π Clear everything
def clear_fn():
    """Reset the shared conversation transcript and blank out every UI field.

    Returns one value per wired output component:
    (question, option1, option2, vote, history, config panel, genome log).
    """
    global chat_history
    chat_history = []
    blank = ""
    return blank, blank, blank, None, [], blank, blank
# π Retrain
def retrain_model():
    """Run the feedback-driven retraining script and report the outcome.

    Returns a status string for display in the UI; never raises.
    """
    try:
        # sys.executable guarantees the same interpreter running this app,
        # rather than whatever "python" happens to resolve to on PATH
        # (which may be a different version, or missing entirely).
        subprocess.run([sys.executable, "retrain_from_feedback.py"], check=True)
        return "β Evo retrained successfully."
    except Exception as e:
        # Broad catch is deliberate: any failure (missing script, non-zero
        # exit, OS error) becomes a user-visible message instead of a crash.
        return f"β Retraining failed: {str(e)}"
# β¬οΈ Download feedback
def export_feedback():
    """Return the feedback CSV path for download, or None when absent."""
    path = "feedback_log.csv"
    return path if os.path.exists(path) else None
# π Gradio UI
with gr.Blocks(title="EvoRAG β Real-Time Adaptive Reasoning AI") as demo:
    gr.Markdown("## 𧬠EvoRAG β Real-Time Adaptive Reasoning AI")
    gr.Markdown("Ask Evo a question and give two options. Evo chooses, explains, and evolves. Compare with GPT-3.5.")

    with gr.Row():
        with gr.Column(scale=4):
            user_input = gr.Textbox(label="Your Question", lines=2)
            option1 = gr.Textbox(label="Option 1")
            option2 = gr.Textbox(label="Option 2")
            user_vote = gr.Radio(["Evo", "GPT"], label="π³οΈ Who gave the better answer?", info="Optional β helps Evo learn.")
            submit = gr.Button("π§ Ask Evo")
            clear = gr.Button("π Clear")
            retrain = gr.Button("π Retrain Evo from Feedback")
            export = gr.Button("β¬οΈ Export Feedback CSV")
            # Landing spot for the exported CSV. Previously the export button
            # had outputs=[], so the path export_feedback() returned was
            # discarded and the download never appeared in the UI.
            feedback_file = gr.File(label="Feedback CSV")

        with gr.Column(scale=6):
            evo_reply = gr.Markdown()
            # NOTE(review): chat_fn returns a list of plain strings, but
            # HighlightedText expects (text, label) pairs — confirm rendering.
            chat_display = gr.HighlightedText(label="Conversation History")
            model_info = gr.Markdown(label="π§ Evo Architecture")
            genome_log = gr.Markdown(label="π Evolution Log")

    submit.click(
        chat_fn,
        inputs=[user_input, option1, option2, user_vote],
        outputs=[evo_reply, chat_display, model_info, genome_log],
    )
    clear.click(
        clear_fn,
        outputs=[user_input, option1, option2, user_vote, chat_display, model_info, genome_log],
    )
    retrain.click(retrain_model, outputs=evo_reply)
    # Route the returned path into the File component so the user can download it.
    export.click(export_feedback, outputs=feedback_file)

# Launch only when run as a script, so importing this module stays side-effect free.
if __name__ == "__main__":
    demo.launch()