Spaces:
Sleeping
Sleeping
File size: 4,073 Bytes
1622676 b0ba5ba 1dec93e 5340d71 b0ba5ba 5340d71 1622676 da4e68d b0ba5ba 1dec93e 1622676 da4e68d 1622676 b0ba5ba 3eaea0f 1622676 1dec93e 3ec70fc ac8c35d b0ba5ba ac8c35d 1dec93e b0ba5ba 1dec93e b0ba5ba 1dec93e b0ba5ba 1dec93e b0ba5ba 1dec93e b0ba5ba 1dec93e b0ba5ba 1622676 b0ba5ba 3eaea0f b0ba5ba 1622676 3eaea0f b0ba5ba 1622676 b0ba5ba c67fe91 b0ba5ba 3eaea0f 1dec93e 1622676 1dec93e b0ba5ba 3eaea0f 1622676 b0ba5ba 1622676 b0ba5ba 3eaea0f 1622676 b0ba5ba 3eaea0f |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 |
import gradio as gr
from inference import evo_chat_predict, get_gpt_response, get_model_config
from logger import log_feedback
import subprocess
import os
# Shared, module-level conversation history; rebound/appended by chat_fn and
# clear_fn via `global`. Grows for the lifetime of the process.
chat_history = []
# Handle one chat round: Evo + GPT-3.5 answers, feedback logging, genome info.
def chat_fn(user_input, option1, option2, user_vote=None):
    """Answer *user_input* by letting Evo choose between two options.

    Runs Evo's prediction alongside a GPT-3.5 response, appends the exchange
    to the shared module-level ``chat_history``, logs the round for later
    retraining, and gathers the current genome configuration and latest
    genome score for display.

    Args:
        user_input: The question text.
        option1: First candidate answer.
        option2: Second candidate answer.
        user_vote: Optional "Evo"/"GPT" preference from the user (logged only).

    Returns:
        Tuple of (evo_msg, chat_history, config_str, log_txt):
        markdown-formatted reply, the running history list, a model
        architecture summary, and the latest genome log line.
    """
    global chat_history
    # Strip before validating so whitespace-only fields are rejected too
    # (the original truthiness check let "   " through).
    options = [(option1 or "").strip(), (option2 or "").strip()]
    if not (user_input or "").strip() or not all(options):
        return "β Please provide a question and two options.", chat_history, "", ""
    evo_result = evo_chat_predict(chat_history, user_input, options)
    gpt_response = get_gpt_response(user_input)
    # Side-by-side markdown: Evo's answer + reasoning above, GPT below.
    evo_msg = (
        f"### π€ Evo\n"
        f"**Answer:** {evo_result['answer']} \n"
        f"**Reasoning:** {evo_result['reasoning']}\n\n"
        f"---\n"
        f"### π§ GPT-3.5\n"
        f"{gpt_response}"
    )
    chat_history.append(f"π€ User: {user_input}")
    chat_history.append(f"π€ Evo: {evo_result['answer']}")
    chat_history.append(f"π§ GPT: {gpt_response}")
    # Log feedback so the retrain job can learn from this round.
    log_feedback(
        question=user_input,
        option1=option1,
        option2=option2,
        context=evo_result["context_used"],
        evo_output=evo_result["answer"],
        gpt_output=gpt_response,
        evo_reasoning=evo_result["reasoning"],
        user_preference=user_vote
    )
    # Show genome (architecture) config of the current Evo model.
    config = get_model_config()
    config_str = (
        f"**Layers:** {config['num_layers']} \n"
        f"**Heads:** {config['num_heads']} \n"
        f"**FFN Dim:** {config['ffn_dim']} \n"
        f"**Memory:** {'Enabled' if config['memory_enabled'] else 'Disabled'}"
    )
    # Load latest genome score from the CSV log (first line is the header).
    log_txt = "No genome log yet."
    if os.path.exists("genome_log.csv"):
        with open("genome_log.csv", "r", encoding="utf-8") as f:
            # Skip blank lines so a trailing newline can't yield an empty "last row".
            lines = [line for line in f if line.strip()]
        if len(lines) > 1:
            # assumes CSV columns: genome id first, accuracy last — TODO confirm
            last = lines[-1].strip().split(",")
            log_txt = f"𧬠Genome ID: {last[0]} | Accuracy: {last[-1]}"
    return evo_msg, chat_history, config_str, log_txt
# Reset button handler: wipe the shared history and blank every output widget.
def clear_fn():
    """Rebind the shared conversation history to empty and return blank
    values for (question, option1, option2, vote, history, model info,
    genome log)."""
    global chat_history
    chat_history = []
    blanks = ("", "", "", None, [], "", "")
    return blanks
# Retrain button handler: kick off the feedback-driven retraining script.
def retrain_model():
    """Run retrain_from_feedback.py in a subprocess and return a status string.

    Returns a success message, or a failure message containing the exception
    text if the script is missing, exits non-zero, or cannot be launched.
    """
    try:
        # List-form argv (shell=False) avoids shell injection; check=True
        # raises CalledProcessError on a non-zero exit so we report it below.
        subprocess.run(["python", "retrain_from_feedback.py"], check=True)
        # NOTE: original source had this literal broken across two lines
        # (an unterminated string); rejoined here.
        return "β Evo retrained successfully."
    except Exception as e:  # broad on purpose: any failure becomes a UI message
        return f"β Retraining failed: {str(e)}"
# Export button handler: hand the feedback CSV path to Gradio for download.
def export_feedback():
    """Return the feedback log's path if it exists on disk, else None."""
    path = "feedback_log.csv"
    return path if os.path.exists(path) else None
# Gradio UI: two-column layout — inputs and action buttons on the left,
# Evo/GPT output and model diagnostics on the right.
with gr.Blocks(title="EvoRAG β Real-Time Adaptive Reasoning AI") as demo:
    gr.Markdown("## 𧬠EvoRAG β Real-Time Adaptive Reasoning AI")
    gr.Markdown("Ask Evo a question and give two options. Evo chooses, explains, and evolves. Compare with GPT-3.5.")
    with gr.Row():
        with gr.Column(scale=4):
            # Left column: question, the two candidate answers, an optional
            # preference vote, and the four action buttons.
            user_input = gr.Textbox(label="Your Question", lines=2)
            option1 = gr.Textbox(label="Option 1")
            option2 = gr.Textbox(label="Option 2")
            user_vote = gr.Radio(["Evo", "GPT"], label="π³οΈ Who gave the better answer?", info="Optional β helps Evo learn.")
            submit = gr.Button("π§ Ask Evo")
            clear = gr.Button("π Clear")
            retrain = gr.Button("π Retrain Evo from Feedback")
            export = gr.Button("β¬οΈ Export Feedback CSV")
        with gr.Column(scale=6):
            # Right column: formatted reply, running conversation history,
            # current genome architecture, and latest evolution-log line.
            evo_reply = gr.Markdown()
            chat_display = gr.HighlightedText(label="Conversation History")
            model_info = gr.Markdown(label="π§ Evo Architecture")
            genome_log = gr.Markdown(label="π Evolution Log")
    # Wire the callbacks. chat_fn returns four values matching these outputs.
    submit.click(chat_fn, inputs=[user_input, option1, option2, user_vote],
                 outputs=[evo_reply, chat_display, model_info, genome_log])
    # clear_fn returns seven blanks matching these seven components in order.
    clear.click(clear_fn, outputs=[user_input, option1, option2, user_vote, chat_display, model_info, genome_log])
    retrain.click(retrain_model, outputs=evo_reply)
    # NOTE(review): export_feedback returns a file path but outputs=[] gives it
    # nowhere to go — presumably a gr.File component was intended; confirm the
    # CSV download actually works in the UI.
    export.click(export_feedback, outputs=[])
demo.launch()
|