import gradio as gr
from inference import evo_chat_predict, get_gpt_response
from logger import log_feedback
import subprocess
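
# NOTE (assumed helper interfaces, inferred from the call sites below):
#   evo_chat_predict(chat_history, question, options) -> dict with keys
#       'answer', 'reasoning', 'context_used'
#   get_gpt_response(question) -> str
#   log_feedback(**fields) -> None (appends one feedback record used for retraining)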

# Global chat history
chat_history = []


# 🧠 Handle main chat logic
def chat_fn(user_input, option1, option2, user_vote=None):
    global chat_history

    # Validate
    if not user_input or not option1 or not option2:
return "β Please enter your question and both options.", chat_history | |
    options = [option1.strip(), option2.strip()]

    # Evo prediction
    evo_result = evo_chat_predict(chat_history, user_input, options)

    # GPT fallback (background comparison)
    gpt_response = get_gpt_response(user_input)

    # Format response
    evo_msg = (
        f"### 🤖 Evo\n"
        f"**Answer:** {evo_result['answer']}  \n"
        f"**Reasoning:** {evo_result['reasoning']}\n\n"
        f"---\n"
        f"### 🧠 GPT-3.5\n"
        f"{gpt_response}"
    )

    chat_history.append(f"👤 User: {user_input}")
    chat_history.append(f"🤖 Evo: {evo_msg}")
    chat_history.append(f"🧠 GPT: {gpt_response}")

    # Logging for Evo retraining
    log_feedback(
        question=user_input,
        option1=option1,
        option2=option2,
        context=evo_result['context_used'],
        evo_output=evo_result['answer'],
        gpt_output=gpt_response,
        evo_reasoning=evo_result['reasoning'],
        user_preference=user_vote
    )

    # Wrap messages as (text, label) tuples for the HighlightedText display
    return evo_msg, [(msg, None) for msg in chat_history]


# 🔄 Clear chat state
def clear_fn():
    global chat_history
    chat_history = []
    return "", "", "", None, []


# 🔁 Live retrain
def retrain_model():
    try:
        result = subprocess.run(
            ["python", "retrain_from_feedback.py"],
            check=True,
            capture_output=True,
            text=True
        )
        print(result.stdout)  # Log to terminal
        return "✅ Evo retrained successfully."
    except subprocess.CalledProcessError as e:
        print("STDOUT:", e.stdout)
        print("STDERR:", e.stderr)
        return f"❌ Retraining failed:\n{e.stderr}"


# 🚀 Gradio UI
with gr.Blocks(title="EvoRAG – Real-Time Adaptive Reasoning AI") as demo:
    gr.Markdown("## 🧬 EvoRAG – Real-Time Adaptive Reasoning AI")
gr.Markdown("Ask Evo a question, give two options. Evo chooses with reasoning. Compare with GPT. Feedback fuels evolution.") | |

    with gr.Row():
        with gr.Column(scale=4):
            user_input = gr.Textbox(label="Your Question", lines=2)
            option1 = gr.Textbox(label="Option 1")
            option2 = gr.Textbox(label="Option 2")
            user_vote = gr.Radio(["Evo", "GPT"], label="🗳️ Who gave the better answer?", info="Optional – improves Evo.")
            submit = gr.Button("🧠 Ask Evo")
            clear = gr.Button("🔄 Clear")
            retrain = gr.Button("🔁 Retrain Evo from Feedback")
        with gr.Column(scale=6):
            evo_reply = gr.Markdown()
            chat_display = gr.HighlightedText(label="Conversation History")

    submit.click(fn=chat_fn, inputs=[user_input, option1, option2, user_vote],
                 outputs=[evo_reply, chat_display])
    clear.click(fn=clear_fn, outputs=[user_input, option1, option2, user_vote, chat_display])
    retrain.click(fn=retrain_model, outputs=evo_reply)

demo.launch()
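
# Running this file directly (e.g. `python app.py`) starts the Gradio server;
# on Hugging Face Spaces the entry file is executed automatically on startup.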