Spaces:
Sleeping
Sleeping
File size: 3,268 Bytes
1622676 1dec93e 5340d71 1dec93e 1622676 da4e68d 1dec93e 1622676 da4e68d 1dec93e 1622676 1dec93e 3eaea0f 1622676 3eaea0f 1dec93e 3eaea0f 1dec93e 3ec70fc 1dec93e ac8c35d 1dec93e 818883b 1dec93e 1622676 1dec93e 3eaea0f 1dec93e 1622676 3eaea0f c67fe91 1622676 c67fe91 3eaea0f 1dec93e 1622676 1dec93e 3eaea0f 1622676 1dec93e 1622676 3eaea0f 1622676 3eaea0f 1dec93e 1622676 1dec93e 3eaea0f |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 |
import subprocess
import sys

import gradio as gr

from inference import evo_chat_predict, get_gpt_response
from logger import log_feedback
# Global chat history
# NOTE(review): module-level mutable state shared by every request/session —
# fine for a single-user demo, but concurrent users will interleave histories.
chat_history = []
# π§ Handle main chat logic
def chat_fn(user_input, option1, option2, user_vote=None):
    """Answer a two-option question with Evo, compare with GPT, and log feedback.

    Args:
        user_input: The user's question text.
        option1: First candidate answer.
        option2: Second candidate answer.
        user_vote: Optional "Evo"/"GPT" preference (feeds retraining).

    Returns:
        Tuple of (markdown reply for the Evo panel, updated chat history list).
    """
    global chat_history
    # Strip BEFORE validating so whitespace-only entries are rejected instead
    # of slipping through truthiness checks and becoming empty options.
    options = [
        option1.strip() if option1 else "",
        option2.strip() if option2 else "",
    ]
    if not (user_input and user_input.strip()) or not options[0] or not options[1]:
        return "β Please enter your question and both options.", chat_history
    # Evo prediction
    evo_result = evo_chat_predict(chat_history, user_input, options)
    # GPT fallback (background comparison)
    gpt_response = get_gpt_response(user_input)
    # Format response
    evo_msg = (
        f"### π€ Evo\n"
        f"**Answer:** {evo_result['answer']} \n"
        f"**Reasoning:** {evo_result['reasoning']}\n\n"
        f"---\n"
        f"### π§ GPT-3.5\n"
        f"{gpt_response}"
    )
    chat_history.append(f"π€ User: {user_input}")
    chat_history.append(f"π€ Evo: {evo_msg}")
    chat_history.append(f"π§ GPT: {gpt_response}")
    # Logging for Evo retraining
    log_feedback(
        question=user_input,
        option1=option1,
        option2=option2,
        context=evo_result['context_used'],
        evo_output=evo_result['answer'],
        gpt_output=gpt_response,
        evo_reasoning=evo_result['reasoning'],
        user_preference=user_vote
    )
    return evo_msg, chat_history
# π Clear chat state
def clear_fn():
    """Reset the shared conversation and blank every input widget.

    Returns values for: question box, option 1, option 2, vote radio,
    and the chat display — in that order.
    """
    global chat_history
    chat_history = []
    blank = ""
    return blank, blank, blank, None, []
# π Live retrain
def retrain_model():
    """Run the feedback-retraining script in a subprocess and report the outcome.

    Returns:
        A status string (success or a failure message with the script's stderr).
    """
    try:
        # sys.executable runs the same interpreter serving this app, instead of
        # whatever "python" happens to resolve to on PATH.
        result = subprocess.run(
            [sys.executable, "retrain_from_feedback.py"],
            check=True,
            capture_output=True,
            text=True,
        )
        print(result.stdout)  # Log to terminal
        # Original had this literal broken across two source lines (syntax error).
        return "β Evo retrained successfully."
    except subprocess.CalledProcessError as e:
        print("STDOUT:", e.stdout)
        print("STDERR:", e.stderr)
        return f"β Retraining failed:\n{e.stderr}"
    except FileNotFoundError as e:
        # Interpreter or script missing entirely — report instead of crashing the UI.
        print("ERROR:", e)
        return f"β Retraining failed:\n{e}"
# π Gradio UI
# Layout: left column = inputs (question, two options, optional vote, buttons),
# right column = Evo's markdown reply plus the running conversation history.
with gr.Blocks(title="EvoRAG β Real-Time Adaptive Reasoning AI") as demo:
    gr.Markdown("## 𧬠EvoRAG β Real-Time Adaptive Reasoning AI")
    gr.Markdown("Ask Evo a question, give two options. Evo chooses with reasoning. Compare with GPT. Feedback fuels evolution.")
    with gr.Row():
        with gr.Column(scale=4):
            user_input = gr.Textbox(label="Your Question", lines=2)
            option1 = gr.Textbox(label="Option 1")
            option2 = gr.Textbox(label="Option 2")
            user_vote = gr.Radio(["Evo", "GPT"], label="π³οΈ Who gave the better answer?", info="Optional β improves Evo.")
            submit = gr.Button("π§ Ask Evo")
            clear = gr.Button("π Clear")
            retrain = gr.Button("π Retrain Evo from Feedback")
        with gr.Column(scale=6):
            evo_reply = gr.Markdown()
            # NOTE(review): chat_fn returns a list of plain strings; HighlightedText
            # normally expects (text, label) pairs — confirm rendering is as intended.
            chat_display = gr.HighlightedText(label="Conversation History")
    # Wire buttons to handlers: ask, reset, and live retrain.
    submit.click(fn=chat_fn, inputs=[user_input, option1, option2, user_vote],
                 outputs=[evo_reply, chat_display])
    clear.click(fn=clear_fn, outputs=[user_input, option1, option2, user_vote, chat_display])
    retrain.click(fn=retrain_model, outputs=evo_reply)
demo.launch()
|