File size: 2,918 Bytes
1622676
1dec93e
 
5340d71
 
1dec93e
1622676
da4e68d
1dec93e
 
1622676
da4e68d
1dec93e
1622676
1dec93e
3eaea0f
1622676
3eaea0f
1dec93e
 
3eaea0f
1dec93e
 
3ec70fc
1dec93e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1622676
 
 
1dec93e
3eaea0f
1dec93e
1622676
3eaea0f
1622676
 
3eaea0f
3ec70fc
3eaea0f
1dec93e
1622676
1dec93e
 
3eaea0f
 
1622676
 
 
 
1dec93e
1622676
 
 
3eaea0f
1622676
 
 
3eaea0f
1dec93e
1622676
1dec93e
 
3eaea0f
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
import subprocess
import sys

import gradio as gr

from inference import evo_chat_predict, get_gpt_response
from logger import log_feedback

# Conversation transcript shared across calls (module-level UI session state)
chat_history = []

# 🧠 Handle main chat logic
def chat_fn(user_input, option1, option2, user_vote=None):
    """Answer *user_input* by having Evo choose between two options.

    Parameters:
        user_input: the user's question text.
        option1 / option2: the two candidate answers Evo must pick between.
        user_vote: optional "Evo"/"GPT" preference, logged for retraining.

    Returns:
        (evo_msg, chat_history) — the Markdown-formatted Evo reply and the
        running transcript list (also mutated in place on success).
    """
    global chat_history

    # Normalize BEFORE validating: the original stripped only after the
    # check, so whitespace-only fields slipped past validation.
    user_input = (user_input or "").strip()
    options = [(option1 or "").strip(), (option2 or "").strip()]

    # Validate — all three fields must be non-empty after stripping.
    if not user_input or not all(options):
        return "❗ Please enter your question and both options.", chat_history

    # Evo prediction
    evo_result = evo_chat_predict(chat_history, user_input, options)

    # GPT fallback (background comparison)
    gpt_response = get_gpt_response(user_input)

    # Format response
    evo_msg = f"**Answer:** {evo_result['answer']}  \n**Reasoning:** {evo_result['reasoning']}"
    chat_history.append(f"πŸ‘€ User: {user_input}")
    chat_history.append(f"πŸ€– Evo: {evo_msg}")
    chat_history.append(f"🧠 GPT: {gpt_response}")

    # Logging for Evo retraining — log the stripped options so the record
    # matches exactly what Evo scored.
    log_feedback(
        question=user_input,
        option1=options[0],
        option2=options[1],
        context=evo_result['context'],
        evo_output=evo_result['answer'],
        gpt_output=gpt_response,
        evo_reasoning=evo_result['reasoning'],
        user_preference=user_vote
    )

    return evo_msg, chat_history

# πŸ” Clear chat state
def clear_fn():
    global chat_history
    chat_history = []
    return "", "", "", None, []

# πŸ“ˆ Live retrain
def retrain_model():
    """Run the feedback-retraining script and report the outcome as a string.

    Returns:
        A status message suitable for the UI: success marker, or a failure
        marker with the script's stderr (or the exception) appended.
    """
    try:
        # sys.executable guarantees the same interpreter running this app
        # is used — a bare "python" may be missing or point elsewhere
        # (e.g. inside a venv or on Windows).
        completed = subprocess.run(
            [sys.executable, "retrain_from_feedback.py"],
            check=True,
            capture_output=True,
            text=True,
        )
        return "βœ… Evo retrained successfully."
    except subprocess.CalledProcessError as e:
        # Surface the script's own stderr so failures are diagnosable
        # from the UI instead of just the exit-code repr.
        detail = (e.stderr or "").strip() or str(e)
        return f"❌ Retraining failed: {detail}"
    except Exception as e:
        return f"❌ Retraining failed: {str(e)}"

# 🌐 Gradio UI
# Layout: inputs and controls in the left column, Evo's reply plus the
# running transcript in the right column.
with gr.Blocks(title="EvoRAG – Real-Time Adaptive Reasoning AI") as demo:
    gr.Markdown("## 🧬 EvoRAG – Real-Time Adaptive Reasoning AI")
    gr.Markdown("Ask Evo a question, give two options. Evo chooses with reasoning. Compare with GPT. Feedback fuels evolution.")

    with gr.Row():
        # Left column: question, the two candidate answers, an optional
        # Evo-vs-GPT vote, and the three action buttons.
        with gr.Column(scale=4):
            user_input = gr.Textbox(label="Your Question", lines=2)
            option1 = gr.Textbox(label="Option 1")
            option2 = gr.Textbox(label="Option 2")
            user_vote = gr.Radio(["Evo", "GPT"], label="πŸ—³οΈ Who gave the better answer?", info="Optional – improves Evo.")
            submit = gr.Button("🧠 Ask Evo")
            clear = gr.Button("πŸ” Clear")
            retrain = gr.Button("πŸ“ˆ Retrain Evo from Feedback")

        # Right column: Evo's Markdown reply and the conversation history.
        with gr.Column(scale=6):
            evo_reply = gr.Markdown()
            # NOTE(review): chat_fn appends plain strings to chat_history,
            # but HighlightedText usually expects (text, label) pairs —
            # confirm this renders as intended.
            chat_display = gr.HighlightedText(label="Conversation History")

    # Event wiring: submit feeds all four inputs to chat_fn; clear resets
    # every widget via clear_fn; retrain reports its status into the
    # reply pane.
    submit.click(fn=chat_fn, inputs=[user_input, option1, option2, user_vote],
                 outputs=[evo_reply, chat_display])
    clear.click(fn=clear_fn, outputs=[user_input, option1, option2, user_vote, chat_display])
    retrain.click(fn=retrain_model, outputs=evo_reply)

# Start the Gradio server (blocking call).
demo.launch()