
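# --- Legacy v1 UI, kept for reference as an inert module-level string (not executed) ---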
'''import gradio as gr
from inference import get_evo_response, get_gpt_response
from logger import log_feedback

# Default demo
default_question = "What should I do if my house is on fire?"
default_option1 = "I hide under the bed"
default_option2 = "I run for dear life"
default_context = ""

def run_comparison(question, option1, option2, context):
    options = [option1.strip(), option2.strip()]
    evo_answer, evo_reasoning, evo_conf, evo_context = get_evo_response(question, options, context)
    gpt_answer = get_gpt_response(question, context)

    evo_output = f"""**βœ… Evo's Suggestion:** {evo_answer}\n\n**Why?** {evo_reasoning}\n\n**Context Used:** {evo_context[:500]}..."""
    gpt_output = f"""**πŸ€– GPT-3.5's Suggestion:**\n\n{gpt_answer}"""

    return evo_output, gpt_output

def handle_feedback(evo_was_correct, question, option1, option2, context, evo_output):
    if evo_was_correct is not None:
        log_feedback(question, option1, option2, context, evo_output, evo_was_correct)
        return "βœ… Feedback recorded. Evo will retrain based on this soon!"
    return "⚠️ Please select feedback before submitting."

with gr.Blocks(theme=gr.themes.Base(), title="EvoRAG - Smarter Than GPT?") as demo:
    gr.Markdown("## 🧠 EvoRAG")
    gr.Markdown("**General-Purpose Adaptive AI that Thinks, Reads, and Evolves β€” Powered by Real-Time Web Search**")
    gr.Markdown("> Compare reasoning between Evo (which learns) and GPT-3.5 (which doesn't). You decide who wins.")

    with gr.Row():
        with gr.Column():
            question = gr.Textbox(label="❓ Your Question", placeholder="e.g. What should I do if my house is on fire?", value=default_question)
            option1 = gr.Textbox(label="πŸ”Ή Option 1", placeholder="e.g. I hide under the bed", value=default_option1)
            option2 = gr.Textbox(label="πŸ”Έ Option 2", placeholder="e.g. I run for dear life", value=default_option2)
            context = gr.Textbox(label="πŸ“‚ Extra Notes or Context (Optional)", placeholder="Paste news, user context, or background information", lines=4, value=default_context)
            compare_btn = gr.Button("πŸš€ Think & Compare")

        with gr.Column():
            evo_out = gr.Markdown(label="🧬 EvoRAG's Response")
            gpt_out = gr.Markdown(label="πŸ€– GPT-3.5's Suggestion")

    compare_btn.click(fn=run_comparison, inputs=[question, option1, option2, context], outputs=[evo_out, gpt_out])

    gr.Markdown("---")
    gr.Markdown("### 🧠 Help Evo Get Smarter – Give Feedback")
    feedback = gr.Radio(["πŸ‘ Evo was correct. Retrain from this.", "πŸ‘Ž Evo was wrong."], label="What did you think of Evo's answer?")
    submit_feedback = gr.Button("πŸ“¬ Submit Feedback / Retrain Evo")
    feedback_status = gr.Textbox(label="", interactive=False)

    submit_feedback.click(
        fn=lambda fb, q, o1, o2, ctx, eo: handle_feedback(fb == "πŸ‘ Evo was correct. Retrain from this.", q, o1, o2, ctx, eo),
        inputs=[feedback, question, option1, option2, context, evo_out],
        outputs=[feedback_status]
    )

demo.launch()'''

import gradio as gr
import os
from inference import get_evo_response, get_gpt_response
from logger import log_feedback
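
# Expected interfaces of the imported helpers, as they are called below (assumed, not
# re-verified against inference.py / logger.py):
#   get_evo_response(question, options, context) -> (answer, reasoning, confidence, retrieved_context)
#   get_gpt_response(question, context)          -> answer string
#   log_feedback(...)                            -> persists one feedback record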

# ⬇️ Evo Model Stats
EVO_PARAMS = "~28M Parameters"
EVO_HARDWARE = "Running on CPU (Colab/Space)"
EVO_VERSION = "EvoRAG v2.2 – Adaptive Reasoning"

# ⬇️ Feedback Logger Wrapper
FEEDBACK_LOG_PATH = "feedback_log.csv"
log_dir = os.path.dirname(FEEDBACK_LOG_PATH)
if log_dir:
    os.makedirs(log_dir, exist_ok=True)
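# Note: the imported log_feedback is assumed to write to this CSV (or to its own
# configured path); this block only makes sure the parent directory exists.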

def handle_feedback(is_correct, question, option1, option2, context, evo_output):
    """Record a πŸ‘/πŸ‘Ž verdict on Evo's answer; option1/option2 are accepted for parity with the UI inputs."""
    feedback = "πŸ‘" if is_correct else "πŸ‘Ž"
    # Assumes logger.log_feedback(question, context, evo_output, feedback); adjust if
    # logger.py still expects the older (question, option1, option2, ...) signature.
    log_feedback(question, context, evo_output, feedback)
    return "βœ… Feedback recorded. Evo will learn from this." if is_correct else "βœ… Feedback noted."

def run_comparison(question, option1, option2, context):
    options = [option1.strip(), option2.strip()]
    evo_ans, evo_reason, evo_conf, evo_ctx = get_evo_response(question, options, context)
    gpt_ans = get_gpt_response(question, context)

    evo_output = f"Evo's Suggestion: βœ… {evo_ans}\n\nWhy? {evo_reason}\n\nContext Used: {evo_ctx[:400]}..."
    gpt_output = f"GPT-3.5's Suggestion: {gpt_ans}"
    return evo_output, gpt_output, evo_ans

# ⬇️ Interface
with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue")) as demo:
    with gr.Column():
        gr.Markdown(f"""
        <div style='padding: 1em; border-radius: 12px; background: linear-gradient(90deg, #f0f4ff, #eef2fa); border: 1px solid #ccc;'>
            <h1 style='font-size: 2em; font-weight: 800;'>🧠 EvoRAG – General-Purpose Adaptive AI</h1>
            <p><b>{EVO_VERSION}</b></p>
            <p>Trained on reasoning tasks, it learns continuously from user feedback and combines architecture evolution with retrieval-augmented generation.</p>
            <ul>
                <li><b>Parameters:</b> {EVO_PARAMS}</li>
                <li><b>Hardware:</b> {EVO_HARDWARE}</li>
                <li><b>Live Feedback:</b> Logs every correction to evolve smarter.</li>
                <li><b>Compare:</b> Evo vs GPT-3.5 on the same question.</li>
            </ul>
            <p style='font-style: italic; font-size: 0.9em;'>Built for ethical, explainable, and adaptive intelligence.</p>
        </div>
        """)

    with gr.Row():
        question = gr.Textbox(label="πŸ“ Ask a Question", placeholder="e.g., What should you do in case of a fire?", lines=2)
    with gr.Row():
        option1 = gr.Textbox(label="Option A", placeholder="e.g., Hide inside")
        option2 = gr.Textbox(label="Option B", placeholder="e.g., Run for dear life")
    context = gr.Textbox(label="πŸ“‚ Optional Context or Notes", placeholder="Paste any extra info here", lines=2)

    with gr.Row():
        evo_out = gr.Textbox(label="🧬 EvoRAG's Reasoned Answer")
        gpt_out = gr.Textbox(label="πŸ€– GPT-3.5's Suggestion")

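    # Holds Evo's raw answer from the most recent comparison so feedback can refer to it.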
    evo_choice = gr.State()

    with gr.Row():
        run_btn = gr.Button("πŸ” Run Comparison")

    with gr.Row():
        feedback = gr.Radio(
            ["πŸ‘ Evo was correct. Retrain from this.", "πŸ‘Ž Evo was wrong. Improve it."],
            label="Was Evo’s answer useful?"
        )
        submit_feedback = gr.Button("πŸ“¬ Submit Feedback")
        feedback_output = gr.Textbox(label="Feedback Status")

    run_btn.click(fn=run_comparison, inputs=[question, option1, option2, context], outputs=[evo_out, gpt_out, evo_choice])

    submit_feedback.click(
        # Guard against an empty selection so πŸ‘Ž is not logged by default when fb is None.
        fn=lambda fb, q, o1, o2, ctx, eo: (
            handle_feedback(fb == "πŸ‘ Evo was correct. Retrain from this.", q, o1, o2, ctx, eo)
            if fb is not None
            else "⚠️ Please select feedback before submitting."
        ),
        inputs=[feedback, question, option1, option2, context, evo_choice],
        outputs=[feedback_output]
    )

demo.launch(ssr_mode=True)
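# Optional: on CPU-only hardware, request queueing keeps the UI responsive under load,
# e.g. demo.queue().launch(ssr_mode=True)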