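"""Gradio front-end for EvoRAG: shows an Evo suggestion side by side with a GPT-3.5
suggestion and logs thumbs-up / thumbs-down feedback to a local CSV file."""
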
import os
import gradio as gr
import pandas as pd
from inference import get_evo_response, get_gpt_response

LOG_PATH = "feedback_log.csv"
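# Create the log's parent directory only if LOG_PATH actually contains one
# (a bare filename like the default needs no makedirs call).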
if os.path.dirname(LOG_PATH):
    os.makedirs(os.path.dirname(LOG_PATH), exist_ok=True)

def log_feedback(question, context, evo_answer, feedback):
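    """Append one feedback row to the CSV log, writing the header only when the file is new."""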
    df = pd.DataFrame([[question, context, evo_answer, feedback]],
                      columns=["question", "context", "evo_answer", "feedback"])
    if os.path.exists(LOG_PATH):
        df.to_csv(LOG_PATH, mode="a", header=False, index=False)
    else:
        df.to_csv(LOG_PATH, index=False)

def load_history():
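    """Return the last 10 logged feedback rows as a Markdown table, or a placeholder if none exist."""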
    if not os.path.exists(LOG_PATH):
        return "No feedback yet."
    df = pd.read_csv(LOG_PATH)
    return df.tail(10).to_markdown(index=False)

def advisor_interface(query, context, options_input):
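    """Run both advisors on the query.

    Expects exactly two candidate options, one per line; returns Markdown for the Evo
    suggestion, Markdown for the GPT-3.5 suggestion, and the recent feedback history.
    """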
    options = [opt.strip() for opt in options_input.strip().split("\n") if opt.strip()]
    if len(options) != 2:
        return "Please enter exactly two options (one per line).", "", ""

    evo_answer, evo_reasoning, evo_conf, evo_context = get_evo_response(query, options, context)
    gpt_answer = get_gpt_response(query, context)
    history = load_history()
    return (
        f"### Evo Suggestion\n**Answer**: {evo_answer} (Confidence: {evo_conf:.2f})\n\n**Reasoning**: {evo_reasoning}\n\n_Context used:_\n{evo_context}",
        f"### GPT-3.5 Suggestion\n{gpt_answer}",
        history
    )

def feedback_interface(question, context, evo_answer, feedback):
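    """Log the user's rating of Evo's answer and confirm it in the UI."""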
    log_feedback(question, context, evo_answer, feedback)
    return "✅ Feedback logged."

with gr.Blocks(title="EvoRAG – General-Purpose Adaptive AI with Web Reasoning") as demo:
    gr.Markdown("## 🧠 EvoRAG – General-Purpose Adaptive AI with Web Reasoning")

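    # Left column: question, optional context, and two candidate options; right column: model outputs.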
    with gr.Row():
        with gr.Column():
            query = gr.Textbox(label="📝 Ask anything", placeholder="e.g. Who is the president of the US?")
            context = gr.Textbox(label="📂 Optional Context or Notes", lines=3, placeholder="Paste extra info or leave blank")
            options = gr.Textbox(label="🧠 Options (2 lines)", lines=2, placeholder="Option 1\nOption 2")
            submit = gr.Button("🔍 Run Advisors")

        with gr.Column():
            evo_output = gr.Markdown()
            gpt_output = gr.Markdown()

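    # Feedback row: thumbs-up / thumbs-down rating for Evo's answer.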
    with gr.Row():
        feedback = gr.Radio(["👍 Helpful", "👎 Not Helpful"], label="Was Evo’s answer useful?")
        submit_feedback = gr.Button("📬 Submit Feedback")
        feedback_status = gr.Textbox(interactive=False)

    with gr.Accordion("📜 Recent History", open=False):
        history_display = gr.Markdown()

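    # Wire the buttons: run both advisors on submit, and log feedback (including the rendered
    # Evo Markdown output) when the feedback button is clicked.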
    submit.click(fn=advisor_interface, inputs=[query, context, options], outputs=[evo_output, gpt_output, history_display])
    submit_feedback.click(fn=feedback_interface, inputs=[query, context, evo_output, feedback], outputs=[feedback_status])

demo.launch()