Update app.py
app.py CHANGED
@@ -1,66 +1,67 @@
-
-
  import gradio as gr
- from inference import get_evo_response, get_gpt_response
  import pandas as pd
- import
-
- LOG_PATH = "logs/feedback_log.csv"
- os.makedirs(os.path.dirname(LOG_PATH), exist_ok=True)
- if not os.path.exists(LOG_PATH):
-     pd.DataFrame(columns=["question", "context", "option1", "option2", "evo_answer", "feedback"]).to_csv(LOG_PATH, index=False)
-
- def advisor_interface(question, context, options_text, feedback=None):
-     options = [o.strip() for o in options_text.split("\n") if o.strip()]
-     if len(options) != 2:
-         return "Please enter exactly two options (one per line).", "", ""

-
-
-

-
-
-
-
-
-
-         "option1": option1,
-         "option2": option2,
-         "evo_answer": evo_answer,
-         "feedback": feedback
-     }, ignore_index=True)
          df.to_csv(LOG_PATH, index=False)

-     evo_msg = f"Evo suggests: **{evo_answer}** (Confidence: {confidence:.2f})"
-     context_used = f"Option 1 Score: {s1:.2f}\nOption 2 Score: {s2:.2f}"
-     return evo_msg + "\n\n" + context_used, gpt_output, load_history()
-
  def load_history():
-
-     if df.empty:
          return "No feedback yet."
      return df.tail(10).to_markdown(index=False)

-
-

      with gr.Row():
          with gr.Column():
-
-             context = gr.Textbox(label="Optional Context or Notes", lines=
-             options = gr.Textbox(label="Options (
-

-
-
-             gpt_out = gr.Markdown()
-             history = gr.Markdown()

-
-
-         inputs=[question, context, options, feedback],
-         outputs=[evo_out, gpt_out, history]
-     )

  demo.launch()
+ import os
  import gradio as gr
  import pandas as pd
+ from inference import get_evo_response, get_gpt_response

+ LOG_PATH = "feedback_log.csv"
+ if os.path.dirname(LOG_PATH):
+     os.makedirs(os.path.dirname(LOG_PATH), exist_ok=True)

+ def log_feedback(question, context, evo_answer, feedback):
+     df = pd.DataFrame([[question, context, evo_answer, feedback]],
+                       columns=["question", "context", "evo_answer", "feedback"])
+     if os.path.exists(LOG_PATH):
+         df.to_csv(LOG_PATH, mode="a", header=False, index=False)
+     else:
          df.to_csv(LOG_PATH, index=False)

  def load_history():
+     if not os.path.exists(LOG_PATH):
          return "No feedback yet."
+     df = pd.read_csv(LOG_PATH)
      return df.tail(10).to_markdown(index=False)

+ def advisor_interface(query, context, options_input):
+     options = [opt.strip() for opt in options_input.strip().split("\n") if opt.strip()]
+     if len(options) != 2:
+         return "Please enter exactly two options (one per line).", "", ""
+
+     evo_answer, evo_reasoning, evo_conf, evo_context = get_evo_response(query, options, context)
+     gpt_answer = get_gpt_response(query, context)
+     history = load_history()
+     return (
+         f"### Evo Suggestion\n**Answer**: {evo_answer} (Confidence: {evo_conf:.2f})\n\n**Reasoning**: {evo_reasoning}\n\n_Context used:_\n{evo_context}",
+         f"### GPT-3.5 Suggestion\n{gpt_answer}",
+         history
+     )
+
+ def feedback_interface(question, context, evo_answer, feedback):
+     log_feedback(question, context, evo_answer, feedback)
+     return "Feedback logged."
+
+ with gr.Blocks(title="EvoRAG - General-Purpose Adaptive AI with Web Reasoning") as demo:
+     gr.Markdown("## EvoRAG - General-Purpose Adaptive AI with Web Reasoning")

      with gr.Row():
          with gr.Column():
+             query = gr.Textbox(label="Ask anything", placeholder="e.g. Who is the president of the US?")
+             context = gr.Textbox(label="Optional Context or Notes", lines=3, placeholder="Paste extra info or leave blank")
+             options = gr.Textbox(label="Options (2 lines)", lines=2, placeholder="Option 1\nOption 2")
+             submit = gr.Button("Run Advisors")
+
+         with gr.Column():
+             evo_output = gr.Markdown()
+             gpt_output = gr.Markdown()
+
+     with gr.Row():
+         feedback = gr.Radio(["Helpful", "Not Helpful"], label="Was Evo's answer useful?")
+         submit_feedback = gr.Button("Submit Feedback")
+         feedback_status = gr.Textbox(interactive=False)

+     with gr.Accordion("Recent History", open=False):
+         history_display = gr.Markdown()

+     submit.click(fn=advisor_interface, inputs=[query, context, options], outputs=[evo_output, gpt_output, history_display])
+     submit_feedback.click(fn=feedback_interface, inputs=[query, context, evo_output, feedback], outputs=[feedback_status])

  demo.launch()
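The updated app.py imports get_evo_response and get_gpt_response from a local inference module that is not part of this commit. For smoke-testing the UI and the CSV feedback log without that module, a stub with the call signatures app.py relies on can stand in for it. The sketch below is hypothetical and is not the Space's actual retrieval or GPT-3.5 logic; it only assumes, from how advisor_interface unpacks the results, that get_evo_response returns an (answer, reasoning, confidence, context_used) tuple and that get_gpt_response returns a single string.

# inference.py -- hypothetical stub for local testing only.
# The real module in this Space implements Evo's retrieval/reasoning and a GPT-3.5 call;
# these placeholders merely mirror the signatures that app.py expects.

def get_evo_response(query, options, context):
    # Assumed contract: (answer, reasoning, confidence, context_used)
    answer = options[0] if options else ""
    reasoning = "Stub reasoning: picked the first option."
    confidence = 0.50
    context_used = context or "(no context provided)"
    return answer, reasoning, confidence, context_used

def get_gpt_response(query, context):
    # Assumed contract: a single plain-text or markdown string
    return f"Stub GPT answer for: {query}"

With this stub saved next to app.py, running python app.py launches the Gradio interface, and submitting feedback appends rows to feedback_log.csv in the working directory, which the Recent History accordion then renders via load_history().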