Update app.py
app.py
CHANGED
@@ -1,66 +1,5 @@
-'''import gradio as gr
-from inference import get_evo_response, get_gpt_response
-import os
-from logger import log_feedback
-
-LOG_PATH = "feedback_log.csv"
-os.makedirs(os.path.dirname(LOG_PATH), exist_ok=True) if os.path.dirname(LOG_PATH) else None
-
-def process_inputs(query, option_1, option_2, user_context):
-    options = [option_1, option_2]
-    evo_answer, reasoning, confidence, evo_context = get_evo_response(query, options, user_context)
-    gpt_answer = get_gpt_response(query, user_context)
-    return (
-        evo_answer,
-        reasoning,
-        f"Context used by Evo:\n{evo_context}",
-        gpt_answer
-    )
-
-def feedback_submit(question, context, evo_answer, feedback):
-    log_feedback(question, context, evo_answer, feedback)
-    return "✅ Feedback submitted. Thank you!"
-
-
-gr.Markdown("## 🧠 EvoRAG - General-Purpose Adaptive AI with Web Reasoning")
-
-with gr.Row():
-    with gr.Column():
-        query = gr.Textbox(label="🔍 Ask anything", placeholder="e.g. Who is the current president of the US?")
-        user_context = gr.Textbox(label="📝 Optional Context or Notes", placeholder="Paste extra info or leave blank")
-        option_1 = gr.Textbox(label="🔹 Option 1", placeholder="e.g. Donald Trump")
-        option_2 = gr.Textbox(label="🔸 Option 2", placeholder="e.g. Joe Biden")
-        run_btn = gr.Button("🚀 Get Answers")
-
-    with gr.Column():
-        gr.Markdown("### 🧠 EvoRAG Suggestion")
-        evo_out = gr.Textbox(label="Answer (Evo)", interactive=False)
-        evo_reason = gr.Textbox(label="Reasoning", interactive=False)
-        evo_context_used = gr.Textbox(label="Context Used", lines=4, interactive=False)
-
-        gr.Markdown("### 🤖 GPT-3.5 Suggestion")
-        gpt_out = gr.Textbox(label="Answer (GPT-3.5)", interactive=False)
-
-run_btn.click(
-    fn=process_inputs,
-    inputs=[query, option_1, option_2, user_context],
-    outputs=[evo_out, evo_reason, evo_context_used, gpt_out]
-)
-
-gr.Markdown("### 🗳️ Feedback")
-with gr.Row():
-    feedback = gr.Radio(["👍 Helpful", "👎 Not Helpful"], label="Was Evo's answer useful?")
-    submit_btn = gr.Button("💬 Submit Feedback")
-    feedback_result = gr.Textbox(visible=False)
-
-submit_btn.click(
-    fn=feedback_submit,
-    inputs=[query, user_context, evo_out, feedback],
-    outputs=[feedback_result]
-)
-
-demo.launch()'''
-import gradio as gr
+
+'''import gradio as gr
 from inference import get_evo_response, get_gpt_response
 from logger import log_feedback
 
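Both the retired UI above and the new one added below call into `inference.py`, which is not part of this commit. For reference, here is a minimal sketch of the interface `app.py` appears to assume: `get_evo_response(question, options, context)` returning an `(answer, reasoning, confidence, retrieved_context)` tuple, and `get_gpt_response(question, context)` returning a plain string. The bodies are placeholders for illustration only, not the Space's actual retrieval or model code.

```python
# inference.py -- interface sketch only; the real Evo/GPT logic is not in this diff.
from typing import List, Tuple

def get_evo_response(question: str, options: List[str], context: str) -> Tuple[str, str, float, str]:
    """Assumed contract: choose between the options and return
    (answer, reasoning, confidence, retrieved_context)."""
    answer = options[0] if options else ""   # placeholder choice
    reasoning = "placeholder reasoning"      # the Space returns Evo's explanation here
    confidence = 0.0                         # the Space returns a confidence score here
    retrieved_context = context or ""        # the Space returns retrieved/RAG context here
    return answer, reasoning, confidence, retrieved_context

def get_gpt_response(question: str, context: str) -> str:
    """Assumed contract: return GPT-3.5's answer as a single string."""
    return "placeholder GPT-3.5 answer"
```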
@@ -117,4 +56,86 @@ with gr.Blocks(theme=gr.themes.Base(), title="EvoRAG - Smarter Than GPT?") as de
         outputs=[feedback_status]
     )
 
-demo.launch()
+demo.launch()'''
+
+import gradio as gr
+import torch
+import os
+from inference import get_evo_response, get_gpt_response
+from logger import log_feedback
+
+# ⬇️ Evo Model Stats
+EVO_PARAMS = "~28M Parameters"
+EVO_HARDWARE = "Running on CPU (Colab/Space)"
+EVO_VERSION = "EvoRAG v2.2 - Adaptive Reasoning"
+
+# ⬇️ Feedback Logger Wrapper
+FEEDBACK_LOG_PATH = "feedback_log.csv"
+os.makedirs(os.path.dirname(FEEDBACK_LOG_PATH), exist_ok=True) if os.path.dirname(FEEDBACK_LOG_PATH) else None
+
+def handle_feedback(is_correct, question, option1, option2, context, evo_output):
+    feedback = "👍" if is_correct else "👎"
+    log_feedback(question, context, evo_output, feedback)
+    return "✅ Feedback recorded. Evo will learn from this." if is_correct else "✅ Feedback noted."
+
+def run_comparison(question, option1, option2, context):
+    options = [option1.strip(), option2.strip()]
+    evo_ans, evo_reason, evo_conf, evo_ctx = get_evo_response(question, options, context)
+    gpt_ans = get_gpt_response(question, context)
+
+    evo_output = f"Evo's Suggestion: ✅ {evo_ans}\n\nWhy? {evo_reason}\n\nContext Used: {evo_ctx[:400]}..."
+    gpt_output = f"GPT-3.5's Suggestion: {gpt_ans}"
+    return evo_output, gpt_output, evo_ans
+
+# ⬇️ Interface
+with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue")) as demo:
+    with gr.Column():
+        gr.Markdown(f"""
+        <div style='padding: 1em; border-radius: 12px; background: linear-gradient(90deg, #f0f4ff, #eef2fa); border: 1px solid #ccc;'>
+            <h1 style='font-size: 2em; font-weight: 800;'>🧠 EvoRAG - General-Purpose Adaptive AI</h1>
+            <p><b>{EVO_VERSION}</b></p>
+            <p>Trained on reasoning tasks. Live learning from feedback. Combines architecture evolution and retrieval-augmented generation.</p>
+            <ul>
+                <li><b>Parameters:</b> {EVO_PARAMS}</li>
+                <li><b>Hardware:</b> {EVO_HARDWARE}</li>
+                <li><b>Live Feedback:</b> Logs every correction to evolve smarter.</li>
+                <li><b>Compare:</b> Evo vs GPT-3.5 on the same question.</li>
+            </ul>
+            <p style='font-style: italic; font-size: 0.9em;'>Built for ethical, explainable, and adaptive intelligence.</p>
+        </div>
+        """)
+
+    with gr.Row():
+        question = gr.Textbox(label="🔍 Ask a Question", placeholder="e.g., What should you do in case of a fire?", lines=2)
+    with gr.Row():
+        option1 = gr.Textbox(label="Option A", placeholder="e.g., Hide inside")
+        option2 = gr.Textbox(label="Option B", placeholder="e.g., Run for dear life")
+    context = gr.Textbox(label="📝 Optional Context or Notes", placeholder="Paste any extra info here", lines=2)
+
+    with gr.Row():
+        evo_out = gr.Textbox(label="🧬 EvoRAG's Reasoned Answer")
+        gpt_out = gr.Textbox(label="🤖 GPT-3.5's Suggestion")
+
+    evo_choice = gr.State()
+
+    with gr.Row():
+        run_btn = gr.Button("🚀 Run Comparison")
+
+    with gr.Row():
+        feedback = gr.Radio(
+            ["👍 Evo was correct. Retrain from this.", "👎 Evo was wrong. Improve it."],
+            label="Was Evo's answer useful?"
+        )
+        submit_feedback = gr.Button("💬 Submit Feedback")
+        feedback_output = gr.Textbox(label="Feedback Status")
+
+    run_btn.click(fn=run_comparison, inputs=[question, option1, option2, context], outputs=[evo_out, gpt_out, evo_choice])
+
+    submit_feedback.click(
+        fn=lambda fb, q, o1, o2, ctx, eo: handle_feedback(fb == "👍 Evo was correct. Retrain from this.", q, o1, o2, ctx, eo),
+        inputs=[feedback, question, option1, option2, context, evo_choice],
+        outputs=[feedback_output]
+    )
+
+demo.launch(ssr=True)
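`logger.py` is also outside this diff. Given the call `log_feedback(question, context, evo_output, feedback)` and the `FEEDBACK_LOG_PATH = "feedback_log.csv"` constant in the new code, a plausible minimal implementation is an append-only CSV writer like the sketch below; this is an assumption for illustration, not the Space's actual logger.

```python
# logger.py -- hypothetical sketch of the feedback logger that app.py imports.
import csv
import os
from datetime import datetime, timezone

LOG_PATH = "feedback_log.csv"  # same file name as FEEDBACK_LOG_PATH in app.py

def log_feedback(question: str, context: str, evo_answer: str, feedback: str) -> None:
    """Append one feedback row, writing a header the first time the file is created."""
    write_header = not os.path.exists(LOG_PATH)
    with open(LOG_PATH, "a", newline="", encoding="utf-8") as f:
        writer = csv.writer(f)
        if write_header:
            writer.writerow(["timestamp", "question", "context", "evo_answer", "feedback"])
        writer.writerow([datetime.now(timezone.utc).isoformat(), question, context, evo_answer, feedback])
```

Under a contract like this, the lambda wired to `submit_feedback` reduces the radio selection to a boolean, and `handle_feedback` converts it back to a 👍/👎 string before it is logged.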