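# --- Previous version of this app, preserved below as an inert string literal for reference ---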
'''import gradio as gr
from inference import get_evo_response, get_gpt_response
from logger import log_feedback

# Default demo
default_question = "What should I do if my house is on fire?"
default_option1 = "I hide under the bed"
default_option2 = "I run for dear life"
default_context = ""

def run_comparison(question, option1, option2, context):
    options = [option1.strip(), option2.strip()]
    evo_answer, evo_reasoning, evo_conf, evo_context = get_evo_response(question, options, context)
    gpt_answer = get_gpt_response(question, context)
    evo_output = f"""**✅ Evo's Suggestion:** {evo_answer}\n\n**Why?** {evo_reasoning}\n\n**Context Used:** {evo_context[:500]}..."""
    gpt_output = f"""**🤖 GPT-3.5's Suggestion:**\n\n{gpt_answer}"""
    return evo_output, gpt_output

def handle_feedback(evo_was_correct, question, option1, option2, context, evo_output):
    if evo_was_correct is not None:
        log_feedback(question, option1, option2, context, evo_output, evo_was_correct)
        return "✅ Feedback recorded. Evo will retrain based on this soon!"
    return "⚠️ Please select feedback before submitting."

with gr.Blocks(theme=gr.themes.Base(), title="EvoRAG - Smarter Than GPT?") as demo:
    gr.Markdown("## 🧠 EvoRAG")
    gr.Markdown("**General-Purpose Adaptive AI that Thinks, Reads, and Evolves – Powered by Real-Time Web Search**")
    gr.Markdown("> Compare reasoning between Evo (which learns) and GPT-3.5 (which doesn't). You decide who wins.")

    with gr.Row():
        with gr.Column():
            question = gr.Textbox(label="❓ Your Question", placeholder="e.g. What should I do if my house is on fire?", value=default_question)
            option1 = gr.Textbox(label="🔹 Option 1", placeholder="e.g. I hide under the bed", value=default_option1)
            option2 = gr.Textbox(label="🔸 Option 2", placeholder="e.g. I run for dear life", value=default_option2)
            context = gr.Textbox(label="📝 Extra Notes or Context (Optional)", placeholder="Paste news, user context, or background information", lines=4, value=default_context)
            compare_btn = gr.Button("🚀 Think & Compare")
        with gr.Column():
            evo_out = gr.Markdown(label="🧬 EvoRAG's Response")
            gpt_out = gr.Markdown(label="🤖 GPT-3.5's Suggestion")

    compare_btn.click(fn=run_comparison, inputs=[question, option1, option2, context], outputs=[evo_out, gpt_out])

    gr.Markdown("---")
    gr.Markdown("### 🧠 Help Evo Get Smarter – Give Feedback")
    feedback = gr.Radio(["👍 Evo was correct. Retrain from this.", "👎 Evo was wrong."], label="What did you think of Evo's answer?")
    submit_feedback = gr.Button("💬 Submit Feedback / Retrain Evo")
    feedback_status = gr.Textbox(label="", interactive=False)

    submit_feedback.click(
        fn=lambda fb, q, o1, o2, ctx, eo: handle_feedback(fb == "👍 Evo was correct. Retrain from this.", q, o1, o2, ctx, eo),
        inputs=[feedback, question, option1, option2, context, evo_out],
        outputs=[feedback_status]
    )

demo.launch()'''
import gradio as gr
import torch
import os

from inference import get_evo_response, get_gpt_response
from logger import log_feedback
# ⬇️ Evo Model Stats
EVO_PARAMS = "~28M Parameters"
EVO_HARDWARE = "Running on CPU (Colab/Space)"
EVO_VERSION = "EvoRAG v2.2 – Adaptive Reasoning"

# ⬇️ Feedback Logger Wrapper
FEEDBACK_LOG_PATH = "feedback_log.csv"
log_dir = os.path.dirname(FEEDBACK_LOG_PATH)
if log_dir:  # only needed when the log path includes a directory component
    os.makedirs(log_dir, exist_ok=True)
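# NOTE (assumption): `logger.log_feedback` is this project's own helper and is not shown
# here. A minimal sketch of what it is assumed to do -- append one row per feedback event
# to FEEDBACK_LOG_PATH -- might look roughly like:
#
#     import csv, datetime
#     def log_feedback(question, context, evo_output, feedback):
#         with open(FEEDBACK_LOG_PATH, "a", newline="", encoding="utf-8") as f:
#             csv.writer(f).writerow([datetime.datetime.utcnow().isoformat(),
#                                     question, context, evo_output, feedback])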
def handle_feedback(is_correct, question, option1, option2, context, evo_output):
    # option1/option2 are accepted so the signature matches the click handler's inputs,
    # but only the question, context, Evo's output, and the rating are passed to the logger.
    feedback = "👍" if is_correct else "👎"
    log_feedback(question, context, evo_output, feedback)
    return "✅ Feedback recorded. Evo will learn from this." if is_correct else "✅ Feedback noted."
def run_comparison(question, option1, option2, context):
    options = [option1.strip(), option2.strip()]
    evo_ans, evo_reason, evo_conf, evo_ctx = get_evo_response(question, options, context)
    gpt_ans = get_gpt_response(question, context)
    evo_output = f"Evo's Suggestion: ✅ {evo_ans}\n\nWhy? {evo_reason}\n\nContext Used: {evo_ctx[:400]}..."
    gpt_output = f"GPT-3.5's Suggestion: {gpt_ans}"
    return evo_output, gpt_output, evo_ans
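# NOTE (assumption): `inference.get_evo_response` is expected to return a 4-tuple of
# (answer, reasoning, confidence, retrieved_context), while `get_gpt_response` returns a
# plain string, e.g. (illustrative values only):
#     evo_ans, evo_reason, evo_conf, evo_ctx = get_evo_response(
#         "What should you do in case of a fire?", ["Hide inside", "Run for dear life"], "")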
# ⬇️ Interface
with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue")) as demo:
    with gr.Column():
        gr.Markdown(f"""
<div style='padding: 1em; border-radius: 12px; background: linear-gradient(90deg, #f0f4ff, #eef2fa); border: 1px solid #ccc;'>
    <h1 style='font-size: 2em; font-weight: 800;'>🧠 EvoRAG – General-Purpose Adaptive AI</h1>
    <p><b>{EVO_VERSION}</b></p>
    <p>Trained on reasoning tasks. Learns live from feedback. Combines architecture evolution with retrieval-augmented generation.</p>
    <ul>
        <li><b>Parameters:</b> {EVO_PARAMS}</li>
        <li><b>Hardware:</b> {EVO_HARDWARE}</li>
        <li><b>Live Feedback:</b> Logs every correction so Evo can evolve.</li>
        <li><b>Compare:</b> Evo vs GPT-3.5 on the same question.</li>
    </ul>
    <p style='font-style: italic; font-size: 0.9em;'>Built for ethical, explainable, and adaptive intelligence.</p>
</div>
""")
    with gr.Row():
        question = gr.Textbox(label="📝 Ask a Question", placeholder="e.g., What should you do in case of a fire?", lines=2)
    with gr.Row():
        option1 = gr.Textbox(label="Option A", placeholder="e.g., Hide inside")
        option2 = gr.Textbox(label="Option B", placeholder="e.g., Run for dear life")

    context = gr.Textbox(label="📄 Optional Context or Notes", placeholder="Paste any extra info here", lines=2)

    with gr.Row():
        evo_out = gr.Textbox(label="🧬 EvoRAG's Reasoned Answer")
        gpt_out = gr.Textbox(label="🤖 GPT-3.5's Suggestion")

    evo_choice = gr.State()

    with gr.Row():
        run_btn = gr.Button("🚀 Run Comparison")

    with gr.Row():
        feedback = gr.Radio(
            ["👍 Evo was correct. Retrain from this.", "👎 Evo was wrong. Improve it."],
            label="Was Evo's answer useful?"
        )
        submit_feedback = gr.Button("💬 Submit Feedback")

    feedback_output = gr.Textbox(label="Feedback Status")
    run_btn.click(fn=run_comparison, inputs=[question, option1, option2, context], outputs=[evo_out, gpt_out, evo_choice])
    submit_feedback.click(
        fn=lambda fb, q, o1, o2, ctx, eo: (
            handle_feedback(fb == "👍 Evo was correct. Retrain from this.", q, o1, o2, ctx, eo)
            if fb else "⚠️ Please select feedback before submitting."
        ),
        inputs=[feedback, question, option1, option2, context, evo_choice],
        outputs=[feedback_output]
    )

# ssr_mode (Gradio 5+) enables server-side rendering of the UI.
demo.launch(ssr_mode=True)
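# To try this locally (assuming the file is saved as app.py next to inference.py and
# logger.py, with their dependencies installed):
#     python app.py
# then open the printed local URL. Submitted feedback goes through logger.log_feedback,
# which is assumed to append to feedback_log.csv.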