import gradio as gr
import os
from inference import get_evo_response, get_gpt_response
from logger import log_feedback
import csv
import subprocess
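
# NOTE (assumption): feedback_log.csv is produced by logger.log_feedback, which is not
# shown in this file. The helpers below assume the CSV has at least these columns:
#   question, option1, option2, context, evo_output, feedback, evo_was_correct
# If your logger writes different headers, adjust load_hall_of_fame() and render_hof().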

# Helper to load Hall of Fame entries from the feedback log
def load_hall_of_fame():
    entries = []
    if os.path.exists("feedback_log.csv"):
        with open("feedback_log.csv", newline='', encoding='utf-8') as f:
            reader = csv.DictReader(f)
            for row in reader:
                # Keep entries where the feedback marked Evo's answer as correct
                if row.get("evo_was_correct", "").strip().lower() in ("yes", "true", "1"):
                    entries.append(row)
    return entries[-10:][::-1]  # last 10 entries, newest first

def handle_query(question, option1, option2, context):
    options = [option1, option2]
    # get_evo_response is expected to return (answer, reasoning, confidence, context_used);
    # get_gpt_response returns a plain answer string (both defined in inference.py).
    evo_answer, evo_reasoning, evo_score, evo_context = get_evo_response(question, options, context)
    gpt_answer = get_gpt_response(question, context)
    return (
        f"Answer: {evo_answer} (Confidence: {evo_score:.2f})\n\n"
        f"Reasoning: {evo_reasoning}\n\n"
        f"Context used: {evo_context[:400]}...",
        gpt_answer,
        # The third value lands in the feedback status box and doubles as a compact
        # record of the run, which is later passed back into handle_feedback.
        f"{question} | {context} | {evo_answer}",
    )

def handle_feedback(feedback_text, question, option1, option2, context, evo_output):
    # The positive radio option reads "Evo was correct...", so match on that text
    evo_was_correct = bool(feedback_text) and "was correct" in feedback_text
    log_feedback(question, option1, option2, context, evo_output, evo_was_correct)
    return "Feedback logged and Evo will improve."

def trigger_retrain():
    try:
        subprocess.run(["python", "retrain_from_feedback.py"], check=True)
        return "Evo retraining completed."
    except subprocess.CalledProcessError:
        return "Retraining failed. Check logs."

def render_hof():
    entries = load_hall_of_fame()
    if not entries:
        return "No Hall of Fame entries yet. Submit feedback!"
    return "\n\n".join(
        f"**Q:** {e.get('question', 'N/A')}\n"
        f"**A:** {e.get('evo_output', 'N/A')}\n"
        f"**Feedback:** {e.get('feedback', 'N/A')}\n"
        f"**Context:** {e.get('context', '')[:200]}..."
        for e in entries
    )

description = """
# EvoRAG: Adaptive Reasoning AI

**What is Evo?**
EvoTransformer is a lightweight, evolving neural network with ~28M parameters.
It learns from feedback, adapts over time, and reasons using both web and context data.

**Why Evo?**
- Evolves from human input
- Architecturally updatable
- Transparent and fine-tunable
- Efficient on modest hardware

**Hardware**: Trained on Google Colab CPU/GPU
**Token limit**: 128
**Benchmarks**: PIQA, HellaSwag, ARC
**Version**: Evo v2.2 (Memory + Web Retrieval + Feedback Learning)
"""

with gr.Blocks(title="EvoRAG") as demo:
    gr.Markdown(description)

    with gr.Row():
        question = gr.Textbox(label="Ask anything", placeholder="e.g., What's the best way to escape a house fire?")
    with gr.Row():
        option1 = gr.Textbox(label="Option A", placeholder="e.g., Run outside")
        option2 = gr.Textbox(label="Option B", placeholder="e.g., Hide under bed")
    context = gr.Textbox(label="Optional Context", placeholder="Paste any extra background info here", lines=3)
    submit_btn = gr.Button("Run Comparison")

    with gr.Row():
        evo_output = gr.Textbox(label="EvoRAG's Reasoned Answer", lines=6)
        gpt_output = gr.Textbox(label="GPT-3.5's Suggestion", lines=6)

    feedback = gr.Radio(
        ["Evo was correct. Retrain from this.", "Evo was wrong. Don't retrain."],
        label="Was Evo's answer useful?",
        value=None,
    )
    submit_feedback = gr.Button("Submit Feedback")
    feedback_status = gr.Textbox(label="Feedback Status", interactive=False)

    retrain_button = gr.Button("Retrain Evo Now")
    retrain_status = gr.Textbox(label="Retraining Status", interactive=False)

    with gr.Accordion("Evo Hall of Fame (Top Reasoning Entries)", open=False):
        hof_display = gr.Markdown(render_hof())

    # Run both models; the third output doubles as a run summary stored in the status box
    submit_btn.click(
        fn=handle_query,
        inputs=[question, option1, option2, context],
        outputs=[evo_output, gpt_output, feedback_status],
    )
    submit_feedback.click(
        fn=handle_feedback,
        inputs=[feedback, question, option1, option2, context, feedback_status],
        outputs=[feedback_status],
    )
    retrain_button.click(fn=trigger_retrain, inputs=[], outputs=[retrain_status])

demo.launch(server_name="0.0.0.0", server_port=7860, share=True)
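# To try this locally: run `python app.py`, then open http://localhost:7860.
# Note: share=True asks Gradio for a temporary public link; on Hugging Face Spaces the
# app is already served publicly, so that flag is usually unnecessary there.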