import gradio as gr
from inference import evo_rag_response, get_gpt_response
from retriever import build_index_from_file
from logger import log_feedback
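# The three local modules above are not included in this file; based only on how
# they are called below, they are assumed to expose roughly these interfaces
# (signatures are an assumption, not a confirmed API):
#   evo_rag_response(query: str) -> str            # retrieval-augmented answer
#   get_gpt_response(query: str, context: str) -> str  # plain GPT baseline answer
#   build_index_from_file(path: str) -> None       # (re)build the retrieval index
#   log_feedback(query, context, answer, rating) -> None  # persist user feedback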
def advisor_interface(query, file, feedback_choice):
    # 🧾 Build RAG index if a file is uploaded
    if file is not None:
        build_index_from_file(file.name)

    # 🧠 Get EvoRAG + GPT responses
    evo_output = evo_rag_response(query)
    gpt_output = get_gpt_response(query, "")  # GPT optional context left empty

    # 🗳️ Log feedback
    if feedback_choice != "No feedback":
        log_feedback(query, "[RAG context]", evo_output, feedback_choice)

    return evo_output, gpt_output
with gr.Blocks() as demo:
    gr.Markdown("## 🧠 EvoRAG – Retrieval-Augmented Adaptive AI for Finance")

    with gr.Row():
        query = gr.Textbox(label="Ask a financial question", placeholder="e.g. Should we reduce exposure to Fund A?")
        file = gr.File(label="Upload policy or memo (.pdf or .txt)", file_types=[".pdf", ".txt"])
        feedback = gr.Radio(["👍 Helpful", "👎 Not Helpful", "No feedback"], label="Was Evo's answer useful?", value="No feedback")
    with gr.Row():
        evo_out = gr.Textbox(label="💬 EvoRAG Suggestion")
        gpt_out = gr.Textbox(label="🤖 GPT-3.5 Suggestion")

    submit_btn = gr.Button("Run Advisors")
    submit_btn.click(fn=advisor_interface, inputs=[query, file, feedback], outputs=[evo_out, gpt_out])
demo.launch()
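# Note: on Hugging Face Spaces the bare demo.launch() above is sufficient.
# When running locally, a temporary public URL can be requested with
# demo.launch(share=True), and server_name/server_port can be passed if the
# default 127.0.0.1:7860 binding does not suit the environment.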