"""Gradio UI that answers a financial question with two advisors side by side:

* EvoRAG  — retrieval-augmented answer over an optionally uploaded document.
* GPT-3.5 — plain model answer with no retrieved context.

Importing this module only builds the UI; the server is started under the
``__main__`` guard so the module can be imported (e.g. in tests) without
launching Gradio.
"""

import gradio as gr

from inference import evo_rag_response, get_gpt_response
from logger import log_feedback
from retriever import build_index_from_file


def advisor_interface(query, file, feedback_choice):
    """Run both advisors for *query* and optionally log user feedback.

    Parameters
    ----------
    query : str
        The financial question typed by the user.
    file : gradio file object or None
        Optional uploaded policy/memo (.pdf or .txt). When present, the
        RAG index is rebuilt from it before answering.
    feedback_choice : str
        The radio selection; any value other than ``"No feedback"`` is
        persisted via :func:`log_feedback`.

    Returns
    -------
    tuple[str, str]
        ``(evo_output, gpt_output)`` — shown in the two output textboxes.
    """
    # 🧾 Rebuild the RAG index when a document has been uploaded.
    if file is not None:
        build_index_from_file(file.name)

    # 🧠 Get EvoRAG + GPT responses. GPT deliberately receives an empty
    # context string so the comparison is RAG vs. no-RAG.
    evo_output = evo_rag_response(query)
    gpt_output = get_gpt_response(query, "")

    # 🗳️ Log feedback only when the user actually picked an option.
    # NOTE(review): the radio is submitted together with the *new* query,
    # so feedback about a previous answer is logged against the answer
    # generated now — confirm this pairing is intended.
    if feedback_choice != "No feedback":
        log_feedback(query, "[RAG context]", evo_output, feedback_choice)

    return evo_output, gpt_output


with gr.Blocks() as demo:
    gr.Markdown("## 🧠 EvoRAG – Retrieval-Augmented Adaptive AI for Finance")

    with gr.Row():
        query = gr.Textbox(
            label="📝 Ask a financial question",
            placeholder="e.g. Should we reduce exposure to Fund A?",
        )
        file = gr.File(
            label="📂 Upload policy or memo (.pdf or .txt)",
            file_types=[".pdf", ".txt"],
        )

    feedback = gr.Radio(
        ["👍 Helpful", "👎 Not Helpful", "No feedback"],
        label="Was Evo’s answer useful?",
        value="No feedback",
    )

    with gr.Row():
        evo_out = gr.Textbox(label="🔬 EvoRAG Suggestion")
        gpt_out = gr.Textbox(label="🤖 GPT-3.5 Suggestion")

    submit_btn = gr.Button("Run Advisors")
    submit_btn.click(
        fn=advisor_interface,
        inputs=[query, file, feedback],
        outputs=[evo_out, gpt_out],
    )

if __name__ == "__main__":
    # Launch the web server only when run as a script, not on import.
    demo.launch()