import gradio as gr
from inference import evo_rag_response, get_gpt_response
from retriever import build_index_from_file
from logger import log_feedback
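# The four helpers above live in sibling modules of this repo that are not
# shown on this page. Their expected signatures, inferred only from how they
# are called below (an assumption, not the modules' documented API), are:
#   build_index_from_file(path: str) -> None    # (re)build the retrieval index
#   evo_rag_response(query: str) -> str         # answer grounded in the index
#   get_gpt_response(query: str, context: str) -> str   # plain GPT-3.5 baseline
#   log_feedback(query, context, answer, feedback) -> None   # persist user votes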

def advisor_interface(query, file, feedback_choice):
    # 🧾 Build RAG index if a file is uploaded
    if file is not None:
        build_index_from_file(file.name)

    # 🧠 Get EvoRAG + GPT responses
    evo_output = evo_rag_response(query)
    gpt_output = get_gpt_response(query, "")  # GPT baseline answer, no retrieved context

    # πŸ—³οΈ Log feedback
    if feedback_choice != "No feedback":
        log_feedback(query, "[RAG context]", evo_output, feedback_choice)

    return evo_output, gpt_output

with gr.Blocks() as demo:
    gr.Markdown("## 🧠 EvoRAG – Retrieval-Augmented Adaptive AI for Finance")

    with gr.Row():
        query = gr.Textbox(label="📝 Ask a financial question", placeholder="e.g. Should we reduce exposure to Fund A?")
        file = gr.File(label="📂 Upload policy or memo (.pdf or .txt)", file_types=[".pdf", ".txt"])

    feedback = gr.Radio(["👍 Helpful", "👎 Not Helpful", "No feedback"], label="Was Evo's answer useful?", value="No feedback")

    with gr.Row():
        evo_out = gr.Textbox(label="🔬 EvoRAG Suggestion")
        gpt_out = gr.Textbox(label="🤖 GPT-3.5 Suggestion")

    submit_btn = gr.Button("Run Advisors")
    submit_btn.click(fn=advisor_interface, inputs=[query, file, feedback], outputs=[evo_out, gpt_out])

demo.launch()
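
# To try this app outside its hosting environment (an assumption about the intended
# workflow, not documented here): install gradio, place this file next to
# inference.py, retriever.py, and logger.py from the same repo, and run
# `python app.py`; demo.launch() then serves the Blocks UI on a local port
# (7860 by default).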