# -- Hugging Face Spaces scrape residue (converted to comments so the module parses) --
# Space status: Sleeping
# File size: 1,317 bytes
# Commit hashes seen in the page gutter: e44d530, 676eb03, f39d1fb
# (line-number gutter 1-37 omitted)
import gradio as gr
from inference import get_evo_response, get_gpt_response, get_context_from_file
from logger import log_feedback
def advisor_interface(query, file, feedback_choice):
    """Run both advisors on *query* and optionally log the user's feedback.

    Parameters
    ----------
    query : str
        The financial question typed into the textbox.
    file : file-like or None
        Optional uploaded policy/memo (.pdf or .txt) used as retrieval
        context.
    feedback_choice : str
        The radio selection; any value other than "No feedback" is
        persisted via ``log_feedback``.

    Returns
    -------
    tuple
        ``(evo_output, gpt_output)`` — the two advisor answers, in the
        order expected by the two output textboxes.
    """
    # The extracted text context is only used for feedback logging here;
    # both advisor calls receive the raw uploaded file object themselves.
    context = get_context_from_file(file) if file is not None else None

    evo_output = get_evo_response(query, file)
    gpt_output = get_gpt_response(query, file)

    if feedback_choice != "No feedback":
        log_feedback(query, context, evo_output, feedback_choice)

    return evo_output, gpt_output
# Gradio UI: one question box + optional file upload feeding two side-by-side
# advisor outputs (EvoRAG vs GPT-3.5), with a feedback radio that is logged.
# NOTE(review): the emoji in the labels below were mojibake in the scraped
# source ("π§", "Evoβs", ...); they have been restored to best-guess
# characters — confirm against the original file.
with gr.Blocks() as demo:
    gr.Markdown("## 🧠 EvoRAG – Retrieval-Augmented Adaptive AI for Finance")

    with gr.Row():
        query = gr.Textbox(
            label="📈 Ask a financial question",
            placeholder="e.g. Should we reduce exposure to Fund A?",
        )
        # NOTE(review): type="file" is deprecated in Gradio 4+ (use
        # "filepath" or "binary"); kept as-is for the pinned Gradio version.
        file = gr.File(label="📄 Upload policy or memo (.pdf or .txt)", type="file")

    with gr.Row():
        feedback = gr.Radio(
            ["👍 Helpful", "👎 Not Helpful", "No feedback"],
            label="Was Evo's answer useful?",
            value="No feedback",
        )

    with gr.Row():
        evo_out = gr.Textbox(label="💬 EvoRAG Suggestion")
        gpt_out = gr.Textbox(label="🤖 GPT-3.5 Suggestion")

    submit_btn = gr.Button("Run Advisors")
    submit_btn.click(
        fn=advisor_interface,
        inputs=[query, file, feedback],
        outputs=[evo_out, gpt_out],
    )

demo.launch()
|