import gradio as gr
from inference import get_evo_response, get_gpt_response
from logger import log_feedback
from utils import extract_text_from_file
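
# The helper modules imported above ship with the Space but are not shown in this file:
#   inference - get_evo_response / get_gpt_response produce the two answers shown side by side
#   logger    - log_feedback persists the user's rating of Evo's answer
#   utils     - extract_text_from_file turns the uploaded memo into plain-text context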
def advisor_interface(question, file, feedback_choice):
    context = extract_text_from_file(file) if file else ""
    evo_answer = get_evo_response(question, context)
    gpt_answer = get_gpt_response(question, context)
    if feedback_choice != "No feedback":
        log_feedback(question, context, evo_answer, feedback_choice)
    return evo_answer, gpt_answer

with gr.Blocks() as demo:
    gr.Markdown("## EvoRAG – Retrieval-Augmented Adaptive AI")
    with gr.Row():
        question = gr.Textbox(label="Ask anything", placeholder="e.g. Should we diversify the portfolio?")
    with gr.Row():
        file = gr.File(label="Upload memo (.pdf or .txt)", file_types=[".pdf", ".txt"], type="binary")
    with gr.Row():
        feedback = gr.Radio(["Helpful", "Not Helpful", "No feedback"], label="Was Evo's answer useful?", value="No feedback")
    with gr.Row():
        evo_out = gr.Textbox(label="EvoRAG Suggestion")
        gpt_out = gr.Textbox(label="GPT-3.5 Suggestion")
    submit_btn = gr.Button("Run Advisors")
    submit_btn.click(fn=advisor_interface, inputs=[question, file, feedback], outputs=[evo_out, gpt_out])
demo.launch()
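
The retrieval, logging, and parsing helpers live in sibling modules that are not part of this file. As a rough illustration only, the sketch below shows one way utils.extract_text_from_file and logger.log_feedback could be written, assuming pypdf for PDF parsing and a flat CSV feedback log; the Space's actual helpers may differ.

# Illustrative sketch of utils.py / logger.py (assumptions, not the Space's real code)
import csv
import io
from datetime import datetime, timezone

from pypdf import PdfReader  # assumed PDF parser


def extract_text_from_file(file_bytes):
    """Return plain text from an uploaded .pdf or .txt memo passed as raw bytes."""
    if not file_bytes:
        return ""
    try:
        # Treat the upload as a PDF first.
        reader = PdfReader(io.BytesIO(file_bytes))
        return "\n".join(page.extract_text() or "" for page in reader.pages)
    except Exception:
        # Fall back to plain-text decoding for .txt uploads.
        return file_bytes.decode("utf-8", errors="ignore")


def log_feedback(question, context, evo_answer, feedback_choice, path="feedback_log.csv"):
    """Append one feedback record to a local CSV file."""
    with open(path, "a", newline="", encoding="utf-8") as f:
        csv.writer(f).writerow(
            [datetime.now(timezone.utc).isoformat(), question, context[:500], evo_answer, feedback_choice]
        )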