HemanM committed on
Commit f18c30e · verified · 1 Parent(s): 9e69980

Update app.py

Files changed (1)
  1. app.py +33 -16
app.py CHANGED
@@ -1,36 +1,53 @@
 import gradio as gr
-from inference import get_evo_response, get_gpt_response, get_context_from_file
+from inference import get_evo_response, get_gpt_response
 from logger import log_feedback
+import retrain
+import pandas as pd
+import os

-def advisor_interface(query, file, feedback_choice):
-    context = None
-    if file is not None:
-        context = get_context_from_file(file)
-
-    evo_output = get_evo_response(query, file)
-    gpt_output = get_gpt_response(query, file)
+def advisor_interface(query, context, feedback_choice):
+    evo_output, evo_reasoning = get_evo_response(query, context)
+    gpt_output = get_gpt_response(query, context)

     if feedback_choice != "No feedback":
-        log_feedback(query, context, evo_output, feedback_choice)
+        label = 1 if feedback_choice == "👍 Helpful" else 0
+        log_feedback(query, context, evo_output, label)
+
+    return evo_reasoning, gpt_output, load_history()
+
+def retrain_evo():
+    retrain.fine_tune_on_feedback()
+    return "✅ Evo retrained on feedback.", load_history()

-    return evo_output, gpt_output
+def load_history():
+    if os.path.exists("feedback_log.csv"):
+        df = pd.read_csv("feedback_log.csv")
+        return df.tail(10).to_markdown(index=False)
+    return "No history available yet."

 with gr.Blocks() as demo:
-    gr.Markdown("## 🧠 EvoRAG – Retrieval-Augmented Adaptive AI for Finance")
+    gr.Markdown("## 🧠 EvoRAG – Retrieval-Augmented Adaptive AI")

     with gr.Row():
         query = gr.Textbox(label="📝 Ask a financial question", placeholder="e.g. Should we reduce exposure to Fund A?")
-
-        file = gr.File(label="📂 Upload policy or memo (.pdf or .txt)", type="file")
+        context = gr.Textbox(label="📂 Paste memo, news, or background", placeholder="e.g. Tech Fund A underperformed 3.2%...")

     with gr.Row():
         feedback = gr.Radio(["👍 Helpful", "👎 Not Helpful", "No feedback"], label="Was Evo’s answer useful?", value="No feedback")

     with gr.Row():
-        evo_out = gr.Textbox(label="🔬 EvoRAG Suggestion")
+        evo_out = gr.Textbox(label="🔬 EvoRAG Suggestion (with reasoning)")
         gpt_out = gr.Textbox(label="🤖 GPT-3.5 Suggestion")

-    submit_btn = gr.Button("Run Advisors")
-    submit_btn.click(fn=advisor_interface, inputs=[query, file, feedback], outputs=[evo_out, gpt_out])
+    run_button = gr.Button("Run Advisors")
+    run_button.click(fn=advisor_interface, inputs=[query, context, feedback], outputs=[evo_out, gpt_out, gr.Textbox(label="📜 Recent History")])
+
+    gr.Markdown("---")
+    gr.Markdown("### 🔁 Retrain Evo from Feedback")
+    retrain_button = gr.Button("📚 Retrain Evo")
+    retrain_output = gr.Textbox(label="🛠️ Retrain Status")
+    history_output = gr.Textbox(label="📜 Recent History")
+
+    retrain_button.click(fn=retrain_evo, inputs=[], outputs=[retrain_output, history_output])

 demo.launch()
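
Note: the updated app.py imports get_evo_response and get_gpt_response from inference.py and log_feedback from logger.py, none of which are part of this commit. A minimal sketch of the interfaces it appears to assume follows; the signatures, return shapes, and the feedback_log.csv column names are illustrative guesses, not the Space's actual implementation.

# Illustrative stubs only; the real inference.py / logger.py in this Space may differ.
import csv
import os

def get_evo_response(query: str, context: str):
    """Assumed to return a (answer, reasoning) pair, since app.py unpacks two values."""
    answer = "Hold exposure to Fund A"  # placeholder answer
    reasoning = f"Evo answer: {answer}\nBased on context: {context[:200]}"
    return answer, reasoning

def get_gpt_response(query: str, context: str) -> str:
    """Assumed to return a single string shown in the GPT-3.5 textbox."""
    return "GPT-3.5 suggestion (placeholder)"

def log_feedback(query: str, context: str, response: str, label: int,
                 path: str = "feedback_log.csv"):
    """Appends one row per rating. Column names are a guess; load_history() in app.py
    only requires that the file be readable by pandas.read_csv."""
    new_file = not os.path.exists(path)
    with open(path, "a", newline="", encoding="utf-8") as f:
        writer = csv.writer(f)
        if new_file:
            writer.writerow(["query", "context", "response", "label"])
        writer.writerow([query, context, response, label])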
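
Note: retrain.fine_tune_on_feedback() is also outside this commit. Given that advisor_interface logs label = 1 for "👍 Helpful" and 0 otherwise, a plausible, purely hypothetical shape for it is to select the helpful rows from feedback_log.csv and hand them to whatever update routine Evo uses; the sketch below covers only that data-selection step and leaves the model update as a stub.

# Hypothetical retrain.py sketch. The filtering matches the label convention in app.py;
# the actual model update for Evo is not shown anywhere in this commit.
import pandas as pd

def fine_tune_on_feedback(path: str = "feedback_log.csv") -> int:
    df = pd.read_csv(path)
    helpful = df[df["label"] == 1]  # rows rated "👍 Helpful"
    pairs = list(zip(helpful["query"], helpful["response"]))
    # ... pass `pairs` to Evo's training/update routine here (not part of this commit) ...
    return len(pairs)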