Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,100 +1,113 @@
|
|
1 |
import gradio as gr
|
2 |
-
from inference import evo_chat_predict, get_gpt_response
|
3 |
from logger import log_feedback
|
4 |
import subprocess
|
|
|
5 |
|
6 |
-
# Global chat history
|
7 |
chat_history = []
|
8 |
|
9 |
-
#
|
10 |
def chat_fn(user_input, option1, option2, user_vote=None):
|
11 |
global chat_history
|
12 |
|
13 |
-
# Validate
|
14 |
if not user_input or not option1 or not option2:
|
15 |
-
return "β Please
|
16 |
|
17 |
options = [option1.strip(), option2.strip()]
|
18 |
-
|
19 |
-
# Evo prediction
|
20 |
evo_result = evo_chat_predict(chat_history, user_input, options)
|
21 |
-
|
22 |
-
# GPT fallback (background comparison)
|
23 |
gpt_response = get_gpt_response(user_input)
|
24 |
|
25 |
-
# Format response
|
26 |
evo_msg = (
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
)
|
34 |
|
35 |
chat_history.append(f"π€ User: {user_input}")
|
36 |
-
chat_history.append(f"π€ Evo: {
|
37 |
chat_history.append(f"π§ GPT: {gpt_response}")
|
38 |
|
39 |
-
#
|
40 |
log_feedback(
|
41 |
question=user_input,
|
42 |
option1=option1,
|
43 |
option2=option2,
|
44 |
-
context=evo_result[
|
45 |
-
evo_output=evo_result[
|
46 |
gpt_output=gpt_response,
|
47 |
-
evo_reasoning=evo_result[
|
48 |
user_preference=user_vote
|
49 |
)
|
50 |
|
51 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
52 |
|
53 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
54 |
def clear_fn():
|
55 |
global chat_history
|
56 |
chat_history = []
|
57 |
-
return "", "", "", None, []
|
58 |
|
59 |
-
# π
|
60 |
def retrain_model():
|
61 |
try:
|
62 |
-
|
63 |
-
["python", "retrain_from_feedback.py"],
|
64 |
-
check=True,
|
65 |
-
capture_output=True,
|
66 |
-
text=True
|
67 |
-
)
|
68 |
-
print(result.stdout) # Log to terminal
|
69 |
return "β
Evo retrained successfully."
|
70 |
-
except
|
71 |
-
|
72 |
-
print("STDERR:", e.stderr)
|
73 |
-
return f"β Retraining failed:\n{e.stderr}"
|
74 |
|
|
|
|
|
|
|
|
|
|
|
75 |
|
76 |
# π Gradio UI
|
77 |
with gr.Blocks(title="EvoRAG β Real-Time Adaptive Reasoning AI") as demo:
|
78 |
gr.Markdown("## 𧬠EvoRAG β Real-Time Adaptive Reasoning AI")
|
79 |
-
gr.Markdown("Ask Evo a question
|
80 |
|
81 |
with gr.Row():
|
82 |
with gr.Column(scale=4):
|
83 |
user_input = gr.Textbox(label="Your Question", lines=2)
|
84 |
option1 = gr.Textbox(label="Option 1")
|
85 |
option2 = gr.Textbox(label="Option 2")
|
86 |
-
user_vote = gr.Radio(["Evo", "GPT"], label="π³οΈ Who gave the better answer?", info="Optional β
|
87 |
submit = gr.Button("π§ Ask Evo")
|
88 |
clear = gr.Button("π Clear")
|
89 |
retrain = gr.Button("π Retrain Evo from Feedback")
|
|
|
90 |
|
91 |
with gr.Column(scale=6):
|
92 |
evo_reply = gr.Markdown()
|
93 |
chat_display = gr.HighlightedText(label="Conversation History")
|
94 |
-
|
95 |
-
|
96 |
-
|
97 |
-
|
98 |
-
|
|
|
|
|
|
|
99 |
|
100 |
demo.launch()
|
|
|
1 |
import gradio as gr
|
2 |
+
from inference import evo_chat_predict, get_gpt_response, get_model_config
|
3 |
from logger import log_feedback
|
4 |
import subprocess
|
5 |
+
import os
|
6 |
|
|
|
7 |
chat_history = []  # module-level conversation log shared by all Gradio callbacks (single-process state)
|
8 |
|
9 |
+
# π Handle chat logic
def chat_fn(user_input, option1, option2, user_vote=None):
    """Run one chat turn: ask Evo and GPT, log feedback, and report model state.

    Parameters:
        user_input: the user's question (required).
        option1, option2: the two candidate answers Evo chooses between (required).
        user_vote: optional "Evo"/"GPT" preference, forwarded to the feedback log.

    Returns:
        (evo_msg, chat_history, config_str, log_txt) — a Markdown reply,
        the updated shared history list, a genome-config summary, and a
        one-line summary of the latest genome-log row.
    """
    global chat_history

    # Validate: all three text fields are required.
    if not user_input or not option1 or not option2:
        return "β Please provide a question and two options.", chat_history, "", ""

    options = [option1.strip(), option2.strip()]

    # Evo's prediction, plus a GPT response for side-by-side comparison.
    evo_result = evo_chat_predict(chat_history, user_input, options)
    gpt_response = get_gpt_response(user_input)

    evo_msg = (
        f"### π€ Evo\n"
        f"**Answer:** {evo_result['answer']} \n"
        f"**Reasoning:** {evo_result['reasoning']}\n\n"
        f"---\n"
        f"### π§ GPT-3.5\n"
        f"{gpt_response}"
    )

    chat_history.append(f"π€ User: {user_input}")
    chat_history.append(f"π€ Evo: {evo_result['answer']}")
    chat_history.append(f"π§ GPT: {gpt_response}")

    # Log feedback for later retraining.
    log_feedback(
        question=user_input,
        option1=option1,
        option2=option2,
        context=evo_result["context_used"],
        evo_output=evo_result["answer"],
        gpt_output=gpt_response,
        evo_reasoning=evo_result["reasoning"],
        user_preference=user_vote,
    )

    return evo_msg, chat_history, _format_config(), _latest_genome_entry()


def _format_config():
    """Render the current Evo genome architecture as Markdown."""
    config = get_model_config()
    return (
        f"**Layers:** {config['num_layers']} \n"
        f"**Heads:** {config['num_heads']} \n"
        f"**FFN Dim:** {config['ffn_dim']} \n"
        f"**Memory:** {'Enabled' if config['memory_enabled'] else 'Disabled'}"
    )


def _latest_genome_entry():
    """Summarize the newest data row of genome_log.csv, if any.

    Assumes the first line is a header and each row's first/last columns are
    the genome id and accuracy — TODO confirm against the log writer.
    """
    log_txt = "No genome log yet."
    if os.path.exists("genome_log.csv"):
        with open("genome_log.csv", "r", encoding="utf-8") as f:
            # Drop blank lines so a trailing newline can't yield an empty "last row".
            lines = [ln for ln in f if ln.strip()]
        if len(lines) > 1:
            last = lines[-1].strip().split(",")
            log_txt = f"𧬠Genome ID: {last[0]} | Accuracy: {last[-1]}"
    return log_txt
|
64 |
+
|
65 |
+
# π Clear everything
def clear_fn():
    """Wipe the shared conversation log and return blank values for every bound widget."""
    global chat_history
    chat_history = []
    # One value per wired output: question, option1, option2, vote, chat, model info, genome log.
    empty_text = ""
    return empty_text, empty_text, empty_text, None, [], empty_text, empty_text
|
70 |
|
71 |
+
# π Retrain
def retrain_model():
    """Run retrain_from_feedback.py in a subprocess and report the outcome.

    Returns a user-facing status string; never raises. Output is captured so
    a failure message can include the script's stderr instead of hiding it.
    """
    try:
        result = subprocess.run(
            ["python", "retrain_from_feedback.py"],
            check=True,
            capture_output=True,
            text=True,
        )
        print(result.stdout)  # surface the training log in the server console
        return "β Evo retrained successfully."
    except subprocess.CalledProcessError as e:
        # Script ran but exited non-zero: show what it printed to stderr.
        print("STDERR:", e.stderr)
        return f"β Retraining failed:\n{e.stderr}"
    except OSError as e:
        # e.g. no `python` executable on PATH.
        return f"β Retraining failed: {e}"
|
|
|
|
|
78 |
|
79 |
+
# β¬οΈ Download feedback
def export_feedback():
    """Return the feedback CSV's path for download, or None when nothing is logged yet."""
    feedback_path = "feedback_log.csv"
    if not os.path.exists(feedback_path):
        return None
    return feedback_path
|
84 |
|
85 |
# π Gradio UI
with gr.Blocks(title="EvoRAG β Real-Time Adaptive Reasoning AI") as demo:
    gr.Markdown("## 𧬠EvoRAG β Real-Time Adaptive Reasoning AI")
    gr.Markdown("Ask Evo a question and give two options. Evo chooses, explains, and evolves. Compare with GPT-3.5.")

    with gr.Row():
        # Left column: inputs and action buttons.
        with gr.Column(scale=4):
            user_input = gr.Textbox(label="Your Question", lines=2)
            option1 = gr.Textbox(label="Option 1")
            option2 = gr.Textbox(label="Option 2")
            user_vote = gr.Radio(["Evo", "GPT"], label="π³οΈ Who gave the better answer?", info="Optional β helps Evo learn.")
            submit = gr.Button("π§ Ask Evo")
            clear = gr.Button("π Clear")
            retrain = gr.Button("π Retrain Evo from Feedback")
            export = gr.Button("β¬οΈ Export Feedback CSV")

        # Right column: Evo's reply and model diagnostics.
        with gr.Column(scale=6):
            evo_reply = gr.Markdown()
            chat_display = gr.HighlightedText(label="Conversation History")
            model_info = gr.Markdown(label="π§ Evo Architecture")
            genome_log = gr.Markdown(label="π Evolution Log")
            # File slot so the export button's returned path is actually shown.
            feedback_file = gr.File(label="Feedback CSV")

    submit.click(chat_fn, inputs=[user_input, option1, option2, user_vote],
                 outputs=[evo_reply, chat_display, model_info, genome_log])
    clear.click(clear_fn, outputs=[user_input, option1, option2, user_vote, chat_display, model_info, genome_log])
    retrain.click(retrain_model, outputs=evo_reply)
    # BUG FIX: export_feedback returns a file path, but `outputs=[]` silently
    # discarded it, so the export button did nothing visible. Wiring the
    # return value to a gr.File makes the CSV downloadable.
    export.click(export_feedback, outputs=feedback_file)

demo.launch()
|