Update app.py
app.py CHANGED
@@ -1,17 +1,17 @@
 import gradio as gr
-from inference import evo_chat_predict, get_gpt_response, get_model_config, get_system_stats
+from inference import evo_chat_predict, get_gpt_response, get_model_config
 from logger import log_feedback
-import subprocess
-import os
+import subprocess, os

 chat_history = []

-# 🚀 …
+# 🚀 Core logic
+
 def chat_fn(user_input, option1, option2, user_vote=None):
     global chat_history

     if not user_input or not option1 or not option2:
-        return "❌ …"
+        return "❌ Provide a question and two options.", chat_history, "", ""

     options = [option1.strip(), option2.strip()]
     evo_result = evo_chat_predict(chat_history, user_input, options)
@@ -19,8 +19,9 @@ def chat_fn(user_input, option1, option2, user_vote=None):

     evo_msg = (
         f"### 🤖 Evo\n"
-        f"**Answer:** {evo_result['answer']} \n"
-        f"**Reasoning:** {evo_result['reasoning']}\n"
+        f"**Answer:** {evo_result['answer']}\n"
+        f"**Reasoning:** {evo_result['reasoning']}\n"
+        f"**Confidence:** {evo_result['confidence']}\n"
         f"---\n"
         f"### 🧠 GPT-3.5\n"
         f"{gpt_response}"
@@ -30,7 +31,6 @@ def chat_fn(user_input, option1, option2, user_vote=None):
     chat_history.append(f"🤖 Evo: {evo_result['answer']}")
     chat_history.append(f"🧠 GPT: {gpt_response}")

-    # Log feedback
     log_feedback(
         question=user_input,
         option1=option1,
@@ -42,17 +42,13 @@ def chat_fn(user_input, option1, option2, user_vote=None):
         user_preference=user_vote
     )

-    # Show genome config
     config = get_model_config()
     config_str = (
-        f"**Layers:** {config['num_layers']} \n"
-        f"**Heads:** {config['num_heads']} \n"
-        f"**FFN Dim:** {config['ffn_dim']} \n"
-        f"**Memory:** {'Enabled' if config['memory_enabled'] else 'Disabled'}"
+        f"**Layers:** {config['num_layers']} | **Heads:** {config['num_heads']} | "
+        f"**FFN:** {config['ffn_dim']} | **Memory:** {'✅' if config['memory_enabled'] else '❌'}"
     )

-
-    log_txt = "No genome log yet."
+    log_txt = "Genome log not found."
     if os.path.exists("genome_log.csv"):
         with open("genome_log.csv", "r", encoding="utf-8") as f:
             lines = f.readlines()
@@ -60,65 +56,54 @@ def chat_fn(user_input, option1, option2, user_vote=None):
             last = lines[-1].strip().split(",")
             log_txt = f"🧬 Genome ID: {last[0]} | Accuracy: {last[-1]}"

-
-    sys = get_system_stats()
-    sys_str = (
-        f"**Device:** {sys['device']} ({sys['platform']}) \n"
-        f"**CPU Usage:** {sys['cpu_usage_percent']}% \n"
-        f"**RAM:** {sys['memory_used_gb']} GB / {sys['memory_total_gb']} GB \n"
-        f"**GPU:** {sys['gpu_name']} \n"
-        f"**GPU Memory:** {sys['gpu_memory_used_gb']} GB / {sys['gpu_memory_total_gb']} GB"
-    )
+    return evo_msg, chat_history, config_str, log_txt

-    return evo_msg, chat_history, config_str, log_txt, sys_str

-# 🚀 Clear everything
 def clear_fn():
     global chat_history
     chat_history = []
-    return "", "", "", None, [], "", ""
+    return "", "", "", None, [], "", ""
+

-# 🚀 Retrain
 def retrain_model():
     try:
         subprocess.run(["python", "retrain_from_feedback.py"], check=True)
-        return "✅ Evo retrained …"
+        return "✅ Evo retrained."
     except Exception as e:
-        return f"❌ …"
+        return f"❌ Retrain error: {str(e)}"
+

-# ⬇️ Download feedback
 def export_feedback():
-    if os.path.exists("feedback_log.csv"):
-        return "feedback_log.csv"
-    return None
+    return "feedback_log.csv" if os.path.exists("feedback_log.csv") else None
+

-# …
-with gr.Blocks(title="EvoRAG – Real-Time …") as demo:
-    gr.Markdown("## …")
-    gr.Markdown("…")
+# 🚀 UI Launch
+with gr.Blocks(title="EvoRAG – Real-Time Reasoning AI") as demo:
+    gr.Markdown("## 🧠 EvoRAG – Real-Time Reasoning AI")
+    gr.Markdown("Built Different. Learns Live. Evolves from You.")

     with gr.Row():
         with gr.Column(scale=4):
             user_input = gr.Textbox(label="Your Question", lines=2)
             option1 = gr.Textbox(label="Option 1")
             option2 = gr.Textbox(label="Option 2")
-            user_vote = gr.Radio(["Evo", "GPT"], label="🗳️ Who …")
-            submit = gr.Button("…")
-            clear = gr.Button("…")
-            retrain = gr.Button("…")
-            export = gr.Button("…")
+            user_vote = gr.Radio(["Evo", "GPT"], label="🗳️ Who was better?", info="Optional – fuels evolution")
+            submit = gr.Button("Ask Evo")
+            clear = gr.Button("Clear")
+            retrain = gr.Button("Retrain Evo")
+            export = gr.Button("Export Feedback")

         with gr.Column(scale=6):
             evo_reply = gr.Markdown()
             chat_display = gr.HighlightedText(label="Conversation History")
             model_info = gr.Markdown(label="🧠 Evo Architecture")
-            genome_log = gr.Markdown(label="📈 Evolution …")
-            sys_stats = gr.Markdown(label="📊 System Stats")
+            genome_log = gr.Markdown(label="📈 Last Evolution")

     submit.click(chat_fn, inputs=[user_input, option1, option2, user_vote],
-                 outputs=[evo_reply, chat_display, model_info, genome_log, sys_stats])
-    clear.click(clear_fn, outputs=[user_input, option1, option2, user_vote, chat_display, model_info, genome_log, sys_stats])
+                 outputs=[evo_reply, chat_display, model_info, genome_log])
+    clear.click(clear_fn, outputs=[user_input, option1, option2, user_vote, chat_display, model_info, genome_log])
     retrain.click(retrain_model, outputs=evo_reply)
     export.click(export_feedback, outputs=[])

-demo.launch()
+if __name__ == "__main__":
+    demo.launch()
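Two review notes on the new wiring, both easy to check locally since the added `if __name__ == "__main__":` guard keeps `python app.py` as the entrypoint while letting the module be imported without launching the UI.

First, `chat_display` is a `gr.HighlightedText` that receives `chat_history` as a list of plain strings; depending on the Gradio version, that component may expect `(text, label)` pairs, so the history may not render as intended.

Second, `export.click(export_feedback, outputs=[])` discards the CSV path that `export_feedback()` returns, so the export button currently has no visible effect. Below is a minimal sketch of one way to surface the file, assuming a new `gr.File` component; the `export_file` name is hypothetical and not part of this commit.

```python
import os
import gradio as gr

def export_feedback():
    # Same logic as the commit: return the CSV path if it exists, else None.
    return "feedback_log.csv" if os.path.exists("feedback_log.csv") else None

with gr.Blocks() as demo:
    export = gr.Button("Export Feedback")
    # Hypothetical download slot (not in this commit): gr.File renders a
    # returned file path as a downloadable file in the browser.
    export_file = gr.File(label="feedback_log.csv")
    export.click(export_feedback, outputs=export_file)

if __name__ == "__main__":
    demo.launch()
```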