Update app.py

app.py
CHANGED
@@ -1,15 +1,7 @@
 import os
-# --- CONFIGURATION TOGGLES ---
-# Set these values to configure the application's behavior.
-
-# Set to True to disable destructive actions (clearing all data, saving edited rules, and all uploads).
 DEMO_MODE = False
-# Select the storage backend: "HF_DATASET", "SQLITE", or "RAM".
-# This will override the .env file setting for STORAGE_BACKEND.
 MEMORY_STORAGE_TYPE = "RAM"
 
-# If using HF_DATASET, specify the repository names here.
-# These will override the .env file settings.
 HF_DATASET_MEMORY_REPO = "broadfield-dev/ai-brain"
 HF_DATASET_RULES_REPO = "broadfield-dev/ai-rules"
 
@@ -41,6 +33,7 @@ from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
 from cryptography.hazmat.primitives import hashes
 from cryptography.exceptions import InvalidTag
 # --- End New Imports ---
+import random
 
 load_dotenv() # Load .env file, but our settings above will take precedence if set.
 
@@ -90,7 +83,11 @@ PBKDF2_ITERATIONS = 480000
 LENGTH_HEADER_SIZE = 4 # struct.pack('>I') uses 4 bytes
 PREFERRED_FONTS = ["Arial", "Helvetica", "DejaVu Sans", "Verdana", "Calibri", "sans-serif"]
 MAX_KEYS_TO_DISPLAY_OVERLAY = 15
-
+def convert_pil_to_png_bytes(image: Image.Image) -> bytes:
+    """Saves a PIL image to an in-memory buffer as PNG and returns the raw bytes."""
+    with io.BytesIO() as buffer:
+        image.save(buffer, format="PNG")
+        return buffer.getvalue()
 def _get_font(preferred_fonts, base_size):
     fp = None
     safe_base_size = int(base_size)
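The added convert_pil_to_png_bytes helper is self-contained; a quick usage sketch (assuming Pillow and the io import already present at the top of app.py):

from PIL import Image

img = Image.new("RGB", (64, 64), color="navy")
png_bytes = convert_pil_to_png_bytes(img)      # raw PNG bytes, e.g. for hashing or upload
assert png_bytes[:8] == b"\x89PNG\r\n\x1a\n"   # PNG magic header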
@@ -167,74 +164,81 @@ def parse_kv_string_to_dict(kv_str:str)->dict:
         dd[k]=v
     return dd
 
-def generate_brain_carrier_image(w=800, h=800
+def generate_brain_carrier_image(w=800, h=800) -> Image.Image:
     """
-    Generates a carrier image with a
+    Generates a high-quality carrier image with a multi-layered, procedural starfield
+    using NumPy for performance and visual appeal.
     """
-    # ---
-
-    icon_color = (180, 220, 255, 200) # Light, slightly transparent color for the icon text
-    text_color = (230, 235, 245) # Bright text color for the main message
-    shadow_color = (0, 0, 0, 128) # Text shadow
-    border_color = (255, 255, 255, 50) # Subtle white border
-
-    # --- Font and Icon Selection ---
-    PREFERRED_MONO_FONTS = ["Courier New", "Consolas", "Menlo", "Monaco", "Courier", "monospace"]
-
-    # Use the single line "thinking" bubble as the main icon.
-    ascii_art_icon = ". o O ( hmm... )"
-    #
-    ...
+    # --- 1. Create the Gradient Background with NumPy ---
+    center_x, center_y = w / 2, h / 2
+    y_coords, x_coords = np.mgrid[0:h, 0:w]
+
+    # Calculate distance of each pixel from the center
+    distance = np.sqrt((x_coords - center_x)**2 + (y_coords - center_y)**2)
+    max_distance = np.sqrt(center_x**2 + center_y**2)
+
+    # Normalize distance to a 0-1 range
+    distance_norm = distance / max_distance
+
+    # Define colors and create the gradient array
+    bg_center_color = np.array([20, 25, 40]) # Deeper blue center
+    bg_outer_color = np.array([0, 0, 0])     # Black edges
+
+    # Interpolate colors across all pixels at once (vectorized)
+    # The [..., np.newaxis] part is for broadcasting the color channels
+    gradient = bg_outer_color + (bg_center_color - bg_outer_color) * (1 - distance_norm[..., np.newaxis])
+
+    # Convert the NumPy array to a PIL Image to start drawing on it
+    img = Image.fromarray(gradient.astype(np.uint8), 'RGB')
     draw = ImageDraw.Draw(img)
 
-    # ---
-    center_x, center_y = w / 2, h / 2
-    max_radius = int((center_x**2 + center_y**2)**0.5)
-
-    for r in range(max_radius, 0, -3):
-        ratio = 1 - (r / max_radius)
-        inter_color = tuple(int(bg_outer_color[i] + (bg_center_color[i] - bg_center_color[i]) * ratio) for i in range(3))
-        box = [center_x - r, center_y - r, center_x + r, center_y + r]
-        draw.ellipse(box, fill=inter_color)
-
-    # --- 2. Draw Text-based Icon ---
-    # Make the icon much larger, based on image width.
-    icon_font_size = max(24, int(w / 15))
-    icon_font = _get_font(PREFERRED_MONO_FONTS, icon_font_size)
-
-    # Position the icon lower, closer to the bottom text.
-    icon_cx = w / 2
-    icon_cy = h * 0.58
-
-    #
-    fill=
-    ...
+    # --- 2. Draw a Multi-Layered Starfield ---
+
+    # Layer 1: Distant, tiny stars (for depth)
+    num_distant_stars = int((w * h) / 200)
+    for _ in range(num_distant_stars):
+        x, y = random.randint(0, w - 1), random.randint(0, h - 1)
+        brightness = random.randint(30, 90)
+        draw.point((x, y), fill=(brightness, brightness, int(brightness * 1.1))) # Slightly blue tint
+
+    # Layer 2: Main stars with glow, size, and color variation
+    num_main_stars = int((w * h) / 1000)
+    star_colors = [
+        (255, 255, 255), # White
+        (220, 230, 255), # Light Blue
+        (255, 240, 220), # Faint Yellow
+    ]
+
+    for _ in range(num_main_stars):
+        x, y = random.randint(0, w - 1), random.randint(0, h - 1)
+        dist_from_center = np.sqrt((x - center_x)**2 + (y - center_y)**2)
+        dist_ratio = min(dist_from_center / max_distance, 1.0)
+
+        # Base size and brightness increase with distance from center
+        size = 0.5 + (2.5 * (dist_ratio ** 2))
+        brightness = 120 + (135 * (dist_ratio ** 1.5))
+
+        # Select a random base color
+        color = random.choice(star_colors)
+
+        # Apply brightness to the selected color
+        final_color = tuple(int(c * (brightness / 255.0)) for c in color)
+
+        # Simulate a soft glow by drawing a larger, dimmer circle first
+        glow_size = size * 3
+        glow_color = tuple(int(c * 0.3) for c in final_color) # Much dimmer
+        draw.ellipse([x - glow_size, y - glow_size, x + glow_size, y + glow_size], fill=glow_color)
+
+        # Simulate a "twinkle" effect for some stars by drawing a cross
+        if random.random() < 0.15: # 15% chance to twinkle
+            draw.line([x-size, y, x+size, y], fill=final_color, width=1)
+            draw.line([x, y-size, x, y+size], fill=final_color, width=1)
+        else:
+            # Draw the main star on top of the glow
+            draw.ellipse([x - size, y - size, x + size, y + size], fill=final_color)
+
+    return img
 
-    # --- Finalize ---
-    final_image_rgb = Image.new("RGB", img.size, (0, 0, 0))
-    final_image_rgb.paste(img, (0, 0), img)
-
-    return final_image_rgb
 
 def _get_text_measurement(draw_obj, text_str, font_obj):
     if hasattr(draw_obj, 'textbbox'):
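Worth noting in this hunk: the deleted per-ring loop interpolated with (bg_center_color[i] - bg_center_color[i]), which is always zero, so the old "gradient" was a flat fill; the vectorized replacement interpolates (center - outer) over a normalized distance field in one pass. A standalone sketch of the corrected math (assumes numpy and Pillow; toy 200x200 size, illustrative only):

import numpy as np
from PIL import Image

w, h = 200, 200
cx, cy = w / 2, h / 2
y, x = np.mgrid[0:h, 0:w]
dist_norm = np.sqrt((x - cx)**2 + (y - cy)**2) / np.sqrt(cx**2 + cy**2)
center, outer = np.array([20, 25, 40]), np.array([0, 0, 0])
# Correct interpolation: (center - outer), not (center - center) as in the old loop
gradient = outer + (center - outer) * (1 - dist_norm[..., np.newaxis])
Image.fromarray(gradient.astype(np.uint8), "RGB").save("gradient_demo.png")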
@@ -256,63 +260,44 @@ def _get_text_measurement(draw_obj, text_str, font_obj):
     except: return len(text_str) * 8, 10
 
 def draw_key_list_dropdown_overlay(image: Image.Image, keys: list[str] = None, title: str = "Data Embedded") -> Image.Image:
-    ...
-    actual_title_w, actual_title_h = _get_text_measurement(draw, title, title_font)
-    disp_keys, actual_key_text_widths, total_keys_render_h, key_line_heights = [],[],0,[]
+    """Draws overlays on the image using the 'KeyLock' style."""
+    img_overlayed = image.copy().convert("RGBA")
+    draw = ImageDraw.Draw(img_overlayed, "RGBA")
+    width, height = img_overlayed.size
+
+    overlay_color = (15, 23, 42, 190)
+    title_color = (226, 232, 240)
+    key_color = (148, 163, 184)
+
+    font_bold = _get_font(PREFERRED_FONTS, 30)
+    font_regular = _get_font(PREFERRED_FONTS, 15)
+
+    draw.rectangle([0, 20, width, 80], fill=overlay_color)
+    draw.text((width / 2, 50), title, fill=title_color, font=font_bold, anchor="ms")
+
     if keys:
-        ...
-        if current_text_y+ellipsis_h <= key_list_y0+current_key_list_box_h-padding['key_y']:
-            ellipsis_w, _ = _get_text_measurement(draw,"...",key_font)
-            draw.text((key_list_x0+(final_overlay_box_width-ellipsis_w)/2, current_text_y), "...", font=key_font, fill=ellipsis_color)
-            break
-        original_key_text_w = actual_key_text_widths[i]; text_to_draw = key_text_item
-        if original_key_text_w > available_text_width_for_keys:
-            temp_text = key_text_item
-            while _get_text_measurement(draw, temp_text+"...", key_font)[0] > available_text_width_for_keys and len(temp_text) > 0: temp_text=temp_text[:-1]
-            text_to_draw = temp_text+"..." if len(temp_text)<len(key_text_item) else temp_text
-        final_key_text_w, _ = _get_text_measurement(draw, text_to_draw, key_font)
-        key_text_draw_x = key_list_x0+padding['key_x']+max(0,(available_text_width_for_keys-final_key_text_w)/2)
-        text_color_to_use = ellipsis_color if "..." in text_to_draw or f"... ({len(keys)-(MAX_KEYS_TO_DISPLAY_OVERLAY-1)} more)"==key_text_item else key_text_color
-        draw.text((key_text_draw_x, current_text_y), text_to_draw, font=key_font, fill=text_color_to_use)
-        current_text_y += current_key_h
-        if i < len(disp_keys)-1: current_text_y += line_spacing
-    return set_pil_image_format_to_png(img_overlayed)
-# --- END KV to Image Functions ---
+        box_padding = 15
+        line_spacing = 6
+        text_start_x = 35
+        lines = keys
+
+        line_heights = [_get_text_measurement(draw, line, font_regular)[1] for line in lines]
+        total_text_height = sum(line_heights) + (len(lines) - 1) * line_spacing
+        box_height = total_text_height + (box_padding * 2)
+        box_y0 = height - box_height - 20
+
+        draw.rectangle([20, box_y0, width - 20, height - 20], fill=overlay_color)
+        current_y = box_y0 + box_padding
+
+        for i, key_text in enumerate(lines):
+            draw.text((text_start_x, current_y), key_text, fill=key_color, font=font_regular)
+            if i < len(line_heights):
+                current_y += line_heights[i] + line_spacing
+
+    final_image_rgb = Image.new("RGB", img_overlayed.size, (0, 0, 0))
+    final_image_rgb.paste(img_overlayed, (0, 0), img_overlayed)
+
+    return final_image_rgb
 
 
 # --- Helper Functions ---
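The rewritten overlay no longer measures and truncates each key against a column width; it simply stacks the provided strings in a bottom box sized from _get_text_measurement. A hypothetical end-to-end call of the two rewritten helpers above (values are illustrative):

carrier = generate_brain_carrier_image(w=800, h=800)
labeled = draw_key_list_dropdown_overlay(
    carrier,
    keys=["Rule Count: 12", "Memory Count: 34"],
    title="Embedded Data",
)
labeled.save("kb_preview.png")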
@@ -383,7 +368,7 @@ def process_user_interaction_gradio(user_input: str, provider_name: str, model_d
     history_snippet = "\n".join([f"{msg['role']}: {msg['content'][:100]}" for msg in chat_history_for_prompt[-2:]])
     guideline_snippet = initial_insights_ctx_str[:200].replace('\n', ' ')
     tool_sys_prompt = "You are a precise routing agent... Output JSON only. Example: {\"action\": \"search_duckduckgo_and_report\", \"action_input\": {\"search_engine_query\": \"query\"}}"
-    tool_user_prompt = f"User Query: \"{user_input}\nRecent History:\n{history_snippet}\nGuidelines: {guideline_snippet}...\nAvailable Actions: quick_respond, answer_using_conversation_memory, search_duckduckgo_and_report, scrape_url_and_report.\nSelect one action and input. Output JSON."
+    tool_user_prompt = f"User Query: \"{user_input}\"\nRecent History:\n{history_snippet}\nGuidelines: {guideline_snippet}...\nAvailable Actions: quick_respond, answer_using_conversation_memory, search_duckduckgo_and_report, scrape_url_and_report.\nSelect one action and input. Output JSON."
     tool_decision_messages = [{"role":"system", "content": tool_sys_prompt}, {"role":"user", "content": tool_user_prompt}]
     tool_provider, tool_model_id = TOOL_DECISION_PROVIDER_ENV, TOOL_DECISION_MODEL_ID_ENV
     tool_model_display = next((dn for dn, mid in MODELS_BY_PROVIDER.get(tool_provider.lower(), {}).get("models", {}).items() if mid == tool_model_id), None)
@@ -595,18 +580,16 @@ Combine all findings into a single, valid XML structure as specified in the syst
             else:
                 logger.warning(f"POST_INTERACTION_LEARNING [{task_id}]: Op {op_idx}: Skipped op due to unknown action '{action}' from XML.")
 
-        # After processing all rules, if there were significant learnings, add a special memory
         if significant_learnings_summary:
             learning_digest = "SYSTEM CORE LEARNING DIGEST:\n" + "\n".join(significant_learnings_summary)
-            # Create a synthetic metrics object for this system memory
             system_metrics = {
                 "takeaway": "Core knowledge refined.",
-                "response_success_score": 1.0,
+                "response_success_score": 1.0,
                 "future_confidence_score": 1.0,
                 "type": "SYSTEM_REFLECTION"
             }
             add_memory_entry(
-                user_input="SYSTEM_INTERNAL_REFLECTION_TRIGGER",
+                user_input="SYSTEM_INTERNAL_REFLECTION_TRIGGER",
                 metrics=system_metrics,
                 bot_response=learning_digest
             )
@@ -623,9 +606,8 @@ Combine all findings into a single, valid XML structure as specified in the syst
 def handle_gradio_chat_submit(user_msg_txt: str, gr_hist_list: list, sel_prov_name: str, sel_model_disp_name: str, ui_api_key: str|None, cust_sys_prompt: str):
     global current_chat_session_history
     cleared_input, updated_gr_hist, status_txt = "", list(gr_hist_list), "Initializing..."
-    ...
-    updated_mems_json = ui_refresh_memories_display_fn() # Get current memories state
+    updated_rules_text = ui_refresh_rules_display_fn()
+    updated_mems_json = ui_refresh_memories_display_fn()
     def_detect_out_md = gr.Markdown(visible=False)
     def_fmt_out_txt = gr.Textbox(value="*Waiting...*", interactive=True, show_copy_button=True)
     def_dl_btn = gr.DownloadButton(interactive=False, value=None, visible=False)
@@ -633,21 +615,18 @@ def handle_gradio_chat_submit(user_msg_txt: str, gr_hist_list: list, sel_prov_na
     if not user_msg_txt.strip():
         status_txt = "Error: Empty message."
         updated_gr_hist.append((user_msg_txt or "(Empty)", status_txt))
-        # Ensure all outputs are provided on early exit
         yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn, updated_rules_text, updated_mems_json)
         return
 
     updated_gr_hist.append((user_msg_txt, "<i>Thinking...</i>"))
-    # Initial yield to update chat UI with thinking message and show current knowledge base state
     yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn, updated_rules_text, updated_mems_json)
 
     internal_hist = list(current_chat_session_history); internal_hist.append({"role": "user", "content": user_msg_txt})
-    # History truncation logic (keep MAX_HISTORY_TURNS pairs + optional system prompt)
     hist_len_check = MAX_HISTORY_TURNS * 2
     if internal_hist and internal_hist[0]["role"] == "system": hist_len_check +=1
     if len(internal_hist) > hist_len_check:
         current_chat_session_history = ([internal_hist[0]] if internal_hist[0]["role"] == "system" else []) + internal_hist[-(MAX_HISTORY_TURNS * 2):]
-        internal_hist = list(current_chat_session_history)
+        internal_hist = list(current_chat_session_history)
 
     final_bot_resp_acc, insights_used_parsed = "", []
     temp_dl_file_path = None
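The truncation above keeps an optional leading system message plus the last MAX_HISTORY_TURNS user/assistant pairs. A standalone sketch of the same invariant (hypothetical constant value; the app uses its own MAX_HISTORY_TURNS):

MAX_HISTORY_TURNS = 2  # hypothetical value for illustration

def truncate_history(hist: list[dict]) -> list[dict]:
    # Keep a leading system message (if any) plus the last N user/assistant pairs
    limit = MAX_HISTORY_TURNS * 2 + (1 if hist and hist[0]["role"] == "system" else 0)
    if len(hist) <= limit:
        return hist
    head = [hist[0]] if hist[0]["role"] == "system" else []
    return head + hist[-(MAX_HISTORY_TURNS * 2):]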
@@ -659,21 +638,18 @@ def handle_gradio_chat_submit(user_msg_txt: str, gr_hist_list: list, sel_prov_na
             if upd_type == "status":
                 status_txt = upd_data
                 if updated_gr_hist and updated_gr_hist[-1][0] == user_msg_txt:
-                    # Update the status alongside the streaming message
                     updated_gr_hist[-1] = (user_msg_txt, f"{curr_bot_disp_msg} <i>{status_txt}</i>" if curr_bot_disp_msg else f"<i>{status_txt}</i>")
             elif upd_type == "response_chunk":
                 curr_bot_disp_msg += upd_data
                 if updated_gr_hist and updated_gr_hist[-1][0] == user_msg_txt:
-                    updated_gr_hist[-1] = (user_msg_txt, curr_bot_disp_msg)
+                    updated_gr_hist[-1] = (user_msg_txt, curr_bot_disp_msg)
             elif upd_type == "final_response_and_insights":
                 final_bot_resp_acc, insights_used_parsed = upd_data["response"], upd_data["insights_used"]
                 status_txt = "Response generated. Processing learning..."
-                # Ensure the final chat message reflects the full response
                 if not curr_bot_disp_msg and final_bot_resp_acc : curr_bot_disp_msg = final_bot_resp_acc
                 if updated_gr_hist and updated_gr_hist[-1][0] == user_msg_txt:
                     updated_gr_hist[-1] = (user_msg_txt, curr_bot_disp_msg or "(No text)")
 
-                # Update detailed response box and download button
                 def_fmt_out_txt = gr.Textbox(value=curr_bot_disp_msg, interactive=True, show_copy_button=True)
 
                 if curr_bot_disp_msg and not curr_bot_disp_msg.startswith("Error:"):
@@ -688,15 +664,11 @@ def handle_gradio_chat_submit(user_msg_txt: str, gr_hist_list: list, sel_prov_na
                 else:
                     def_dl_btn = gr.DownloadButton(interactive=False, value=None, visible=False)
 
-                # Update insights display
                 insights_md_content = "### Insights Considered (Pre-Response):\n" + ("\n".join([f"- **[{i.get('type','N/A')}|{i.get('score','N/A')}]** {i.get('text','N/A')[:100]}..." for i in insights_used_parsed[:3]]) if insights_used_parsed else "*None specific.*")
                 def_detect_out_md = gr.Markdown(value=insights_md_content, visible=True if insights_used_parsed else False)
 
-            # Yield intermediate updates for the UI
-            # Pass the *current* state of rules and memories display components
             yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn, updated_rules_text, updated_mems_json)
 
-            # Stop processing generator after final_response_and_insights
             if upd_type == "final_response_and_insights": break
 
     except Exception as e:
@@ -710,29 +682,23 @@ def handle_gradio_chat_submit(user_msg_txt: str, gr_hist_list: list, sel_prov_na
         def_dl_btn = gr.DownloadButton(interactive=False, value=None, visible=False)
         def_detect_out_md = gr.Markdown(value="*Error processing request.*", visible=True)
 
-        # Provide the current state of rules/memories on error path yield
         current_rules_text_on_error = ui_refresh_rules_display_fn()
         current_mems_json_on_error = ui_refresh_memories_display_fn()
 
         yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn, current_rules_text_on_error, current_mems_json_on_error)
-        # Clean up temp file if created before error
         if temp_dl_file_path and os.path.exists(temp_dl_file_path):
             try: os.unlink(temp_dl_file_path)
             except Exception as e_unlink: logger.error(f"Error deleting temp download file {temp_dl_file_path} after error: {e_unlink}")
-        return
+        return
 
-    # --- Post-Interaction Learning ---
     if final_bot_resp_acc and not final_bot_resp_acc.startswith("Error:"):
-        # Add the successful turn to the internal history
         current_chat_session_history.extend([{"role": "user", "content": user_msg_txt}, {"role": "assistant", "content": final_bot_resp_acc}])
-        # History truncation again after adding
         hist_len_check = MAX_HISTORY_TURNS * 2
         if current_chat_session_history and current_chat_session_history[0]["role"] == "system": hist_len_check +=1
         if len(current_chat_session_history) > hist_len_check:
             current_chat_session_history = ([current_chat_session_history[0]] if current_chat_session_history[0]["role"] == "system" else []) + current_chat_session_history[-(MAX_HISTORY_TURNS * 2):]
 
         status_txt = "<i>[Performing post-interaction learning...]</i>"
-        # Yield status before synchronous learning
         current_rules_text_before_learn = ui_refresh_rules_display_fn()
         current_mems_json_before_learn = ui_refresh_memories_display_fn()
         yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn, current_rules_text_before_learn, current_mems_json_before_learn)
@@ -753,21 +719,14 @@ def handle_gradio_chat_submit(user_msg_txt: str, gr_hist_list: list, sel_prov_na
 
     elif final_bot_resp_acc.startswith("Error:"):
         status_txt = final_bot_resp_acc
-        # If it was an error response from the generator, it's already in updated_gr_hist[-1]
-        # The other output components (fmt_report_tb, dl_btn, detect_out_md) are already set by the generator loop or default state
     else:
         status_txt = "Processing finished; no valid response or error occurred during main phase."
 
-
-    # Final yield after learning (or error handling)
-    # This final yield updates the UI one last time with the true final status
-    # AND crucially refreshes the Rules and Memories displays in case they changed during learning.
     updated_rules_text = ui_refresh_rules_display_fn()
     updated_mems_json = ui_refresh_memories_display_fn()
 
     yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn, updated_rules_text, updated_mems_json)
 
-    # Clean up the temporary download file after the final yield
     if temp_dl_file_path and os.path.exists(temp_dl_file_path):
         try: os.unlink(temp_dl_file_path)
         except Exception as e_unlink: logger.error(f"Error deleting temp download file {temp_dl_file_path}: {e_unlink}")
@@ -775,10 +734,9 @@ def handle_gradio_chat_submit(user_msg_txt: str, gr_hist_list: list, sel_prov_na
 
 # --- Startup Loading Functions ---
 def load_rules_from_file(filepath: str | None):
-    """Loads rules from a local file (.txt or .jsonl) and adds them to the system."""
     if not filepath:
         logger.info("LOAD_RULES_FILE environment variable not set. Skipping rules loading from file.")
-        return 0, 0, 0
+        return 0, 0, 0
 
     if not os.path.exists(filepath):
         logger.warning(f"LOAD_RULES: Specified rules file not found: {filepath}. Skipping loading.")
@@ -792,7 +750,7 @@ def load_rules_from_file(filepath: str | None):
         content = f.read()
     except Exception as e:
         logger.error(f"LOAD_RULES: Error reading file {filepath}: {e}", exc_info=False)
-        return 0, 0, 1
+        return 0, 0, 1
 
     if not content.strip():
         logger.info(f"LOAD_RULES: File {filepath} is empty. Skipping loading.")
@@ -802,7 +760,6 @@ def load_rules_from_file(filepath: str | None):
 
     if file_name_lower.endswith(".txt"):
         potential_rules = content.split("\n\n---\n\n")
-        # Also handle simple line breaks if '---' separator is not used
        if len(potential_rules) == 1 and "\n" in content:
             potential_rules = [r.strip() for r in content.splitlines() if r.strip()]
     elif file_name_lower.endswith(".jsonl"):
@@ -810,7 +767,6 @@ def load_rules_from_file(filepath: str | None):
             line = line.strip()
             if line:
                 try:
-                    # Expecting each line to be a JSON string containing the rule text
                     rule_text_in_json_string = json.loads(line)
                     if isinstance(rule_text_in_json_string, str):
                         potential_rules.append(rule_text_in_json_string)
@@ -822,7 +778,7 @@ def load_rules_from_file(filepath: str | None):
                     error_count +=1
     else:
         logger.error(f"LOAD_RULES: Unsupported file type for rules: {filepath}. Must be .txt or .jsonl")
-        return 0, 0, 1
+        return 0, 0, 1
 
     valid_potential_rules = [r.strip() for r in potential_rules if r.strip()]
     total_to_process = len(valid_potential_rules)
@@ -832,7 +788,7 @@ def load_rules_from_file(filepath: str | None):
         return 0, 0, 0
     elif total_to_process == 0 and error_count > 0:
         logger.warning(f"LOAD_RULES: No valid rule segments found to process. Encountered {error_count} parsing/format errors in {filepath}.")
-        return 0, 0, error_count
+        return 0, 0, error_count
 
     logger.info(f"LOAD_RULES: Attempting to add {total_to_process} potential rules from {filepath}...")
     for idx, rule_text in enumerate(valid_potential_rules):
@@ -849,10 +805,9 @@ def load_rules_from_file(filepath: str | None):
     return added_count, skipped_count, error_count
 
 def load_memories_from_file(filepath: str | None):
-    """Loads memories from a local file (.json or .jsonl) and adds them to the system."""
     if not filepath:
         logger.info("LOAD_MEMORIES_FILE environment variable not set. Skipping memories loading from file.")
-        return 0, 0, 0
+        return 0, 0, 0
 
     if not os.path.exists(filepath):
         logger.warning(f"LOAD_MEMORIES: Specified memories file not found: {filepath}. Skipping loading.")
@@ -866,7 +821,7 @@ def load_memories_from_file(filepath: str | None):
         content = f.read()
     except Exception as e:
         logger.error(f"LOAD_MEMORIES: Error reading file {filepath}: {e}", exc_info=False)
-        return 0, 1, 0
+        return 0, 1, 0
 
     if not content.strip():
         logger.info(f"LOAD_MEMORIES: File {filepath} is empty. Skipping loading.")
@@ -880,7 +835,6 @@ def load_memories_from_file(filepath: str | None):
     if isinstance(parsed_json, list):
         memory_objects_to_process = parsed_json
     elif isinstance(parsed_json, dict):
-        # If it's a single object, process it as a list of one
         memory_objects_to_process = [parsed_json]
     else:
         logger.warning(f"LOAD_MEMORIES (.json): File content is not a JSON list or object in {filepath}. Type: {type(parsed_json)}")
@@ -899,7 +853,7 @@ def load_memories_from_file(filepath: str | None):
             format_error_count += 1
     else:
         logger.error(f"LOAD_MEMORIES: Unsupported file type for memories: {filepath}. Must be .json or .jsonl")
-        return 0, 1, 0
+        return 0, 1, 0
 
     total_to_process = len(memory_objects_to_process)
 
@@ -913,18 +867,11 @@ def load_memories_from_file(filepath: str | None):
 
     logger.info(f"LOAD_MEMORIES: Attempting to add {total_to_process} memory objects from {filepath}...")
     for idx, mem_data in enumerate(memory_objects_to_process):
-        # Validate minimum structure
         if isinstance(mem_data, dict) and all(k in mem_data for k in ["user_input", "bot_response", "metrics"]):
-
-            # NOTE: The current add_memory_entry function *always* generates embeddings.
-            # If performance is an issue with large files, memory_logic might need
-            # an optimized bulk import function that reuses existing embeddings or
-            # generates them in batches. For now, we use the existing function.
-            success, _ = add_memory_entry(mem_data["user_input"], mem_data["metrics"], mem_data["bot_response"]) # add_memory_entry needs user_input, metrics, bot_response
+            success, _ = add_memory_entry(mem_data["user_input"], mem_data["metrics"], mem_data["bot_response"])
             if success:
                 added_count += 1
             else:
-                # add_memory_entry currently doesn't return detailed error status
                 logger.warning(f"LOAD_MEMORIES: Failed to save memory object from {filepath} (segment {idx+1}). Data: {str(mem_data)[:100]}")
                 save_error_count += 1
         else:
@@ -936,18 +883,23 @@ def load_memories_from_file(filepath: str | None):
 
 
 # --- UI Functions for Rules and Memories (Text and Image) ---
-def convert_kb_to_kv_string(rules: list[str], memories: list[dict]) -> str:
-    """Serializes rules and memories into a single key-value string for image embedding."""
+def convert_kb_to_kv_string(rules: list[str], memories: list[dict], include_rules: bool, include_memories: bool) -> str:
+    """Serializes rules and/or memories into a single key-value string for image embedding based on user selection."""
     lines = ["# iLearn Knowledge Base Export", f"# Exported on: {datetime.utcnow().isoformat()}Z"]
-    ...
-    lines.append(
-    lines.append("\n# --- MEMORIES ---")
-    for i, mem_dict in enumerate(memories):
-        lines.append(f"memory_{i+1} = {json.dumps(mem_dict)}")
+
+    if include_rules:
+        lines.append("\n# --- RULES ---")
+        for i, rule_text in enumerate(rules):
+            lines.append(f"rule_{i+1} = {json.dumps(rule_text)}")
+
+    if include_memories:
+        lines.append("\n# --- MEMORIES ---")
+        for i, mem_dict in enumerate(memories):
+            lines.append(f"memory_{i+1} = {json.dumps(mem_dict)}")
     return "\n".join(lines)
 
+
 def ui_refresh_rules_display_fn(): return "\n\n---\n\n".join(get_all_rules_cached()) or "No rules found."
 def ui_refresh_memories_display_fn(): return get_all_memories_cached() or []
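The new signature lets the caller export rules, memories, or both. The format is line-oriented `key = json-value`, which parse_kv_string_to_dict (defined earlier in app.py, body not shown in this diff) appears designed to read back. A round-trip sketch with hypothetical entries, assuming that parser skips `#` comment lines and splits each line on the first `=`:

import json

rules = ["[CORE_RULE|1.0] Always cite sources."]          # hypothetical rule
memories = [{"user_input": "hi", "bot_response": "hello", "metrics": {}}]  # hypothetical memory

kv_text = convert_kb_to_kv_string(rules, memories, include_rules=True, include_memories=True)
print(kv_text.splitlines()[0])  # "# iLearn Knowledge Base Export"

restored = parse_kv_string_to_dict(kv_text)  # assumed comment-skipping, first-'=' split
print(json.loads(restored["memory_1"])["user_input"])  # -> "hi", if values round-trip as raw JSON strings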
@@ -1093,66 +1045,8 @@ def save_edited_rules_action_fn(edited_rules_text: str, progress=gr.Progress()):
         progress((idx + 1) / total_unique, desc=f"Processed {idx+1}/{total_unique} rules...")
     return f"Editor Save: Added: {added}, Skipped (duplicates): {skipped}, Errors/Invalid: {errors} from {total_unique} unique rules in text."
 
-def ui_download_kb_as_image_fn(password: str, progress=gr.Progress()):
-    """Generates and provides a downloadable image with embedded KB data."""
-    progress(0, desc="Fetching knowledge base...")
-    rules, memories = get_all_rules_cached(), get_all_memories_cached()
-    if not rules and not memories:
-        gr.Warning("Knowledge base is empty. Nothing to download.")
-        return None
-
-    progress(0.2, desc="Serializing data...")
-    kv_string = convert_kb_to_kv_string(rules, memories)
-    data_bytes = kv_string.encode('utf-8')
-
-    if password and password.strip():
-        progress(0.4, desc="Encrypting data...")
-        try:
-            data_bytes = encrypt_data(data_bytes, password.strip())
-            gr.Info("Data encrypted successfully.")
-        except Exception as e:
-            logger.error(f"KB ImgDL: Encrypt failed: {e}")
-            gr.Error(f"Encryption failed: {e}")
-            return None
-
-    progress(0.6, desc="Generating carrier image...")
-    carrier_image = generate_brain_carrier_image(w=800, h=800, msg="iLearn Knowledge Base")
-
-    try:
-        progress(0.7, desc="Embedding data...")
-        embedded_image = embed_data_in_image(carrier_image, data_bytes)
-    except ValueError as e:
-        logger.error(f"KB ImgDL: Embed failed: {e}")
-        gr.Error(f"Data is too large for this image size: {e}")
-        return None
-
-    progress(0.8, desc="Adding visual overlay...")
-    keys_for_overlay = [f"Rule Count: {len(rules)}", f"Memory Count: {len(memories)}", "---"]
-    for r in rules[:5]:
-        match = re.search(r"\](.*)", r, re.DOTALL)
-        rule_content = match.group(1).strip() if match else r
-        keys_for_overlay.append(f"Rule: {rule_content[:40]}...")
-    if len(rules) > 5: keys_for_overlay.append("...")
-
-    title_overlay = "Encrypted Data" if password and password.strip() else "Embedded Data"
-    final_image = draw_key_list_dropdown_overlay(embedded_image, keys=keys_for_overlay, title=title_overlay)
-
-    progress(0.9, desc="Saving final image...")
-    try:
-        with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmpfile:
-            final_image.save(tmpfile, format="PNG")
-            tmp_path = tmpfile.name
-
-        progress(1.0, desc="Download triggered!")
-        gr.Info("Download should start automatically.")
-        return tmp_path
-    except Exception as e:
-        logger.error(f"KB ImgDL: Save failed: {e}")
-        gr.Error(f"Failed to save final image: {e}")
-        return None
 
 def ui_upload_kb_from_image_fn(uploaded_image_filepath: str, password: str, progress=gr.Progress()):
-    """Extracts KB data from an uploaded image and adds it to the system."""
     if DEMO_MODE:
         gr.Warning("Uploading is disabled in Demo Mode.")
         return "Upload disabled in Demo Mode."
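For context on the pipeline the deleted function drove (serialize, optionally encrypt, embed, overlay, save): embed_data_in_image itself is not shown in this diff, but the constant LENGTH_HEADER_SIZE = 4 with its struct.pack('>I') comment implies a length-prefixed LSB steganography scheme. A minimal sketch of that idea, illustrative only and not the app's actual implementation:

import struct
import numpy as np
from PIL import Image

def embed_data_in_image_sketch(carrier: Image.Image, data: bytes) -> Image.Image:
    # Prefix the payload with a 4-byte big-endian length header, as LENGTH_HEADER_SIZE suggests
    payload = struct.pack('>I', len(data)) + data
    bits = np.unpackbits(np.frombuffer(payload, dtype=np.uint8))
    pixels = np.array(carrier.convert("RGB"), dtype=np.uint8)
    flat = pixels.reshape(-1)
    if bits.size > flat.size:
        raise ValueError("Data too large for this image size")
    # Write each payload bit into the least significant bit of one channel value
    flat[:bits.size] = (flat[:bits.size] & 0xFE) | bits
    return Image.fromarray(flat.reshape(pixels.shape), "RGB")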
@@ -1253,26 +1147,174 @@ def app_load_fn():
             gr.DownloadButton(interactive=False, value=None, visible=False))
 
 
-# ---
-#
-    ...
-    (
-    ...
+# --- Create a placeholder image for the examples ---
+# This makes the script self-contained and runnable without needing a separate file.
+placeholder_filename = "placeholder_image.png"
+try:
+    if not os.path.exists(placeholder_filename):
+        img = Image.new('RGB', (200, 100), color='darkblue')
+        draw = ImageDraw.Draw(img)
+        try:
+            font = _get_font(PREFERRED_FONTS, 14)
+            draw.text((10, 45), "Placeholder KB Image", font=font, fill='white')
+        except Exception:
+            draw.text((10, 45), "Placeholder", fill='white')
+        img.save(placeholder_filename)
+        logger.info(f"Created '{placeholder_filename}' for Gradio examples.")
+except Exception as e:
+    logger.error(f"Could not create placeholder image. The examples may not load correctly. Error: {e}")
+
+
+def ui_download_kb_as_image_fn(password: str, progress=gr.Progress()):
+    """
+    Generates a KB image and returns both the image object for display
+    and a file path for a download button.
+    """
+    ...
 """
 
     gr.Markdown(f"# 🤖 iLearn: An Autonomous Learning Agent {'(DEMO MODE)' if DEMO_MODE else ''}", elem_classes=["header"])
     is_sqlite, is_hf_dataset = (MEMORY_STORAGE_BACKEND == "SQLITE"), (MEMORY_STORAGE_BACKEND == "HF_DATASET")
     with gr.Row(variant="compact"):
@@ -1282,63 +1324,86 @@ with gr.Blocks(theme=gr.themes.Soft(), css=".gr-button { margin: 5px; } .gr-text
         sqlite_path_display = gr.Textbox(label="SQLite Path", value=MEMORY_SQLITE_PATH, interactive=False, visible=is_sqlite, elem_classes=["status-text"])
         hf_repos_display = gr.Textbox(label="HF Repos", value=f"M: {MEMORY_HF_MEM_REPO}, R: {MEMORY_HF_RULES_REPO}", interactive=False, visible=is_hf_dataset, elem_classes=["status-text"])
 
-    with gr.
-        with gr.
-            gr.
-            ...
-            api_key_tb = gr.Textbox(label="AI Provider API Key (Override)", type="password", placeholder="Uses .env if blank")
-            available_providers = get_available_providers(); default_provider = available_providers[0] if "groq" not in available_providers else "groq"
-            prov_sel_dd = gr.Dropdown(label="AI Provider", choices=available_providers, value=default_provider, interactive=True)
-            default_model_display = get_default_model_display_name_for_provider(default_provider) if default_provider else None
-            model_sel_dd = gr.Dropdown(label="AI Model", choices=get_model_display_names_for_provider(default_provider) if default_provider else [], value=default_model_display, interactive=True)
-        with gr.Group():
-            gr.Markdown("### System Prompt"); sys_prompt_tb = gr.Textbox(label="System Prompt Base", lines=8, value=DEFAULT_SYSTEM_PROMPT, interactive=True)
-        if MEMORY_STORAGE_BACKEND == "RAM": save_faiss_sidebar_btn = gr.Button("Save FAISS Indices", variant="secondary")
-
-    with gr.Column(scale=3):
-        with gr.Tabs():
-            with gr.TabItem("💬 Chat & Research"):
                 with gr.Group():
-                    gr.Markdown("### AI
+                    ...
                     with gr.Column():
-                        gr.Markdown("### π
-                        gr.
-                        save_edited_rules_btn = gr.Button("💾 Save Edited
-                        ...
-                        dl_rules_btn = gr.DownloadButton("⬇️ Download Rules (.txt)", value=None); clear_rules_btn = gr.Button("🗑️ Clear All Rules", variant="stop", visible=not DEMO_MODE)
-                        upload_rules_fobj = gr.File(label="Upload Rules File (.txt/.jsonl)", file_types=[".txt", ".jsonl"], interactive=not DEMO_MODE)
-                        rules_stat_tb = gr.Textbox(label="Rules Status", interactive=False, lines=1, elem_classes=["status-text"])
                     with gr.Column():
-                        gr.Markdown("### π
-                        gr.
-                        ...
                 with gr.Group():
-                    gr.Markdown("
-                    gr.Markdown("Save or load the entire knowledge base (rules and memories) as a single PNG image. A password can be used for AES-256 encryption.")
                     with gr.Row():
-                        ...
-                        # --- MODIFICATION: This hidden component is the key to the solution ---
-                        hidden_downloader = gr.File(visible=False, label="File Downloader")
+    ...
 
     # --- Event Wiring ---
     def dyn_upd_model_dd(sel_prov_dyn: str):
@@ -1346,40 +1411,32 @@ with gr.Blocks(theme=gr.themes.Soft(), css=".gr-button { margin: 5px; } .gr-text
         return gr.Dropdown(choices=models_dyn, value=def_model_dyn, interactive=True)
     prov_sel_dd.change(fn=dyn_upd_model_dd, inputs=prov_sel_dd, outputs=model_sel_dd)
 
     chat_ins = [user_msg_tb, main_chat_disp, prov_sel_dd, model_sel_dd, api_key_tb, sys_prompt_tb]
     chat_outs = [user_msg_tb, main_chat_disp, agent_stat_tb, detect_out_md, fmt_report_tb, dl_report_btn, rules_disp_ta, mems_disp_json]
     chat_event_args = {"fn": handle_gradio_chat_submit, "inputs": chat_ins, "outputs": chat_outs}
     send_btn.click(**chat_event_args); user_msg_tb.submit(**chat_event_args)
 
-
     save_edited_rules_btn.click(fn=save_edited_rules_action_fn, inputs=[rules_disp_ta], outputs=[rules_stat_tb], show_progress="full").then(fn=ui_refresh_rules_display_fn, outputs=rules_disp_ta, show_progress=False)
-    upload_rules_fobj.upload(fn=ui_upload_rules_action_fn, inputs=[upload_rules_fobj], outputs=[rules_stat_tb], show_progress="full").then(fn=ui_refresh_rules_display_fn, outputs=rules_disp_ta, show_progress=False)
     clear_rules_btn.click(fn=lambda: ("All rules cleared." if clear_all_rules_data_backend() else "Error clearing rules."), outputs=rules_stat_tb, show_progress=False).then(fn=ui_refresh_rules_display_fn, outputs=rules_disp_ta, show_progress=False)
-
-    dl_mems_btn.click(fn=ui_download_memories_action_fn, inputs=None, outputs=dl_mems_btn, show_progress=False)
-    upload_mems_fobj.upload(fn=ui_upload_memories_action_fn, inputs=[upload_mems_fobj], outputs=[mems_stat_tb], show_progress="full").then(fn=ui_refresh_memories_display_fn, outputs=mems_disp_json, show_progress=False)
     clear_mems_btn.click(fn=lambda: ("All memories cleared." if clear_all_memory_data_backend() else "Error clearing memories."), outputs=mems_stat_tb, show_progress=False).then(fn=ui_refresh_memories_display_fn, outputs=mems_disp_json, show_progress=False)
 
-    #
-    ...
+    ...
         show_progress="full"
     )
-    ...
-    )
-
-    upload_kb_img_fobj.upload(
-        fn=ui_upload_kb_from_image_fn,
-        inputs=[upload_kb_img_fobj, kb_img_password_tb],
-        outputs=[kb_img_status_tb],
         show_progress="full"
     ).then(
         fn=ui_refresh_rules_display_fn, outputs=rules_disp_ta
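The wiring pattern used throughout this block chains a state-mutating handler with a display refresh via .then(). A minimal sketch of the same idiom with hypothetical component names (assumes gradio imported as gr, as in app.py):

import gradio as gr

def do_work(text):
    return f"Processed: {text}"

def refresh_view():
    return "refreshed"

with gr.Blocks() as sketch:
    inp = gr.Textbox()
    status = gr.Textbox()
    view = gr.Textbox()
    btn = gr.Button("Run")
    # Handler first, then a chained refresh of a dependent display
    btn.click(fn=do_work, inputs=inp, outputs=status).then(fn=refresh_view, outputs=view)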
@@ -1387,16 +1444,20 @@ with gr.Blocks(theme=gr.themes.Soft(), css=".gr-button { margin: 5px; } .gr-text
         fn=ui_refresh_memories_display_fn, outputs=mems_disp_json
     )
 
+    ...
     if MEMORY_STORAGE_BACKEND == "RAM" and 'save_faiss_sidebar_btn' in locals():
         def save_faiss_action_with_feedback_sidebar_fn():
             try: save_faiss_indices_to_disk(); gr.Info("Attempted to save FAISS indices to disk.")
             except Exception as e: logger.error(f"Error saving FAISS indices: {e}", exc_info=True); gr.Error(f"Error saving FAISS indices: {e}")
         save_faiss_sidebar_btn.click(fn=save_faiss_action_with_feedback_sidebar_fn, inputs=None, outputs=None, show_progress=False)
 
     app_load_outputs = [agent_stat_tb, rules_disp_ta, mems_disp_json, detect_out_md, fmt_report_tb, dl_report_btn]
     demo.load(fn=app_load_fn, inputs=None, outputs=app_load_outputs, show_progress="full")
 
+    ...
 if __name__ == "__main__":
     logger.info(f"Starting Gradio AI Research Mega Agent (v9.1 - Correct 1-Click JS Download, Memory: {MEMORY_STORAGE_BACKEND})...")
     app_port = int(os.getenv("GRADIO_PORT", 7860))
|
|
| 1 |
import os
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2 |
DEMO_MODE = False
|
|
|
|
|
|
|
| 3 |
MEMORY_STORAGE_TYPE = "RAM"
|
| 4 |
|
|
|
|
|
|
|
| 5 |
HF_DATASET_MEMORY_REPO = "broadfield-dev/ai-brain"
|
| 6 |
HF_DATASET_RULES_REPO = "broadfield-dev/ai-rules"
|
| 7 |
|
|
|
|
| 33 |
from cryptography.hazmat.primitives import hashes
|
| 34 |
from cryptography.exceptions import InvalidTag
|
| 35 |
# --- End New Imports ---
|
| 36 |
+
import random
|
| 37 |
|
| 38 |
load_dotenv() # Load .env file, but our settings above will take precedence if set.
|
| 39 |
|
|
|
|
| 83 |
LENGTH_HEADER_SIZE = 4 # struct.pack('>I') uses 4 bytes
|
| 84 |
PREFERRED_FONTS = ["Arial", "Helvetica", "DejaVu Sans", "Verdana", "Calibri", "sans-serif"]
|
| 85 |
MAX_KEYS_TO_DISPLAY_OVERLAY = 15
|
| 86 |
+
def convert_pil_to_png_bytes(image: Image.Image) -> bytes:
|
| 87 |
+
"""Saves a PIL image to an in-memory buffer as PNG and returns the raw bytes."""
|
| 88 |
+
with io.BytesIO() as buffer:
|
| 89 |
+
image.save(buffer, format="PNG")
|
| 90 |
+
return buffer.getvalue()
|
| 91 |
def _get_font(preferred_fonts, base_size):
|
| 92 |
fp = None
|
| 93 |
safe_base_size = int(base_size)
|
|
|
|
| 164 |
dd[k]=v
|
| 165 |
return dd
|
| 166 |
|
| 167 |
+
def generate_brain_carrier_image(w=800, h=800) -> Image.Image:
|
| 168 |
"""
|
| 169 |
+
Generates a high-quality carrier image with a multi-layered, procedural starfield
|
| 170 |
+
using NumPy for performance and visual appeal.
|
| 171 |
"""
|
| 172 |
+
# --- 1. Create the Gradient Background with NumPy ---
|
| 173 |
+
center_x, center_y = w / 2, h / 2
|
| 174 |
+
y_coords, x_coords = np.mgrid[0:h, 0:w]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 175 |
|
| 176 |
+
# Calculate distance of each pixel from the center
|
| 177 |
+
distance = np.sqrt((x_coords - center_x)**2 + (y_coords - center_y)**2)
|
| 178 |
+
max_distance = np.sqrt(center_x**2 + center_y**2)
|
| 179 |
+
|
| 180 |
+
# Normalize distance to a 0-1 range
|
| 181 |
+
distance_norm = distance / max_distance
|
| 182 |
+
|
| 183 |
+
# Define colors and create the gradient array
|
| 184 |
+
bg_center_color = np.array([20, 25, 40]) # Deeper blue center
|
| 185 |
+
bg_outer_color = np.array([0, 0, 0]) # Black edges
|
| 186 |
+
|
| 187 |
+
# Interpolate colors across all pixels at once (vectorized)
|
| 188 |
+
# The [..., np.newaxis] part is for broadcasting the color channels
|
| 189 |
+
gradient = bg_outer_color + (bg_center_color - bg_outer_color) * (1 - distance_norm[..., np.newaxis])
|
| 190 |
+
|
| 191 |
+
# Convert the NumPy array to a PIL Image to start drawing on it
|
| 192 |
+
img = Image.fromarray(gradient.astype(np.uint8), 'RGB')
|
| 193 |
draw = ImageDraw.Draw(img)
|
| 194 |
|
| 195 |
+
# --- 2. Draw a Multi-Layered Starfield ---
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 196 |
|
| 197 |
+
# Layer 1: Distant, tiny stars (for depth)
|
| 198 |
+
num_distant_stars = int((w * h) / 200)
|
| 199 |
+
for _ in range(num_distant_stars):
|
| 200 |
+
x, y = random.randint(0, w - 1), random.randint(0, h - 1)
|
| 201 |
+
brightness = random.randint(30, 90)
|
| 202 |
+
draw.point((x, y), fill=(brightness, brightness, int(brightness * 1.1))) # Slightly blue tint
|
| 203 |
+
|
| 204 |
+
# Layer 2: Main stars with glow, size, and color variation
|
| 205 |
+
num_main_stars = int((w * h) / 1000)
|
| 206 |
+
star_colors = [
|
| 207 |
+
(255, 255, 255), # White
|
| 208 |
+
(220, 230, 255), # Light Blue
|
| 209 |
+
(255, 240, 220), # Faint Yellow
|
| 210 |
+
]
|
| 211 |
+
|
| 212 |
+
for _ in range(num_main_stars):
|
| 213 |
+
x, y = random.randint(0, w - 1), random.randint(0, h - 1)
|
| 214 |
+
dist_from_center = np.sqrt((x - center_x)**2 + (y - center_y)**2)
|
| 215 |
+
dist_ratio = min(dist_from_center / max_distance, 1.0)
|
| 216 |
+
|
| 217 |
+
# Base size and brightness increase with distance from center
|
| 218 |
+
size = 0.5 + (2.5 * (dist_ratio ** 2))
|
| 219 |
+
brightness = 120 + (135 * (dist_ratio ** 1.5))
|
| 220 |
+
|
| 221 |
+
# Select a random base color
|
| 222 |
+
color = random.choice(star_colors)
|
| 223 |
+
|
| 224 |
+
# Apply brightness to the selected color
|
| 225 |
+
final_color = tuple(int(c * (brightness / 255.0)) for c in color)
|
| 226 |
+
|
| 227 |
+
# Simulate a soft glow by drawing a larger, dimmer circle first
|
| 228 |
+
glow_size = size * 3
|
| 229 |
+
glow_color = tuple(int(c * 0.3) for c in final_color) # Much dimmer
|
| 230 |
+
draw.ellipse([x - glow_size, y - glow_size, x + glow_size, y + glow_size], fill=glow_color)
|
| 231 |
+
|
| 232 |
+
# Simulate a "twinkle" effect for some stars by drawing a cross
|
| 233 |
+
if random.random() < 0.15: # 15% chance to twinkle
|
| 234 |
+
draw.line([x-size, y, x+size, y], fill=final_color, width=1)
|
| 235 |
+
draw.line([x, y-size, x, y+size], fill=final_color, width=1)
|
| 236 |
+
else:
|
| 237 |
+
# Draw the main star on top of the glow
|
| 238 |
+
draw.ellipse([x - size, y - size, x + size, y + size], fill=final_color)
|
| 239 |
+
|
| 240 |
+
return img
|
| 241 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 242 |
|
| 243 |
def _get_text_measurement(draw_obj, text_str, font_obj):
|
| 244 |
if hasattr(draw_obj, 'textbbox'):
|
|
|
|
| 260 |
except: return len(text_str) * 8, 10
|
| 261 |
|
| 262 |
def draw_key_list_dropdown_overlay(image: Image.Image, keys: list[str] = None, title: str = "Data Embedded") -> Image.Image:
|
| 263 |
+
"""Draws overlays on the image using the 'KeyLock' style."""
|
| 264 |
+
img_overlayed = image.copy().convert("RGBA")
|
| 265 |
+
draw = ImageDraw.Draw(img_overlayed, "RGBA")
|
| 266 |
+
width, height = img_overlayed.size
|
| 267 |
+
|
| 268 |
+
overlay_color = (15, 23, 42, 190)
|
| 269 |
+
title_color = (226, 232, 240)
|
| 270 |
+
key_color = (148, 163, 184)
|
| 271 |
+
|
| 272 |
+
font_bold = _get_font(PREFERRED_FONTS, 30)
|
| 273 |
+
font_regular = _get_font(PREFERRED_FONTS, 15)
|
| 274 |
+
|
| 275 |
+
draw.rectangle([0, 20, width, 80], fill=overlay_color)
|
| 276 |
+
draw.text((width / 2, 50), title, fill=title_color, font=font_bold, anchor="ms")
|
| 277 |
+
|
|
|
|
|
|
|
| 278 |
if keys:
|
| 279 |
+
box_padding = 15
|
| 280 |
+
line_spacing = 6
|
| 281 |
+
text_start_x = 35
|
| 282 |
+
lines = keys
|
| 283 |
+
|
| 284 |
+
line_heights = [_get_text_measurement(draw, line, font_regular)[1] for line in lines]
|
| 285 |
+
total_text_height = sum(line_heights) + (len(lines) - 1) * line_spacing
|
| 286 |
+
box_height = total_text_height + (box_padding * 2)
|
| 287 |
+
box_y0 = height - box_height - 20
|
| 288 |
+
|
| 289 |
+
draw.rectangle([20, box_y0, width - 20, height - 20], fill=overlay_color)
|
| 290 |
+
current_y = box_y0 + box_padding
|
| 291 |
+
|
| 292 |
+
for i, key_text in enumerate(lines):
|
| 293 |
+
draw.text((text_start_x, current_y), key_text, fill=key_color, font=font_regular)
|
| 294 |
+
if i < len(line_heights):
|
| 295 |
+
current_y += line_heights[i] + line_spacing
|
| 296 |
+
|
| 297 |
+
final_image_rgb = Image.new("RGB", img_overlayed.size, (0, 0, 0))
|
| 298 |
+
final_image_rgb.paste(img_overlayed, (0, 0), img_overlayed)
|
| 299 |
+
|
| 300 |
+
return final_image_rgb
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 301 |
|
| 302 |
|
| 303 |
# --- Helper Functions ---
|
|
|
|
| 368 |
history_snippet = "\n".join([f"{msg['role']}: {msg['content'][:100]}" for msg in chat_history_for_prompt[-2:]])
|
| 369 |
guideline_snippet = initial_insights_ctx_str[:200].replace('\n', ' ')
|
| 370 |
tool_sys_prompt = "You are a precise routing agent... Output JSON only. Example: {\"action\": \"search_duckduckgo_and_report\", \"action_input\": {\"search_engine_query\": \"query\"}}"
|
| 371 |
+
tool_user_prompt = f"User Query: \"{user_input}\"\nRecent History:\n{history_snippet}\nGuidelines: {guideline_snippet}...\nAvailable Actions: quick_respond, answer_using_conversation_memory, search_duckduckgo_and_report, scrape_url_and_report.\nSelect one action and input. Output JSON."
|
| 372 |
tool_decision_messages = [{"role":"system", "content": tool_sys_prompt}, {"role":"user", "content": tool_user_prompt}]
|
| 373 |
tool_provider, tool_model_id = TOOL_DECISION_PROVIDER_ENV, TOOL_DECISION_MODEL_ID_ENV
|
| 374 |
tool_model_display = next((dn for dn, mid in MODELS_BY_PROVIDER.get(tool_provider.lower(), {}).get("models", {}).items() if mid == tool_model_id), None)
|
|
|
|
| 580 |
else:
|
| 581 |
logger.warning(f"POST_INTERACTION_LEARNING [{task_id}]: Op {op_idx}: Skipped op due to unknown action '{action}' from XML.")
|
| 582 |
|
|
|
|
| 583 |
if significant_learnings_summary:
|
| 584 |
learning_digest = "SYSTEM CORE LEARNING DIGEST:\n" + "\n".join(significant_learnings_summary)
|
|
|
|
| 585 |
system_metrics = {
|
| 586 |
"takeaway": "Core knowledge refined.",
|
| 587 |
+
"response_success_score": 1.0,
|
| 588 |
"future_confidence_score": 1.0,
|
| 589 |
"type": "SYSTEM_REFLECTION"
|
| 590 |
}
|
| 591 |
add_memory_entry(
|
| 592 |
+
user_input="SYSTEM_INTERNAL_REFLECTION_TRIGGER",
|
| 593 |
metrics=system_metrics,
|
| 594 |
bot_response=learning_digest
|
| 595 |
)
|
|
|
|
| 606 |
def handle_gradio_chat_submit(user_msg_txt: str, gr_hist_list: list, sel_prov_name: str, sel_model_disp_name: str, ui_api_key: str|None, cust_sys_prompt: str):
|
| 607 |
global current_chat_session_history
|
| 608 |
cleared_input, updated_gr_hist, status_txt = "", list(gr_hist_list), "Initializing..."
|
| 609 |
+
updated_rules_text = ui_refresh_rules_display_fn()
|
| 610 |
+
updated_mems_json = ui_refresh_memories_display_fn()
|
|
|
|
| 611 |
def_detect_out_md = gr.Markdown(visible=False)
|
| 612 |
def_fmt_out_txt = gr.Textbox(value="*Waiting...*", interactive=True, show_copy_button=True)
|
| 613 |
def_dl_btn = gr.DownloadButton(interactive=False, value=None, visible=False)
|
|
|
|
| 615 |
if not user_msg_txt.strip():
|
| 616 |
status_txt = "Error: Empty message."
|
| 617 |
updated_gr_hist.append((user_msg_txt or "(Empty)", status_txt))
|
|
|
|
| 618 |
yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn, updated_rules_text, updated_mems_json)
|
| 619 |
return
|
| 620 |
|
| 621 |
updated_gr_hist.append((user_msg_txt, "<i>Thinking...</i>"))
|
|
|
|
| 622 |
yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn, updated_rules_text, updated_mems_json)
|
| 623 |
|
| 624 |
internal_hist = list(current_chat_session_history); internal_hist.append({"role": "user", "content": user_msg_txt})
|
|
|
|
| 625 |
hist_len_check = MAX_HISTORY_TURNS * 2
|
| 626 |
if internal_hist and internal_hist[0]["role"] == "system": hist_len_check +=1
|
| 627 |
if len(internal_hist) > hist_len_check:
|
| 628 |
current_chat_session_history = ([internal_hist[0]] if internal_hist[0]["role"] == "system" else []) + internal_hist[-(MAX_HISTORY_TURNS * 2):]
|
| 629 |
+
internal_hist = list(current_chat_session_history)
|
| 630 |
|
| 631 |
final_bot_resp_acc, insights_used_parsed = "", []
|
| 632 |
temp_dl_file_path = None
|
|
|
|
| 638 |
if upd_type == "status":
|
| 639 |
status_txt = upd_data
|
| 640 |
if updated_gr_hist and updated_gr_hist[-1][0] == user_msg_txt:
|
|
|
|
| 641 |
updated_gr_hist[-1] = (user_msg_txt, f"{curr_bot_disp_msg} <i>{status_txt}</i>" if curr_bot_disp_msg else f"<i>{status_txt}</i>")
|
| 642 |
elif upd_type == "response_chunk":
|
| 643 |
curr_bot_disp_msg += upd_data
|
| 644 |
if updated_gr_hist and updated_gr_hist[-1][0] == user_msg_txt:
|
| 645 |
+
updated_gr_hist[-1] = (user_msg_txt, curr_bot_disp_msg)
|
| 646 |
elif upd_type == "final_response_and_insights":
|
| 647 |
final_bot_resp_acc, insights_used_parsed = upd_data["response"], upd_data["insights_used"]
|
| 648 |
status_txt = "Response generated. Processing learning..."
|
|
|
|
| 649 |
if not curr_bot_disp_msg and final_bot_resp_acc : curr_bot_disp_msg = final_bot_resp_acc
|
| 650 |
if updated_gr_hist and updated_gr_hist[-1][0] == user_msg_txt:
|
| 651 |
updated_gr_hist[-1] = (user_msg_txt, curr_bot_disp_msg or "(No text)")
|
| 652 |
|
|
|
|
| 653 |
def_fmt_out_txt = gr.Textbox(value=curr_bot_disp_msg, interactive=True, show_copy_button=True)
|
| 654 |
|
| 655 |
if curr_bot_disp_msg and not curr_bot_disp_msg.startswith("Error:"):
|
|
|
|
| 664 |
else:
|
| 665 |
def_dl_btn = gr.DownloadButton(interactive=False, value=None, visible=False)
|
| 666 |
|
|
|
|
| 667 |
insights_md_content = "### Insights Considered (Pre-Response):\n" + ("\n".join([f"- **[{i.get('type','N/A')}|{i.get('score','N/A')}]** {i.get('text','N/A')[:100]}..." for i in insights_used_parsed[:3]]) if insights_used_parsed else "*None specific.*")
|
| 668 |
def_detect_out_md = gr.Markdown(value=insights_md_content, visible=True if insights_used_parsed else False)
|
| 669 |
|
|
|
|
|
|
|
| 670 |
yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn, updated_rules_text, updated_mems_json)
|
| 671 |
|
|
|
|
| 672 |
if upd_type == "final_response_and_insights": break
|
| 673 |
|
| 674 |
except Exception as e:
|
|
|
|
| 682 |
def_dl_btn = gr.DownloadButton(interactive=False, value=None, visible=False)
|
| 683 |
def_detect_out_md = gr.Markdown(value="*Error processing request.*", visible=True)
|
| 684 |
|
|
|
|
| 685 |
current_rules_text_on_error = ui_refresh_rules_display_fn()
|
| 686 |
current_mems_json_on_error = ui_refresh_memories_display_fn()
|
| 687 |
|
| 688 |
yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn, current_rules_text_on_error, current_mems_json_on_error)
|
|
|
|
| 689 |
if temp_dl_file_path and os.path.exists(temp_dl_file_path):
|
| 690 |
try: os.unlink(temp_dl_file_path)
|
| 691 |
except Exception as e_unlink: logger.error(f"Error deleting temp download file {temp_dl_file_path} after error: {e_unlink}")
|
| 692 |
+
return
|
| 693 |
|
|
|
|
| 694 |
if final_bot_resp_acc and not final_bot_resp_acc.startswith("Error:"):
|
|
|
|
| 695 |
current_chat_session_history.extend([{"role": "user", "content": user_msg_txt}, {"role": "assistant", "content": final_bot_resp_acc}])
|
|
|
|
| 696 |
hist_len_check = MAX_HISTORY_TURNS * 2
|
| 697 |
if current_chat_session_history and current_chat_session_history[0]["role"] == "system": hist_len_check +=1
|
| 698 |
if len(current_chat_session_history) > hist_len_check:
|
| 699 |
current_chat_session_history = ([current_chat_session_history[0]] if current_chat_session_history[0]["role"] == "system" else []) + current_chat_session_history[-(MAX_HISTORY_TURNS * 2):]
|
| 700 |
|
| 701 |
status_txt = "<i>[Performing post-interaction learning...]</i>"
|
|
|
|
| 702 |
current_rules_text_before_learn = ui_refresh_rules_display_fn()
|
| 703 |
current_mems_json_before_learn = ui_refresh_memories_display_fn()
|
| 704 |
yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn, current_rules_text_before_learn, current_mems_json_before_learn)
|
|
|
|
| 719 |
|
| 720 |
elif final_bot_resp_acc.startswith("Error:"):
|
| 721 |
status_txt = final_bot_resp_acc
|
|
|
|
|
|
|
| 722 |
else:
|
| 723 |
status_txt = "Processing finished; no valid response or error occurred during main phase."
|
| 724 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 725 |
updated_rules_text = ui_refresh_rules_display_fn()
|
| 726 |
updated_mems_json = ui_refresh_memories_display_fn()
|
| 727 |
|
| 728 |
yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn, updated_rules_text, updated_mems_json)
|
| 729 |
|
|
|
|
| 730 |
if temp_dl_file_path and os.path.exists(temp_dl_file_path):
|
| 731 |
try: os.unlink(temp_dl_file_path)
|
| 732 |
except Exception as e_unlink: logger.error(f"Error deleting temp download file {temp_dl_file_path}: {e_unlink}")
|
|
|
|
| 734 |
|
| 735 |
# --- Startup Loading Functions ---
|
| 736 |
def load_rules_from_file(filepath: str | None):
|
|
|
|
| 737 |
if not filepath:
|
| 738 |
logger.info("LOAD_RULES_FILE environment variable not set. Skipping rules loading from file.")
|
| 739 |
+
return 0, 0, 0
|
| 740 |
|
| 741 |
if not os.path.exists(filepath):
|
| 742 |
logger.warning(f"LOAD_RULES: Specified rules file not found: {filepath}. Skipping loading.")
|
|
|
|
| 750 |
content = f.read()
|
| 751 |
except Exception as e:
|
| 752 |
logger.error(f"LOAD_RULES: Error reading file {filepath}: {e}", exc_info=False)
|
| 753 |
+
return 0, 0, 1
|
| 754 |
|
| 755 |
if not content.strip():
|
| 756 |
logger.info(f"LOAD_RULES: File {filepath} is empty. Skipping loading.")
|
|
|
|
| 760 |
|
| 761 |
if file_name_lower.endswith(".txt"):
|
| 762 |
potential_rules = content.split("\n\n---\n\n")
|
|
|
|
| 763 |
if len(potential_rules) == 1 and "\n" in content:
|
| 764 |
potential_rules = [r.strip() for r in content.splitlines() if r.strip()]
|
| 765 |
elif file_name_lower.endswith(".jsonl"):
|
|
|
|
| 767 |
line = line.strip()
|
| 768 |
if line:
|
| 769 |
try:
|
|
|
|
| 770 |
rule_text_in_json_string = json.loads(line)
|
| 771 |
if isinstance(rule_text_in_json_string, str):
|
| 772 |
potential_rules.append(rule_text_in_json_string)
|
|
|
|
| 778 |
error_count +=1
|
| 779 |
else:
|
| 780 |
logger.error(f"LOAD_RULES: Unsupported file type for rules: {filepath}. Must be .txt or .jsonl")
|
| 781 |
+
return 0, 0, 1
|
| 782 |
|
| 783 |
valid_potential_rules = [r.strip() for r in potential_rules if r.strip()]
|
| 784 |
total_to_process = len(valid_potential_rules)
|
|
|
|
| 788 |
return 0, 0, 0
|
| 789 |
elif total_to_process == 0 and error_count > 0:
|
| 790 |
logger.warning(f"LOAD_RULES: No valid rule segments found to process. Encountered {error_count} parsing/format errors in {filepath}.")
|
| 791 |
+
return 0, 0, error_count
|
| 792 |
|
| 793 |
logger.info(f"LOAD_RULES: Attempting to add {total_to_process} potential rules from {filepath}...")
|
| 794 |
for idx, rule_text in enumerate(valid_potential_rules):
|
|
|
|
| 805 |
return added_count, skipped_count, error_count
|
| 806 |
|
| 807 |
def load_memories_from_file(filepath: str | None):
    if not filepath:
        logger.info("LOAD_MEMORIES_FILE environment variable not set. Skipping memories loading from file.")
+        return 0, 0, 0

    if not os.path.exists(filepath):
        logger.warning(f"LOAD_MEMORIES: Specified memories file not found: {filepath}. Skipping loading.")
    # ...
            content = f.read()
    except Exception as e:
        logger.error(f"LOAD_MEMORIES: Error reading file {filepath}: {e}", exc_info=False)
+        return 0, 1, 0

    if not content.strip():
        logger.info(f"LOAD_MEMORIES: File {filepath} is empty. Skipping loading.")
    # ...
        if isinstance(parsed_json, list):
            memory_objects_to_process = parsed_json
        elif isinstance(parsed_json, dict):
            memory_objects_to_process = [parsed_json]
        else:
            logger.warning(f"LOAD_MEMORIES (.json): File content is not a JSON list or object in {filepath}. Type: {type(parsed_json)}")
    # ...
            format_error_count += 1
    else:
        logger.error(f"LOAD_MEMORIES: Unsupported file type for memories: {filepath}. Must be .json or .jsonl")
+        return 0, 1, 0

    total_to_process = len(memory_objects_to_process)
    # ...

    logger.info(f"LOAD_MEMORIES: Attempting to add {total_to_process} memory objects from {filepath}...")
    for idx, mem_data in enumerate(memory_objects_to_process):
        if isinstance(mem_data, dict) and all(k in mem_data for k in ["user_input", "bot_response", "metrics"]):
+            success, _ = add_memory_entry(mem_data["user_input"], mem_data["metrics"], mem_data["bot_response"])
            if success:
                added_count += 1
            else:
                logger.warning(f"LOAD_MEMORIES: Failed to save memory object from {filepath} (segment {idx+1}). Data: {str(mem_data)[:100]}")
                save_error_count += 1
        else:
            # ...

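# For reference, each imported memory object must be a dict carrying at least the
# "user_input", "bot_response", and "metrics" keys, e.g. (illustrative values):
#   {"user_input": "What is FAISS?", "bot_response": "...", "metrics": {"takeaway": "..."}}
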
# --- UI Functions for Rules and Memories (Text and Image) ---
+def convert_kb_to_kv_string(rules: list[str], memories: list[dict], include_rules: bool, include_memories: bool) -> str:
+    """Serializes rules and/or memories into a single key-value string for image embedding based on user selection."""
    lines = ["# iLearn Knowledge Base Export", f"# Exported on: {datetime.utcnow().isoformat()}Z"]
+
+    if include_rules:
+        lines.append("\n# --- RULES ---")
+        for i, rule_text in enumerate(rules):
+            lines.append(f"rule_{i+1} = {json.dumps(rule_text)}")
+
+    if include_memories:
+        lines.append("\n# --- MEMORIES ---")
+        for i, mem_dict in enumerate(memories):
+            lines.append(f"memory_{i+1} = {json.dumps(mem_dict)}")

    return "\n".join(lines)

+
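# For reference, the serialized payload produced above looks roughly like this
# (illustrative values):
#   # iLearn Knowledge Base Export
#   # Exported on: 2024-01-01T00:00:00Z
#   # --- RULES ---
#   rule_1 = "Always cite a source for factual claims."
#   # --- MEMORIES ---
#   memory_1 = {"user_input": "...", "bot_response": "...", "metrics": {}}
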
def ui_refresh_rules_display_fn(): return "\n\n---\n\n".join(get_all_rules_cached()) or "No rules found."
def ui_refresh_memories_display_fn(): return get_all_memories_cached() or []

# ...

        progress((idx + 1) / total_unique, desc=f"Processed {idx+1}/{total_unique} rules...")
    return f"Editor Save: Added: {added}, Skipped (duplicates): {skipped}, Errors/Invalid: {errors} from {total_unique} unique rules in text."

# ...

def ui_upload_kb_from_image_fn(uploaded_image_filepath: str, password: str, progress=gr.Progress()):
    if DEMO_MODE:
        gr.Warning("Uploading is disabled in Demo Mode.")
        return "Upload disabled in Demo Mode."
    # ...
            gr.DownloadButton(interactive=False, value=None, visible=False))

+# --- Create a placeholder image for the examples ---
+# This makes the script self-contained and runnable without needing a separate file.
+placeholder_filename = "placeholder_image.png"
+try:
+    if not os.path.exists(placeholder_filename):
+        img = Image.new('RGB', (200, 100), color='darkblue')
+        draw = ImageDraw.Draw(img)
+        try:
+            font = _get_font(PREFERRED_FONTS, 14)
+            draw.text((10, 45), "Placeholder KB Image", font=font, fill='white')
+        except Exception:
+            draw.text((10, 45), "Placeholder", fill='white')
+        img.save(placeholder_filename)
+        logger.info(f"Created '{placeholder_filename}' for Gradio examples.")
+except Exception as e:
+    logger.error(f"Could not create placeholder image. The examples may not load correctly. Error: {e}")
+
+
+def ui_download_kb_as_image_fn(password: str, progress=gr.Progress()):
+    """
+    Generates a KB image and returns both the image object for display
+    and a file path for a download button.
+    """
+    progress(0, desc="Fetching knowledge base...")
+    rules, memories = get_all_rules_cached(), get_all_memories_cached()
+    if not rules and not memories:
+        gr.Warning("Knowledge base is empty. Nothing to create.")
+        # Return updates to hide the components if they were previously visible
+        return gr.update(value=None, visible=False), gr.update(value=None, visible=False), "Knowledge base is empty."
+
+    progress(0.2, desc="Serializing data...")
+    kv_string = convert_kb_to_kv_string(rules, memories, include_rules=True, include_memories=True)
+    data_bytes = kv_string.encode('utf-8')
+
+    if password and password.strip():
+        progress(0.4, desc="Encrypting data...")
+        try:
+            data_bytes = encrypt_data(data_bytes, password.strip())
+            gr.Info("Data encrypted successfully.")
+        except Exception as e:
+            logger.error(f"KB ImgDL: Encrypt failed: {e}")
+            gr.Error(f"Encryption failed: {e}")
+            return gr.update(value=None, visible=False), gr.update(value=None, visible=False), f"Error: {e}"
+
+    progress(0.6, desc="Generating carrier image...")
+    carrier_image = generate_brain_carrier_image(w=800, h=800, msg="iLearn Knowledge Base")
+
+    try:
+        progress(0.7, desc="Embedding data...")
+        embedded_image = embed_data_in_image(carrier_image, data_bytes)
+    except ValueError as e:
+        logger.error(f"KB ImgDL: Embed failed: {e}")
+        gr.Error(f"Data is too large for this image size: {e}")
+        return gr.update(value=None, visible=False), gr.update(value=None, visible=False), f"Error: {e}"
+
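+    # embed_data_in_image raises ValueError when the payload exceeds the carrier's
+    # capacity, hence the dedicated ValueError handler above.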
+    progress(0.8, desc="Adding visual overlay...")
+    keys_for_overlay = [f"Rule Count: {len(rules)}", f"Memory Count: {len(memories)}", "---"]
|
| 1208 |
+
match = re.search(r"\](.*)", r, re.DOTALL)
|
| 1209 |
+
rule_content = match.group(1).strip() if match else r
|
| 1210 |
+
keys_for_overlay.append(f"Rule: {rule_content[:40]}...")
|
| 1211 |
+
if len(rules) > 5: keys_for_overlay.append("...")
|
| 1212 |
+
|
| 1213 |
+
title_overlay = "Encrypted Data" if password and password.strip() else "Embedded Data"
|
| 1214 |
+
final_image = draw_key_list_dropdown_overlay(embedded_image, keys=keys_for_overlay, title=title_overlay)
|
| 1215 |
+
|
| 1216 |
+
progress(0.9, desc="Preparing final image and download file...")
|
| 1217 |
+
try:
|
| 1218 |
+
with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmpfile:
|
| 1219 |
+
final_image.save(tmpfile, format="PNG")
|
| 1220 |
+
tmp_path = tmpfile.name
|
| 1221 |
+
|
| 1222 |
+
progress(1.0, desc="Image created!")
|
| 1223 |
+
gr.Info("Image created and is ready for download or copy.")
|
| 1224 |
+
# Return updates to show the components with the new data
|
| 1225 |
+
return gr.update(value=final_image, visible=True), gr.update(value=tmp_path, visible=True), "Success! Image created."
|
| 1226 |
+
except Exception as e:
|
| 1227 |
+
logger.error(f"KB ImgDL: Save failed: {e}")
|
| 1228 |
+
gr.Error(f"Failed to save final image: {e}")
|
| 1229 |
+
return gr.update(value=None, visible=False), gr.update(value=None, visible=False), f"Error: {e}"
|
| 1230 |
+
|
| 1231 |
+
|
| 1232 |
+
|
| 1233 |
+
|
| 1234 |
+
+def ui_create_kb_image_fn(password: str, content_to_include: list, progress=gr.Progress()):
+    """
+    Generates a KB image and returns a file path to both the display and download components.
+    """
+    include_rules = "Include Rules" in content_to_include
+    include_memories = "Include Memories" in content_to_include

+    if not include_rules and not include_memories:
+        gr.Warning("Nothing selected to save.")
+        return gr.update(value=None, visible=False), gr.update(value=None, visible=False), "Nothing selected to save."
+
+    progress(0, desc="Fetching knowledge base...")
+    rules = get_all_rules_cached() if include_rules else []
+    memories = get_all_memories_cached() if include_memories else []
+
+    if not rules and not memories:
+        gr.Warning("Knowledge base is empty or selected content is empty.")
+        return gr.update(value=None, visible=False), gr.update(value=None, visible=False), "No content to save."
+
+    progress(0.2, desc="Serializing data...")
+    kv_string = convert_kb_to_kv_string(rules, memories, include_rules, include_memories)
+    data_bytes = kv_string.encode('utf-8')
+
+    if password and password.strip():
+        progress(0.4, desc="Encrypting data...")
+        try:
+            data_bytes = encrypt_data(data_bytes, password.strip())
+        except Exception as e:
+            logger.error(f"KB ImgDL: Encrypt failed: {e}")
+            return gr.update(value=None, visible=False), gr.update(value=None, visible=False), f"Error: {e}"
+
+    progress(0.6, desc="Generating carrier image...")
+    carrier_image = generate_brain_carrier_image(w=800, h=800)
+
+    try:
+        progress(0.7, desc="Embedding data...")
+        embedded_image = embed_data_in_image(carrier_image, data_bytes)
+    except ValueError as e:
+        logger.error(f"KB ImgDL: Embed failed: {e}")
+        return gr.update(value=None, visible=False), gr.update(value=None, visible=False), f"Error: {e}"
+
+    progress(0.8, desc="Adding visual overlay...")
+    keys_for_overlay = []
+    if include_rules: keys_for_overlay.append(f"Rule Count: {len(rules)}")
+    if include_memories: keys_for_overlay.append(f"Memory Count: {len(memories)}")
+
+    title_overlay = "Encrypted KB" if password and password.strip() else "iLearn KB"
+    final_image = draw_key_list_dropdown_overlay(embedded_image, keys=keys_for_overlay, title=title_overlay)
+
+    progress(0.9, desc="Preparing final image and download file...")
+    try:
+        # Create a temporary file and save the image as a PNG.
+        with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmpfile:
+            final_image.save(tmpfile, format="PNG")
+            tmp_path = tmpfile.name
+        progress(1.0, desc="Image created!")
+        # Return the FILE PATH to both components.
+        return gr.update(value=tmp_path, visible=True), gr.update(value=tmp_path, visible=True), "Success! Image created."
+    except Exception as e:
+        logger.error(f"KB ImgDL: Save failed: {e}")
+        return gr.update(value=None, visible=False), gr.update(value=None, visible=False), f"Error: {e}"
+
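+# For reference, a direct call mirrors the click handler wired up below
+# (hypothetical values):
+#   img_upd, dl_upd, status = ui_create_kb_image_fn("my-password", ["Include Rules"])
+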
+def ui_load_from_sources_fn(image_filepath: str, rules_file_obj: object, mems_file_obj: object, password: str, progress=gr.Progress()):
+    """
+    Loads data from one of the available sources with precedence: Image > Rules File > Memories File.
    """
+    if image_filepath:
+        progress(0.1, desc="Image source detected. Starting image processing...")
+        return ui_upload_kb_from_image_fn(image_filepath, password, progress)
+
+    if rules_file_obj:
+        progress(0.1, desc="Rules file detected. Starting rules import...")
+        return ui_upload_rules_action_fn(rules_file_obj, progress)
+
+    if mems_file_obj:
+        progress(0.1, desc="Memories file detected. Starting memories import...")
+        return ui_upload_memories_action_fn(mems_file_obj, progress)
+
+    return "No file or image uploaded. Please provide a source file to load."
+

+# --- Gradio UI Definition ---
+with gr.Blocks(theme=gr.themes.Soft(), css=".gr-button { margin: 5px; } .gr-textbox, .gr-text-area, .gr-dropdown, .gr-json { border-radius: 8px; } .gr-group { border: 1px solid #e0e0e0; border-radius: 8px; padding: 10px; } .gr-row { gap: 10px; } .gr-tab { border-radius: 8px; } .status-text { font-size: 0.9em; color: #555; } .gr-json { max-height: 400px; overflow-y: auto; }") as demo:
+
    gr.Markdown(f"# 🤖 iLearn: An Autonomous Learning Agent {'(DEMO MODE)' if DEMO_MODE else ''}", elem_classes=["header"])
    is_sqlite, is_hf_dataset = (MEMORY_STORAGE_BACKEND == "SQLITE"), (MEMORY_STORAGE_BACKEND == "HF_DATASET")
    with gr.Row(variant="compact"):
        # ...
        sqlite_path_display = gr.Textbox(label="SQLite Path", value=MEMORY_SQLITE_PATH, interactive=False, visible=is_sqlite, elem_classes=["status-text"])
        hf_repos_display = gr.Textbox(label="HF Repos", value=f"M: {MEMORY_HF_MEM_REPO}, R: {MEMORY_HF_RULES_REPO}", interactive=False, visible=is_hf_dataset, elem_classes=["status-text"])

+    with gr.Tabs():
+        with gr.TabItem("💬 Chat & Research"):
+            with gr.Row():
+                with gr.Sidebar():
+                    gr.Markdown("## ⚙️ Configuration")
                    with gr.Group():
+                        gr.Markdown("### AI Model Settings")
+                        api_key_tb = gr.Textbox(label="AI Provider API Key (Override)", type="password", placeholder="Uses .env if blank")
+                        available_providers = get_available_providers(); default_provider = available_providers[0] if "groq" not in available_providers else "groq"
+                        prov_sel_dd = gr.Dropdown(label="AI Provider", choices=available_providers, value=default_provider, interactive=True)
+                        default_model_display = get_default_model_display_name_for_provider(default_provider) if default_provider else None
+                        model_sel_dd = gr.Dropdown(label="AI Model", choices=get_model_display_names_for_provider(default_provider) if default_provider else [], value=default_model_display, interactive=True)
+                    with gr.Group():
+                        gr.Markdown("### System Prompt"); sys_prompt_tb = gr.Textbox(label="System Prompt Base", lines=8, value=DEFAULT_SYSTEM_PROMPT, interactive=True)
+
+                with gr.Column(scale=3):
+                    gr.Markdown("### AI Chat Interface")
+                    main_chat_disp = gr.Chatbot(label=None, height=450, bubble_full_width=False, avatar_images=(None, "https://huggingface.co/spaces/Space-Share/bucket/resolve/main/images/pfp.webp"), show_copy_button=True, render_markdown=True, sanitize_html=True)
+                    with gr.Row(variant="compact"):
+                        user_msg_tb = gr.Textbox(show_label=False, placeholder="Ask your research question...", scale=7, lines=1, max_lines=3)
+                        send_btn = gr.Button("Send", variant="primary", scale=1, min_width=100)
+                    with gr.Accordion("📊 Detailed Response & Insights", open=False):
+                        fmt_report_tb = gr.Textbox(label="Full AI Response", lines=8, interactive=True, show_copy_button=True)
+                        dl_report_btn = gr.DownloadButton("Download Report", value=None, interactive=False, visible=False)
+                        detect_out_md = gr.Markdown(visible=False)
+
+        with gr.TabItem("🧠 Knowledge Base"):
+            with gr.Row():
+                rules_stat_tb = gr.Textbox(label="Rules Status", interactive=False, lines=1, elem_classes=["status-text"])
+                mems_stat_tb = gr.Textbox(label="Memories Status", interactive=False, lines=1, elem_classes=["status-text"])
+
+            with gr.Tabs():
+                with gr.TabItem("🗃️ System"):
+                    gr.Markdown("View and directly manage the current rules and memories in the system.")
+                    with gr.Row(equal_height=False, variant='compact'):
                        with gr.Column():
+                            gr.Markdown("### 📜 Current Rules")
+                            rules_disp_ta = gr.TextArea(label=None, lines=15, placeholder="Rules will appear here.", interactive=True)
+                            save_edited_rules_btn = gr.Button("💾 Save Edited Rules", variant="primary", interactive=not DEMO_MODE)
+                            clear_rules_btn = gr.Button("🗑️ Clear All Rules", variant="stop", visible=not DEMO_MODE)
                        with gr.Column():
+                            gr.Markdown("### 📝 Current Memories")
+                            mems_disp_json = gr.JSON(label=None, value=[], scale=1)
+                            clear_mems_btn = gr.Button("🗑️ Clear All Memories", variant="stop", visible=not DEMO_MODE)
+
+                with gr.TabItem("💾 Save KB"):
+                    gr.Markdown("Export the current knowledge base as text files or as a single, portable PNG image.")
+                    with gr.Row():
+                        with gr.Column():
+                            gr.Markdown("### Text File Export")
+                            dl_rules_btn = gr.DownloadButton("⬇️ Download Rules (.txt)", value=None)
+                            dl_mems_btn = gr.DownloadButton("⬇️ Download Memories (.jsonl)", value=None)
+                            gr.Row()
+                            if MEMORY_STORAGE_BACKEND == "RAM": save_faiss_sidebar_btn = gr.Button("Save FAISS Indices", variant="secondary")
+
+
+                        with gr.Column():
+                            gr.Markdown("### Image Export")
+                            with gr.Group():
+                                save_kb_password_tb = gr.Textbox(label="Password (optional for encryption)", type="password")
+                                save_kb_include_cbg = gr.CheckboxGroup(label="Content to Include", choices=["Include Rules", "Include Memories"], value=["Include Rules", "Include Memories"])
+                                create_kb_img_btn = gr.Button("✨ Create KB Image", variant="secondary")
+                                # Removed type="pil" to allow Gradio to handle the raw PNG bytes correctly
+                                kb_image_display_output = gr.Image(label="Generated Image (Right-click to copy)", visible=False)
+                                kb_image_download_output = gr.DownloadButton("⬇️ Download Image File", visible=False)
+
+                with gr.TabItem("📂 Load KB"):
+                    gr.Markdown("Import rules, memories, or a full KB from local files or a portable PNG image.")
+                    load_status_tb = gr.Textbox(label="Load Operation Status", interactive=False, lines=2)
+                    load_kb_password_tb = gr.Textbox(label="Password (for decrypting images)", type="password")
+
                    with gr.Group():
+                        gr.Markdown("#### Sources (Priority: Image > Rules File > Memories File)")
                        with gr.Row():
+                            upload_kb_img_fobj = gr.Image(label="1. Image Source", type="filepath", sources=["upload", "clipboard"], interactive=not DEMO_MODE)
+                            upload_rules_fobj = gr.File(label="2. Rules File Source (.txt/.jsonl)", file_types=[".txt", ".jsonl"], interactive=not DEMO_MODE)
+                            upload_mems_fobj = gr.File(label="3. Memories File Source (.json/.jsonl)", file_types=[".jsonl", ".json"], interactive=not DEMO_MODE)
+
+                    load_master_btn = gr.Button("⬇️ Load from Sources", variant="primary", interactive=not DEMO_MODE)
+

    # --- Event Wiring ---
    def dyn_upd_model_dd(sel_prov_dyn: str):
        # ...
        return gr.Dropdown(choices=models_dyn, value=def_model_dyn, interactive=True)
    prov_sel_dd.change(fn=dyn_upd_model_dd, inputs=prov_sel_dd, outputs=model_sel_dd)

+    # Chat Tab
    chat_ins = [user_msg_tb, main_chat_disp, prov_sel_dd, model_sel_dd, api_key_tb, sys_prompt_tb]
    chat_outs = [user_msg_tb, main_chat_disp, agent_stat_tb, detect_out_md, fmt_report_tb, dl_report_btn, rules_disp_ta, mems_disp_json]
    chat_event_args = {"fn": handle_gradio_chat_submit, "inputs": chat_ins, "outputs": chat_outs}
    send_btn.click(**chat_event_args); user_msg_tb.submit(**chat_event_args)

+    # KB Tab -> System
    save_edited_rules_btn.click(fn=save_edited_rules_action_fn, inputs=[rules_disp_ta], outputs=[rules_stat_tb], show_progress="full").then(fn=ui_refresh_rules_display_fn, outputs=rules_disp_ta, show_progress=False)
    clear_rules_btn.click(fn=lambda: ("All rules cleared." if clear_all_rules_data_backend() else "Error clearing rules."), outputs=rules_stat_tb, show_progress=False).then(fn=ui_refresh_rules_display_fn, outputs=rules_disp_ta, show_progress=False)
    clear_mems_btn.click(fn=lambda: ("All memories cleared." if clear_all_memory_data_backend() else "Error clearing memories."), outputs=mems_stat_tb, show_progress=False).then(fn=ui_refresh_memories_display_fn, outputs=mems_disp_json, show_progress=False)

+    # KB Tab -> Save KB
+    dl_rules_btn.click(fn=ui_download_rules_action_fn, inputs=None, outputs=dl_rules_btn, show_progress=False)
+    dl_mems_btn.click(fn=ui_download_memories_action_fn, inputs=None, outputs=dl_mems_btn, show_progress=False)
+    create_kb_img_btn.click(
+        fn=ui_create_kb_image_fn,
+        inputs=[save_kb_password_tb, save_kb_include_cbg],
+        outputs=[kb_image_display_output, kb_image_download_output, load_status_tb],
        show_progress="full"
    )
+
+    # KB Tab -> Load KB
+    load_master_btn.click(
+        fn=ui_load_from_sources_fn,
+        inputs=[upload_kb_img_fobj, upload_rules_fobj, upload_mems_fobj, load_kb_password_tb],
+        outputs=[load_status_tb],
        show_progress="full"
    ).then(
        fn=ui_refresh_rules_display_fn, outputs=rules_disp_ta
    ).then(
        fn=ui_refresh_memories_display_fn, outputs=mems_disp_json
    )
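    # Each .then() step runs after the previous one completes, so the rules and
    # memories panels are refreshed only after the load has finished.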

+    # Sidebar FAISS button
    if MEMORY_STORAGE_BACKEND == "RAM" and 'save_faiss_sidebar_btn' in locals():
        def save_faiss_action_with_feedback_sidebar_fn():
            try: save_faiss_indices_to_disk(); gr.Info("Attempted to save FAISS indices to disk.")
            except Exception as e: logger.error(f"Error saving FAISS indices: {e}", exc_info=True); gr.Error(f"Error saving FAISS indices: {e}")
        save_faiss_sidebar_btn.click(fn=save_faiss_action_with_feedback_sidebar_fn, inputs=None, outputs=None, show_progress=False)

+    # App Load
    app_load_outputs = [agent_stat_tb, rules_disp_ta, mems_disp_json, detect_out_md, fmt_report_tb, dl_report_btn]
    demo.load(fn=app_load_fn, inputs=None, outputs=app_load_outputs, show_progress="full")

+
+
if __name__ == "__main__":
    logger.info(f"Starting Gradio AI Research Mega Agent (v9.1 - Correct 1-Click JS Download, Memory: {MEMORY_STORAGE_BACKEND})...")
    app_port = int(os.getenv("GRADIO_PORT", 7860))