broadfield-dev committed on
Commit
3d3c1e2
·
verified ·
1 Parent(s): 5f5ba10

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +84 -28
app.py CHANGED
@@ -8,6 +8,7 @@ from datetime import datetime
8
  from dotenv import load_dotenv
9
  import gradio as gr
10
  import time
 
11
 
12
  load_dotenv()
13
 
@@ -262,7 +263,7 @@ Combine all findings into a single JSON list of operations. If there are multipl
262
  def handle_gradio_chat_submit(user_msg_txt: str, gr_hist_list: list, sel_prov_name: str, sel_model_disp_name: str, ui_api_key: str|None, cust_sys_prompt: str):
263
  global current_chat_session_history
264
  cleared_input, updated_gr_hist, status_txt = "", list(gr_hist_list), "Initializing..."
265
- def_detect_out_md, def_fmt_out_txt, def_dl_btn = gr.Markdown("*Processing...*"), gr.Textbox("*Waiting...*"), gr.DownloadButton(interactive=False, value=None, visible=False)
266
  if not user_msg_txt.strip():
267
  status_txt = "Error: Empty message."
268
  updated_gr_hist.append((user_msg_txt or "(Empty)", status_txt))
@@ -274,6 +275,7 @@ def handle_gradio_chat_submit(user_msg_txt: str, gr_hist_list: list, sel_prov_na
274
  if internal_hist[0]["role"] == "system" and len(internal_hist) > (MAX_HISTORY_TURNS * 2 + 1) : internal_hist = [internal_hist[0]] + internal_hist[-(MAX_HISTORY_TURNS * 2):]
275
  else: internal_hist = internal_hist[-(MAX_HISTORY_TURNS * 2):]
276
  final_bot_resp_acc, insights_used_parsed = "", []
 
277
  try:
278
  processor_gen = process_user_interaction_gradio(user_input=user_msg_txt, provider_name=sel_prov_name, model_display_name=sel_model_disp_name, chat_history_for_prompt=internal_hist, custom_system_prompt=cust_sys_prompt.strip() or None, ui_api_key_override=ui_api_key.strip() if ui_api_key else None)
279
  curr_bot_disp_msg = ""
@@ -291,7 +293,11 @@ def handle_gradio_chat_submit(user_msg_txt: str, gr_hist_list: list, sel_prov_na
291
  if updated_gr_hist and updated_gr_hist[-1][0] == user_msg_txt: updated_gr_hist[-1] = (user_msg_txt, curr_bot_disp_msg or "(No text)")
292
  def_fmt_out_txt = gr.Textbox(value=curr_bot_disp_msg)
293
  if curr_bot_disp_msg and not curr_bot_disp_msg.startswith("Error:"):
294
- def_dl_btn = gr.DownloadButton(label="Download Report (.md)", value=curr_bot_disp_msg, filename=f"ai_report_{datetime.now().strftime('%Y%m%d_%H%M%S')}.md", visible=True, interactive=True)
 
 
 
 
295
  insights_md = "### Insights Considered:\n" + ("\n".join([f"- **[{i.get('type','N/A')}|{i.get('score','N/A')}]** {i.get('text','N/A')[:100]}..." for i in insights_used_parsed[:3]]) if insights_used_parsed else "*None specific.*")
296
  def_detect_out_md = gr.Markdown(insights_md)
297
  yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn)
@@ -311,6 +317,9 @@ def handle_gradio_chat_submit(user_msg_txt: str, gr_hist_list: list, sel_prov_na
311
  status_txt = "Response complete. Background learning initiated."
312
  else: status_txt = "Processing finished; no response or error."
313
  yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn)
 
 
 
314
 
315
  def ui_view_rules_action_fn(): return "\n\n---\n\n".join(get_all_rules_cached()) or "No rules found."
316
  def ui_upload_rules_action_fn(uploaded_file_obj, progress=gr.Progress()):
@@ -370,53 +379,100 @@ def ui_upload_memories_action_fn(uploaded_file_obj, progress=gr.Progress()):
370
  custom_theme = gr.themes.Base(primary_hue="teal", secondary_hue="purple", neutral_hue="zinc", text_size="sm", spacing_size="sm", radius_size="sm", font=[gr.themes.GoogleFont("Inter"), "ui-sans-serif", "system-ui", "sans-serif"])
371
  custom_css = """ body { font-family: 'Inter', sans-serif; } .gradio-container { max-width: 96% !important; margin: auto !important; padding-top: 1rem !important; } footer { display: none !important; } .gr-button { white-space: nowrap; } .gr-input, .gr-textarea textarea, .gr-dropdown input { border-radius: 8px !important; } .gr-chatbot .message { border-radius: 10px !important; box-shadow: 0 2px 5px rgba(0,0,0,0.08) !important; } .prose { h1 { font-size: 1.8rem; margin-bottom: 0.6em; margin-top: 0.8em; } h2 { font-size: 1.4rem; margin-bottom: 0.5em; margin-top: 0.7em; } h3 { font-size: 1.15rem; margin-bottom: 0.4em; margin-top: 0.6em; } p { margin-bottom: 0.8em; line-height: 1.65; } ul, ol { margin-left: 1.5em; margin-bottom: 0.8em; } code { background-color: #f1f5f9; padding: 0.2em 0.45em; border-radius: 4px; font-size: 0.9em; } pre > code { display: block; padding: 0.8em; overflow-x: auto; background-color: #f8fafc; border: 1px solid #e2e8f0; border-radius: 6px;}} .compact-group .gr-input-label, .compact-group .gr-dropdown-label { font-size: 0.8rem !important; padding-bottom: 2px !important;}"""
372
 
373
- with gr.Blocks(theme=custom_theme, css=custom_css, title="AI Research Mega Agent v4") as demo:
374
- gr.Markdown("# 🚀 AI Research Mega Agent (Advanced Memory & Dynamic Models)", elem_classes="prose")
375
  avail_provs, def_prov = get_available_providers(), get_available_providers()[0] if get_available_providers() else None
376
  def_models, def_model = get_model_display_names_for_provider(def_prov) if def_prov else [], get_default_model_display_name_for_provider(def_prov) if def_prov else None
377
- save_faiss_ram_btn = None
378
- with gr.Row():
379
- with gr.Column(scale=1, min_width=320):
380
- gr.Markdown("## ⚙️ Configuration", elem_classes="prose")
381
- with gr.Accordion("API & Model Settings", open=True):
382
- with gr.Group(elem_classes="compact-group"):
383
- gr.Markdown("### LLM Provider & Model", elem_classes="prose")
384
- prov_sel_dd = gr.Dropdown(label="Provider", choices=avail_provs, value=def_prov, interactive=True)
385
- model_sel_dd = gr.Dropdown(label="Model", choices=def_models, value=def_model, interactive=True)
386
- api_key_tb = gr.Textbox(label="API Key Override", type="password", placeholder="Optional", info="Overrides .env key for session.")
387
- with gr.Group(elem_classes="compact-group"):
388
- gr.Markdown("### System Prompt", elem_classes="prose")
389
- sys_prompt_tb = gr.Textbox(label="Custom System Prompt Base", lines=6, value=DEFAULT_SYSTEM_PROMPT, interactive=True)
390
- with gr.Accordion("Knowledge Management (Backend: " + MEMORY_STORAGE_BACKEND + ")", open=False):
391
- gr.Markdown("### Rules (Insights)", elem_classes="prose"); view_rules_btn = gr.Button("View Rules"); upload_rules_fobj = gr.File(label="Upload Rules (.txt/.jsonl)", file_types=[".txt", ".jsonl"]); rules_stat_tb = gr.Textbox(label="Status", interactive=False, lines=2); clear_rules_btn = gr.Button("⚠️ Clear All Rules", variant="stop")
392
- if MEMORY_STORAGE_BACKEND == "RAM": save_faiss_ram_btn = gr.Button("Save FAISS Indices (RAM Backend)")
393
- gr.Markdown("### Memories", elem_classes="prose"); view_mems_btn = gr.Button("View Memories"); upload_mems_fobj = gr.File(label="Upload Memories (.jsonl)", file_types=[".jsonl"]); mems_stat_tb = gr.Textbox(label="Status", interactive=False, lines=2); clear_mems_btn = gr.Button("⚠️ Clear All Memories", variant="stop")
394
- with gr.Column(scale=3):
395
- gr.Markdown("## 💬 AI Research Assistant Chat", elem_classes="prose"); main_chat_disp = gr.Chatbot(label="Chat", height=650, bubble_full_width=False, avatar_images=(None, "https://raw.githubusercontent.com/huggingface/brand-assets/main/hf-logo-with-title.png"), show_copy_button=True, render_markdown=True, sanitize_html=True)
396
- with gr.Row(): user_msg_tb = gr.Textbox(show_label=False, placeholder="Ask a question or give an instruction...", scale=7, lines=1, max_lines=5, autofocus=True); send_btn = gr.Button("Send", variant="primary", scale=1, min_width=100)
397
  agent_stat_tb = gr.Textbox(label="Agent Status", interactive=False, lines=1, value="Initializing...")
398
- with gr.Tabs():
399
- with gr.TabItem("📝 Report/Output"): gr.Markdown("AI's full response/report.", elem_classes="prose"); fmt_report_tb = gr.Textbox(label="Output", lines=20, interactive=True, show_copy_button=True, value="*Responses appear here...*"); dl_report_btn = gr.DownloadButton(label="Download Report", interactive=False, visible=False)
400
- with gr.TabItem("🔍 Details / Data"): gr.Markdown("View intermediate details, loaded data.", elem_classes="prose"); detect_out_md = gr.Markdown("*Insights used or details show here...*"); gr.HTML("<hr style='margin:1em 0;'>"); gr.Markdown("### Rules Viewer", elem_classes="prose"); rules_disp_ta = gr.TextArea(label="Rules Snapshot", lines=10, interactive=False); gr.HTML("<hr style='margin:1em 0;'>"); gr.Markdown("### Memories Viewer", elem_classes="prose"); mems_disp_json = gr.JSON(label="Memories Snapshot")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
401
  def dyn_upd_model_dd(sel_prov_dyn:str): models_dyn, def_model_dyn = get_model_display_names_for_provider(sel_prov_dyn), get_default_model_display_name_for_provider(sel_prov_dyn); return gr.Dropdown(choices=models_dyn, value=def_model_dyn, interactive=True)
402
  prov_sel_dd.change(fn=dyn_upd_model_dd, inputs=prov_sel_dd, outputs=model_sel_dd)
403
  chat_ins = [user_msg_tb, main_chat_disp, prov_sel_dd, model_sel_dd, api_key_tb, sys_prompt_tb]
404
  chat_outs = [user_msg_tb, main_chat_disp, agent_stat_tb, detect_out_md, fmt_report_tb, dl_report_btn]
405
  send_btn.click(fn=handle_gradio_chat_submit, inputs=chat_ins, outputs=chat_outs); user_msg_tb.submit(fn=handle_gradio_chat_submit, inputs=chat_ins, outputs=chat_outs)
 
406
  view_rules_btn.click(fn=ui_view_rules_action_fn, outputs=rules_disp_ta)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
407
  upload_rules_fobj.upload(fn=ui_upload_rules_action_fn, inputs=[upload_rules_fobj], outputs=[rules_stat_tb], show_progress="full").then(fn=ui_view_rules_action_fn, outputs=rules_disp_ta)
408
  clear_rules_btn.click(fn=lambda: "All rules cleared." if clear_all_rules_data_backend() else "Error clearing rules.", outputs=rules_stat_tb).then(fn=ui_view_rules_action_fn, outputs=rules_disp_ta)
 
409
  if MEMORY_STORAGE_BACKEND == "RAM" and save_faiss_ram_btn is not None:
410
  def save_faiss_action_with_feedback_fn(): save_faiss_indices_to_disk(); gr.Info("Attempted to save FAISS indices to disk.")
411
  save_faiss_ram_btn.click(fn=save_faiss_action_with_feedback_fn, inputs=None, outputs=None)
 
412
  view_mems_btn.click(fn=ui_view_memories_action_fn, outputs=mems_disp_json)
413
  upload_mems_fobj.upload(fn=ui_upload_memories_action_fn, inputs=[upload_mems_fobj], outputs=[mems_stat_tb], show_progress="full").then(fn=ui_view_memories_action_fn, outputs=mems_disp_json)
414
  clear_mems_btn.click(fn=lambda: "All memories cleared." if clear_all_memory_data_backend() else "Error clearing memories.", outputs=mems_stat_tb).then(fn=ui_view_memories_action_fn, outputs=mems_disp_json)
 
415
  def app_load_fn(): initialize_memory_system(); logger.info("App loaded. Memory system initialized."); return f"AI Systems Initialized (Backend: {MEMORY_STORAGE_BACKEND}). Ready."
416
  demo.load(fn=app_load_fn, inputs=None, outputs=agent_stat_tb)
417
 
418
  if __name__ == "__main__":
419
- logger.info(f"Starting Gradio AI Research Mega Agent (v4 with Advanced Memory: {MEMORY_STORAGE_BACKEND})...")
420
  app_port, app_server = int(os.getenv("GRADIO_PORT", 7860)), os.getenv("GRADIO_SERVER_NAME", "127.0.0.1")
421
  app_debug, app_share = os.getenv("GRADIO_DEBUG", "False").lower()=="true", os.getenv("GRADIO_SHARE", "False").lower()=="true"
422
  logger.info(f"Launching Gradio server: http://{app_server}:{app_port}. Debug: {app_debug}, Share: {app_share}")
 
8
  from dotenv import load_dotenv
9
  import gradio as gr
10
  import time
11
+ import tempfile # For temporary file for download
12
 
13
  load_dotenv()
14
 
 
263
  def handle_gradio_chat_submit(user_msg_txt: str, gr_hist_list: list, sel_prov_name: str, sel_model_disp_name: str, ui_api_key: str|None, cust_sys_prompt: str):
264
  global current_chat_session_history
265
  cleared_input, updated_gr_hist, status_txt = "", list(gr_hist_list), "Initializing..."
266
+ def_detect_out_md, def_fmt_out_txt, def_dl_btn = gr.Markdown("*Processing...*"), gr.Textbox("*Waiting...*"), gr.DownloadButton(interactive=False, value=None, visible=False) # Ensure DownloadButton is part of initial yield
267
  if not user_msg_txt.strip():
268
  status_txt = "Error: Empty message."
269
  updated_gr_hist.append((user_msg_txt or "(Empty)", status_txt))
 
275
  if internal_hist[0]["role"] == "system" and len(internal_hist) > (MAX_HISTORY_TURNS * 2 + 1) : internal_hist = [internal_hist[0]] + internal_hist[-(MAX_HISTORY_TURNS * 2):]
276
  else: internal_hist = internal_hist[-(MAX_HISTORY_TURNS * 2):]
277
  final_bot_resp_acc, insights_used_parsed = "", []
278
+ temp_dl_file_path = None # For DownloadButton
279
  try:
280
  processor_gen = process_user_interaction_gradio(user_input=user_msg_txt, provider_name=sel_prov_name, model_display_name=sel_model_disp_name, chat_history_for_prompt=internal_hist, custom_system_prompt=cust_sys_prompt.strip() or None, ui_api_key_override=ui_api_key.strip() if ui_api_key else None)
281
  curr_bot_disp_msg = ""
 
293
  if updated_gr_hist and updated_gr_hist[-1][0] == user_msg_txt: updated_gr_hist[-1] = (user_msg_txt, curr_bot_disp_msg or "(No text)")
294
  def_fmt_out_txt = gr.Textbox(value=curr_bot_disp_msg)
295
  if curr_bot_disp_msg and not curr_bot_disp_msg.startswith("Error:"):
296
+ with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".md", encoding='utf-8') as tmpfile:
297
+ tmpfile.write(curr_bot_disp_msg)
298
+ temp_dl_file_path = tmpfile.name
299
+ def_dl_btn = gr.DownloadButton(label="Download Report (.md)", value=temp_dl_file_path, visible=True, interactive=True)
300
+ else: def_dl_btn = gr.DownloadButton(interactive=False, value=None, visible=False)
301
  insights_md = "### Insights Considered:\n" + ("\n".join([f"- **[{i.get('type','N/A')}|{i.get('score','N/A')}]** {i.get('text','N/A')[:100]}..." for i in insights_used_parsed[:3]]) if insights_used_parsed else "*None specific.*")
302
  def_detect_out_md = gr.Markdown(insights_md)
303
  yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn)
 
317
  status_txt = "Response complete. Background learning initiated."
318
  else: status_txt = "Processing finished; no response or error."
319
  yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn)
320
+ if temp_dl_file_path and os.path.exists(temp_dl_file_path):
321
+ try: os.unlink(temp_dl_file_path)
322
+ except Exception as e_unlink: logger.error(f"Error deleting temp download file {temp_dl_file_path}: {e_unlink}")
323
 
324
  def ui_view_rules_action_fn(): return "\n\n---\n\n".join(get_all_rules_cached()) or "No rules found."
325
  def ui_upload_rules_action_fn(uploaded_file_obj, progress=gr.Progress()):
 
379
  custom_theme = gr.themes.Base(primary_hue="teal", secondary_hue="purple", neutral_hue="zinc", text_size="sm", spacing_size="sm", radius_size="sm", font=[gr.themes.GoogleFont("Inter"), "ui-sans-serif", "system-ui", "sans-serif"])
380
  custom_css = """ body { font-family: 'Inter', sans-serif; } .gradio-container { max-width: 96% !important; margin: auto !important; padding-top: 1rem !important; } footer { display: none !important; } .gr-button { white-space: nowrap; } .gr-input, .gr-textarea textarea, .gr-dropdown input { border-radius: 8px !important; } .gr-chatbot .message { border-radius: 10px !important; box-shadow: 0 2px 5px rgba(0,0,0,0.08) !important; } .prose { h1 { font-size: 1.8rem; margin-bottom: 0.6em; margin-top: 0.8em; } h2 { font-size: 1.4rem; margin-bottom: 0.5em; margin-top: 0.7em; } h3 { font-size: 1.15rem; margin-bottom: 0.4em; margin-top: 0.6em; } p { margin-bottom: 0.8em; line-height: 1.65; } ul, ol { margin-left: 1.5em; margin-bottom: 0.8em; } code { background-color: #f1f5f9; padding: 0.2em 0.45em; border-radius: 4px; font-size: 0.9em; } pre > code { display: block; padding: 0.8em; overflow-x: auto; background-color: #f8fafc; border: 1px solid #e2e8f0; border-radius: 6px;}} .compact-group .gr-input-label, .compact-group .gr-dropdown-label { font-size: 0.8rem !important; padding-bottom: 2px !important;}"""
381
 
382
+ with gr.Blocks(theme=custom_theme, css=custom_css, title="AI Research Mega Agent v4.1") as demo:
383
+ gr.Markdown("# 🚀 AI Research Mega Agent", elem_classes="prose")
384
  avail_provs, def_prov = get_available_providers(), get_available_providers()[0] if get_available_providers() else None
385
  def_models, def_model = get_model_display_names_for_provider(def_prov) if def_prov else [], get_default_model_display_name_for_provider(def_prov) if def_prov else None
386
+
387
+ with gr.Tabs() as main_tabs:
388
+ with gr.TabItem("💬 Chat Agent", id=0):
389
+ with gr.Row():
390
+ with gr.Column(scale=1, min_width=300):
391
+ gr.Markdown("### ⚙️ Configuration", elem_classes="prose")
392
+ with gr.Group(elem_classes="compact-group"):
393
+ prov_sel_dd = gr.Dropdown(label="Provider", choices=avail_provs, value=def_prov, interactive=True)
394
+ model_sel_dd = gr.Dropdown(label="Model", choices=def_models, value=def_model, interactive=True)
395
+ api_key_tb = gr.Textbox(label="API Key Override", type="password", placeholder="Optional", info="Overrides .env key for session.")
396
+ with gr.Group(elem_classes="compact-group"):
397
+ sys_prompt_tb = gr.Textbox(label="System Prompt Base", lines=8, value=DEFAULT_SYSTEM_PROMPT, interactive=True)
398
+ with gr.Column(scale=3):
399
+ main_chat_disp = gr.Chatbot(label="AI Research Chat", height=600, bubble_full_width=False, avatar_images=(None, "https://raw.githubusercontent.com/huggingface/brand-assets/main/hf-logo-with-title.png"), show_copy_button=True, render_markdown=True, sanitize_html=True)
400
+ with gr.Row():
401
+ user_msg_tb = gr.Textbox(show_label=False, placeholder="Ask a question or give an instruction...", scale=7, lines=1, max_lines=5, autofocus=True)
402
+ send_btn = gr.Button("Send", variant="primary", scale=1, min_width=100)
 
 
 
403
  agent_stat_tb = gr.Textbox(label="Agent Status", interactive=False, lines=1, value="Initializing...")
404
+ with gr.Accordion("📝 Full Response / Output", open=True):
405
+ fmt_report_tb = gr.Textbox(label="Current Research Output", lines=15, interactive=True, show_copy_button=True, value="*AI responses will appear here...*")
406
+ dl_report_btn = gr.DownloadButton(label="Download Report", interactive=False, visible=False) # Filename set dynamically
407
+ detect_out_md = gr.Markdown("*Insights used or other intermediate details will show here...*")
408
+
409
+ with gr.TabItem("🧠 Knowledge Base Management", id=1):
410
+ gr.Markdown("## Knowledge Base (Backend: " + MEMORY_STORAGE_BACKEND + ")", elem_classes="prose")
411
+ with gr.Row():
412
+ with gr.Column(scale=1):
413
+ gr.Markdown("### Rules (Learned Insights)", elem_classes="prose")
414
+ rules_disp_ta = gr.TextArea(label="View/Edit Rules (one per line or '---' separated)", lines=15, interactive=True) # Make editable for direct modification
415
+ with gr.Row():
416
+ view_rules_btn = gr.Button("Load Rules into View"); save_edited_rules_btn = gr.Button("Save Edited Rules from View", variant="primary")
417
+ upload_rules_fobj = gr.File(label="Upload Rules File (.txt/.jsonl)", file_types=[".txt", ".jsonl"])
418
+ rules_stat_tb = gr.Textbox(label="Rules Action Status", interactive=False, lines=2)
419
+ with gr.Row():
420
+ clear_rules_btn = gr.Button("⚠️ Clear All Rules", variant="stop")
421
+ save_faiss_ram_btn = gr.Button("Save FAISS Indices", visible=(MEMORY_STORAGE_BACKEND == "RAM")) # Visible only if RAM
422
+
423
+ with gr.Column(scale=1):
424
+ gr.Markdown("### Memories (Past Interactions)", elem_classes="prose")
425
+ mems_disp_json = gr.JSON(label="View Memories (JSON format)") # JSON is good for list of dicts
426
+ with gr.Row():
427
+ view_mems_btn = gr.Button("Load Memories into View")
428
+ upload_mems_fobj = gr.File(label="Upload Memories File (.jsonl)", file_types=[".jsonl"])
429
+ mems_stat_tb = gr.Textbox(label="Memories Action Status", interactive=False, lines=2)
430
+ clear_mems_btn = gr.Button("⚠️ Clear All Memories", variant="stop")
431
+
432
  def dyn_upd_model_dd(sel_prov_dyn:str): models_dyn, def_model_dyn = get_model_display_names_for_provider(sel_prov_dyn), get_default_model_display_name_for_provider(sel_prov_dyn); return gr.Dropdown(choices=models_dyn, value=def_model_dyn, interactive=True)
433
  prov_sel_dd.change(fn=dyn_upd_model_dd, inputs=prov_sel_dd, outputs=model_sel_dd)
434
  chat_ins = [user_msg_tb, main_chat_disp, prov_sel_dd, model_sel_dd, api_key_tb, sys_prompt_tb]
435
  chat_outs = [user_msg_tb, main_chat_disp, agent_stat_tb, detect_out_md, fmt_report_tb, dl_report_btn]
436
  send_btn.click(fn=handle_gradio_chat_submit, inputs=chat_ins, outputs=chat_outs); user_msg_tb.submit(fn=handle_gradio_chat_submit, inputs=chat_ins, outputs=chat_outs)
437
+
438
  view_rules_btn.click(fn=ui_view_rules_action_fn, outputs=rules_disp_ta)
439
+ def save_edited_rules_action_fn(edited_rules_text: str, progress=gr.Progress()):
440
+ if not edited_rules_text.strip(): return "No rules text to save."
441
+ potential_rules = edited_rules_text.split("\n\n---\n\n")
442
+ if len(potential_rules) == 1 and "\n" in edited_rules_text: potential_rules = [r.strip() for r in edited_rules_text.splitlines() if r.strip()]
443
+ if not potential_rules: return "No rules found to process from editor."
444
+ # For saving edited, it's often easier to clear existing and re-add all from editor
445
+ # Or, implement a diff and selective add/remove (more complex)
446
+ # Simple approach: clear and re-add. User should be warned.
447
+ # For now, this will just attempt to add them, duplicates will be skipped by memory_logic
448
+ added, skipped, errors = 0,0,0; total = len(potential_rules)
449
+ progress(0, desc=f"Saving {total} rules from editor...")
450
+ for idx, rule_text in enumerate(potential_rules):
451
+ if not rule_text.strip(): continue
452
+ success, status_msg = add_rule_entry(rule_text.strip()) # add_rule_entry handles duplicates/format
453
+ if success: added +=1
454
+ elif status_msg == "duplicate": skipped +=1
455
+ else: errors +=1
456
+ progress((idx+1)/total)
457
+ return f"Editor Save: Added: {added}, Skipped (duplicates): {skipped}, Errors/Invalid: {errors}."
458
+ save_edited_rules_btn.click(fn=save_edited_rules_action_fn, inputs=[rules_disp_ta], outputs=[rules_stat_tb], show_progress="full")
459
+
460
  upload_rules_fobj.upload(fn=ui_upload_rules_action_fn, inputs=[upload_rules_fobj], outputs=[rules_stat_tb], show_progress="full").then(fn=ui_view_rules_action_fn, outputs=rules_disp_ta)
461
  clear_rules_btn.click(fn=lambda: "All rules cleared." if clear_all_rules_data_backend() else "Error clearing rules.", outputs=rules_stat_tb).then(fn=ui_view_rules_action_fn, outputs=rules_disp_ta)
462
+
463
  if MEMORY_STORAGE_BACKEND == "RAM" and save_faiss_ram_btn is not None:
464
  def save_faiss_action_with_feedback_fn(): save_faiss_indices_to_disk(); gr.Info("Attempted to save FAISS indices to disk.")
465
  save_faiss_ram_btn.click(fn=save_faiss_action_with_feedback_fn, inputs=None, outputs=None)
466
+
467
  view_mems_btn.click(fn=ui_view_memories_action_fn, outputs=mems_disp_json)
468
  upload_mems_fobj.upload(fn=ui_upload_memories_action_fn, inputs=[upload_mems_fobj], outputs=[mems_stat_tb], show_progress="full").then(fn=ui_view_memories_action_fn, outputs=mems_disp_json)
469
  clear_mems_btn.click(fn=lambda: "All memories cleared." if clear_all_memory_data_backend() else "Error clearing memories.", outputs=mems_stat_tb).then(fn=ui_view_memories_action_fn, outputs=mems_disp_json)
470
+
471
  def app_load_fn(): initialize_memory_system(); logger.info("App loaded. Memory system initialized."); return f"AI Systems Initialized (Backend: {MEMORY_STORAGE_BACKEND}). Ready."
472
  demo.load(fn=app_load_fn, inputs=None, outputs=agent_stat_tb)
473
 
474
  if __name__ == "__main__":
475
+ logger.info(f"Starting Gradio AI Research Mega Agent (v4.1 with Advanced Memory: {MEMORY_STORAGE_BACKEND})...")
476
  app_port, app_server = int(os.getenv("GRADIO_PORT", 7860)), os.getenv("GRADIO_SERVER_NAME", "127.0.0.1")
477
  app_debug, app_share = os.getenv("GRADIO_DEBUG", "False").lower()=="true", os.getenv("GRADIO_SHARE", "False").lower()=="true"
478
  logger.info(f"Launching Gradio server: http://{app_server}:{app_port}. Debug: {app_debug}, Share: {app_share}")