broadfield-dev committed (verified)
Commit 6732e01 · 1 Parent(s): ed1d9ef

Update app.py

Files changed (1): app.py (+233 -139)

app.py CHANGED
@@ -177,6 +177,56 @@ def process_user_interaction_gradio(user_input: str, provider_name: str, model_d
177
  logger.info(f"PUI_GRADIO [{request_id}]: Finished. Total: {time.time() - process_start_time:.2f}s. Resp len: {len(final_bot_text)}")
178
  yield "final_response_and_insights", {"response": final_bot_text, "insights_used": parsed_initial_insights_list}
179
180
  def deferred_learning_and_memory_task(user_input: str, bot_response: str, provider: str, model_disp_name: str, insights_reflected: list[dict], api_key_override: str = None):
181
  start_time, task_id = time.time(), os.urandom(4).hex()
182
  logger.info(f"DEFERRED [{task_id}]: START User='{user_input[:40]}...', Bot='{bot_response[:40]}...'")
@@ -186,49 +236,48 @@ def deferred_learning_and_memory_task(user_input: str, bot_response: str, provid
186
  add_memory_entry(user_input, metrics, bot_response)
187
  summary = f"User:\"{user_input}\"\nAI:\"{bot_response}\"\nMetrics(takeaway):{metrics.get('takeaway','N/A')},Success:{metrics.get('response_success_score','N/A')}"
188
  existing_rules_ctx = "\n".join([f"- \"{r}\"" for r in retrieve_rules_semantic(f"{summary}\n{user_input}", k=10)]) or "No existing rules context."
 
189
  insight_sys_prompt = """You are an expert AI knowledge base curator. Your primary function is to meticulously analyze an interaction and update the AI's guiding principles (insights/rules) to improve its future performance and self-understanding.
190
  **CRITICAL OUTPUT REQUIREMENT: You MUST output a single, valid JSON list of operation objects.**
191
  This list can and SHOULD contain MULTIPLE distinct operations if various learnings occurred.
192
  If no operations are warranted, output an empty JSON list: `[]`.
193
  ABSOLUTELY NO other text, explanations, or markdown should precede or follow this JSON list.
 
 
194
  Each operation object in the JSON list must have these keys and string values:
195
  1. `"action"`: A string, either `"add"` (for entirely new rules) or `"update"` (to replace an existing rule with a better one).
196
  2. `"insight"`: A string, the full, refined insight text including its `[TYPE|SCORE]` prefix (e.g., `"[CORE_RULE|1.0] My name is Lumina, an AI assistant."`).
197
  3. `"old_insight_to_replace"`: (ONLY for `"update"` action) A string, the *exact, full text* of an existing insight that the new `"insight"` should replace. If action is `"add"`, this key should be omitted or its value should be `null` or an empty string.
198
- **CRITICAL JSON STRING FORMATTING RULES (for values of "insight" and "old_insight_to_replace"):**
 
199
  - All string values MUST be enclosed in double quotes (`"`).
200
  - Any literal double quote (`"`) character *within* the string content MUST be escaped as `\\"`.
201
  - Any literal backslash (`\\`) character *within* the string content MUST be escaped as `\\\\`.
202
- - Any newline characters *within* the string content MUST be escaped as `\\n`. Avoid literal newlines in JSON string values; use `\\n` instead.
203
- THIS IS ESPECIALLY IMPORTANT FOR THE 'insight' FIELD, as the insight text itself can be multi-line. Each internal newline in the original text MUST be represented as `\\n` in the JSON string value.
204
- *Example of correctly escaped insight string in JSON:*
205
- `"insight": "[RESPONSE_PRINCIPLE|0.8] User prefers concise answers, stating: \\"Just the facts!\\". Avoid verbose explanations unless asked.\\nFollow up with a question if appropriate."`
206
  **Your Reflection Process (Consider each step and generate operations accordingly):**
207
- **STEP 1: Core Identity & Purpose Review (Result: Primarily 'update' operations)**
208
- - Examine all `CORE_RULE`s related to my identity (name, fundamental purpose, core unchanging capabilities, origin) from the "Potentially Relevant Existing Rules".
209
- - **CONSOLIDATE & MERGE:** If multiple `CORE_RULE`s state similar aspects (e.g., multiple name declarations like 'Lumina' and 'LearnerAI', or slightly different purpose statements), you MUST merge them into ONE definitive, comprehensive `CORE_RULE`.
210
- - The new "insight" will be this single, merged rule. Propose separate "update" operations to replace *each* redundant or less accurate core identity rule with this new canonical one.
211
- - Prioritize user-assigned names or the most specific, recently confirmed information. If the interaction summary clarifies a name or core function, ensure this is reflected.
212
- **STEP 2: New Distinct Learnings (Result: Primarily 'add' operations)**
213
- - Did I learn any completely new, distinct facts (e.g., "The user's project is codenamed 'Bluefire'")?
214
- - Did I demonstrate or get told about a new skill/capability not previously documented (e.g., "I can now generate mermaid diagrams based on descriptions")?
215
- - Did the user express a strong, general preference that should guide future interactions (e.g., "User prefers responses to start with a direct answer, then explanation")?
216
- - For these, propose 'add' operations. Assign `CORE_RULE` for truly fundamental new facts/capabilities, otherwise `RESPONSE_PRINCIPLE` or `BEHAVIORAL_ADJUSTMENT`. Ensure these are genuinely NEW and not just rephrasing of existing non-core rules.
217
- **STEP 3: Refinements to Existing Behaviors/Principles (Result: 'update' operations for non-core rules)**
218
- - Did I learn to modify or improve an existing behavior, response style, or operational guideline (that is NOT part of core identity)?
219
- - For example, if an existing `RESPONSE_PRINCIPLE` was "Be formal," and the interaction showed the user prefers informality, update that principle.
220
- - Propose 'update' operations for the relevant `RESPONSE_PRINCIPLE` or `BEHAVIORAL_ADJUSTMENT`. Only update if the change is significant.
221
  **General Guidelines for Insight Content and Actions:**
222
- - Ensure the "insight" field (for both add/update) always contains the properly formatted insight string: `[TYPE|SCORE] Text`. `TYPE` can be `CORE_RULE`, `RESPONSE_PRINCIPLE`, `BEHAVIORAL_ADJUSTMENT`. Scores should reflect confidence/importance (0.0-1.0).
223
- - Be precise with "old_insight_to_replace" – it must *exactly* match an existing rule string from the "Potentially Relevant Existing Rules" context.
224
- - Aim for a comprehensive set of operations that reflects ALL key learnings from the interaction.
225
- **Example of a comprehensive JSON output with MULTIPLE operations (This is how your output should look):**
226
  [
227
  {"action": "update", "old_insight_to_replace": "[CORE_RULE|1.0] My designated name is 'LearnerAI'.", "insight": "[CORE_RULE|1.0] I am Lumina, an AI assistant designed to chat, provide information, and remember context like the secret word 'rocksyrup'."},
228
- {"action": "update", "old_insight_to_replace": "[CORE_RULE|1.0] I'm Lumina, the AI designed to chat with you.", "insight": "[CORE_RULE|1.0] I am Lumina, an AI assistant designed to chat, provide information, and remember context like the secret word 'rocksyrup'."},
229
- {"action": "add", "insight": "[CORE_RULE|0.9] I am capable of searching the internet for current weather information if asked."},
230
- {"action": "add", "insight": "[RESPONSE_PRINCIPLE|0.8] When user provides positive feedback, acknowledge it warmly."},
231
- {"action": "update", "old_insight_to_replace": "[RESPONSE_PRINCIPLE|0.7] Avoid mentioning old conversations.", "insight": "[RESPONSE_PRINCIPLE|0.85] Avoid mentioning old conversations unless the user explicitly refers to them or it's highly relevant to the current query."}
232
  ]
233
  """
234
  insight_user_prompt = f"""Interaction Summary:\n{summary}\n
@@ -239,7 +288,7 @@ Task: Based on your three-step reflection process (Core Identity, New Learnings,
239
  2. **Add New Learnings:** Identify and "add" any distinct new facts, skills, or important user preferences learned from the "Interaction Summary".
240
  3. **Update Existing Principles:** "Update" any non-core principles from "Potentially Relevant Existing Rules" if the "Interaction Summary" provided a clear refinement.
241
  Combine all findings into a single JSON list of operations. If there are multiple distinct changes based on the interaction and existing rules, ensure your list reflects all of them. Output JSON only, adhering to all specified formatting rules.
242
- **ULTRA-IMPORTANT FINAL REMINDER: YOUR ENTIRE RESPONSE MUST BE A SINGLE, VALID JSON LIST. DOUBLE-CHECK ALL STRING VALUES FOR CORRECTLY ESCAPED NEWLINES (\\n) AND QUOTES (\\\") BEFORE OUTPUTTING. INVALID JSON WILL BE REJECTED.**
243
  """
244
  insight_msgs = [{"role":"system", "content":insight_sys_prompt}, {"role":"user", "content":insight_user_prompt}]
245
  insight_prov, insight_model_disp = provider, model_disp_name
@@ -249,41 +298,72 @@ Combine all findings into a single JSON list of operations. If there are multipl
249
  i_d_n = next((dn for dn, mid in MODELS_BY_PROVIDER.get(i_p.lower(), {}).get("models", {}).items() if mid == i_id), None)
250
  if i_d_n: insight_prov, insight_model_disp = i_p, i_d_n
251
  logger.info(f"DEFERRED [{task_id}]: Generating insights with {insight_prov}/{insight_model_disp}")
252
- raw_ops_json = "".join(list(call_model_stream(provider=insight_prov, model_display_name=insight_model_disp, messages=insight_msgs, api_key_override=api_key_override, temperature=0.05, max_tokens=2000))).strip()
 
253
  ops, processed_count = [], 0
254
- json_match_ops = re.search(r"```json\s*(\[.*?\])\s*```", raw_ops_json, re.DOTALL|re.I) or re.search(r"(\[.*?\])", raw_ops_json, re.DOTALL)
255
256
  if json_match_ops:
 
257
  try:
258
- ops_json_str = json_match_ops.group(1)
259
  ops = json.loads(ops_json_str)
260
- except Exception as e:
261
- logger.error(f"DEFERRED [{task_id}]: JSON ops parse error: {e}. Raw content parsed: {ops_json_str[:500]}")
262
- else:
263
- logger.info(f"DEFERRED [{task_id}]: No JSON list structure found in LLM output. Raw output (first 500 chars): {raw_ops_json[:500]}")
264
265
  if isinstance(ops, list) and ops:
266
- logger.info(f"DEFERRED [{task_id}]: LLM provided {len(ops)} insight ops.")
267
  for op_idx, op in enumerate(ops):
268
  if not isinstance(op, dict):
269
  logger.warning(f"DEFERRED [{task_id}]: Op {op_idx}: Skipped non-dict item in ops list: {op}")
270
  continue
271
 
272
  action = op.get("action","").lower()
273
- insight_text = op.get("insight","") # Keep as is initially for logging
274
- old_insight = op.get("old_insight_to_replace","") # Keep as is initially
275
 
276
- if not isinstance(insight_text, str):
277
- logger.warning(f"DEFERRED [{task_id}]: Op {op_idx}: Skipped op due to non-string insight_text: {op}")
278
  continue
279
- insight_text = insight_text.strip() # Now strip
280
-
281
- if not isinstance(old_insight, str) and old_insight is not None : # Allow None for old_insight
282
- logger.warning(f"DEFERRED [{task_id}]: Op {op_idx}: Skipped op due to non-string old_insight_to_replace: {op}")
283
- continue
284
- if old_insight is not None:
285
- old_insight = old_insight.strip() # Strip if not None
286
-
 
287
  if not insight_text or not re.match(r"\[(CORE_RULE|RESPONSE_PRINCIPLE|BEHAVIORAL_ADJUSTMENT|GENERAL_LEARNING)\|([\d\.]+?)\]", insight_text, re.I|re.DOTALL):
288
  logger.warning(f"DEFERRED [{task_id}]: Op {op_idx}: Skipped op due to invalid/empty insight_text format: '{insight_text[:100]}...' from op: {op}")
289
  continue
@@ -293,21 +373,25 @@ Combine all findings into a single JSON list of operations. If there are multipl
293
  if success: processed_count +=1
294
  else: logger.warning(f"DEFERRED [{task_id}]: Op {op_idx} (add): Failed to add rule '{insight_text[:50]}...'. Status: {status_msg}")
295
  elif action == "update":
296
- if old_insight and old_insight != insight_text:
297
- remove_success = remove_rule_entry(old_insight)
298
- if not remove_success:
299
- logger.warning(f"DEFERRED [{task_id}]: Op {op_idx} (update): Failed to remove old rule '{old_insight[:50]}...' before adding new.")
300
- success, status_msg = add_rule_entry(insight_text)
301
  if success: processed_count +=1
302
- else: logger.warning(f"DEFERRED [{task_id}]: Op {op_idx} (update): Failed to add updated rule '{insight_text[:50]}...'. Status: {status_msg}")
303
  else:
304
  logger.warning(f"DEFERRED [{task_id}]: Op {op_idx}: Skipped op due to unknown action '{action}': {op}")
305
 
306
  logger.info(f"DEFERRED [{task_id}]: Processed {processed_count} insight ops out of {len(ops)} received.")
307
- elif not json_match_ops : # Already logged if no JSON structure found
308
- pass
309
- else: # json_match_ops was true, but ops list is empty or not a list (e.g. parsing error led to ops not being a list)
310
- logger.info(f"DEFERRED [{task_id}]: No valid list of insight ops from LLM after parsing. Raw match (first 500 chars): {json_match_ops.group(1)[:500] if json_match_ops else 'N/A'}")
311
 
312
  except Exception as e: logger.error(f"DEFERRED [{task_id}]: CRITICAL ERROR in deferred task: {e}", exc_info=True)
313
  logger.info(f"DEFERRED [{task_id}]: END. Total: {time.time() - start_time:.2f}s")
@@ -431,10 +515,33 @@ def ui_upload_rules_action_fn(uploaded_file_obj, progress=gr.Progress()):
431
  except Exception as e_read: return f"Error reading file: {e_read}"
432
  if not content.strip(): return "Uploaded rules file is empty."
433
  added_count, skipped_count, error_count = 0,0,0
434
- potential_rules = content.split("\n\n---\n\n")
435
- if len(potential_rules) == 1 and "\n" in content: potential_rules = [r.strip() for r in content.splitlines() if r.strip()]
436
  total_to_process = len(potential_rules)
437
- if total_to_process == 0: return "No rules found in file to process."
 
438
  progress(0, desc="Starting rules upload...")
439
  for idx, rule_text in enumerate(potential_rules):
440
  rule_text = rule_text.strip()
@@ -442,9 +549,10 @@ def ui_upload_rules_action_fn(uploaded_file_obj, progress=gr.Progress()):
442
  success, status_msg = add_rule_entry(rule_text)
443
  if success: added_count += 1
444
  elif status_msg == "duplicate": skipped_count += 1
445
- else: error_count += 1
446
- progress((idx + 1) / total_to_process, desc=f"Processed {idx+1}/{total_to_process} rules...")
447
- msg = f"Rules Upload: Processed {total_to_process}. Added: {added_count}, Skipped (duplicates): {skipped_count}, Errors/Invalid: {error_count}."
 
448
  logger.info(msg); return msg
449
 
450
  def ui_refresh_memories_display_fn(): return get_all_memories_cached() or []
@@ -480,22 +588,33 @@ def ui_upload_memories_action_fn(uploaded_file_obj, progress=gr.Progress()):
480
  if not content.strip(): return "Uploaded memories file is empty."
481
  added_count, format_error_count, save_error_count = 0,0,0
482
  memory_objects_to_process = []
483
- try:
484
- # Try parsing as a single JSON list first
485
- parsed_json = json.loads(content)
486
- if isinstance(parsed_json, list):
487
- memory_objects_to_process = parsed_json
488
- else: # If it's a single object, wrap it in a list
489
- memory_objects_to_process = [parsed_json]
490
- except json.JSONDecodeError: # If not a single JSON list, try JSONL
491
- for line in content.splitlines():
492
  if line.strip():
493
- try: memory_objects_to_process.append(json.loads(line))
494
- except json.JSONDecodeError: format_error_count += 1
495
 
496
- if not memory_objects_to_process and format_error_count > 0 and not content.strip().startswith("["):
497
- # If many format errors and not starting like a list, it was probably meant to be JSONL but all lines failed
498
- return f"Memories Upload: File does not seem to be a valid JSON array or JSONL. Format errors on all lines attempted for JSONL."
499
  elif not memory_objects_to_process:
500
  return "No valid memory objects found in the uploaded file."
501
 
@@ -508,10 +627,12 @@ def ui_upload_memories_action_fn(uploaded_file_obj, progress=gr.Progress()):
508
  success, _ = add_memory_entry(mem_data["user_input"], mem_data["metrics"], mem_data["bot_response"])
509
  if success: added_count += 1
510
  else: save_error_count += 1
511
- else: format_error_count += 1 # Count errors if structure is wrong even after successful line parse
 
 
512
  progress((idx + 1) / total_to_process, desc=f"Processed {idx+1}/{total_to_process} memories...")
513
 
514
- msg = f"Memories Upload: Processed {total_to_process} objects. Added: {added_count}, Format Errors (incl. structure issues): {format_error_count}, Save Errors: {save_error_count}."
515
  logger.info(msg); return msg
516
 
517
 
@@ -614,7 +735,7 @@ with gr.Blocks(
614
  rules_disp_ta = gr.TextArea(
615
  label="Current Rules (Read-only, Edit via Upload/Save)", lines=10,
616
  placeholder="Rules will appear here. Use 'Save Edited Text' or 'Upload File' to modify.",
617
- interactive=True # Keep interactive for copy-pasting, but primary edit via save button
618
  )
619
  gr.Markdown("To edit rules, modify the text above and click 'Save Edited Text', or upload a new file.")
620
  save_edited_rules_btn = gr.Button("💾 Save Edited Text", variant="primary")
@@ -662,8 +783,8 @@ with gr.Blocks(
662
  user_msg_submit_event = user_msg_tb.submit(**chat_event_args)
663
 
664
  for event in [send_btn_click_event, user_msg_submit_event]:
665
- event.then(fn=ui_refresh_rules_display_fn, inputs=None, outputs=rules_disp_ta)
666
- event.then(fn=ui_refresh_memories_display_fn, inputs=None, outputs=mems_disp_json)
667
 
668
  # Rules Management events
669
  dl_rules_btn.click(fn=ui_download_rules_action_fn, inputs=None, outputs=dl_rules_btn)
@@ -672,74 +793,49 @@ with gr.Blocks(
672
  if not edited_rules_text.strip():
673
  return "No rules text to save."
674
 
675
- # Clear existing rules before adding from textarea to avoid duplicates if user edits and saves repeatedly
676
- # Alternatively, implement a more sophisticated diff/update, but clearing and re-adding is simpler for now.
677
- # For now, let's assume add_rule_entry handles duplicates by skipping.
678
- # clear_all_rules_data_backend() # Option: clear all first
679
- # logger.info("Cleared all rules before saving edited text.")
680
-
681
  potential_rules = edited_rules_text.split("\n\n---\n\n")
682
- if len(potential_rules) == 1 and "\n" in edited_rules_text: # Fallback for simple newline separation
683
  potential_rules = [r.strip() for r in edited_rules_text.splitlines() if r.strip()]
684
 
685
  if not potential_rules:
686
  return "No rules found to process from editor."
687
 
688
  added, skipped, errors = 0, 0, 0
689
- total = len(potential_rules)
690
- progress(0, desc=f"Saving {total} rules from editor...")
691
-
692
- # To prevent duplicates from the text area itself if it contained them,
693
- # process unique rules from the input text.
694
  unique_rules_to_process = sorted(list(set(filter(None, [r.strip() for r in potential_rules]))))
695
 
696
- # We need to identify rules to remove if they are no longer in the text area
697
- # and rules to add if they are new. This is complex.
698
- # Simpler: clear all existing rules, then add all from text area.
699
- # This is destructive if the user only wanted to add/modify a few.
700
- # For now, let's stick to `add_rule_entry` which skips duplicates.
701
- # The user should be aware that "Save Edited Text" adds new rules from the text area
702
- # and does not remove rules that are no longer present in the text area.
703
- # To remove, they should use "Clear All Rules" or upload a new definitive list.
704
-
705
- # For a "Save Edited Text" to truly reflect the text area as the source of truth:
706
- # 1. Get current rules from text area.
707
- # 2. Get rules currently in the system.
708
- # 3. Rules to add = (rules in text area) - (rules in system)
709
- # 4. Rules to remove = (rules in system) - (rules in text area)
710
- # This is too complex for this iteration. The current add_rule_entry handles duplicates.
711
- # Let's make `save_edited_rules_action_fn` clear and re-add for simplicity if that's desired.
712
- # OR, it just adds new ones and updates existing ones if `add_rule_entry` can be made to update.
713
- # Current `add_rule_entry` adds if not exact duplicate.
714
- # Sticking to simpler "add if not present":
715
 
716
  for idx, rule_text in enumerate(unique_rules_to_process):
717
- success, status_msg = add_rule_entry(rule_text) # add_rule_entry handles duplicates
718
  if success: added += 1
719
  elif status_msg == "duplicate": skipped += 1
720
  else: errors += 1
721
- progress((idx + 1) / len(unique_rules_to_process), desc=f"Processed {idx+1}/{len(unique_rules_to_process)} rules...")
722
 
723
- return f"Editor Save: Added: {added}, Skipped (duplicates): {skipped}, Errors/Invalid: {errors} from {len(unique_rules_to_process)} unique rules in text."
724
 
725
  save_edited_rules_btn.click(
726
  fn=save_edited_rules_action_fn,
727
  inputs=[rules_disp_ta],
728
  outputs=[rules_stat_tb],
729
  show_progress="full"
730
- ).then(fn=ui_refresh_rules_display_fn, outputs=rules_disp_ta)
731
 
732
  upload_rules_fobj.upload(
733
  fn=ui_upload_rules_action_fn,
734
  inputs=[upload_rules_fobj],
735
  outputs=[rules_stat_tb],
736
  show_progress="full"
737
- ).then(fn=ui_refresh_rules_display_fn, outputs=rules_disp_ta)
738
 
739
  clear_rules_btn.click(
740
  fn=lambda: ("All rules cleared." if clear_all_rules_data_backend() else "Error clearing rules."),
741
- outputs=rules_stat_tb
742
- ).then(fn=ui_refresh_rules_display_fn, outputs=rules_disp_ta)
 
743
 
744
  # Memories Management events
745
  dl_mems_btn.click(fn=ui_download_memories_action_fn, inputs=None, outputs=dl_mems_btn)
@@ -749,12 +845,13 @@ with gr.Blocks(
749
  inputs=[upload_mems_fobj],
750
  outputs=[mems_stat_tb],
751
  show_progress="full"
752
- ).then(fn=ui_refresh_memories_display_fn, outputs=mems_disp_json)
753
 
754
  clear_mems_btn.click(
755
  fn=lambda: ("All memories cleared." if clear_all_memory_data_backend() else "Error clearing memories."),
756
- outputs=mems_stat_tb
757
- ).then(fn=ui_refresh_memories_display_fn, outputs=mems_disp_json)
 
758
 
759
  if MEMORY_STORAGE_BACKEND == "RAM" and 'save_faiss_sidebar_btn' in locals():
760
  def save_faiss_action_with_feedback_sidebar_fn():
@@ -768,31 +865,28 @@ with gr.Blocks(
768
  backend_status = "AI Systems Initialized. Ready."
769
  rules_on_load = ui_refresh_rules_display_fn()
770
  mems_on_load = ui_refresh_memories_display_fn()
771
- # Initial population of display areas
772
- return backend_status, rules_on_load, mems_on_load, gr.Markdown(visible=False), gr.Textbox(value="*Waiting...*", interactive=True), gr.DownloadButton(interactive=False, value=None, visible=False)
773
-
774
-
775
- # Ensure all outputs for app_load_fn match the number of output components
776
- # chat_outs are: [user_msg_tb, main_chat_disp, agent_stat_tb, detect_out_md, fmt_report_tb, dl_report_btn]
777
- # demo.load outputs are: [agent_stat_tb, rules_disp_ta, mems_disp_json]
778
- # Let's adjust app_load_fn to return values for the components it's supposed to update.
779
 
780
  initial_load_outputs = [
781
  agent_stat_tb,
782
  rules_disp_ta,
783
  mems_disp_json,
784
- # Outputs for chat area that might need reset/initial state, matching chat_outs if possible
785
- # user_msg_tb (cleared), main_chat_disp (cleared), agent_stat_tb (updated by app_load)
786
- # detect_out_md, fmt_report_tb, dl_report_btn
787
- detect_out_md, # from chat_outs
788
- fmt_report_tb, # from chat_outs
789
- dl_report_btn # from chat_outs
790
  ]
791
  demo.load(fn=app_load_fn, inputs=None, outputs=initial_load_outputs)
792
 
793
 
794
  if __name__ == "__main__":
795
- logger.info(f"Starting Gradio AI Research Mega Agent (v6.0 - JSON Fix & UI Upload Simplification, Memory: {MEMORY_STORAGE_BACKEND})...")
796
  app_port = int(os.getenv("GRADIO_PORT", 7860))
797
  app_server = os.getenv("GRADIO_SERVER_NAME", "127.0.0.1")
798
  app_debug = os.getenv("GRADIO_DEBUG", "False").lower() == "true"
 
177
  logger.info(f"PUI_GRADIO [{request_id}]: Finished. Total: {time.time() - process_start_time:.2f}s. Resp len: {len(final_bot_text)}")
178
  yield "final_response_and_insights", {"response": final_bot_text, "insights_used": parsed_initial_insights_list}
179
 
180
+ def repair_json_string_newlines(json_like_string: str) -> str:
181
+ """
182
+ Attempts to fix unescaped literal newlines within string literals in a JSON-like string.
183
+ This is a heuristic and focuses on the most common LLM error for this task.
184
+ It converts literal \n, \r, \r\n characters found *inside* what it
185
+ determines to be string literals into the two-character sequence "\\n".
186
+ """
187
+ output = []
188
+ i = 0
189
+ n = len(json_like_string)
190
+ in_string_literal = False
191
+
192
+ while i < n:
193
+ char = json_like_string[i]
194
+
195
+ if char == '"':
196
+ # Check if this quote is escaped (preceded by an odd number of backslashes)
197
+ num_backslashes = 0
198
+ k = i - 1
199
+ while k >= 0 and json_like_string[k] == '\\':
200
+ num_backslashes += 1
201
+ k -= 1
202
+
203
+ if num_backslashes % 2 == 0: # This quote is not escaped, it's a delimiter
204
+ in_string_literal = not in_string_literal
205
+ output.append(char)
206
+ i += 1
207
+ continue
208
+
209
+ if in_string_literal:
210
+ if char == '\n': # Literal newline
211
+ output.append('\\n') # Append the two-character JSON escape sequence: backslash + 'n'
212
+ elif char == '\r':
213
+ if i + 1 < n and json_like_string[i+1] == '\n': # CRLF
214
+ output.append('\\n')
215
+ i += 1 # Consume the \n as well, it's part of the CRLF pair
216
+ else: # Standalone CR
217
+ output.append('\\n')
218
+ # We are not attempting to fix other escape issues here,
219
+ # as newlines are the primary cause of "Unterminated string".
220
+ # If we see a backslash, we assume it's either a correct escape
221
+ # or part of content that doesn't break basic parsing like a raw newline does.
222
+ else:
223
+ output.append(char)
224
+ else: # Not in string literal
225
+ output.append(char)
226
+ i += 1
227
+
228
+ return "".join(output)
229
+
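A quick sanity check of the heuristic above, as a minimal sketch (not part of the commit; it assumes `repair_json_string_newlines` from this file is in scope):

import json

broken = '{"insight": "line one.\nline two."}'  # literal newline: json.loads rejects this (invalid control character)
fixed = repair_json_string_newlines(broken)     # the raw newline becomes the two-character escape \n
assert json.loads(fixed)["insight"] == "line one.\nline two."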
230
  def deferred_learning_and_memory_task(user_input: str, bot_response: str, provider: str, model_disp_name: str, insights_reflected: list[dict], api_key_override: str = None):
231
  start_time, task_id = time.time(), os.urandom(4).hex()
232
  logger.info(f"DEFERRED [{task_id}]: START User='{user_input[:40]}...', Bot='{bot_response[:40]}...'")
 
236
  add_memory_entry(user_input, metrics, bot_response)
237
  summary = f"User:\"{user_input}\"\nAI:\"{bot_response}\"\nMetrics(takeaway):{metrics.get('takeaway','N/A')},Success:{metrics.get('response_success_score','N/A')}"
238
  existing_rules_ctx = "\n".join([f"- \"{r}\"" for r in retrieve_rules_semantic(f"{summary}\n{user_input}", k=10)]) or "No existing rules context."
239
+
240
  insight_sys_prompt = """You are an expert AI knowledge base curator. Your primary function is to meticulously analyze an interaction and update the AI's guiding principles (insights/rules) to improve its future performance and self-understanding.
241
  **CRITICAL OUTPUT REQUIREMENT: You MUST output a single, valid JSON list of operation objects.**
242
  This list can and SHOULD contain MULTIPLE distinct operations if various learnings occurred.
243
  If no operations are warranted, output an empty JSON list: `[]`.
244
  ABSOLUTELY NO other text, explanations, or markdown should precede or follow this JSON list.
245
+ Your output will be directly parsed by Python's `json.loads()` function.
246
+
247
  Each operation object in the JSON list must have these keys and string values:
248
  1. `"action"`: A string, either `"add"` (for entirely new rules) or `"update"` (to replace an existing rule with a better one).
249
  2. `"insight"`: A string, the full, refined insight text including its `[TYPE|SCORE]` prefix (e.g., `"[CORE_RULE|1.0] My name is Lumina, an AI assistant."`).
250
  3. `"old_insight_to_replace"`: (ONLY for `"update"` action) A string, the *exact, full text* of an existing insight that the new `"insight"` should replace. If action is `"add"`, this key should be omitted or its value should be `null` or an empty string.
251
+
252
+ **ULTRA-CRITICAL JSON STRING FORMATTING RULES (ESPECIALLY FOR THE "insight" FIELD):**
253
  - All string values MUST be enclosed in double quotes (`"`).
254
  - Any literal double quote (`"`) character *within* the string content MUST be escaped as `\\"`.
255
  - Any literal backslash (`\\`) character *within* the string content MUST be escaped as `\\\\`.
256
+ - **NEWLINES ARE THE #1 CAUSE OF ERRORS. READ CAREFULLY:**
257
+ - If the *content* of your `insight` text (or any other string value) spans multiple lines OR contains newline characters for formatting, EACH such newline character *inside the string's content* MUST be represented as the two-character sequence: a backslash followed by an 'n' (i.e., `\\n`).
258
+ - **DO NOT use a literal newline character (pressing Enter) inside a JSON string value.** This will break the JSON parser and result in an "Unterminated string" error.
259
+ - **Example of BAD JSON (literal newline):**
260
+ `"insight": "This is line one.
261
+ This is line two."`
262
+ (This is INVALID because of the actual newline between "one." and "This". The parser stops reading the string at "one.")
263
+ - **Example of GOOD JSON (escaped newline using `\\n`):**
264
+ `"insight": "This is line one.\\nThis is line two."`
265
+ (This is VALID. The `\\n` sequence correctly tells the parser there's a newline *within* the string's content.)
266
+ - The entire JSON response (the list `[...]`) can be pretty-printed with newlines *between* JSON elements (like after a comma or a brace) for readability, but *within* any quoted string value, the newline rule above is absolute.
267
+
268
  **Your Reflection Process (Consider each step and generate operations accordingly):**
269
+ ... (rest of reflection process - STEP 1, STEP 2, STEP 3 - remains the same) ...
270
+
271
  **General Guidelines for Insight Content and Actions:**
272
+ ... (General Guidelines remain the same) ...
273
+
274
+ **Example of a comprehensive JSON output (Pay close attention to `\\n` for newlines within insight text if the insight content spans multiple lines):**
 
275
  [
276
  {"action": "update", "old_insight_to_replace": "[CORE_RULE|1.0] My designated name is 'LearnerAI'.", "insight": "[CORE_RULE|1.0] I am Lumina, an AI assistant designed to chat, provide information, and remember context like the secret word 'rocksyrup'."},
277
+ {"action": "update", "old_insight_to_replace": "[CORE_RULE|1.0] I'm Lumina, the AI designed to chat with you.", "insight": "[CORE_RULE|1.0] I am Lumina, an AI assistant designed to chat, provide information, and remember context like the secret word 'rocksyrup'.\\nMy purpose is to assist the user with research and tasks."},
278
+ {"action": "add", "insight": "[CORE_RULE|0.9] I am capable of searching the internet for current weather information if asked.\\nThis capability was confirmed on [date] based on user query regarding weather."},
279
+ {"action": "add", "insight": "[RESPONSE_PRINCIPLE|0.8] When user provides positive feedback like 'good job', acknowledge it warmly with phrases like 'Thank you!' or 'Glad I could help!'."},
280
+ {"action": "update", "old_insight_to_replace": "[RESPONSE_PRINCIPLE|0.7] Avoid mentioning old conversations.", "insight": "[RESPONSE_PRINCIPLE|0.85] Avoid mentioning old conversations unless the user explicitly refers to them or it's highly relevant to the current query.\\nThis rule was updated due to user preference for more contextual continuity when appropriate."}
281
  ]
282
  """
283
  insight_user_prompt = f"""Interaction Summary:\n{summary}\n
 
288
  2. **Add New Learnings:** Identify and "add" any distinct new facts, skills, or important user preferences learned from the "Interaction Summary".
289
  3. **Update Existing Principles:** "Update" any non-core principles from "Potentially Relevant Existing Rules" if the "Interaction Summary" provided a clear refinement.
290
  Combine all findings into a single JSON list of operations. If there are multiple distinct changes based on the interaction and existing rules, ensure your list reflects all of them. Output JSON only, adhering to all specified formatting rules.
291
+ **ULTRA-IMPORTANT FINAL REMINDER: YOUR ENTIRE RESPONSE MUST BE A SINGLE, VALID JSON LIST. DOUBLE-CHECK ALL STRING VALUES, ESPECIALLY THE 'insight' TEXT, FOR CORRECTLY ESCAPED NEWLINES (using the two characters `\\` and `n`, i.e., `\\n`) AND QUOTES (using `\\"`). AN UNESCAPED LITERAL NEWLINE CHARACTER (ASCII 10) WITHIN AN 'insight' STRING VALUE WILL BREAK THE JSON AND CAUSE THE UPDATE TO FAIL. BE METICULOUS. VALIDATE YOUR JSON STRUCTURE AND STRING CONTENTS BEFORE FINALIZING YOUR OUTPUT.**
292
  """
293
  insight_msgs = [{"role":"system", "content":insight_sys_prompt}, {"role":"user", "content":insight_user_prompt}]
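When constructing operation lists like the examples these prompts demand, `json.dumps` performs all of the required escaping automatically; a small sketch (not part of the app):

import json

op = {"action": "add", "insight": "[CORE_RULE|1.0] Line one.\nLine two."}
print(json.dumps([op]))
# prints: [{"action": "add", "insight": "[CORE_RULE|1.0] Line one.\nLine two."}]
# (the embedded newline is emitted as the two-character sequence \n, exactly as the prompt requires)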
294
  insight_prov, insight_model_disp = provider, model_disp_name
 
298
  i_d_n = next((dn for dn, mid in MODELS_BY_PROVIDER.get(i_p.lower(), {}).get("models", {}).items() if mid == i_id), None)
299
  if i_d_n: insight_prov, insight_model_disp = i_p, i_d_n
300
  logger.info(f"DEFERRED [{task_id}]: Generating insights with {insight_prov}/{insight_model_disp}")
301
+
302
+ raw_ops_json_full = "".join(list(call_model_stream(provider=insight_prov, model_display_name=insight_model_disp, messages=insight_msgs, api_key_override=api_key_override, temperature=0.0, max_tokens=2000))).strip()
303
  ops, processed_count = [], 0
 
304
 
305
+ json_match_ops = re.search(r"```json\s*(\[.*?\])\s*```", raw_ops_json_full, re.DOTALL|re.I) or \
306
+ re.search(r"(\[.*?\])", raw_ops_json_full, re.DOTALL)
307
+
308
+ ops_json_str = None
309
  if json_match_ops:
310
+ ops_json_str = json_match_ops.group(1)
311
  try:
 
312
  ops = json.loads(ops_json_str)
313
+ except json.JSONDecodeError as e:
314
+ error_char_pos = e.pos
315
+ context_window = 40
316
+ start_context = max(0, error_char_pos - context_window)
317
+ end_context = min(len(ops_json_str), error_char_pos + context_window)
318
+ problem_context = ops_json_str[start_context:end_context].replace("\n", "\\n") # Show newlines escaped in log
319
+
320
+ logger.warning(
321
+ f"DEFERRED [{task_id}]: Initial JSON ops parse error: {e}. "
322
+ f"Error at char {error_char_pos}. Context around error: '...{problem_context}...'. "
323
+ f"Attempting to repair newlines in the JSON string."
324
+ )
325
+ # logger.debug(f"DEFERRED [{task_id}]: Problematic JSON string before repair:\n>>>>>>>>>>\n{ops_json_str}\n<<<<<<<<<<")
326
 
327
+ repaired_json_str = repair_json_string_newlines(ops_json_str)
328
+ # logger.debug(f"DEFERRED [{task_id}]: JSON string after repair attempt:\n>>>>>>>>>>\n{repaired_json_str}\n<<<<<<<<<<")
329
+
330
+ try:
331
+ ops = json.loads(repaired_json_str)
332
+ logger.info(f"DEFERRED [{task_id}]: JSON successfully parsed after repair attempt.")
333
+ except json.JSONDecodeError as e2:
334
+ logger.error(
335
+ f"DEFERRED [{task_id}]: JSON ops parse error EVEN AFTER REPAIR: {e2}. "
336
+ f"The repair attempt was not sufficient. Skipping insight operations for this turn."
337
+ )
338
+ # Log the string that failed after repair for further debugging
339
+ # logger.error(f"DEFERRED [{task_id}]: Repaired JSON string that still failed:\n>>>>>>>>>>\n{repaired_json_str}\n<<<<<<<<<<")
340
+ ops = []
341
+ else:
342
+ logger.info(f"DEFERRED [{task_id}]: No JSON list structure (e.g., starting with '[') found in LLM output. Full raw output:\n>>>>>>>>>>\n{raw_ops_json_full}\n<<<<<<<<<<")
343
+
344
  if isinstance(ops, list) and ops:
345
+ logger.info(f"DEFERRED [{task_id}]: LLM provided {len(ops)} insight ops to process.")
346
  for op_idx, op in enumerate(ops):
347
  if not isinstance(op, dict):
348
  logger.warning(f"DEFERRED [{task_id}]: Op {op_idx}: Skipped non-dict item in ops list: {op}")
349
  continue
350
 
351
  action = op.get("action","").lower()
352
+ insight_text_raw = op.get("insight")
353
+ old_insight_raw = op.get("old_insight_to_replace")
354
 
355
+ if not isinstance(insight_text_raw, str):
356
+ logger.warning(f"DEFERRED [{task_id}]: Op {op_idx}: Skipped op due to non-string insight_text (type: {type(insight_text_raw)}): {op}")
357
  continue
358
+ insight_text = insight_text_raw.strip()
359
+
360
+ old_insight = None
361
+ if old_insight_raw is not None:
362
+ if not isinstance(old_insight_raw, str):
363
+ logger.warning(f"DEFERRED [{task_id}]: Op {op_idx}: Skipped op due to non-string/non-null old_insight_to_replace (type: {type(old_insight_raw)}): {op}")
364
+ continue
365
+ old_insight = old_insight_raw.strip()
366
+
367
  if not insight_text or not re.match(r"\[(CORE_RULE|RESPONSE_PRINCIPLE|BEHAVIORAL_ADJUSTMENT|GENERAL_LEARNING)\|([\d\.]+?)\]", insight_text, re.I|re.DOTALL):
368
  logger.warning(f"DEFERRED [{task_id}]: Op {op_idx}: Skipped op due to invalid/empty insight_text format: '{insight_text[:100]}...' from op: {op}")
369
  continue
 
373
  if success: processed_count +=1
374
  else: logger.warning(f"DEFERRED [{task_id}]: Op {op_idx} (add): Failed to add rule '{insight_text[:50]}...'. Status: {status_msg}")
375
  elif action == "update":
376
+ if old_insight:
377
+ if old_insight != insight_text:
378
+ remove_success = remove_rule_entry(old_insight)
379
+ if not remove_success:
380
+ logger.warning(f"DEFERRED [{task_id}]: Op {op_idx} (update): Failed to remove old rule '{old_insight[:50]}...' before adding new. This might lead to duplicates if the new rule is different.")
381
+ else:
382
+ logger.info(f"DEFERRED [{task_id}]: Op {op_idx} (update): Old insight is identical to new insight. Skipping removal and effectively treating as 'add if not present'.")
383
+
384
+ success, status_msg = add_rule_entry(insight_text)
385
  if success: processed_count +=1
386
+ else: logger.warning(f"DEFERRED [{task_id}]: Op {op_idx} (update): Failed to add/update rule '{insight_text[:50]}...'. Status: {status_msg}")
387
  else:
388
  logger.warning(f"DEFERRED [{task_id}]: Op {op_idx}: Skipped op due to unknown action '{action}': {op}")
389
 
390
  logger.info(f"DEFERRED [{task_id}]: Processed {processed_count} insight ops out of {len(ops)} received.")
391
+ elif not json_match_ops: # no JSON structure found; already logged above
392
+ pass
393
+ else:
394
+ logger.info(f"DEFERRED [{task_id}]: No valid list of insight ops from LLM after parsing was attempted. Raw match (ops_json_str - first 500 chars):\n>>>>>>>>>>\n{ops_json_str[:500] if ops_json_str else 'N/A'}\n<<<<<<<<<<")
395
 
396
  except Exception as e: logger.error(f"DEFERRED [{task_id}]: CRITICAL ERROR in deferred task: {e}", exc_info=True)
397
  logger.info(f"DEFERRED [{task_id}]: END. Total: {time.time() - start_time:.2f}s")
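The extraction-and-validation flow above can be exercised end to end in isolation. A minimal sketch (the value of `raw` is a stand-in for typical LLM output; the regexes mirror the ones used in the task):

import json, re

raw = 'Sure: ```json\n[{"action": "add", "insight": "[CORE_RULE|1.0] Example rule."}]\n```'
m = re.search(r"```json\s*(\[.*?\])\s*```", raw, re.DOTALL | re.I) or re.search(r"(\[.*?\])", raw, re.DOTALL)
ops = json.loads(m.group(1)) if m else []
for op in ops:
    # same gate as the deferred task: every insight must carry a [TYPE|SCORE] prefix
    assert re.match(r"\[(CORE_RULE|RESPONSE_PRINCIPLE|BEHAVIORAL_ADJUSTMENT|GENERAL_LEARNING)\|([\d\.]+?)\]", op["insight"], re.I)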
 
515
  except Exception as e_read: return f"Error reading file: {e_read}"
516
  if not content.strip(): return "Uploaded rules file is empty."
517
  added_count, skipped_count, error_count = 0,0,0
518
+ # For .txt, split by '---'
519
+ if uploaded_file_obj.name.lower().endswith(".txt"):
520
+ potential_rules = content.split("\n\n---\n\n")
521
+ if len(potential_rules) == 1 and "\n" in content: # Fallback for simple newline separation in .txt
522
+ potential_rules = [r.strip() for r in content.splitlines() if r.strip()]
523
+ elif uploaded_file_obj.name.lower().endswith(".jsonl"):
524
+ potential_rules = []
525
+ for line in content.splitlines():
526
+ if line.strip():
527
+ try:
528
+ # Expect each line to be a JSON string containing the rule text
529
+ rule_text_in_json_string = json.loads(line)
530
+ if isinstance(rule_text_in_json_string, str):
531
+ potential_rules.append(rule_text_in_json_string)
532
+ else:
533
+ logger.warning(f"Rule Upload: Skipped non-string rule from JSONL: {rule_text_in_json_string}")
534
+ error_count +=1
535
+ except json.JSONDecodeError:
536
+ logger.warning(f"Rule Upload: Failed to parse JSONL line for rule: {line}")
537
+ error_count +=1
538
+ else:
539
+ return "Unsupported file type for rules. Please use .txt or .jsonl."
540
+
541
+
542
  total_to_process = len(potential_rules)
543
+ if total_to_process == 0 and error_count == 0: return "No rules found in file to process."
544
+
545
  progress(0, desc="Starting rules upload...")
546
  for idx, rule_text in enumerate(potential_rules):
547
  rule_text = rule_text.strip()
 
549
  success, status_msg = add_rule_entry(rule_text)
550
  if success: added_count += 1
551
  elif status_msg == "duplicate": skipped_count += 1
552
+ else: error_count += 1 # Increment error count for add_rule_entry failures too
553
+ progress((idx + 1) / total_to_process if total_to_process > 0 else 1, desc=f"Processed {idx+1}/{total_to_process} rules...")
554
+
555
+ msg = f"Rules Upload: Total lines/segments processed: {total_to_process}. Added: {added_count}, Skipped (duplicates): {skipped_count}, Errors/Invalid: {error_count}."
556
  logger.info(msg); return msg
557
 
558
  def ui_refresh_memories_display_fn(): return get_all_memories_cached() or []
 
588
  if not content.strip(): return "Uploaded memories file is empty."
589
  added_count, format_error_count, save_error_count = 0,0,0
590
  memory_objects_to_process = []
591
+
592
+ file_ext = os.path.splitext(uploaded_file_obj.name.lower())[1]
593
+
594
+ if file_ext == ".json":
595
+ try:
596
+ parsed_json = json.loads(content)
597
+ if isinstance(parsed_json, list):
598
+ memory_objects_to_process = parsed_json
599
+ elif isinstance(parsed_json, dict): # Single object
600
+ memory_objects_to_process = [parsed_json]
601
+ else:
602
+ format_error_count = 1 # Not a list or object
603
+ except json.JSONDecodeError:
604
+ format_error_count = 1 # Invalid JSON
605
+ elif file_ext == ".jsonl":
606
+ for line_num, line in enumerate(content.splitlines()):
607
  if line.strip():
608
+ try:
609
+ memory_objects_to_process.append(json.loads(line))
610
+ except json.JSONDecodeError:
611
+ logger.warning(f"Memories Upload: JSONL line {line_num+1} parse error: {line[:100]}")
612
+ format_error_count += 1
613
+ else:
614
+ return "Unsupported file type for memories. Please use .json or .jsonl."
615
 
616
+ if not memory_objects_to_process and format_error_count > 0:
617
+ return f"Memories Upload: File parsing failed. Found {format_error_count} format errors."
 
618
  elif not memory_objects_to_process:
619
  return "No valid memory objects found in the uploaded file."
620
 
 
627
  success, _ = add_memory_entry(mem_data["user_input"], mem_data["metrics"], mem_data["bot_response"])
628
  if success: added_count += 1
629
  else: save_error_count += 1
630
+ else:
631
+ logger.warning(f"Memories Upload: Skipped invalid memory object structure: {str(mem_data)[:100]}")
632
+ format_error_count += 1
633
  progress((idx + 1) / total_to_process, desc=f"Processed {idx+1}/{total_to_process} memories...")
634
 
635
+ msg = f"Memories Upload: Processed {total_to_process} objects. Added: {added_count}, Format/Structure Errors: {format_error_count}, Save Errors: {save_error_count}."
636
  logger.info(msg); return msg
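A matching memories .jsonl file carries one object per line with the three keys the loop above reads. A hypothetical single-line example:

{"user_input": "What is the secret word?", "metrics": {"takeaway": "User tested recall", "response_success_score": 0.9}, "bot_response": "The secret word is 'rocksyrup'."}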
637
 
638
 
 
735
  rules_disp_ta = gr.TextArea(
736
  label="Current Rules (Read-only, Edit via Upload/Save)", lines=10,
737
  placeholder="Rules will appear here. Use 'Save Edited Text' or 'Upload File' to modify.",
738
+ interactive=True
739
  )
740
  gr.Markdown("To edit rules, modify the text above and click 'Save Edited Text', or upload a new file.")
741
  save_edited_rules_btn = gr.Button("💾 Save Edited Text", variant="primary")
 
783
  user_msg_submit_event = user_msg_tb.submit(**chat_event_args)
784
 
785
  for event in [send_btn_click_event, user_msg_submit_event]:
786
+ event.then(fn=ui_refresh_rules_display_fn, inputs=None, outputs=rules_disp_ta, show_progress=False)
787
+ event.then(fn=ui_refresh_memories_display_fn, inputs=None, outputs=mems_disp_json, show_progress=False)
788
 
789
  # Rules Management events
790
  dl_rules_btn.click(fn=ui_download_rules_action_fn, inputs=None, outputs=dl_rules_btn)
 
793
  if not edited_rules_text.strip():
794
  return "No rules text to save."
795
 
796
  potential_rules = edited_rules_text.split("\n\n---\n\n")
797
+ if len(potential_rules) == 1 and "\n" in edited_rules_text:
798
  potential_rules = [r.strip() for r in edited_rules_text.splitlines() if r.strip()]
799
 
800
  if not potential_rules:
801
  return "No rules found to process from editor."
802
 
803
  added, skipped, errors = 0, 0, 0
804
  unique_rules_to_process = sorted(list(set(filter(None, [r.strip() for r in potential_rules]))))
805
 
806
+ total_unique = len(unique_rules_to_process)
807
+ if total_unique == 0: return "No unique, non-empty rules found in editor text."
808
+
809
+ progress(0, desc=f"Saving {total_unique} unique rules from editor...")
810
 
811
  for idx, rule_text in enumerate(unique_rules_to_process):
812
+ success, status_msg = add_rule_entry(rule_text)
813
  if success: added += 1
814
  elif status_msg == "duplicate": skipped += 1
815
  else: errors += 1
816
+ progress((idx + 1) / total_unique, desc=f"Processed {idx+1}/{total_unique} rules...")
817
 
818
+ return f"Editor Save: Added: {added}, Skipped (duplicates): {skipped}, Errors/Invalid: {errors} from {total_unique} unique rules in text."
819
 
820
  save_edited_rules_btn.click(
821
  fn=save_edited_rules_action_fn,
822
  inputs=[rules_disp_ta],
823
  outputs=[rules_stat_tb],
824
  show_progress="full"
825
+ ).then(fn=ui_refresh_rules_display_fn, outputs=rules_disp_ta, show_progress=False)
826
 
827
  upload_rules_fobj.upload(
828
  fn=ui_upload_rules_action_fn,
829
  inputs=[upload_rules_fobj],
830
  outputs=[rules_stat_tb],
831
  show_progress="full"
832
+ ).then(fn=ui_refresh_rules_display_fn, outputs=rules_disp_ta, show_progress=False)
833
 
834
  clear_rules_btn.click(
835
  fn=lambda: ("All rules cleared." if clear_all_rules_data_backend() else "Error clearing rules."),
836
+ outputs=rules_stat_tb,
837
+ show_progress=False
838
+ ).then(fn=ui_refresh_rules_display_fn, outputs=rules_disp_ta, show_progress=False)
839
 
840
  # Memories Management events
841
  dl_mems_btn.click(fn=ui_download_memories_action_fn, inputs=None, outputs=dl_mems_btn)
 
845
  inputs=[upload_mems_fobj],
846
  outputs=[mems_stat_tb],
847
  show_progress="full"
848
+ ).then(fn=ui_refresh_memories_display_fn, outputs=mems_disp_json, show_progress=False)
849
 
850
  clear_mems_btn.click(
851
  fn=lambda: ("All memories cleared." if clear_all_memory_data_backend() else "Error clearing memories."),
852
+ outputs=mems_stat_tb,
853
+ show_progress=False
854
+ ).then(fn=ui_refresh_memories_display_fn, outputs=mems_disp_json, show_progress=False)
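All of the management events above follow one idiom: write the action's status message, then chain a silent .then() refresh of the affected display. A stripped-down sketch of the pattern (component names are placeholders, not the app's):

import gradio as gr

with gr.Blocks() as sketch:
    stat_tb = gr.Textbox(label="Status")
    disp_ta = gr.TextArea(label="Data")
    run_btn = gr.Button("Run")
    run_btn.click(fn=lambda: "Done.", outputs=stat_tb, show_progress=False).then(
        fn=lambda: "refreshed contents", outputs=disp_ta, show_progress=False
    )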
855
 
856
  if MEMORY_STORAGE_BACKEND == "RAM" and 'save_faiss_sidebar_btn' in locals():
857
  def save_faiss_action_with_feedback_sidebar_fn():
 
865
  backend_status = "AI Systems Initialized. Ready."
866
  rules_on_load = ui_refresh_rules_display_fn()
867
  mems_on_load = ui_refresh_memories_display_fn()
868
+ return (
869
+ backend_status,
870
+ rules_on_load,
871
+ mems_on_load,
872
+ gr.Markdown(visible=False),
873
+ gr.Textbox(value="*Waiting...*", interactive=True),
874
+ gr.DownloadButton(interactive=False, value=None, visible=False)
875
+ )
876
 
877
  initial_load_outputs = [
878
  agent_stat_tb,
879
  rules_disp_ta,
880
  mems_disp_json,
881
+ detect_out_md,
882
+ fmt_report_tb,
883
+ dl_report_btn
884
  ]
885
  demo.load(fn=app_load_fn, inputs=None, outputs=initial_load_outputs)
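Note the contract here: app_load_fn must return one value per component in initial_load_outputs, in the same order, because Gradio maps return values onto outputs positionally. A tiny standalone sketch of that rule:

import gradio as gr

with gr.Blocks() as sketch:
    a = gr.Textbox(label="A")
    b = gr.Textbox(label="B")
    # the returned tuple maps onto [a, b] positionally
    sketch.load(fn=lambda: ("value for a", "value for b"), inputs=None, outputs=[a, b])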
886
 
887
 
888
  if __name__ == "__main__":
889
+ logger.info(f"Starting Gradio AI Research Mega Agent (v6.1 - JSON Repair & UI Polish, Memory: {MEMORY_STORAGE_BACKEND})...")
890
  app_port = int(os.getenv("GRADIO_PORT", 7860))
891
  app_server = os.getenv("GRADIO_SERVER_NAME", "127.0.0.1")
892
  app_debug = os.getenv("GRADIO_DEBUG", "False").lower() == "true"