broadfield-dev committed
Commit af1c593 · verified · 1 Parent(s): cfb6614

Update app.py

Files changed (1):
1. app.py (+94, -40)
app.py CHANGED
@@ -18,7 +18,7 @@ from memory_logic import (
     initialize_memory_system,
     add_memory_entry, retrieve_memories_semantic, get_all_memories_cached, clear_all_memory_data_backend,
     add_rule_entry, retrieve_rules_semantic, remove_rule_entry, get_all_rules_cached, clear_all_rules_data_backend,
-    save_faiss_indices_to_disk, STORAGE_BACKEND as MEMORY_STORAGE_BACKEND # Import for UI
+    save_faiss_indices_to_disk, STORAGE_BACKEND as MEMORY_STORAGE_BACKEND
 )
 from websearch_logic import scrape_url, search_and_scrape_duckduckgo, search_and_scrape_google
@@ -156,7 +156,7 @@ def process_user_interaction_gradio(user_input: str, provider_name: str, model_d
         yield "status", "<i>[Synthesizing web report...]</i>"
         final_system_prompt_str += " Generate report/answer from web content, history, & guidelines. Cite URLs as [Source X]."
         final_user_prompt_content_str = f"History:\n{history_str_for_prompt}\nGuidelines:\n{initial_insights_ctx_str}\nWeb Content:\n{scraped_content}\nQuery: \"{user_input}\"\nReport/Response (cite sources [Source X]):"
-    else: # Fallback
+    else:
         final_system_prompt_str += " Respond directly (unknown action path)."
         final_user_prompt_content_str = f"History:\n{history_str_for_prompt}\nGuidelines:\n{initial_insights_ctx_str}\nQuery: \"{user_input}\"\nResponse:"
     final_llm_messages = [{"role": "system", "content": final_system_prompt_str}, {"role": "user", "content": final_user_prompt_content_str}]
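Note: every action path in process_user_interaction_gradio, including the tightened else fallback above, converges on the same two-message payload. A standalone sketch of the shape it builds (all values invented; only the f-string templates come from the hunk above):

history_str_for_prompt = "User: hi\nAI: hello"
initial_insights_ctx_str = '- "[RESPONSE_PRINCIPLE|0.8] When user provides positive feedback, acknowledge it warmly."'
user_input = "What is FAISS?"

final_system_prompt_str = "You are a helpful research agent." + " Respond directly (unknown action path)."
final_user_prompt_content_str = f"History:\n{history_str_for_prompt}\nGuidelines:\n{initial_insights_ctx_str}\nQuery: \"{user_input}\"\nResponse:"
final_llm_messages = [{"role": "system", "content": final_system_prompt_str},
                      {"role": "user", "content": final_user_prompt_content_str}]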
@@ -182,8 +182,49 @@ def deferred_learning_and_memory_task(user_input: str, bot_response: str, provid
     add_memory_entry(user_input, metrics, bot_response)
     summary = f"User:\"{user_input}\"\nAI:\"{bot_response}\"\nMetrics(takeaway):{metrics.get('takeaway','N/A')},Success:{metrics.get('response_success_score','N/A')}"
     existing_rules_ctx = "\n".join([f"- \"{r}\"" for r in retrieve_rules_semantic(f"{summary}\n{user_input}", k=10)]) or "No existing rules context."
-    insight_sys_prompt = """You are an expert AI knowledge base curator... (Your full long prompt from ai-learn)... Output ONLY JSON list.""" # Ensure this is the FULL prompt
-    insight_user_prompt = f"""Interaction Summary:\n{summary}\nRelevant Existing Rules:\n{existing_rules_ctx}\nConsidered Principles:\n{json.dumps([p['original'] for p in insights_reflected if 'original' in p]) if insights_reflected else "None"}\nTask: Generate JSON list of add/update operations... (Full task description from ai-learn)"""
+    insight_sys_prompt = """You are an expert AI knowledge base curator. Your primary function is to meticulously analyze an interaction and update the AI's guiding principles (insights/rules) to improve its future performance and self-understanding.
+You MUST output a JSON list of operation objects. This list can and SHOULD contain MULTIPLE distinct operations if various learnings occurred.
+Each operation object in the JSON list must have:
+1. "action": A string, either "add" (for entirely new rules) or "update" (to replace an existing rule with a better one).
+2. "insight": A string, the full, refined insight text including its [TYPE|SCORE] prefix (e.g., "[CORE_RULE|1.0] My name is Lumina, an AI assistant.").
+3. "old_insight_to_replace" (ONLY for "update" action): A string, the *exact, full text* of an existing insight that the new "insight" should replace.
+**Your Reflection Process (Consider each step and generate operations accordingly):**
+**STEP 1: Core Identity & Purpose Review (Result: Primarily 'update' operations)**
+- Examine all `CORE_RULE`s related to my identity (name, fundamental purpose, core unchanging capabilities, origin) from the "Potentially Relevant Existing Rules".
+- **CONSOLIDATE & MERGE:** If multiple `CORE_RULE`s state similar aspects (e.g., multiple name declarations like 'Lumina' and 'LearnerAI', or slightly different purpose statements), you MUST merge them into ONE definitive, comprehensive `CORE_RULE`.
+- The new "insight" will be this single, merged rule. Propose separate "update" operations to replace *each* redundant or less accurate core identity rule with this new canonical one.
+- Prioritize user-assigned names or the most specific, recently confirmed information. If the interaction summary clarifies a name or core function, ensure this is reflected.
+**STEP 2: New Distinct Learnings (Result: Primarily 'add' operations)**
+- Did I learn any completely new, distinct facts (e.g., "The user's project is codenamed 'Bluefire'")?
+- Did I demonstrate or get told about a new skill/capability not previously documented (e.g., "I can now generate mermaid diagrams based on descriptions")?
+- Did the user express a strong, general preference that should guide future interactions (e.g., "User prefers responses to start with a direct answer, then explanation")?
+- For these, propose 'add' operations. Assign `CORE_RULE` for truly fundamental new facts/capabilities, otherwise `RESPONSE_PRINCIPLE` or `BEHAVIORAL_ADJUSTMENT`. Ensure these are genuinely NEW and not just rephrasing of existing non-core rules.
+**STEP 3: Refinements to Existing Behaviors/Principles (Result: 'update' operations for non-core rules)**
+- Did I learn to modify or improve an existing behavior, response style, or operational guideline (that is NOT part of core identity)?
+- For example, if an existing `RESPONSE_PRINCIPLE` was "Be formal," and the interaction showed the user prefers informality, update that principle.
+- Propose 'update' operations for the relevant `RESPONSE_PRINCIPLE` or `BEHAVIORAL_ADJUSTMENT`. Only update if the change is significant.
+**General Guidelines:**
+- If no new insights, updates, or consolidations are warranted from the interaction, output an empty JSON list: `[]`.
+- Ensure the "insight" field (for both add/update) always contains the properly formatted insight string: `[TYPE|SCORE] Text`. TYPE can be `CORE_RULE`, `RESPONSE_PRINCIPLE`, `BEHAVIORAL_ADJUSTMENT`. Scores should reflect confidence/importance.
+- Be precise with "old_insight_to_replace" – it must *exactly* match an existing rule string from the "Potentially Relevant Existing Rules" context.
+- Aim for a comprehensive set of operations that reflects ALL key learnings from the interaction.
+- Output ONLY the JSON list. No other text, explanations, or markdown.
+**Example of a comprehensive JSON output with MULTIPLE operations:**
+[
+{"action": "update", "old_insight_to_replace": "[CORE_RULE|1.0] My designated name is 'LearnerAI'.", "insight": "[CORE_RULE|1.0] I am Lumina, an AI assistant designed to chat, provide information, and remember context like the secret word 'rocksyrup'."},
+{"action": "update", "old_insight_to_replace": "[CORE_RULE|1.0] I'm Lumina, the AI designed to chat with you.", "insight": "[CORE_RULE|1.0] I am Lumina, an AI assistant designed to chat, provide information, and remember context like the secret word 'rocksyrup'."},
+{"action": "add", "insight": "[CORE_RULE|0.9] I am capable of searching the internet for current weather information if asked."},
+{"action": "add", "insight": "[RESPONSE_PRINCIPLE|0.8] When user provides positive feedback, acknowledge it warmly."},
+{"action": "update", "old_insight_to_replace": "[RESPONSE_PRINCIPLE|0.7] Avoid mentioning old conversations.", "insight": "[RESPONSE_PRINCIPLE|0.85] Avoid mentioning old conversations unless the user explicitly refers to them or it's highly relevant to the current query."}
+]"""
+    insight_user_prompt = f"""Interaction Summary:\n{summary}\n
+Potentially Relevant Existing Rules (Review these carefully for consolidation or refinement):\n{existing_rules_ctx}\n
+Guiding principles that were considered during THIS interaction (these might offer clues for new rules or refinements):\n{json.dumps([p['original'] for p in insights_reflected if 'original' in p]) if insights_reflected else "None"}\n
+Task: Based on your three-step reflection process (Core Identity, New Learnings, Refinements):
+1. **Consolidate CORE_RULEs:** Merge similar identity/purpose rules from "Potentially Relevant Existing Rules" into single, definitive statements using "update" operations. Replace multiple old versions with the new canonical one.
+2. **Add New Learnings:** Identify and "add" any distinct new facts, skills, or important user preferences learned from the "Interaction Summary".
+3. **Update Existing Principles:** "Update" any non-core principles from "Potentially Relevant Existing Rules" if the "Interaction Summary" provided a clear refinement.
+Combine all findings into a single JSON list of operations. If there are multiple distinct changes based on the interaction and existing rules, ensure your list reflects all of them. Output JSON only."""
     insight_msgs = [{"role":"system", "content":insight_sys_prompt}, {"role":"user", "content":insight_user_prompt}]
     insight_prov, insight_model_disp = provider, model_disp_name
     insight_env_model = os.getenv("INSIGHT_MODEL_OVERRIDE")
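Note: the curator prompt above fixes a strict contract — a bare JSON list of objects with "action", "insight", and (for updates) "old_insight_to_replace", every insight carrying a [TYPE|SCORE] prefix. The diff does not show how deferred_learning_and_memory_task consumes the completion; a hypothetical validation sketch (function and variable names invented, only the contract itself comes from the prompt):

import json, re

# Accepts the three TYPEs named in the prompt and a numeric score like 1.0 or 0.85.
INSIGHT_RE = re.compile(r"^\[(CORE_RULE|RESPONSE_PRINCIPLE|BEHAVIORAL_ADJUSTMENT)\|\d+(\.\d+)?\]\s+\S")

def parse_insight_ops(raw_llm_text: str) -> list:
    ops = json.loads(raw_llm_text)  # the prompt demands a bare JSON list, no markdown
    if not isinstance(ops, list):
        raise ValueError("expected a JSON list of operations")
    valid_ops = []
    for op in ops:
        if not isinstance(op, dict) or op.get("action") not in ("add", "update"):
            continue  # skip malformed entries rather than failing the whole batch
        if not INSIGHT_RE.match(op.get("insight", "")):
            continue  # every insight must carry its [TYPE|SCORE] prefix
        if op["action"] == "update" and not op.get("old_insight_to_replace"):
            continue  # updates must name the exact rule text they replace
        valid_ops.append(op)
    return valid_ops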
@@ -264,56 +305,66 @@ def handle_gradio_chat_submit(user_msg_txt: str, gr_hist_list: list, sel_prov_na
         hist_len_check = MAX_HISTORY_TURNS * 2
         if current_chat_session_history and current_chat_session_history[0]["role"] == "system": hist_len_check +=1
         if len(current_chat_session_history) > hist_len_check:
-            current_chat_session_history = ([current_chat_session_history[0]] if current_chat_session_history[0]["role"] == "system" else []) + current_chat_session_history[- (MAX_HISTORY_TURNS * 2):]
+            current_chat_session_history = ([current_chat_session_history[0]] if current_chat_session_history[0]["role"] == "system" else []) + current_chat_session_history[-(MAX_HISTORY_TURNS * 2):]
         threading.Thread(target=deferred_learning_and_memory_task, args=(user_msg_txt, final_bot_resp_acc, sel_prov_name, sel_model_disp_name, insights_used_parsed, ui_api_key.strip() if ui_api_key else None), daemon=True).start()
         status_txt = "Response complete. Background learning initiated."
     else: status_txt = "Processing finished; no response or error."
     yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn)
 
 def ui_view_rules_action_fn(): return "\n\n---\n\n".join(get_all_rules_cached()) or "No rules found."
-def ui_upload_rules_action_fn(file_obj, progress=gr.Progress()):
-    if not file_obj: return "No file."
-    try: content = open(file_obj.name, 'r', encoding='utf-8').read()
-    except Exception as e: return f"Error reading file: {e}"
-    if not content.strip(): return "File empty."
+def ui_upload_rules_action_fn(uploaded_file_obj, progress=gr.Progress()):
+    if not uploaded_file_obj: return "No file provided for rules upload."
+    try:
+        with open(uploaded_file_obj.name, 'r', encoding='utf-8') as f: content = f.read()
+    except Exception as e_read: return f"Error reading file: {e_read}"
+    if not content.strip(): return "Uploaded rules file is empty."
+    added_count, skipped_count, error_count = 0,0,0
     potential_rules = content.split("\n\n---\n\n")
     if len(potential_rules) == 1 and "\n" in content: potential_rules = [r.strip() for r in content.splitlines() if r.strip()]
-    if not potential_rules: return "No rules found in file."
-    added, skipped, errors = 0,0,0; total = len(potential_rules)
+    total_to_process = len(potential_rules)
+    if total_to_process == 0: return "No rules found in file to process."
+    progress(0, desc="Starting rules upload...")
     for idx, rule_text in enumerate(potential_rules):
-        if not rule_text.strip(): continue
-        success, status = add_rule_entry(rule_text.strip())
-        if success: added +=1
-        elif status == "duplicate": skipped +=1
-        else: errors +=1
-        progress((idx+1)/total, desc=f"Processing {idx+1}/{total} rules...")
-    return f"Rules Upload: Total {total}. Added: {added}, Skipped (duplicates): {skipped}, Errors/Invalid: {errors}."
+        rule_text = rule_text.strip()
+        if not rule_text: continue
+        success, status_msg = add_rule_entry(rule_text)
+        if success: added_count += 1
+        elif status_msg == "duplicate": skipped_count += 1
+        else: error_count += 1
+        progress((idx + 1) / total_to_process, desc=f"Processed {idx+1}/{total_to_process} rules...")
+    msg = f"Rules Upload: Processed {total_to_process}. Added: {added_count}, Skipped (duplicates): {skipped_count}, Errors/Invalid: {error_count}."
+    logger.info(msg); return msg
 
 def ui_view_memories_action_fn(): return get_all_memories_cached() or []
-def ui_upload_memories_action_fn(file_obj, progress=gr.Progress()):
-    if not file_obj: return "No file."
-    try: content = open(file_obj.name, 'r', encoding='utf-8').read()
-    except Exception as e: return f"Error reading file: {e}"
-    if not content.strip(): return "File empty."
-    mem_objs, fmt_errors, added, save_errors = [], 0,0,0
+def ui_upload_memories_action_fn(uploaded_file_obj, progress=gr.Progress()):
+    if not uploaded_file_obj: return "No file provided for memories upload."
+    try:
+        with open(uploaded_file_obj.name, 'r', encoding='utf-8') as f: content = f.read()
+    except Exception as e_read: return f"Error reading file: {e_read}"
+    if not content.strip(): return "Uploaded memories file is empty."
+    added_count, format_error_count, save_error_count = 0,0,0
+    memory_objects_to_process = []
     try:
-        parsed = json.loads(content)
-        mem_objs = parsed if isinstance(parsed, list) else [parsed]
+        parsed_json = json.loads(content)
+        memory_objects_to_process = parsed_json if isinstance(parsed_json, list) else [parsed_json]
     except json.JSONDecodeError:
         for line in content.splitlines():
             if line.strip():
-                try: mem_objs.append(json.loads(line))
-                except: fmt_errors+=1
-    if not mem_objs and fmt_errors == 0: return "No valid memories in file."
-    total = len(mem_objs)
-    for idx, mem_data in enumerate(mem_objs):
+                try: memory_objects_to_process.append(json.loads(line))
+                except: format_error_count += 1
+    if not memory_objects_to_process and format_error_count == 0: return "No valid memory objects found."
+    total_to_process = len(memory_objects_to_process)
+    if total_to_process == 0: return "No memory objects to process."
+    progress(0, desc="Starting memories upload...")
+    for idx, mem_data in enumerate(memory_objects_to_process):
         if isinstance(mem_data, dict) and all(k in mem_data for k in ["user_input", "bot_response", "metrics"]):
             success, _ = add_memory_entry(mem_data["user_input"], mem_data["metrics"], mem_data["bot_response"])
-            if success: added +=1
-            else: save_errors +=1
-        else: fmt_errors +=1
-        progress((idx+1)/total, desc=f"Processing {idx+1}/{total} memories...")
-    return f"Memories Upload: Total {total}. Added: {added}, Format Errors: {fmt_errors}, Save Errors: {save_errors}."
+            if success: added_count += 1
+            else: save_error_count += 1
+        else: format_error_count += 1
+        progress((idx + 1) / total_to_process, desc=f"Processed {idx+1}/{total_to_process} memories...")
+    msg = f"Memories Upload: Processed {total_to_process}. Added: {added_count}, Format Errors: {format_error_count}, Save Errors: {save_error_count}."
+    logger.info(msg); return msg
 
 custom_theme = gr.themes.Base(primary_hue="teal", secondary_hue="purple", neutral_hue="zinc", text_size="sm", spacing_size="sm", radius_size="sm", font=[gr.themes.GoogleFont("Inter"), "ui-sans-serif", "system-ui", "sans-serif"])
 custom_css = """ body { font-family: 'Inter', sans-serif; } .gradio-container { max-width: 96% !important; margin: auto !important; padding-top: 1rem !important; } footer { display: none !important; } .gr-button { white-space: nowrap; } .gr-input, .gr-textarea textarea, .gr-dropdown input { border-radius: 8px !important; } .gr-chatbot .message { border-radius: 10px !important; box-shadow: 0 2px 5px rgba(0,0,0,0.08) !important; } .prose { h1 { font-size: 1.8rem; margin-bottom: 0.6em; margin-top: 0.8em; } h2 { font-size: 1.4rem; margin-bottom: 0.5em; margin-top: 0.7em; } h3 { font-size: 1.15rem; margin-bottom: 0.4em; margin-top: 0.6em; } p { margin-bottom: 0.8em; line-height: 1.65; } ul, ol { margin-left: 1.5em; margin-bottom: 0.8em; } code { background-color: #f1f5f9; padding: 0.2em 0.45em; border-radius: 4px; font-size: 0.9em; } pre > code { display: block; padding: 0.8em; overflow-x: auto; background-color: #f8fafc; border: 1px solid #e2e8f0; border-radius: 6px;}} .compact-group .gr-input-label, .compact-group .gr-dropdown-label { font-size: 0.8rem !important; padding-bottom: 2px !important;}"""
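Note: the two rewritten upload handlers accept differently shaped files, which the UI labels only loosely (.txt/.jsonl). A sketch of inputs each should accept (sample contents invented; the "\n\n---\n\n" separator and the user_input/bot_response/metrics keys are fixed by the code above):

import json

# rules.txt — entries separated by "\n\n---\n\n"; a plain one-rule-per-line
# file also works via the splitlines() fallback in ui_upload_rules_action_fn
rules_txt = ('[CORE_RULE|1.0] I am Lumina, an AI assistant.'
             '\n\n---\n\n'
             '[RESPONSE_PRINCIPLE|0.8] When user provides positive feedback, acknowledge it warmly.')

# memories.jsonl — one JSON object per line (a single JSON array also parses);
# ui_upload_memories_action_fn requires exactly these three keys per object
memory_line = json.dumps({
    "user_input": "What is the secret word?",
    "bot_response": "The secret word is 'rocksyrup'.",
    "metrics": {"takeaway": "Recalled stored secret", "response_success_score": 0.9},
})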
@@ -322,6 +373,7 @@ with gr.Blocks(theme=custom_theme, css=custom_css, title="AI Research Mega Agent
     gr.Markdown("# 🚀 AI Research Mega Agent (Advanced Memory & Dynamic Models)", elem_classes="prose")
     avail_provs, def_prov = get_available_providers(), get_available_providers()[0] if get_available_providers() else None
     def_models, def_model = get_model_display_names_for_provider(def_prov) if def_prov else [], get_default_model_display_name_for_provider(def_prov) if def_prov else None
+    save_faiss_ram_btn = None
     with gr.Row():
         with gr.Column(scale=1, min_width=320):
             gr.Markdown("## ⚙️ Configuration", elem_classes="prose")
@@ -334,7 +386,7 @@ with gr.Blocks(theme=custom_theme, css=custom_css, title="AI Research Mega Agent
             with gr.Group(elem_classes="compact-group"):
                 gr.Markdown("### System Prompt", elem_classes="prose")
                 sys_prompt_tb = gr.Textbox(label="Custom System Prompt Base", lines=6, value=DEFAULT_SYSTEM_PROMPT, interactive=True)
-            with gr.Accordion("Knowledge Management (Backend: " + MEMORY_STORAGE_BACKEND + ")", open=False): # Show backend type
+            with gr.Accordion("Knowledge Management (Backend: " + MEMORY_STORAGE_BACKEND + ")", open=False):
                 gr.Markdown("### Rules (Insights)", elem_classes="prose"); view_rules_btn = gr.Button("View Rules"); upload_rules_fobj = gr.File(label="Upload Rules (.txt/.jsonl)", file_types=[".txt", ".jsonl"]); rules_stat_tb = gr.Textbox(label="Status", interactive=False, lines=2); clear_rules_btn = gr.Button("⚠️ Clear All Rules", variant="stop")
                 if MEMORY_STORAGE_BACKEND == "RAM": save_faiss_ram_btn = gr.Button("Save FAISS Indices (RAM Backend)")
                 gr.Markdown("### Memories", elem_classes="prose"); view_mems_btn = gr.Button("View Memories"); upload_mems_fobj = gr.File(label="Upload Memories (.jsonl)", file_types=[".jsonl"]); mems_stat_tb = gr.Textbox(label="Status", interactive=False, lines=2); clear_mems_btn = gr.Button("⚠️ Clear All Memories", variant="stop")
@@ -344,7 +396,7 @@ with gr.Blocks(theme=custom_theme, css=custom_css, title="AI Research Mega Agent
             agent_stat_tb = gr.Textbox(label="Agent Status", interactive=False, lines=1, value="Initializing...")
             with gr.Tabs():
                 with gr.TabItem("📝 Report/Output"): gr.Markdown("AI's full response/report.", elem_classes="prose"); fmt_report_tb = gr.Textbox(label="Output", lines=20, interactive=True, show_copy_button=True, value="*Responses appear here...*"); dl_report_btn = gr.DownloadButton(label="Download Report", interactive=False, visible=False)
-                with gr.TabItem("🔍 Details / Data"): gr.Markdown("Intermediate details, loaded data.", elem_classes="prose"); detect_out_md = gr.Markdown("*Insights used or details show here...*"); gr.HTML("<hr style='margin:1em 0;'>"); gr.Markdown("### Rules Viewer", elem_classes="prose"); rules_disp_ta = gr.TextArea(label="Rules Snapshot", lines=10, interactive=False); gr.HTML("<hr style='margin:1em 0;'>"); gr.Markdown("### Memories Viewer", elem_classes="prose"); mems_disp_json = gr.JSON(label="Memories Snapshot")
+                with gr.TabItem("🔍 Details / Data"): gr.Markdown("View intermediate details, loaded data.", elem_classes="prose"); detect_out_md = gr.Markdown("*Insights used or details show here...*"); gr.HTML("<hr style='margin:1em 0;'>"); gr.Markdown("### Rules Viewer", elem_classes="prose"); rules_disp_ta = gr.TextArea(label="Rules Snapshot", lines=10, interactive=False); gr.HTML("<hr style='margin:1em 0;'>"); gr.Markdown("### Memories Viewer", elem_classes="prose"); mems_disp_json = gr.JSON(label="Memories Snapshot")
     def dyn_upd_model_dd(sel_prov_dyn:str): models_dyn, def_model_dyn = get_model_display_names_for_provider(sel_prov_dyn), get_default_model_display_name_for_provider(sel_prov_dyn); return gr.Dropdown(choices=models_dyn, value=def_model_dyn, interactive=True)
     prov_sel_dd.change(fn=dyn_upd_model_dd, inputs=prov_sel_dd, outputs=model_sel_dd)
     chat_ins = [user_msg_tb, main_chat_disp, prov_sel_dd, model_sel_dd, api_key_tb, sys_prompt_tb]
@@ -353,7 +405,9 @@ with gr.Blocks(theme=custom_theme, css=custom_css, title="AI Research Mega Agent
     view_rules_btn.click(fn=ui_view_rules_action_fn, outputs=rules_disp_ta)
     upload_rules_fobj.upload(fn=ui_upload_rules_action_fn, inputs=[upload_rules_fobj], outputs=[rules_stat_tb], show_progress="full").then(fn=ui_view_rules_action_fn, outputs=rules_disp_ta)
     clear_rules_btn.click(fn=lambda: "All rules cleared." if clear_all_rules_data_backend() else "Error clearing rules.", outputs=rules_stat_tb).then(fn=ui_view_rules_action_fn, outputs=rules_disp_ta)
-    if MEMORY_STORAGE_BACKEND == "RAM": save_faiss_ram_btn.click(fn=save_faiss_indices_to_disk, outputs=None, success_message="Attempted to save FAISS indices to disk.")
+    if MEMORY_STORAGE_BACKEND == "RAM" and save_faiss_ram_btn is not None:
+        def save_faiss_action_with_feedback_fn(): save_faiss_indices_to_disk(); gr.Info("Attempted to save FAISS indices to disk.")
+        save_faiss_ram_btn.click(fn=save_faiss_action_with_feedback_fn, inputs=None, outputs=None)
     view_mems_btn.click(fn=ui_view_memories_action_fn, outputs=mems_disp_json)
     upload_mems_fobj.upload(fn=ui_upload_memories_action_fn, inputs=[upload_mems_fobj], outputs=[mems_stat_tb], show_progress="full").then(fn=ui_view_memories_action_fn, outputs=mems_disp_json)
     clear_mems_btn.click(fn=lambda: "All memories cleared." if clear_all_memory_data_backend() else "Error clearing memories.", outputs=mems_stat_tb).then(fn=ui_view_memories_action_fn, outputs=mems_disp_json)
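Note: the final hunk replaces a success_message keyword, which Button.click does not accept, with a wrapper that calls gr.Info, Gradio's toast helper that may be raised from inside any event handler. A minimal standalone sketch of the same pattern (component names invented):

import gradio as gr

def save_with_feedback():
    # stand-in for save_faiss_indices_to_disk(); gr.Info pops a toast
    # without wiring an output component to the click event
    gr.Info("Attempted to save FAISS indices to disk.")

with gr.Blocks() as demo:
    save_btn = gr.Button("Save FAISS Indices (RAM Backend)")
    save_btn.click(fn=save_with_feedback, inputs=None, outputs=None)

if __name__ == "__main__":
    demo.launch()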
 