broadfield-dev committed
Commit 59842d0 · verified · 1 Parent(s): 0097062

Update app.py

Files changed (1)
  1. app.py +44 -56
app.py CHANGED
@@ -340,7 +340,6 @@ Your plan can use the following tools:
  "plan": [
  {"tool": "memory_search", "task": "What has the user previously expressed interest in regarding AI topics?"},
  {"tool": "web_search", "task": "Find recent advancements in large language models since early 2023."},
- {"tool": "web_scrape", "task": "https://example.com"},
  {"tool": "think", "task": "Based on the user's interests and recent advancements, what are the key points to highlight?"},
  {"tool": "respond", "task": "Synthesize all information from the scratchpad and provide a comprehensive final answer to the user."}
  ]
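For orientation, the plan entries in the prompt above are plain JSON objects with a `tool` name and a `task` string. A minimal sketch of how such a plan could be dispatched; the handler functions here are illustrative stand-ins, not app.py's actual tool implementations:

```python
import json

# Hypothetical stand-ins for the app's real tool functions.
def memory_search(task): return f"[memory hits for: {task}]"
def web_search(task): return f"[search results for: {task}]"
def think(task): return f"[reasoning about: {task}]"
def respond(task): return f"[final answer for: {task}]"

TOOL_HANDLERS = {"memory_search": memory_search, "web_search": web_search,
                 "think": think, "respond": respond}

plan_json = '{"plan": [{"tool": "web_search", "task": "Find recent LLM advancements."}]}'
scratchpad = []
for step in json.loads(plan_json)["plan"]:
    handler = TOOL_HANDLERS.get(step["tool"])
    if handler is None:
        scratchpad.append(f"Unknown tool: {step['tool']}")  # unplanned tools are logged, not executed
        continue
    scratchpad.append(handler(step["task"]))
print("\n".join(scratchpad))
```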
@@ -447,23 +446,6 @@ def process_user_interaction_gradio(
  synthesis_prompt = f"Relevant web content for the task '{task}':\n\n{scraped_content}\n\nConcisely summarize the findings from the content."
  summary = "".join(list(call_model_stream(provider=provider_name, model_display_name=model_display_name, messages=[{"role": "user", "content": synthesis_prompt}], api_key_override=ui_api_key_override, temperature=0.1, max_tokens=400)))
  step_findings += summary
- except Exception as e:
- try:
- web_results = search_and_scrape_google(task, num_results=2)
- scraped_content = "\n".join([f"Source:\nURL:{r.get('url','N/A')}\nContent:\n{(r.get('content') or r.get('error') or 'N/A')[:1500]}\n---" for r in web_results]) if web_results else "No results found."
- synthesis_prompt = f"Relevant web content for the task '{task}':\n\n{scraped_content}\n\nConcisely summarize the findings from the content."
- summary = "".join(list(call_model_stream(provider=provider_name, model_display_name=model_display_name, messages=[{"role": "user", "content": synthesis_prompt}], api_key_override=ui_api_key_override, temperature=0.1, max_tokens=400)))
- step_findings += summary
- except Exception as e:
- step_findings += f"Error during web search: {e}"
-
- elif tool == 'web_scrape':
- try:
- web_results = scrape_url(task)
- scraped_content = "\n".join([f"Source:\nURL:{r.get('url','N/A')}\nContent:\n{(r.get('content') or r.get('error') or 'N/A')[:1500]}\n---" for r in web_results]) if web_results else "No results found."
- synthesis_prompt = f"Relevant web content for the task '{task}':\n\n{scraped_content}\n\nConcisely summarize the findings from the content."
- summary = "".join(list(call_model_stream(provider=provider_name, model_display_name=model_display_name, messages=[{"role": "user", "content": synthesis_prompt}], api_key_override=ui_api_key_override, temperature=0.1, max_tokens=400)))
- step_findings += summary
  except Exception as e:
  step_findings += f"Error during web search: {e}"

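The pattern being deduplicated in this hunk is: gather scraped results, truncate each to roughly 1500 characters, build a synthesis prompt, and ask the model for a concise summary. A simplified sketch of that pattern under the assumption that a summarization callable is passed in; none of these helper names are app.py's own:

```python
def format_sources(web_results, max_chars=1500):
    """Flatten scrape results into the Source/URL/Content blocks used in the synthesis prompt."""
    if not web_results:
        return "No results found."
    return "\n".join(
        f"Source:\nURL:{r.get('url', 'N/A')}\nContent:\n"
        f"{(r.get('content') or r.get('error') or 'N/A')[:max_chars]}\n---"
        for r in web_results
    )

def summarize_task(task, web_results, summarize):
    """Build the synthesis prompt and delegate to a summarization callable (e.g. an LLM call)."""
    scraped_content = format_sources(web_results)
    prompt = (f"Relevant web content for the task '{task}':\n\n{scraped_content}\n\n"
              "Concisely summarize the findings from the content.")
    return summarize(prompt)

# Example with a dummy summarizer standing in for the model call:
results = [{"url": "https://example.com", "content": "Example page text."}]
print(summarize_task("demo task", results, summarize=lambda p: p[:80] + "..."))
```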
@@ -533,13 +515,13 @@ If no operations are warranted, output an empty list: `<operations_list></operat
  ABSOLUTELY NO other text, explanations, or markdown should precede or follow this XML structure.
  Each `<operation>` element must contain the following child elements:
  1. `<action>`: A string, either `"add"` (for entirely new rules) or `"update"` (to replace an existing rule with a better one).
- 2. `<insight>`: The full, refined insight text including its `[TYPE|SCORE]` prefix (e.g., `[CORE_RULE|1.0] My name is [Name], an AI assistant.`). Multi-line insight text can be placed directly within this tag; XML handles newlines naturally.
+ 2. `<insight>`: The full, refined insight text including its `[TYPE|SCORE]` prefix (e.g., `[CORE_RULE|1.0] My name is Lumina, an AI assistant.`). Multi-line insight text can be placed directly within this tag; XML handles newlines naturally.
  3. `<old_insight_to_replace>`: (ONLY for `"update"` action) The *exact, full text* of an existing insight that the new `<insight>` should replace. If action is `"add"`, this element should be omitted or empty.
  **XML Structure Example:**
  <operations_list>
  <operation>
  <action>update</action>
- <insight>[CORE_RULE|1.0] I am [Name], an AI assistant.
+ <insight>[CORE_RULE|1.0] I am Lumina, an AI assistant.
  My purpose is to help with research.</insight>
  <old_insight_to_replace>[CORE_RULE|0.9] My name is Assistant.</old_insight_to_replace>
  </operation>
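A minimal sketch of consuming output in this shape with Python's standard library, assuming the model actually returns well-formed XML; the tag names come from the prompt above, but the parsing code itself is illustrative and not app.py's actual parser:

```python
import xml.etree.ElementTree as ET

raw = """<operations_list>
  <operation>
    <action>update</action>
    <insight>[CORE_RULE|1.0] I am Lumina, an AI assistant.
My purpose is to help with research.</insight>
    <old_insight_to_replace>[CORE_RULE|0.9] My name is Assistant.</old_insight_to_replace>
  </operation>
</operations_list>"""

operations = []
root = ET.fromstring(raw)
for op in root.findall("operation"):
    action = (op.findtext("action") or "").strip()
    insight = (op.findtext("insight") or "").strip()
    old = (op.findtext("old_insight_to_replace") or "").strip()
    if action == "add":
        operations.append(("add", insight, None))
    elif action == "update" and old:
        operations.append(("update", insight, old))
print(operations)
```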
@@ -674,18 +656,18 @@ def handle_gradio_chat_submit(user_msg_txt: str, max_research_steps: int, gr_his
  cleared_input, updated_gr_hist, status_txt = "", list(gr_hist_list), "Initializing..."
  updated_rules_text = ui_refresh_rules_display_fn()
  updated_mems_json = ui_refresh_memories_display_fn()
- plan_md_output = gr.Markdown(visible=False)
+ log_html_output = gr.HTML("<p><i>Research Log will appear here.</i></p>")
  final_report_tb = gr.Textbox(value="*Waiting...*", interactive=True, show_copy_button=True)
  dl_report_btn = gr.DownloadButton(interactive=False, value=None, visible=False)

  if not user_msg_txt.strip():
  status_txt = "Error: Empty message."
  updated_gr_hist.append((user_msg_txt or "(Empty)", status_txt))
- yield (cleared_input, updated_gr_hist, status_txt, plan_md_output, final_report_tb, dl_report_btn, updated_rules_text, updated_mems_json)
+ yield (cleared_input, updated_gr_hist, status_txt, log_html_output, final_report_tb, dl_report_btn, updated_rules_text, updated_mems_json)
  return

- updated_gr_hist.append((user_msg_txt, "<i>Thinking...</i>"))
- yield (cleared_input, updated_gr_hist, status_txt, plan_md_output, final_report_tb, dl_report_btn, updated_rules_text, updated_mems_json)
+ updated_gr_hist.append((user_msg_txt, "<i>Thinking... See Research Log below for progress.</i>"))
+ yield (cleared_input, updated_gr_hist, status_txt, log_html_output, final_report_tb, dl_report_btn, updated_rules_text, updated_mems_json)

  internal_hist = list(current_chat_session_history)

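The handler above is a generator: each `yield` is a tuple whose elements update, in order, the components later listed in `chat_outs`. A self-contained toy version of that streaming pattern; the component names and handler here are invented for the demo, not taken from app.py:

```python
import time
import gradio as gr

def streaming_handler(msg):
    # Each yielded tuple maps positionally onto the outputs list wired below.
    yield "Thinking...", ""
    time.sleep(0.5)
    yield "Searching...", "step 1 done"
    time.sleep(0.5)
    yield "Done.", f"Final answer to: {msg}"

with gr.Blocks() as demo:
    inp = gr.Textbox(label="Question")
    status = gr.Textbox(label="Status")
    answer = gr.Textbox(label="Answer")
    # Generator handlers stream intermediate states into the outputs.
    inp.submit(fn=streaming_handler, inputs=inp, outputs=[status, answer])

if __name__ == "__main__":
    demo.launch()
```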
@@ -705,35 +687,41 @@ def handle_gradio_chat_submit(user_msg_txt: str, max_research_steps: int, gr_his

  curr_bot_disp_msg = ""
  full_plan = []
- step_results = {}
+ log_html_parts = []

  for upd_type, upd_data in processor_gen:
  if upd_type == "status":
  status_txt = upd_data
- if updated_gr_hist and updated_gr_hist[-1][0] == user_msg_txt:
- updated_gr_hist[-1] = (user_msg_txt, f"<i>{status_txt}</i>")
+ if "Deciding" in status_txt or "Executing" in status_txt:
+ log_html_output = gr.HTML(f"<p><i>{status_txt}</i></p>")
+
  elif upd_type == "plan":
  full_plan = upd_data
- plan_md = "### Action Plan\n" + "\n".join([f"**Step {i+1} ({step.get('tool')})**: {step.get('task')}" for i, step in enumerate(full_plan)])
- plan_md_output = gr.Markdown(value=plan_md, visible=True)
+ log_html_parts = ["<h3>Action Plan</h3><ol>"]
+ for i, step in enumerate(full_plan):
+ log_html_parts.append(f'<li id="log-step-{i+1}"><strong>{step.get("tool")}</strong>: {step.get("task")} <span style="color:gray;">(Pending)</span></li>')
+ log_html_parts.append("</ol><hr><h3>Log</h3>")
+ log_html_output = gr.HTML("".join(log_html_parts))
+
  elif upd_type == "step_result":
  step_num = upd_data["step"]
- step_results[step_num] = upd_data["result"]
- results_so_far = "### Research Log\n"
- for i in range(1, len(full_plan)):
- if i in step_results:
- results_so_far += f"**Step {i} ({full_plan[i-1].get('tool')})**: ✅ Completed\n"
- elif i <= len(step_results) + 1:
- results_so_far += f"**Step {i} ({full_plan[i-1].get('tool')})**: ⏳ In progress...\n"
- else:
- results_so_far += f"**Step {i} ({full_plan[i-1].get('tool')})**: - Pending\n"
+ sanitized_result = upd_data["result"].replace('<', '&lt;').replace('>', '&gt;').replace('\n', '<br>')
+ log_html_parts[step_num] = f'<li id="log-step-{step_num}"><strong>{upd_data.get("tool")}</strong>: {upd_data.get("task")} <span style="color:green;">(Done)</span></li>'
+ log_html_parts.append(f'<div style="margin-left: 20px; padding: 5px; border-left: 2px solid #ccc;"><small style="color: #555;">{sanitized_result}</small></div>')

- if updated_gr_hist and updated_gr_hist[-1][0] == user_msg_txt:
- updated_gr_hist[-1] = (user_msg_txt, results_so_far)
+ next_step_index_in_list = step_num + 1
+ if next_step_index_in_list < len(full_plan) + 1:
+ next_step_action = full_plan[step_num]
+ if next_step_action.get("tool") != "respond":
+ log_html_parts[next_step_index_in_list] = f'<li id="log-step-{next_step_index_in_list}"><strong>{next_step_action.get("tool")}</strong>: {next_step_action.get("task")} <span style="color:blue;">(In Progress...)</span></li>'
+
+ log_html_output = gr.HTML("".join(log_html_parts))
+
  elif upd_type == "response_chunk":
  curr_bot_disp_msg += upd_data
  if updated_gr_hist and updated_gr_hist[-1][0] == user_msg_txt:
  updated_gr_hist[-1] = (user_msg_txt, curr_bot_disp_msg)
+
  elif upd_type == "final_response":
  final_bot_resp_acc = upd_data["response"]
  status_txt = "Response generated. Processing learning..."
@@ -755,22 +743,22 @@ def handle_gradio_chat_submit(user_msg_txt: str, max_research_steps: int, gr_his
  else:
  dl_report_btn = gr.DownloadButton(interactive=False, value=None, visible=False)

- yield (cleared_input, updated_gr_hist, status_txt, plan_md_output, final_report_tb, dl_report_btn, updated_rules_text, updated_mems_json)
+ yield (cleared_input, updated_gr_hist, status_txt, log_html_output, final_report_tb, dl_report_btn, updated_rules_text, updated_mems_json)

  if upd_type == "final_response": break

  except Exception as e:
  logger.error(f"Chat handler error during main processing: {e}", exc_info=True)
  status_txt = f"Error: {str(e)[:100]}"
- error_message_for_chat = f"Sorry, an error occurred during response generation: {str(e)[:100]}"
+ error_message_for_chat = f"Sorry, an error occurred: {str(e)[:100]}"
  if updated_gr_hist and updated_gr_hist[-1][0] == user_msg_txt:
  updated_gr_hist[-1] = (user_msg_txt, error_message_for_chat)
  final_report_tb = gr.Textbox(value=error_message_for_chat, interactive=True)
  dl_report_btn = gr.DownloadButton(interactive=False, value=None, visible=False)
- plan_md_output = gr.Markdown(value="*Error processing request.*", visible=True)
+ log_html_output = gr.HTML(f'<p style="color:red;"><strong>Error processing request.</strong></p>')
  current_rules_text_on_error = ui_refresh_rules_display_fn()
  current_mems_json_on_error = ui_refresh_memories_display_fn()
- yield (cleared_input, updated_gr_hist, status_txt, plan_md_output, final_report_tb, dl_report_btn, current_rules_text_on_error, current_mems_json_on_error)
+ yield (cleared_input, updated_gr_hist, status_txt, log_html_output, final_report_tb, dl_report_btn, current_rules_text_on_error, current_mems_json_on_error)
  if temp_dl_file_path and os.path.exists(temp_dl_file_path):
  try: os.unlink(temp_dl_file_path)
  except Exception as e_unlink: logger.error(f"Error deleting temp download file {temp_dl_file_path} after error: {e_unlink}")
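The temp-file cleanup above (`os.path.exists` check plus `os.unlink` inside try/except) recurs in several places in this handler. A compact alternative sketch using only the standard library, with `cleanup_temp_file` as a hypothetical helper name rather than anything defined in app.py:

```python
import contextlib
import os

def cleanup_temp_file(path):
    """Best-effort removal of a temporary download file; missing files are silently ignored."""
    if not path:
        return
    with contextlib.suppress(FileNotFoundError, OSError):
        os.unlink(path)

cleanup_temp_file(None)                      # no-op for an unset path
cleanup_temp_file("/tmp/does-not-exist.md")  # missing file is ignored
```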
@@ -782,7 +770,7 @@ def handle_gradio_chat_submit(user_msg_txt: str, max_research_steps: int, gr_his
  status_txt = "<i>[Performing post-interaction learning...]</i>"
  current_rules_text_before_learn = ui_refresh_rules_display_fn()
  current_mems_json_before_learn = ui_refresh_memories_display_fn()
- yield (cleared_input, updated_gr_hist, status_txt, plan_md_output, final_report_tb, dl_report_btn, current_rules_text_before_learn, current_mems_json_before_learn)
+ yield (cleared_input, updated_gr_hist, status_txt, log_html_output, final_report_tb, dl_report_btn, current_rules_text_before_learn, current_mems_json_before_learn)

  try:
  perform_post_interaction_learning(
@@ -802,7 +790,7 @@ def handle_gradio_chat_submit(user_msg_txt: str, max_research_steps: int, gr_his
  updated_rules_text = ui_refresh_rules_display_fn()
  updated_mems_json = ui_refresh_memories_display_fn()

- yield (cleared_input, updated_gr_hist, status_txt, plan_md_output, final_report_tb, dl_report_btn, updated_rules_text, updated_mems_json)
+ yield (cleared_input, updated_gr_hist, status_txt, log_html_output, final_report_tb, dl_report_btn, updated_rules_text, updated_mems_json)

  if temp_dl_file_path and os.path.exists(temp_dl_file_path):
  try: os.unlink(temp_dl_file_path)
@@ -1216,7 +1204,7 @@ def app_load_fn():
  logger.info(mems_load_msg)
  final_status = f"AI Systems Initialized. {rules_load_msg} {mems_load_msg} Ready."
  rules_on_load, mems_on_load = ui_refresh_rules_display_fn(), ui_refresh_memories_display_fn()
- return (final_status, rules_on_load, mems_on_load, gr.Markdown(visible=False),
+ return (final_status, rules_on_load, mems_on_load, gr.HTML("<p><i>Research Log will appear here.</i></p>"),
  gr.Textbox(value="*Waiting...*", interactive=True, show_copy_button=True),
  gr.DownloadButton(interactive=False, value=None, visible=False))

@@ -1342,8 +1330,8 @@ with gr.Blocks(theme=gr.themes.Soft(), css=".gr-button { margin: 5px; } .gr-text
  with gr.Row(variant="compact"):
  user_msg_tb = gr.Textbox(show_label=False, placeholder="Ask your research question...", scale=7, lines=1, max_lines=3)
  send_btn = gr.Button("Send", variant="primary", scale=1, min_width=100)
- with gr.Accordion("📝 Detailed Response & Plan", open=False):
- plan_display_md = gr.Markdown(visible=False)
+ with gr.Accordion("📝 Detailed Response & Research Log", open=True):
+ research_log_html = gr.HTML(label="Research Log", value="<p><i>Waiting for a new task to begin...</i></p>")
  fmt_report_tb = gr.Textbox(label="Full AI Response", lines=8, interactive=True, show_copy_button=True)
  dl_report_btn = gr.DownloadButton("Download Report", value=None, interactive=False, visible=False)

@@ -1383,7 +1371,7 @@ with gr.Blocks(theme=gr.themes.Soft(), css=".gr-button { margin: 5px; } .gr-text
  kb_image_display_output = gr.Image(label="Generated Image (Right-click to copy)", visible=False)
  kb_image_download_output = gr.DownloadButton("⬇️ Download Image File", visible=False)

- with gr.TabItem("📂 Load KB (Example)"):
+ with gr.TabItem("📂 Load KB"):
  gr.Markdown("Import rules, memories, or a full KB from local files or a portable PNG image.")
  load_status_tb = gr.Textbox(label="Load Operation Status", interactive=False, lines=2)
  load_kb_password_tb = gr.Textbox(label="Password (for decrypting images)", type="password")
@@ -1396,10 +1384,10 @@ with gr.Blocks(theme=gr.themes.Soft(), css=".gr-button { margin: 5px; } .gr-text
  load_master_btn = gr.Button("⬆️ Load from Sources", variant="primary", interactive=not DEMO_MODE)
  gr.Examples(
  examples=[
- ["https://huggingface.co/spaces/Agents-MCP-Hackathon/iLearn/resolve/main/evolutions/e0.01.01.png", ""],
+ [placeholder_filename, "https://huggingface.co/spaces/Agents-MCP-Hackathon/iLearn/resolve/main/evolutions/e0.01.01.png"],
  ],
  inputs=[upload_kb_img_fobj, load_kb_password_tb],
- label="Choose an Evolution Checkpoint"
+ label="Click an Example to Load Data"
  )

  def dyn_upd_model_dd(sel_prov_dyn: str):
@@ -1408,7 +1396,7 @@ with gr.Blocks(theme=gr.themes.Soft(), css=".gr-button { margin: 5px; } .gr-text
  prov_sel_dd.change(fn=dyn_upd_model_dd, inputs=prov_sel_dd, outputs=model_sel_dd)

  chat_ins = [user_msg_tb, research_steps_slider, main_chat_disp, prov_sel_dd, model_sel_dd, api_key_tb, sys_prompt_tb]
- chat_outs = [user_msg_tb, main_chat_disp, agent_stat_tb, plan_display_md, fmt_report_tb, dl_report_btn, rules_disp_ta, mems_disp_json]
+ chat_outs = [user_msg_tb, main_chat_disp, agent_stat_tb, research_log_html, fmt_report_tb, dl_report_btn, rules_disp_ta, mems_disp_json]
  chat_event_args = {"fn": handle_gradio_chat_submit, "inputs": chat_ins, "outputs": chat_outs}
  send_btn.click(**chat_event_args); user_msg_tb.submit(**chat_event_args)

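A detail worth noting when editing `chat_outs`: every tuple yielded by `handle_gradio_chat_submit` must line up positionally with this list, with the research-log component now in the fourth slot. A tiny illustrative check of that invariant; the constant and function below are hypothetical, not part of app.py:

```python
# Hypothetical sanity check that yielded tuples match the wired outputs.
EXPECTED_CHAT_OUTPUTS = 8  # user_msg_tb, main_chat_disp, agent_stat_tb, research_log_html,
                           # fmt_report_tb, dl_report_btn, rules_disp_ta, mems_disp_json

def check_yield(update: tuple) -> tuple:
    assert len(update) == EXPECTED_CHAT_OUTPUTS, (
        f"handler yielded {len(update)} values, but chat_outs has {EXPECTED_CHAT_OUTPUTS}"
    )
    return update

check_yield(("", [], "status", "<p>log</p>", "report", None, "rules", {}))
```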
@@ -1442,7 +1430,7 @@ with gr.Blocks(theme=gr.themes.Soft(), css=".gr-button { margin: 5px; } .gr-text
  except Exception as e: logger.error(f"Error saving FAISS indices: {e}", exc_info=True); gr.Error(f"Error saving FAISS indices: {e}")
  save_faiss_sidebar_btn.click(fn=save_faiss_action_with_feedback_sidebar_fn, inputs=None, outputs=None, show_progress=False)

- app_load_outputs = [agent_stat_tb, rules_disp_ta, mems_disp_json, plan_display_md, fmt_report_tb, dl_report_btn]
+ app_load_outputs = [agent_stat_tb, rules_disp_ta, mems_disp_json, research_log_html, fmt_report_tb, dl_report_btn]
  demo.load(fn=app_load_fn, inputs=None, outputs=app_load_outputs, show_progress="full")

@@ -1450,8 +1438,8 @@ if __name__ == "__main__":
  logger.info(f"Starting Gradio AI Research Mega Agent (v9.1 - Correct 1-Click JS Download, Memory: {MEMORY_STORAGE_BACKEND})...")
  app_port = int(os.getenv("GRADIO_PORT", 7860))
  app_server = os.getenv("GRADIO_SERVER_NAME", "127.0.0.1")
- app_debug = os.getenv("GRADIO_DEBUG", "False").lower() == "false"
+ app_debug = os.getenv("GRADIO_DEBUG", "False").lower() == "true"
  app_share = os.getenv("GRADIO_SHARE", "False").lower() == "true"
  logger.info(f"Launching Gradio server: http://{app_server}:{app_port}. Debug: {app_debug}, Share: {app_share}")
- demo.queue().launch(server_name=app_server, server_port=app_port, debug=app_debug, share=app_share, mcp_server=True)
+ demo.queue().launch(server_name=app_server, server_port=app_port, debug=app_debug, share=app_share)
  logger.info("Gradio application shut down.")
 