broadfield-dev committed
Commit 5cc2c95 · verified · 1 Parent(s): 2b30c42

Update app.py

Files changed (1)
  1. app.py +516 -250
app.py CHANGED
@@ -31,13 +31,23 @@ import time
31
  import tempfile
32
  import xml.etree.ElementTree as ET
33
 
34
  load_dotenv() # Load .env file, but our settings above will take precedence if set.
35
 
36
  from model_logic import (
37
  get_available_providers, get_model_display_names_for_provider,
38
  get_default_model_display_name_for_provider, call_model_stream, MODELS_BY_PROVIDER
39
  )
40
-
41
 
42
  from memory_logic import (
43
  initialize_memory_system,
@@ -71,6 +81,240 @@ logger.info(f"App Config: WebSearch={WEB_SEARCH_ENABLED}, ToolDecisionProvider={
71
  logger.info(f"Startup loading: Rules from {LOAD_RULES_FILE or 'None'}, Memories from {LOAD_MEMORIES_FILE or 'None'}")
72
 
73
 
74
  # --- Helper Functions ---
75
  def format_insights_for_prompt(retrieved_insights_list: list[str]) -> tuple[str, list[dict]]:
76
  if not retrieved_insights_list:
@@ -691,8 +935,21 @@ def load_memories_from_file(filepath: str | None):
691
  return added_count, format_error_count, save_error_count
692
 
693
 
694
- # --- UI Functions for Rules and Memories (ui_refresh_..., ui_download_..., ui_upload_...) ---
695
  def ui_refresh_rules_display_fn(): return "\n\n---\n\n".join(get_all_rules_cached()) or "No rules found."
 
696
 
697
  def ui_download_rules_action_fn():
698
  rules_content = "\n\n---\n\n".join(get_all_rules_cached())
@@ -715,10 +972,8 @@ def ui_upload_rules_action_fn(uploaded_file_obj, progress=gr.Progress()):
715
  except Exception as e_read: return f"Error reading file: {e_read}"
716
  if not content.strip(): return "Uploaded rules file is empty."
717
  added_count, skipped_count, error_count = 0,0,0
718
-
719
  potential_rules = []
720
  file_name_lower = uploaded_file_obj.name.lower()
721
-
722
  if file_name_lower.endswith(".txt"):
723
  potential_rules = content.split("\n\n---\n\n")
724
  if len(potential_rules) == 1 and "\n" in content:
@@ -739,15 +994,10 @@ def ui_upload_rules_action_fn(uploaded_file_obj, progress=gr.Progress()):
739
  error_count +=1
740
  else:
741
  return "Unsupported file type for rules. Please use .txt or .jsonl."
742
-
743
  valid_potential_rules = [r.strip() for r in potential_rules if r.strip()]
744
  total_to_process = len(valid_potential_rules)
745
-
746
- if total_to_process == 0 and error_count == 0:
747
- return "No valid rules found in file to process."
748
- elif total_to_process == 0 and error_count > 0:
749
- return f"No valid rules found to process. Encountered {error_count} parsing/format errors."
750
-
751
  progress(0, desc="Starting rules upload...")
752
  for idx, rule_text in enumerate(valid_potential_rules):
753
  success, status_msg = add_rule_entry(rule_text)
@@ -755,23 +1005,18 @@ def ui_upload_rules_action_fn(uploaded_file_obj, progress=gr.Progress()):
755
  elif status_msg == "duplicate": skipped_count += 1
756
  else: error_count += 1
757
  progress((idx + 1) / total_to_process, desc=f"Processed {idx+1}/{total_to_process} rules...")
758
-
759
  msg = f"Rules Upload: Total valid rule segments processed: {total_to_process}. Added: {added_count}, Skipped (duplicates): {skipped_count}, Errors (parsing/add): {error_count}."
760
  logger.info(msg); return msg
761
 
762
- def ui_refresh_memories_display_fn(): return get_all_memories_cached() or []
763
-
764
  def ui_download_memories_action_fn():
765
  memories = get_all_memories_cached()
766
  if not memories:
767
  gr.Warning("No memories to download.")
768
  return gr.DownloadButton(value=None, interactive=False, label="No Memories")
769
-
770
  jsonl_content = ""
771
  for mem_dict in memories:
772
  try: jsonl_content += json.dumps(mem_dict) + "\n"
773
  except Exception as e: logger.error(f"Error serializing memory for download: {mem_dict}, Error: {e}")
774
-
775
  if not jsonl_content.strip():
776
  gr.Warning("No valid memories to serialize for download.")
777
  return gr.DownloadButton(value=None, interactive=False, label="No Data")
@@ -792,42 +1037,28 @@ def ui_upload_memories_action_fn(uploaded_file_obj, progress=gr.Progress()):
792
  if not content.strip(): return "Uploaded memories file is empty."
793
  added_count, format_error_count, save_error_count = 0,0,0
794
  memory_objects_to_process = []
795
-
796
  file_ext = os.path.splitext(uploaded_file_obj.name.lower())[1]
797
-
798
  if file_ext == ".json":
799
  try:
800
  parsed_json = json.loads(content)
801
- if isinstance(parsed_json, list):
802
- memory_objects_to_process = parsed_json
803
- elif isinstance(parsed_json, dict):
804
- memory_objects_to_process = [parsed_json]
805
  else:
806
- logger.warning(f"Memories Upload (.json): File content is not a JSON list or object. Type: {type(parsed_json)}")
807
- format_error_count = 1
808
  except json.JSONDecodeError as e:
809
- logger.warning(f"Memories Upload (.json): Invalid JSON file. Error: {e}")
810
- format_error_count = 1
811
  elif file_ext == ".jsonl":
812
  for line_num, line in enumerate(content.splitlines()):
813
  line = line.strip()
814
  if line:
815
- try:
816
- memory_objects_to_process.append(json.loads(line))
817
  except json.JSONDecodeError:
818
- logger.warning(f"Memories Upload (.jsonl): Line {line_num+1} parse error: {line[:100]}")
819
- format_error_count += 1
820
- else:
821
- return "Unsupported file type for memories. Please use .json or .jsonl."
822
-
823
- if not memory_objects_to_process and format_error_count > 0 :
824
- return f"Memories Upload: File parsing failed. Found {format_error_count} format errors and no processable objects."
825
- elif not memory_objects_to_process:
826
- return "No valid memory objects found in the uploaded file."
827
-
828
  total_to_process = len(memory_objects_to_process)
829
  if total_to_process == 0: return "No memory objects to process (after parsing)."
830
-
831
  progress(0, desc="Starting memories upload...")
832
  for idx, mem_data in enumerate(memory_objects_to_process):
833
  if isinstance(mem_data, dict) and all(k in mem_data for k in ["user_input", "bot_response", "metrics"]):
@@ -835,308 +1066,343 @@ def ui_upload_memories_action_fn(uploaded_file_obj, progress=gr.Progress()):
835
  if success: added_count += 1
836
  else: save_error_count += 1
837
  else:
838
- logger.warning(f"Memories Upload: Skipped invalid memory object structure: {str(mem_data)[:100]}")
839
- format_error_count += 1
840
  progress((idx + 1) / total_to_process, desc=f"Processed {idx+1}/{total_to_process} memories...")
841
-
842
  msg = f"Memories Upload: Processed {total_to_process} objects. Added: {added_count}, Format/Structure Errors: {format_error_count}, Save Errors: {save_error_count}."
843
  logger.info(msg); return msg
844
 
845
  def save_edited_rules_action_fn(edited_rules_text: str, progress=gr.Progress()):
846
- # --- DEMO MODE CHANGE ---
847
  if DEMO_MODE:
848
  gr.Warning("Saving edited rules is disabled in Demo Mode.")
849
  return "Saving edited rules is disabled in Demo Mode."
850
-
851
- if not edited_rules_text.strip():
852
- return "No rules text to save."
853
-
854
  potential_rules = edited_rules_text.split("\n\n---\n\n")
855
  if len(potential_rules) == 1 and "\n" in edited_rules_text:
856
  potential_rules = [r.strip() for r in edited_rules_text.splitlines() if r.strip()]
857
-
858
- if not potential_rules:
859
- return "No rules found to process from editor."
860
-
861
  added, skipped, errors = 0, 0, 0
862
  unique_rules_to_process = sorted(list(set(filter(None, [r.strip() for r in potential_rules]))))
863
-
864
  total_unique = len(unique_rules_to_process)
865
  if total_unique == 0: return "No unique, non-empty rules found in editor text."
866
-
867
  progress(0, desc=f"Saving {total_unique} unique rules from editor...")
868
-
869
  for idx, rule_text in enumerate(unique_rules_to_process):
870
  success, status_msg = add_rule_entry(rule_text)
871
  if success: added += 1
872
  elif status_msg == "duplicate": skipped += 1
873
  else: errors += 1
874
  progress((idx + 1) / total_unique, desc=f"Processed {idx+1}/{total_unique} rules...")
875
-
876
  return f"Editor Save: Added: {added}, Skipped (duplicates): {skipped}, Errors/Invalid: {errors} from {total_unique} unique rules in text."
877
 
878
  def app_load_fn():
879
  logger.info("App loading. Initializing systems...")
880
  initialize_memory_system()
881
  logger.info("Memory system initialized.")
882
-
883
- # --- Load Rules from File ---
884
  rules_added, rules_skipped, rules_errors = load_rules_from_file(LOAD_RULES_FILE)
885
  rules_load_msg = f"Rules: Added {rules_added}, Skipped {rules_skipped}, Errors {rules_errors} from {LOAD_RULES_FILE or 'None'}."
886
  logger.info(rules_load_msg)
887
-
888
- # --- Load Memories from File ---
889
  mems_added, mems_format_errors, mems_save_errors = load_memories_from_file(LOAD_MEMORIES_FILE)
890
  mems_load_msg = f"Memories: Added {mems_added}, Format Errors {mems_format_errors}, Save Errors {mems_save_errors} from {LOAD_MEMORIES_FILE or 'None'}."
891
  logger.info(mems_load_msg)
892
-
893
  final_status = f"AI Systems Initialized. {rules_load_msg} {mems_load_msg} Ready."
894
-
895
- # Initial population of all relevant UI components AFTER loading
896
- rules_on_load = ui_refresh_rules_display_fn()
897
- mems_on_load = ui_refresh_memories_display_fn()
898
-
899
- # Return values for outputs defined in demo.load
900
- return (
901
- final_status, # agent_stat_tb
902
- rules_on_load, # rules_disp_ta
903
- mems_on_load, # mems_disp_json
904
- gr.Markdown(visible=False), # detect_out_md (initial state)
905
- gr.Textbox(value="*Waiting...*", interactive=True, show_copy_button=True), # fmt_report_tb (initial state)
906
- gr.DownloadButton(interactive=False, value=None, visible=False), # dl_report_btn (initial state)
907
- )
908
 
909
 
910
  # --- Gradio UI Definition ---
911
- with gr.Blocks(
912
- theme=gr.themes.Soft(),
913
- css="""
914
- .gr-button { margin: 5px; }
915
- .gr-textbox, .gr-text-area, .gr-dropdown, .gr-json { border-radius: 8px; }
916
- .gr-group { border: 1px solid #e0e0e0; border-radius: 8px; padding: 10px; }
917
- .gr-row { gap: 10px; }
918
- .gr-tab { border-radius: 8px; }
919
- .status-text { font-size: 0.9em; color: #555; }
920
- .gr-json { max-height: 300px; overflow-y: auto; } /* Added scrolling for JSON */
  """
922
- ) as demo:
923
- # --- DEMO MODE CHANGE ---
924
- gr.Markdown(
925
- f"""
926
- # 🤖 AI Research Agent {'(DEMO MODE)' if DEMO_MODE else ''}
927
- Your intelligent assistant for research and knowledge management
928
- ### Special thanks to [Groq](https://groq.com) for their blazing fast inference
929
-
930
- """,
931
- elem_classes=["header"]
932
- )
933
-
934
- is_sqlite = MEMORY_STORAGE_BACKEND == "SQLITE"
935
- is_hf_dataset = MEMORY_STORAGE_BACKEND == "HF_DATASET"
936
-
937
  with gr.Row(variant="compact"):
938
- agent_stat_tb = gr.Textbox(
939
- label="Agent Status", value="Initializing systems...", interactive=False,
940
- elem_classes=["status-text"], scale=4
941
- )
942
  with gr.Column(scale=1, min_width=150):
943
- memory_backend_info_tb = gr.Textbox(
944
- label="Memory Backend", value=MEMORY_STORAGE_BACKEND, interactive=False,
945
- elem_classes=["status-text"]
946
- )
947
- sqlite_path_display = gr.Textbox(
948
- label="SQLite Path", value=MEMORY_SQLITE_PATH, interactive=False,
949
- visible=is_sqlite, elem_classes=["status-text"]
950
- )
951
- hf_repos_display = gr.Textbox(
952
- label="HF Repos", value=f"M: {MEMORY_HF_MEM_REPO}, R: {MEMORY_HF_RULES_REPO}",
953
- interactive=False, visible=is_hf_dataset, elem_classes=["status-text"]
954
- )
955
 
956
  with gr.Row():
957
  with gr.Sidebar():
958
  gr.Markdown("## ⚙️ Configuration")
959
  with gr.Group():
960
  gr.Markdown("### AI Model Settings")
961
- api_key_tb = gr.Textbox(
962
- label="AI Provider API Key (Override)", type="password", placeholder="Uses .env if blank"
963
- )
964
- available_providers = get_available_providers()
965
- default_provider = available_providers[2] if available_providers else None
966
- prov_sel_dd = gr.Dropdown(
967
- label="AI Provider", choices=available_providers,
968
- value=default_provider, interactive=True
969
- )
970
  default_model_display = get_default_model_display_name_for_provider(default_provider) if default_provider else None
971
- model_sel_dd = gr.Dropdown(
972
- label="AI Model",
973
- choices=get_model_display_names_for_provider(default_provider) if default_provider else [],
974
- value=default_model_display,
975
- interactive=True
976
- )
977
  with gr.Group():
978
- gr.Markdown("### System Prompt")
979
- sys_prompt_tb = gr.Textbox(
980
- label="System Prompt Base", lines=8, value=DEFAULT_SYSTEM_PROMPT, interactive=True
981
- )
982
- if MEMORY_STORAGE_BACKEND == "RAM":
983
- save_faiss_sidebar_btn = gr.Button("Save FAISS Indices", variant="secondary")
984
 
985
  with gr.Column(scale=3):
986
  with gr.Tabs():
987
  with gr.TabItem("💬 Chat & Research"):
988
  with gr.Group():
989
  gr.Markdown("### AI Chat Interface")
990
- main_chat_disp = gr.Chatbot(
991
- label=None, height=400, bubble_full_width=False,
992
- avatar_images=(None, "https://huggingface.co/spaces/Space-Share/bucket/resolve/main/images/pfp.webp"),
993
- show_copy_button=True, render_markdown=True, sanitize_html=True
994
- )
995
  with gr.Row(variant="compact"):
996
- user_msg_tb = gr.Textbox(
997
- show_label=False, placeholder="Ask your research question...",
998
- scale=7, lines=1, max_lines=3
999
- )
1000
  send_btn = gr.Button("Send", variant="primary", scale=1, min_width=100)
1001
  with gr.Accordion("📝 Detailed Response & Insights", open=False):
1002
- fmt_report_tb = gr.Textbox(
1003
- label="Full AI Response", lines=8, interactive=True, show_copy_button=True
1004
- )
1005
- dl_report_btn = gr.DownloadButton(
1006
- "Download Report", value=None, interactive=False, visible=False
1007
- )
1008
  detect_out_md = gr.Markdown(visible=False)
1009
 
1010
  with gr.TabItem("🧠 Knowledge Base"):
1011
  with gr.Row(equal_height=True):
1012
  with gr.Column():
1013
- gr.Markdown("### 📜 Rules Management")
1014
- rules_disp_ta = gr.TextArea(
1015
- label="Current Rules", lines=10,
1016
- placeholder="Rules will appear here.",
1017
- interactive=True
1018
- )
1019
- gr.Markdown("To edit rules, modify the text above and click 'Save Edited Text', or upload a new file.")
1020
  save_edited_rules_btn = gr.Button("💾 Save Edited Text", variant="primary", interactive=not DEMO_MODE)
1021
  with gr.Row(variant="compact"):
1022
- dl_rules_btn = gr.DownloadButton("⬇️ Download Rules", value=None)
1023
- clear_rules_btn = gr.Button("🗑️ Clear All Rules", variant="stop", visible=not DEMO_MODE)
1024
- # --- DEMO MODE CHANGE ---
1025
- upload_rules_fobj = gr.File(
1026
- label="Upload Rules File (.txt with '---' separators, or .jsonl of rule strings)",
1027
- file_types=[".txt", ".jsonl"],
1028
- interactive=not DEMO_MODE
1029
- )
1030
- rules_stat_tb = gr.Textbox(
1031
- label="Rules Status", interactive=False, lines=1, elem_classes=["status-text"]
1032
- )
1033
-
1034
  with gr.Column():
1035
- gr.Markdown("### 📚 Memories Management")
1036
- mems_disp_json = gr.JSON(
1037
- label="Current Memories", value=[]
1038
- )
1039
- gr.Markdown("To add memories, upload a .jsonl or .json file.")
1040
  with gr.Row(variant="compact"):
1041
- dl_mems_btn = gr.DownloadButton("⬇️ Download Memories", value=None)
1042
- clear_mems_btn = gr.Button("🗑️ Clear All Memories", variant="stop", visible=not DEMO_MODE)
1043
- # --- DEMO MODE CHANGE ---
1044
- upload_mems_fobj = gr.File(
1045
- label="Upload Memories File (.jsonl of memory objects, or .json array of objects)",
1046
- file_types=[".jsonl", ".json"],
1047
- interactive=not DEMO_MODE
1048
- )
1049
- mems_stat_tb = gr.Textbox(
1050
- label="Memories Status", interactive=False, lines=1, elem_classes=["status-text"]
1051
- )
1052
-
1053
  def dyn_upd_model_dd(sel_prov_dyn: str):
1054
- models_dyn = get_model_display_names_for_provider(sel_prov_dyn)
1055
- def_model_dyn = get_default_model_display_name_for_provider(sel_prov_dyn)
1056
  return gr.Dropdown(choices=models_dyn, value=def_model_dyn, interactive=True)
1057
-
1058
  prov_sel_dd.change(fn=dyn_upd_model_dd, inputs=prov_sel_dd, outputs=model_sel_dd)
1059
 
1060
- # Inputs for the main chat submission function
1061
  chat_ins = [user_msg_tb, main_chat_disp, prov_sel_dd, model_sel_dd, api_key_tb, sys_prompt_tb]
1062
- # Outputs for the main chat submission function (includes knowledge base displays)
1063
  chat_outs = [user_msg_tb, main_chat_disp, agent_stat_tb, detect_out_md, fmt_report_tb, dl_report_btn, rules_disp_ta, mems_disp_json]
1064
-
1065
  chat_event_args = {"fn": handle_gradio_chat_submit, "inputs": chat_ins, "outputs": chat_outs}
 
1066
 
1067
- send_btn.click(**chat_event_args)
1068
- user_msg_tb.submit(**chat_event_args)
1069
-
1070
- # Rules Management events
1071
  dl_rules_btn.click(fn=ui_download_rules_action_fn, inputs=None, outputs=dl_rules_btn, show_progress=False)
 
 
 
1072
 
1073
- save_edited_rules_btn.click(
1074
- fn=save_edited_rules_action_fn,
1075
- inputs=[rules_disp_ta],
1076
- outputs=[rules_stat_tb],
1077
- show_progress="full"
1078
- ).then(fn=ui_refresh_rules_display_fn, outputs=rules_disp_ta, show_progress=False)
1079
-
1080
- upload_rules_fobj.upload(
1081
- fn=ui_upload_rules_action_fn,
1082
- inputs=[upload_rules_fobj],
1083
- outputs=[rules_stat_tb],
1084
- show_progress="full"
1085
- ).then(fn=ui_refresh_rules_display_fn, outputs=rules_disp_ta, show_progress=False)
1086
-
1087
- clear_rules_btn.click(
1088
- fn=lambda: ("All rules cleared." if clear_all_rules_data_backend() else "Error clearing rules."),
1089
- outputs=rules_stat_tb,
1090
- show_progress=False
1091
- ).then(fn=ui_refresh_rules_display_fn, outputs=rules_disp_ta, show_progress=False)
1092
-
1093
- # Memories Management events
1094
  dl_mems_btn.click(fn=ui_download_memories_action_fn, inputs=None, outputs=dl_mems_btn, show_progress=False)
1095
-
1096
- upload_mems_fobj.upload(
1097
- fn=ui_upload_memories_action_fn,
1098
- inputs=[upload_mems_fobj],
1099
- outputs=[mems_stat_tb],
1100
  show_progress="full"
1101
- ).then(fn=ui_refresh_memories_display_fn, outputs=mems_disp_json, show_progress=False)
1102
-
1103
- clear_mems_btn.click(
1104
- fn=lambda: ("All memories cleared." if clear_all_memory_data_backend() else "Error clearing memories."),
1105
- outputs=mems_stat_tb,
1106
- show_progress=False
1107
- ).then(fn=ui_refresh_memories_display_fn, outputs=mems_disp_json, show_progress=False)
1108
 
1109
- # FAISS save button visibility and action (RAM backend only)
1110
  if MEMORY_STORAGE_BACKEND == "RAM" and 'save_faiss_sidebar_btn' in locals():
1111
  def save_faiss_action_with_feedback_sidebar_fn():
1112
- try:
1113
- save_faiss_indices_to_disk()
1114
- gr.Info("Attempted to save FAISS indices to disk.")
1115
- except Exception as e:
1116
- logger.error(f"Error saving FAISS indices: {e}", exc_info=True)
1117
- gr.Error(f"Error saving FAISS indices: {e}")
1118
-
1119
  save_faiss_sidebar_btn.click(fn=save_faiss_action_with_feedback_sidebar_fn, inputs=None, outputs=None, show_progress=False)
1120
 
1121
-
1122
- # --- Initial Load Event ---
1123
- app_load_outputs = [
1124
- agent_stat_tb,
1125
- rules_disp_ta,
1126
- mems_disp_json,
1127
- detect_out_md,
1128
- fmt_report_tb,
1129
- dl_report_btn
1130
- ]
1131
  demo.load(fn=app_load_fn, inputs=None, outputs=app_load_outputs, show_progress="full")
1132
 
1133
 
1134
  if __name__ == "__main__":
1135
- logger.info(f"Starting Gradio AI Research Mega Agent (v6.5 - Direct UI Update & Core Learning Memories, Memory: {MEMORY_STORAGE_BACKEND})...")
1136
  app_port = int(os.getenv("GRADIO_PORT", 7860))
1137
  app_server = os.getenv("GRADIO_SERVER_NAME", "127.0.0.1")
1138
  app_debug = os.getenv("GRADIO_DEBUG", "False").lower() == "true"
1139
  app_share = os.getenv("GRADIO_SHARE", "False").lower() == "true"
1140
  logger.info(f"Launching Gradio server: http://{app_server}:{app_port}. Debug: {app_debug}, Share: {app_share}")
1141
- demo.queue().launch(server_name=app_server, server_port=app_port, debug=app_debug, share=app_share, mcp_server=True)
1142
  logger.info("Gradio application shut down.")
 
31
  import tempfile
32
  import xml.etree.ElementTree as ET
33
 
34
+ # --- New Imports for Image KV Functionality ---
35
+ import io
36
+ import struct
37
+ import numpy as np
38
+ from PIL import Image, ImageDraw, ImageFont
39
+ from cryptography.hazmat.primitives.ciphers.aead import AESGCM
40
+ from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
41
+ from cryptography.hazmat.primitives import hashes
42
+ from cryptography.exceptions import InvalidTag
43
+ # --- End New Imports ---
44
+
45
  load_dotenv() # Load .env file, but our settings above will take precedence if set.
46
 
47
  from model_logic import (
48
  get_available_providers, get_model_display_names_for_provider,
49
  get_default_model_display_name_for_provider, call_model_stream, MODELS_BY_PROVIDER
50
  )
 
51
 
52
  from memory_logic import (
53
  initialize_memory_system,
 
81
  logger.info(f"Startup loading: Rules from {LOAD_RULES_FILE or 'None'}, Memories from {LOAD_MEMORIES_FILE or 'None'}")
82
 
83
 
84
+ # --- KV to Image Functions (Constants and Implementation) ---
85
+ KEY_SIZE = 32
86
+ SALT_SIZE = 16
87
+ NONCE_SIZE = 12
88
+ TAG_SIZE = 16
89
+ PBKDF2_ITERATIONS = 480000
90
+ LENGTH_HEADER_SIZE = 4 # struct.pack('>I') uses 4 bytes
91
+ PREFERRED_FONTS = ["Arial", "Helvetica", "DejaVu Sans", "Verdana", "Calibri", "sans-serif"]
92
+ MAX_KEYS_TO_DISPLAY_OVERLAY = 15
93
+
94
+ def _get_font(preferred_fonts, base_size):
95
+ fp = None
96
+ safe_base_size = int(base_size)
97
+ if safe_base_size <= 0: safe_base_size = 10
98
+ for n in preferred_fonts:
99
+ try: ImageFont.truetype(n.lower()+".ttf",10); fp=n.lower()+".ttf"; break
100
+ except IOError:
101
+ try: ImageFont.truetype(n,10); fp=n; break
102
+ except IOError: continue
103
+ if fp:
104
+ try: return ImageFont.truetype(fp, safe_base_size)
105
+ except IOError: logger.warning(f"Font '{fp}' load failed with size {safe_base_size}. Defaulting.")
106
+ try: return ImageFont.load_default(size=safe_base_size)
107
+ except TypeError: return ImageFont.load_default()
108
+
109
+ def set_pil_image_format_to_png(image:Image.Image)->Image.Image:
110
+ buf=io.BytesIO(); image.save(buf,format='PNG'); buf.seek(0)
111
+ reloaded=Image.open(buf); reloaded.format="PNG"; return reloaded
112
+
113
+ def _derive_key(pw:str,salt:bytes)->bytes:
114
+ kdf=PBKDF2HMAC(algorithm=hashes.SHA256(),length=KEY_SIZE,salt=salt,iterations=PBKDF2_ITERATIONS)
115
+ return kdf.derive(pw.encode('utf-8'))
116
+
117
+ def encrypt_data(data:bytes,pw:str)->bytes:
118
+ s=os.urandom(SALT_SIZE);k=_derive_key(pw,s);a=AESGCM(k);n=os.urandom(NONCE_SIZE)
119
+ ct=a.encrypt(n,data,None); return s+n+ct
120
+
121
+ def decrypt_data(payload:bytes,pw:str)->bytes:
122
+ ml=SALT_SIZE+NONCE_SIZE+TAG_SIZE;
123
+ if len(payload)<ml: raise ValueError("Payload too short.")
124
+ s,n,ct_tag=payload[:SALT_SIZE],payload[SALT_SIZE:SALT_SIZE+NONCE_SIZE],payload[SALT_SIZE+NONCE_SIZE:]
125
+ k=_derive_key(pw,s);a=AESGCM(k)
126
+ try: return a.decrypt(n,ct_tag,None)
127
+ except InvalidTag: raise ValueError("Decryption failed: Invalid password/corrupted data.")
128
+ except Exception as e: logger.error(f"Decrypt error: {e}",exc_info=True); raise
129
+
130
+ def _d2b(d:bytes)->str: return ''.join(format(b,'08b') for b in d)
131
+ def _b2B(b:str)->bytes:
132
+ if len(b)%8!=0: raise ValueError("Bits not multiple of 8.")
133
+ return bytes(int(b[i:i+8],2) for i in range(0,len(b),8))
134
+
135
+ def embed_data_in_image(img_obj:Image.Image,data:bytes)->Image.Image:
136
+ img=img_obj.convert("RGB");px=np.array(img);fpx=px.ravel()
137
+ lb=struct.pack('>I',len(data));fp=lb+data;db=_d2b(fp);nb=len(db)
138
+ if nb>len(fpx): raise ValueError(f"Data too large: {nb} bits needed, {len(fpx)} available.")
139
+ for i in range(nb): fpx[i]=(fpx[i]&0xFE)|int(db[i])
140
+ spx=fpx.reshape(px.shape); return Image.fromarray(spx.astype(np.uint8),'RGB')
141
+
142
+ def extract_data_from_image(img_obj:Image.Image)->bytes:
143
+ img=img_obj.convert("RGB");px=np.array(img);fpx=px.ravel()
144
+ hbc=LENGTH_HEADER_SIZE*8
145
+ if len(fpx)<hbc: raise ValueError("Image too small for header.")
146
+ lb="".join(str(fpx[i]&1) for i in range(hbc))
147
+ try: pl=struct.unpack('>I',_b2B(lb))[0]
148
+ except Exception as e: raise ValueError(f"Header decode error: {e}")
149
+ if pl==0: return b""
150
+ if pl>(len(fpx)-hbc)/8: raise ValueError("Header len corrupted or > capacity.")
151
+ tpb=pl*8; so=hbc; eo=so+tpb
152
+ if len(fpx)<eo: raise ValueError("Image truncated or header corrupted.")
153
+ pb="".join(str(fpx[i]&1) for i in range(so,eo)); return _b2B(pb)
154
+
155
+ def parse_kv_string_to_dict(kv_str:str)->dict:
156
+ if not kv_str or not kv_str.strip(): return {}
157
+ dd={};
158
+ for ln,ol in enumerate(kv_str.splitlines(),1):
159
+ l=ol.strip()
160
+ if not l or l.startswith('#'): continue
161
+ lc=l.split('#',1)[0].strip();
162
+ if not lc: continue
163
+ p=lc.split('=',1) if '=' in lc else lc.split(':',1) if ':' in lc else []
164
+ if len(p)!=2: raise ValueError(f"L{ln}: Invalid format '{ol}'.")
165
+ k,v=p[0].strip(),p[1].strip()
166
+ if not k: raise ValueError(f"L{ln}: Empty key in '{ol}'.")
167
+ dd[k]=v
168
+ return dd
169
+
170
+ def generate_brain_carrier_image(w=800, h=800, msg="iLearn Knowledge Base") -> Image.Image:
171
+ """
172
+ Generates a carrier image with a radial gradient, a central text-based icon, and a message.
173
+ """
174
+ # --- Color Palette ---
175
+ bg_center_color = (210, 220, 255) # Light blue center
176
+ bg_outer_color = (0, 53, 139) # Deep blue edges
177
+ icon_color = (180, 220, 255, 200) # Light, slightly transparent color for the icon text
178
+ text_color = (230, 235, 245) # Bright text color for the main message
179
+ shadow_color = (0, 0, 0, 128) # Text shadow
180
+ border_color = (255, 255, 255, 50) # Subtle white border
181
+
182
+ # --- Font and Icon Selection ---
183
+ PREFERRED_MONO_FONTS = ["Courier New", "Consolas", "Menlo", "Monaco", "Courier", "monospace"]
184
+
185
+ # Use the single line "thinking" bubble as the main icon.
186
+ ascii_art_icon = ". o O ( hmm... )"
187
+
188
+ # --- Setup ---
189
+ img = Image.new("RGBA", (w, h), bg_outer_color)
190
+ draw = ImageDraw.Draw(img)
191
+
192
+ # --- 1. Draw Radial Gradient Background ---
193
+ center_x, center_y = w / 2, h / 2
194
+ max_radius = int((center_x**2 + center_y**2)**0.5)
195
+
196
+ for r in range(max_radius, 0, -3):
197
+ ratio = 1 - (r / max_radius)
198
+ inter_color = tuple(int(bg_outer_color[i] + (bg_center_color[i] - bg_outer_color[i]) * ratio) for i in range(3))
199
+ box = [center_x - r, center_y - r, center_x + r, center_y + r]
200
+ draw.ellipse(box, fill=inter_color)
201
+
202
+ # --- 2. Draw Text-based Icon ---
203
+ # Make the icon much larger, based on image width.
204
+ icon_font_size = max(24, int(w / 15))
205
+ icon_font = _get_font(PREFERRED_MONO_FONTS, icon_font_size)
206
+
207
+ # Position the icon lower, closer to the bottom text.
208
+ icon_cx = w / 2
209
+ icon_cy = h * 0.58
210
+
211
+ # Draw the single-line text icon, centered.
212
+ draw.text(
213
+ (icon_cx, icon_cy),
214
+ ascii_art_icon,
215
+ font=icon_font,
216
+ fill=icon_color,
217
+ anchor="mm"
218
+ )
219
+
220
+ # --- 3. Draw Text Message at the bottom ---
221
+ text_font_size = max(18, int(w / 30))
222
+ text_font = _get_font(PREFERRED_FONTS, text_font_size)
223
+ text_y_pos = h * 0.80 # Moved text slightly lower to give the new icon more space
224
+ shadow_offset = max(1, int(text_font_size / 20))
225
+
226
+ draw.text((center_x, text_y_pos), msg, font=text_font, fill=shadow_color, anchor="ms", stroke_width=shadow_offset*2, stroke_fill=shadow_color)
227
+ draw.text((center_x, text_y_pos), msg, font=text_font, fill=text_color, anchor="ms")
228
+
229
+ # --- 4. Draw Border ---
230
+ border_width = max(2, int(min(w,h) / 150))
231
+ draw.rectangle([(0,0), (w-1, h-1)], outline=border_color, width=border_width)
232
+
233
+ # --- Finalize ---
234
+ final_image_rgb = Image.new("RGB", img.size, (0, 0, 0))
235
+ final_image_rgb.paste(img, (0, 0), img)
236
+
237
+ return final_image_rgb
238
+
239
+ def _get_text_measurement(draw_obj, text_str, font_obj):
240
+ if hasattr(draw_obj, 'textbbox'):
241
+ try:
242
+ bbox = draw_obj.textbbox((0, 0), text_str, font=font_obj)
243
+ width = bbox[2] - bbox[0]
244
+ height = bbox[3] - bbox[1]
245
+ return width, height
246
+ except Exception: pass
247
+ try:
248
+ if hasattr(font_obj, 'getsize'): return font_obj.getsize(text_str)
249
+ width, height = draw_obj.textsize(text_str, font=font_obj)
250
+ return width, height
251
+ except AttributeError:
252
+ try:
253
+ char_width_approx = font_obj.size * 0.6
254
+ char_height_approx = font_obj.size
255
+ return int(len(text_str) * char_width_approx), int(char_height_approx)
256
+ except: return len(text_str) * 8, 10
257
+
258
+ def draw_key_list_dropdown_overlay(image: Image.Image, keys: list[str] = None, title: str = "Data Embedded") -> Image.Image:
259
+ if not title and (keys is None or not keys):
260
+ return set_pil_image_format_to_png(image.copy())
261
+ img_overlayed = image.copy(); draw = ImageDraw.Draw(img_overlayed)
262
+ margin = 10; padding = {'title_x':10,'title_y':6,'key_x':10,'key_y':5}; line_spacing = 4
263
+ title_bg_color=(60,60,60); title_text_color=(230,230,90)
264
+ key_list_bg_color=(50,50,50); key_text_color=(210,210,210); ellipsis_color=(170,170,170)
265
+ OVERLAY_TARGET_WIDTH_RATIO = 0.30; MIN_OVERLAY_WIDTH_PX = 180; MAX_OVERLAY_WIDTH_PX = 500
266
+ final_overlay_box_width = min(max(int(image.width*OVERLAY_TARGET_WIDTH_RATIO),MIN_OVERLAY_WIDTH_PX),MAX_OVERLAY_WIDTH_PX)
267
+ final_overlay_box_width = min(final_overlay_box_width, image.width - 2 * margin)
268
+ TITLE_FONT_HEIGHT_RATIO=0.030; TITLE_FONT_OVERLAY_WIDTH_RATIO=0.08; MIN_TITLE_FONT_SIZE=14; MAX_TITLE_FONT_SIZE=28
269
+ title_font_size = min(max(min(int(image.height*TITLE_FONT_HEIGHT_RATIO),int(final_overlay_box_width*TITLE_FONT_OVERLAY_WIDTH_RATIO)),MIN_TITLE_FONT_SIZE),MAX_TITLE_FONT_SIZE)
270
+ title_font = _get_font(PREFERRED_FONTS, title_font_size)
271
+ KEY_FONT_HEIGHT_RATIO=0.025; KEY_FONT_OVERLAY_WIDTH_RATIO=0.07; MIN_KEY_FONT_SIZE=12; MAX_KEY_FONT_SIZE=22
272
+ key_font_size = min(max(min(int(image.height*KEY_FONT_HEIGHT_RATIO),int(final_overlay_box_width*KEY_FONT_OVERLAY_WIDTH_RATIO)),MIN_KEY_FONT_SIZE),MAX_KEY_FONT_SIZE)
273
+ key_font = _get_font(PREFERRED_FONTS, key_font_size)
274
+ actual_title_w, actual_title_h = _get_text_measurement(draw, title, title_font)
275
+ disp_keys, actual_key_text_widths, total_keys_render_h, key_line_heights = [],[],0,[]
276
+ if keys:
277
+ temp_disp_keys=keys[:MAX_KEYS_TO_DISPLAY_OVERLAY-1]+[f"... ({len(keys)-(MAX_KEYS_TO_DISPLAY_OVERLAY-1)} more)"] if len(keys)>MAX_KEYS_TO_DISPLAY_OVERLAY else keys
278
+ for kt in temp_disp_keys:
279
+ disp_keys.append(kt); kw, kh = _get_text_measurement(draw, kt, key_font)
280
+ actual_key_text_widths.append(kw); key_line_heights.append(kh); total_keys_render_h += kh
281
+ if len(disp_keys)>1: total_keys_render_h += line_spacing*(len(disp_keys)-1)
282
+ title_bar_h = actual_title_h + 2*padding['title_y']; title_bar_x1=image.width-margin; title_bar_x0=title_bar_x1-final_overlay_box_width; title_bar_y0=margin; title_bar_y1=title_bar_y0+title_bar_h
283
+ draw.rectangle([(title_bar_x0,title_bar_y0),(title_bar_x1,title_bar_y1)],fill=title_bg_color)
284
+ available_width_for_title_text = final_overlay_box_width - 2*padding['title_x']
285
+ title_text_draw_x = title_bar_x0+padding['title_x'] + (available_width_for_title_text-actual_title_w)/2 if actual_title_w<=available_width_for_title_text else title_bar_x0+padding['title_x']
286
+ title_text_draw_y = title_bar_y0 + padding['title_y']
287
+ draw.text((title_text_draw_x, title_text_draw_y), title, font=title_font, fill=title_text_color)
288
+ if disp_keys:
289
+ key_list_box_h_ideal = total_keys_render_h + 2*padding['key_y']; key_list_x0, key_list_x1 = title_bar_x0, title_bar_x1
290
+ key_list_y0 = title_bar_y1; key_list_y1 = min(key_list_y0+key_list_box_h_ideal, image.height-margin)
291
+ current_key_list_box_h = key_list_y1 - key_list_y0
292
+ draw.rectangle([(key_list_x0,key_list_y0),(key_list_x1,key_list_y1)],fill=key_list_bg_color)
293
+ current_text_y = key_list_y0+padding['key_y']; available_text_width_for_keys = final_overlay_box_width-2*padding['key_x']
294
+ for i, key_text_item in enumerate(disp_keys):
295
+ if i>=len(key_line_heights): break
296
+ current_key_h=key_line_heights[i]
297
+ if current_text_y+current_key_h > key_list_y0+current_key_list_box_h-padding['key_y']:
298
+ _, ellipsis_h = _get_text_measurement(draw,"...",key_font)
299
+ if current_text_y+ellipsis_h <= key_list_y0+current_key_list_box_h-padding['key_y']:
300
+ ellipsis_w, _ = _get_text_measurement(draw,"...",key_font)
301
+ draw.text((key_list_x0+(final_overlay_box_width-ellipsis_w)/2, current_text_y), "...", font=key_font, fill=ellipsis_color)
302
+ break
303
+ original_key_text_w = actual_key_text_widths[i]; text_to_draw = key_text_item
304
+ if original_key_text_w > available_text_width_for_keys:
305
+ temp_text = key_text_item
306
+ while _get_text_measurement(draw, temp_text+"...", key_font)[0] > available_text_width_for_keys and len(temp_text) > 0: temp_text=temp_text[:-1]
307
+ text_to_draw = temp_text+"..." if len(temp_text)<len(key_text_item) else temp_text
308
+ final_key_text_w, _ = _get_text_measurement(draw, text_to_draw, key_font)
309
+ key_text_draw_x = key_list_x0+padding['key_x']+max(0,(available_text_width_for_keys-final_key_text_w)/2)
310
+ text_color_to_use = ellipsis_color if "..." in text_to_draw or f"... ({len(keys)-(MAX_KEYS_TO_DISPLAY_OVERLAY-1)} more)"==key_text_item else key_text_color
311
+ draw.text((key_text_draw_x, current_text_y), text_to_draw, font=key_font, fill=text_color_to_use)
312
+ current_text_y += current_key_h
313
+ if i < len(disp_keys)-1: current_text_y += line_spacing
314
+ return set_pil_image_format_to_png(img_overlayed)
315
+ # --- END KV to Image Functions ---
316
+
317
+
318
  # --- Helper Functions ---
319
  def format_insights_for_prompt(retrieved_insights_list: list[str]) -> tuple[str, list[dict]]:
320
  if not retrieved_insights_list:
 
935
  return added_count, format_error_count, save_error_count
936
 
937
 
938
+ # --- UI Functions for Rules and Memories (Text and Image) ---
939
+ def convert_kb_to_kv_string(rules: list[str], memories: list[dict]) -> str:
940
+ """Serializes rules and memories into a single key-value string for image embedding."""
941
+ lines = ["# iLearn Knowledge Base Export", f"# Exported on: {datetime.utcnow().isoformat()}Z"]
942
+ lines.append("\n# --- RULES ---")
943
+ for i, rule_text in enumerate(rules):
944
+ lines.append(f"rule_{i+1} = {json.dumps(rule_text)}")
945
+
946
+ lines.append("\n# --- MEMORIES ---")
947
+ for i, mem_dict in enumerate(memories):
948
+ lines.append(f"memory_{i+1} = {json.dumps(mem_dict)}")
949
+ return "\n".join(lines)
950
+
951
  def ui_refresh_rules_display_fn(): return "\n\n---\n\n".join(get_all_rules_cached()) or "No rules found."
952
+ def ui_refresh_memories_display_fn(): return get_all_memories_cached() or []
953
 
954
  def ui_download_rules_action_fn():
955
  rules_content = "\n\n---\n\n".join(get_all_rules_cached())
 
972
  except Exception as e_read: return f"Error reading file: {e_read}"
973
  if not content.strip(): return "Uploaded rules file is empty."
974
  added_count, skipped_count, error_count = 0,0,0
 
975
  potential_rules = []
976
  file_name_lower = uploaded_file_obj.name.lower()
 
977
  if file_name_lower.endswith(".txt"):
978
  potential_rules = content.split("\n\n---\n\n")
979
  if len(potential_rules) == 1 and "\n" in content:
 
994
  error_count +=1
995
  else:
996
  return "Unsupported file type for rules. Please use .txt or .jsonl."
 
997
  valid_potential_rules = [r.strip() for r in potential_rules if r.strip()]
998
  total_to_process = len(valid_potential_rules)
999
+ if total_to_process == 0 and error_count == 0: return "No valid rules found in file to process."
1000
+ elif total_to_process == 0 and error_count > 0: return f"No valid rules found to process. Encountered {error_count} parsing/format errors."
1001
  progress(0, desc="Starting rules upload...")
1002
  for idx, rule_text in enumerate(valid_potential_rules):
1003
  success, status_msg = add_rule_entry(rule_text)
 
1005
  elif status_msg == "duplicate": skipped_count += 1
1006
  else: error_count += 1
1007
  progress((idx + 1) / total_to_process, desc=f"Processed {idx+1}/{total_to_process} rules...")
 
1008
  msg = f"Rules Upload: Total valid rule segments processed: {total_to_process}. Added: {added_count}, Skipped (duplicates): {skipped_count}, Errors (parsing/add): {error_count}."
1009
  logger.info(msg); return msg
1010
 
 
 
1011
  def ui_download_memories_action_fn():
1012
  memories = get_all_memories_cached()
1013
  if not memories:
1014
  gr.Warning("No memories to download.")
1015
  return gr.DownloadButton(value=None, interactive=False, label="No Memories")
 
1016
  jsonl_content = ""
1017
  for mem_dict in memories:
1018
  try: jsonl_content += json.dumps(mem_dict) + "\n"
1019
  except Exception as e: logger.error(f"Error serializing memory for download: {mem_dict}, Error: {e}")
 
1020
  if not jsonl_content.strip():
1021
  gr.Warning("No valid memories to serialize for download.")
1022
  return gr.DownloadButton(value=None, interactive=False, label="No Data")
 
1037
  if not content.strip(): return "Uploaded memories file is empty."
1038
  added_count, format_error_count, save_error_count = 0,0,0
1039
  memory_objects_to_process = []
 
1040
  file_ext = os.path.splitext(uploaded_file_obj.name.lower())[1]
 
1041
  if file_ext == ".json":
1042
  try:
1043
  parsed_json = json.loads(content)
1044
+ if isinstance(parsed_json, list): memory_objects_to_process = parsed_json
1045
+ elif isinstance(parsed_json, dict): memory_objects_to_process = [parsed_json]
 
 
1046
  else:
1047
+ logger.warning(f"Memories Upload (.json): File content is not a JSON list or object. Type: {type(parsed_json)}"); format_error_count = 1
 
1048
  except json.JSONDecodeError as e:
1049
+ logger.warning(f"Memories Upload (.json): Invalid JSON file. Error: {e}"); format_error_count = 1
 
1050
  elif file_ext == ".jsonl":
1051
  for line_num, line in enumerate(content.splitlines()):
1052
  line = line.strip()
1053
  if line:
1054
+ try: memory_objects_to_process.append(json.loads(line))
 
1055
  except json.JSONDecodeError:
1056
+ logger.warning(f"Memories Upload (.jsonl): Line {line_num+1} parse error: {line[:100]}"); format_error_count += 1
1057
+ else: return "Unsupported file type for memories. Please use .json or .jsonl."
1058
+ if not memory_objects_to_process and format_error_count > 0 : return f"Memories Upload: File parsing failed. Found {format_error_count} format errors and no processable objects."
1059
+ elif not memory_objects_to_process: return "No valid memory objects found in the uploaded file."
1060
  total_to_process = len(memory_objects_to_process)
1061
  if total_to_process == 0: return "No memory objects to process (after parsing)."
 
1062
  progress(0, desc="Starting memories upload...")
1063
  for idx, mem_data in enumerate(memory_objects_to_process):
1064
  if isinstance(mem_data, dict) and all(k in mem_data for k in ["user_input", "bot_response", "metrics"]):
 
1066
  if success: added_count += 1
1067
  else: save_error_count += 1
1068
  else:
1069
+ logger.warning(f"Memories Upload: Skipped invalid memory object structure: {str(mem_data)[:100]}"); format_error_count += 1
 
1070
  progress((idx + 1) / total_to_process, desc=f"Processed {idx+1}/{total_to_process} memories...")
 
1071
  msg = f"Memories Upload: Processed {total_to_process} objects. Added: {added_count}, Format/Structure Errors: {format_error_count}, Save Errors: {save_error_count}."
1072
  logger.info(msg); return msg
1073
 
1074
  def save_edited_rules_action_fn(edited_rules_text: str, progress=gr.Progress()):
 
1075
  if DEMO_MODE:
1076
  gr.Warning("Saving edited rules is disabled in Demo Mode.")
1077
  return "Saving edited rules is disabled in Demo Mode."
1078
+ if not edited_rules_text.strip(): return "No rules text to save."
 
 
 
1079
  potential_rules = edited_rules_text.split("\n\n---\n\n")
1080
  if len(potential_rules) == 1 and "\n" in edited_rules_text:
1081
  potential_rules = [r.strip() for r in edited_rules_text.splitlines() if r.strip()]
1082
+ if not potential_rules: return "No rules found to process from editor."
 
 
 
1083
  added, skipped, errors = 0, 0, 0
1084
  unique_rules_to_process = sorted(list(set(filter(None, [r.strip() for r in potential_rules]))))
 
1085
  total_unique = len(unique_rules_to_process)
1086
  if total_unique == 0: return "No unique, non-empty rules found in editor text."
 
1087
  progress(0, desc=f"Saving {total_unique} unique rules from editor...")
 
1088
  for idx, rule_text in enumerate(unique_rules_to_process):
1089
  success, status_msg = add_rule_entry(rule_text)
1090
  if success: added += 1
1091
  elif status_msg == "duplicate": skipped += 1
1092
  else: errors += 1
1093
  progress((idx + 1) / total_unique, desc=f"Processed {idx+1}/{total_unique} rules...")
 
1094
  return f"Editor Save: Added: {added}, Skipped (duplicates): {skipped}, Errors/Invalid: {errors} from {total_unique} unique rules in text."
1095
 
1096
+ def ui_download_kb_as_image_fn(password: str, progress=gr.Progress()):
1097
+ """Generates and provides a downloadable image with embedded KB data."""
1098
+ progress(0, desc="Fetching knowledge base...")
1099
+ rules, memories = get_all_rules_cached(), get_all_memories_cached()
1100
+ if not rules and not memories:
1101
+ gr.Warning("Knowledge base is empty. Nothing to download.")
1102
+ return None
1103
+
1104
+ progress(0.2, desc="Serializing data...")
1105
+ kv_string = convert_kb_to_kv_string(rules, memories)
1106
+ data_bytes = kv_string.encode('utf-8')
1107
+
1108
+ if password and password.strip():
1109
+ progress(0.4, desc="Encrypting data...")
1110
+ try:
1111
+ data_bytes = encrypt_data(data_bytes, password.strip())
1112
+ gr.Info("Data encrypted successfully.")
1113
+ except Exception as e:
1114
+ logger.error(f"KB ImgDL: Encrypt failed: {e}")
1115
+ gr.Error(f"Encryption failed: {e}")
1116
+ return None
1117
+
1118
+ progress(0.6, desc="Generating carrier image...")
1119
+ carrier_image = generate_brain_carrier_image(w=800, h=800, msg="iLearn Knowledge Base")
1120
+
1121
+ try:
1122
+ progress(0.7, desc="Embedding data...")
1123
+ embedded_image = embed_data_in_image(carrier_image, data_bytes)
1124
+ except ValueError as e:
1125
+ logger.error(f"KB ImgDL: Embed failed: {e}")
1126
+ gr.Error(f"Data is too large for this image size: {e}")
1127
+ return None
1128
+
1129
+ progress(0.8, desc="Adding visual overlay...")
1130
+ keys_for_overlay = [f"Rule Count: {len(rules)}", f"Memory Count: {len(memories)}", "---"]
1131
+ for r in rules[:5]:
1132
+ match = re.search(r"\](.*)", r, re.DOTALL)
1133
+ rule_content = match.group(1).strip() if match else r
1134
+ keys_for_overlay.append(f"Rule: {rule_content[:40]}...")
1135
+ if len(rules) > 5: keys_for_overlay.append("...")
1136
+
1137
+ title_overlay = "Encrypted Data" if password and password.strip() else "Embedded Data"
1138
+ final_image = draw_key_list_dropdown_overlay(embedded_image, keys=keys_for_overlay, title=title_overlay)
1139
+
1140
+ progress(0.9, desc="Saving final image...")
1141
+ try:
1142
+ with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmpfile:
1143
+ final_image.save(tmpfile, format="PNG")
1144
+ tmp_path = tmpfile.name
1145
+
1146
+ progress(1.0, desc="Download triggered!")
1147
+ gr.Info("Download should start automatically.")
1148
+ return tmp_path
1149
+ except Exception as e:
1150
+ logger.error(f"KB ImgDL: Save failed: {e}")
1151
+ gr.Error(f"Failed to save final image: {e}")
1152
+ return None
1153
+
1154
+ def ui_upload_kb_from_image_fn(uploaded_image_filepath: str, password: str, progress=gr.Progress()):
1155
+ """Extracts KB data from an uploaded image and adds it to the system."""
1156
+ if DEMO_MODE:
1157
+ gr.Warning("Uploading is disabled in Demo Mode.")
1158
+ return "Upload disabled in Demo Mode."
1159
+ if not uploaded_image_filepath:
1160
+ return "No image file provided or pasted."
1161
+
1162
+ progress(0, desc="Loading image...")
1163
+ try:
1164
+ img = Image.open(uploaded_image_filepath)
1165
+ except Exception as e:
1166
+ logger.error(f"KB ImgUL: Open fail: {e}")
1167
+ return f"Error: Could not open image file: {e}"
1168
+
1169
+ progress(0.2, desc="Extracting data from image...")
1170
+ try:
1171
+ extracted_bytes = extract_data_from_image(img)
1172
+ if not extracted_bytes: return "No data found embedded in the image."
1173
+ except ValueError as e:
1174
+ logger.error(f"KB ImgUL: Extract fail: {e}")
1175
+ return f"Error extracting data: {e}"
1176
+ except Exception as e:
1177
+ logger.error(f"KB ImgUL: Extract error: {e}", exc_info=True)
1178
+ return f"Unexpected extraction error: {e}"
1179
+
1180
+ kv_string = ""
1181
+ try:
1182
+ if extracted_bytes[:20].decode('utf-8', errors='ignore').strip().startswith("# iLearn"):
1183
+ kv_string = extracted_bytes.decode('utf-8')
1184
+ progress(0.4, desc="Parsing data...")
1185
+ elif password and password.strip():
1186
+ progress(0.3, desc="Attempting decryption...")
1187
+ kv_string = decrypt_data(extracted_bytes, password.strip()).decode('utf-8')
1188
+ progress(0.4, desc="Parsing decrypted data...")
1189
+ else: return "Data appears encrypted, but no password was provided."
1190
+ except (UnicodeDecodeError, InvalidTag, ValueError) as e:
1191
+ if isinstance(e, (InvalidTag, ValueError)): return f"Decryption Failed. Check password/file. Details: {e}"
1192
+ return "Data is binary and requires a password for decryption."
1193
+ except Exception as e:
1194
+ logger.error(f"KB ImgUL: Decrypt error: {e}", exc_info=True)
1195
+ return f"Unexpected decryption error: {e}"
1196
+
1197
+ if not kv_string: return "Could not get data from image (after potential decryption)."
1198
+ try:
1199
+ kv_dict = parse_kv_string_to_dict(kv_string)
1200
+ except Exception as e:
1201
+ logger.error(f"KB ImgUL: Parse fail: {e}")
1202
+ return f"Error parsing data: {e}"
1203
+ if not kv_dict: return "Parsed data is empty."
1204
+
1205
+ rules_to_add, memories_to_add = [], []
1206
+ for key, value in kv_dict.items():
1207
+ if key.startswith("rule_"):
1208
+ try: rules_to_add.append(json.loads(value))
1209
+ except: logger.warning(f"KB ImgUL: Bad rule format for key {key}")
1210
+ elif key.startswith("memory_"):
1211
+ try:
1212
+ mem_dict = json.loads(value)
1213
+ if isinstance(mem_dict, dict) and all(k in mem_dict for k in ['user_input', 'bot_response', 'metrics']):
1214
+ memories_to_add.append(mem_dict)
1215
+ except: logger.warning(f"KB ImgUL: Bad memory format for key {key}")
1216
+
1217
+ added_rules, skip_r, err_r, added_mems, err_m = 0, 0, 0, 0, 0
1218
+ total = len(rules_to_add) + len(memories_to_add)
1219
+ progress(0.5, desc=f"Adding {len(rules_to_add)} rules...")
1220
+ for i, rule in enumerate(rules_to_add):
1221
+ s, m = add_rule_entry(rule)
1222
+ if s: added_rules += 1
1223
+ elif m == "duplicate": skip_r += 1
1224
+ else: err_r += 1
1225
+ if total > 0: progress(0.5 + (0.4 * ((i+1)/total)) if total else 0)
1226
+
1227
+ progress(0.9, desc=f"Adding {len(memories_to_add)} memories...")
1228
+ for i, mem in enumerate(memories_to_add):
1229
+ s, _ = add_memory_entry(mem['user_input'], mem['metrics'], mem['bot_response'])
1230
+ if s: added_mems += 1
1231
+ else: err_m += 1
1232
+ if total > 0: progress(0.9 + (0.1 * ((i+1)/total)) if total else 0)
1233
+
1234
+ progress(1.0, desc="Upload complete!")
1235
+ msg = f"Upload Complete. Rules - Add: {added_rules}, Skip: {skip_r}, Err: {err_r}. Mems - Add: {added_mems}, Err: {err_m}."
1236
+ logger.info(f"Image KB Upload: {msg}")
1237
+ return msg
1238
+
1239
  def app_load_fn():
1240
  logger.info("App loading. Initializing systems...")
1241
  initialize_memory_system()
1242
  logger.info("Memory system initialized.")
 
 
1243
  rules_added, rules_skipped, rules_errors = load_rules_from_file(LOAD_RULES_FILE)
1244
  rules_load_msg = f"Rules: Added {rules_added}, Skipped {rules_skipped}, Errors {rules_errors} from {LOAD_RULES_FILE or 'None'}."
1245
  logger.info(rules_load_msg)
 
 
1246
  mems_added, mems_format_errors, mems_save_errors = load_memories_from_file(LOAD_MEMORIES_FILE)
1247
  mems_load_msg = f"Memories: Added {mems_added}, Format Errors {mems_format_errors}, Save Errors {mems_save_errors} from {LOAD_MEMORIES_FILE or 'None'}."
1248
  logger.info(mems_load_msg)
 
1249
  final_status = f"AI Systems Initialized. {rules_load_msg} {mems_load_msg} Ready."
1250
+ rules_on_load, mems_on_load = ui_refresh_rules_display_fn(), ui_refresh_memories_display_fn()
1251
+ return (final_status, rules_on_load, mems_on_load, gr.Markdown(visible=False),
1252
+ gr.Textbox(value="*Waiting...*", interactive=True, show_copy_button=True),
1253
+ gr.DownloadButton(interactive=False, value=None, visible=False))
1254
 
1255
 
1256
  # --- Gradio UI Definition ---
1257
+ # --- Gradio UI Definition ---
1258
+ with gr.Blocks(theme=gr.themes.Soft(), css=".gr-button { margin: 5px; } .gr-textbox, .gr-text-area, .gr-dropdown, .gr-json { border-radius: 8px; } .gr-group { border: 1px solid #e0e0e0; border-radius: 8px; padding: 10px; } .gr-row { gap: 10px; } .gr-tab { border-radius: 8px; } .status-text { font-size: 0.9em; color: #555; } .gr-json { max-height: 400px; overflow-y: auto; }") as demo:
1259
+
1260
+ # --- MODIFICATION: JavaScript for 1-Click Download ---
1261
+ # This JS function will be triggered when the hidden file component changes.
1262
+ # It receives the file data object from Gradio, which contains a web-accessible URL.
1263
+ js_download_func = """
1264
+ (file) => {
1265
+ if (file && file.url) {
1266
+ const link = document.createElement('a');
1267
+ link.href = file.url;
1268
+ link.download = `iLearn_KB_${new Date().toISOString().split('T')[0]}.png`;
1269
+ document.body.appendChild(link);
1270
+ link.click();
1271
+ document.body.removeChild(link);
1272
+ }
1273
+ }
1274
  """
1275
+
1276
+ gr.Markdown(f"# 🤖 iLearn: An Autonomous Learning Agent {'(DEMO MODE)' if DEMO_MODE else ''}", elem_classes=["header"])
1277
+ is_sqlite, is_hf_dataset = (MEMORY_STORAGE_BACKEND == "SQLITE"), (MEMORY_STORAGE_BACKEND == "HF_DATASET")
1278
  with gr.Row(variant="compact"):
1279
+ agent_stat_tb = gr.Textbox(label="Agent Status", value="Initializing systems...", interactive=False, elem_classes=["status-text"], scale=4)
 
 
 
1280
  with gr.Column(scale=1, min_width=150):
1281
+ memory_backend_info_tb = gr.Textbox(label="Memory Backend", value=MEMORY_STORAGE_BACKEND, interactive=False, elem_classes=["status-text"])
1282
+ sqlite_path_display = gr.Textbox(label="SQLite Path", value=MEMORY_SQLITE_PATH, interactive=False, visible=is_sqlite, elem_classes=["status-text"])
1283
+ hf_repos_display = gr.Textbox(label="HF Repos", value=f"M: {MEMORY_HF_MEM_REPO}, R: {MEMORY_HF_RULES_REPO}", interactive=False, visible=is_hf_dataset, elem_classes=["status-text"])
1284
 
1285
  with gr.Row():
1286
  with gr.Sidebar():
1287
  gr.Markdown("## ⚙️ Configuration")
1288
  with gr.Group():
1289
  gr.Markdown("### AI Model Settings")
1290
+ api_key_tb = gr.Textbox(label="AI Provider API Key (Override)", type="password", placeholder="Uses .env if blank")
1291
+ available_providers = get_available_providers(); default_provider = available_providers[0] if "groq" not in available_providers else "groq"
1292
+ prov_sel_dd = gr.Dropdown(label="AI Provider", choices=available_providers, value=default_provider, interactive=True)
1293
  default_model_display = get_default_model_display_name_for_provider(default_provider) if default_provider else None
1294
+ model_sel_dd = gr.Dropdown(label="AI Model", choices=get_model_display_names_for_provider(default_provider) if default_provider else [], value=default_model_display, interactive=True)
1295
  with gr.Group():
1296
+ gr.Markdown("### System Prompt"); sys_prompt_tb = gr.Textbox(label="System Prompt Base", lines=8, value=DEFAULT_SYSTEM_PROMPT, interactive=True)
1297
+ if MEMORY_STORAGE_BACKEND == "RAM": save_faiss_sidebar_btn = gr.Button("Save FAISS Indices", variant="secondary")
1298
 
1299
  with gr.Column(scale=3):
1300
  with gr.Tabs():
1301
  with gr.TabItem("💬 Chat & Research"):
1302
  with gr.Group():
1303
  gr.Markdown("### AI Chat Interface")
1304
+ main_chat_disp = gr.Chatbot(label=None, height=400, bubble_full_width=False,avatar_images=(None, "https://huggingface.co/spaces/Space-Share/bucket/resolve/main/images/pfp.webp"), show_copy_button=True, render_markdown=True, sanitize_html=True)
1305
  with gr.Row(variant="compact"):
1306
+ user_msg_tb = gr.Textbox(show_label=False, placeholder="Ask your research question...", scale=7, lines=1, max_lines=3)
 
 
 
1307
  send_btn = gr.Button("Send", variant="primary", scale=1, min_width=100)
1308
  with gr.Accordion("📝 Detailed Response & Insights", open=False):
1309
+ fmt_report_tb = gr.Textbox(label="Full AI Response", lines=8, interactive=True, show_copy_button=True)
1310
+ dl_report_btn = gr.DownloadButton("Download Report", value=None, interactive=False, visible=False)
1311
  detect_out_md = gr.Markdown(visible=False)
1312
 
1313
  with gr.TabItem("🧠 Knowledge Base"):
1314
  with gr.Row(equal_height=True):
1315
  with gr.Column():
1316
+ gr.Markdown("### 📜 Rules Management (Text)"); rules_disp_ta = gr.TextArea(label="Current Rules", lines=10, placeholder="Rules will appear here.", interactive=True)
1317
+ gr.Markdown("To edit rules, modify text and click Save, or upload a file.")
1318
  save_edited_rules_btn = gr.Button("💾 Save Edited Text", variant="primary", interactive=not DEMO_MODE)
1319
  with gr.Row(variant="compact"):
1320
+ dl_rules_btn = gr.DownloadButton("⬇️ Download Rules (.txt)", value=None); clear_rules_btn = gr.Button("🗑️ Clear All Rules", variant="stop", visible=not DEMO_MODE)
1321
+ upload_rules_fobj = gr.File(label="Upload Rules File (.txt/.jsonl)", file_types=[".txt", ".jsonl"], interactive=not DEMO_MODE)
1322
+ rules_stat_tb = gr.Textbox(label="Rules Status", interactive=False, lines=1, elem_classes=["status-text"])
1323
  with gr.Column():
1324
+ gr.Markdown("### 📚 Memories Management (Text)"); mems_disp_json = gr.JSON(label="Current Memories", value=[])
1325
+ gr.Markdown("To add memories, upload a file.")
 
 
 
1326
  with gr.Row(variant="compact"):
1327
+ dl_mems_btn = gr.DownloadButton("⬇️ Download Memories (.jsonl)", value=None); clear_mems_btn = gr.Button("🗑️ Clear All Memories", variant="stop", visible=not DEMO_MODE)
1328
+ upload_mems_fobj = gr.File(label="Upload Memories File (.json/.jsonl)", file_types=[".jsonl", ".json"], interactive=not DEMO_MODE)
1329
+ mems_stat_tb = gr.Textbox(label="Memories Status", interactive=False, lines=1, elem_classes=["status-text"])
1330
+ with gr.Group():
1331
+ gr.Markdown("### 🖼️ Image-based Knowledge Base Import/Export")
1332
+ gr.Markdown("Save or load the entire knowledge base (rules and memories) as a single PNG image. A password can be used for AES-256 encryption.")
1333
+ with gr.Row():
1334
+ with gr.Column(scale=2):
1335
+ kb_img_password_tb = gr.Textbox(label="Password (optional for encryption)", type="password", placeholder="Leave blank for no encryption")
1336
+ kb_img_status_tb = gr.Textbox(label="Image Operation Status", interactive=False, lines=2)
1337
+ with gr.Column(scale=1):
1338
+ dl_kb_img_btn = gr.Button("⬇️ Download KB as Image", variant="secondary")
1339
+ upload_kb_img_fobj = gr.Image(label="⬆️ Upload or Paste KB Image", type="filepath", sources=["upload", "clipboard"], interactive=not DEMO_MODE)
1340
+ # --- MODIFICATION: This hidden component is the key to the solution ---
1341
+ hidden_downloader = gr.File(visible=False, label="File Downloader")
1342
+
1343
+ # --- Event Wiring ---
1344
  def dyn_upd_model_dd(sel_prov_dyn: str):
1345
+ models_dyn = get_model_display_names_for_provider(sel_prov_dyn); def_model_dyn = get_default_model_display_name_for_provider(sel_prov_dyn)
 
1346
  return gr.Dropdown(choices=models_dyn, value=def_model_dyn, interactive=True)
 
1347
  prov_sel_dd.change(fn=dyn_upd_model_dd, inputs=prov_sel_dd, outputs=model_sel_dd)
1348
 
 
1349
  chat_ins = [user_msg_tb, main_chat_disp, prov_sel_dd, model_sel_dd, api_key_tb, sys_prompt_tb]
 
1350
  chat_outs = [user_msg_tb, main_chat_disp, agent_stat_tb, detect_out_md, fmt_report_tb, dl_report_btn, rules_disp_ta, mems_disp_json]
 
1351
  chat_event_args = {"fn": handle_gradio_chat_submit, "inputs": chat_ins, "outputs": chat_outs}
1352
+ send_btn.click(**chat_event_args); user_msg_tb.submit(**chat_event_args)
1353
 
1354
  dl_rules_btn.click(fn=ui_download_rules_action_fn, inputs=None, outputs=dl_rules_btn, show_progress=False)
1355
+ save_edited_rules_btn.click(fn=save_edited_rules_action_fn, inputs=[rules_disp_ta], outputs=[rules_stat_tb], show_progress="full").then(fn=ui_refresh_rules_display_fn, outputs=rules_disp_ta, show_progress=False)
1356
+ upload_rules_fobj.upload(fn=ui_upload_rules_action_fn, inputs=[upload_rules_fobj], outputs=[rules_stat_tb], show_progress="full").then(fn=ui_refresh_rules_display_fn, outputs=rules_disp_ta, show_progress=False)
1357
+ clear_rules_btn.click(fn=lambda: ("All rules cleared." if clear_all_rules_data_backend() else "Error clearing rules."), outputs=rules_stat_tb, show_progress=False).then(fn=ui_refresh_rules_display_fn, outputs=rules_disp_ta, show_progress=False)
1358
 
1359
  dl_mems_btn.click(fn=ui_download_memories_action_fn, inputs=None, outputs=dl_mems_btn, show_progress=False)
1360
+ upload_mems_fobj.upload(fn=ui_upload_memories_action_fn, inputs=[upload_mems_fobj], outputs=[mems_stat_tb], show_progress="full").then(fn=ui_refresh_memories_display_fn, outputs=mems_disp_json, show_progress=False)
1361
+ clear_mems_btn.click(fn=lambda: ("All memories cleared." if clear_all_memory_data_backend() else "Error clearing memories."), outputs=mems_stat_tb, show_progress=False).then(fn=ui_refresh_memories_display_fn, outputs=mems_disp_json, show_progress=False)
1362
+
1363
+ # --- MODIFICATION: Corrected Image KB event wiring ---
1364
+ # 1. The button click now runs the Python function and outputs the file path to the hidden_downloader.
1365
+ dl_kb_img_btn.click(
1366
+ fn=ui_download_kb_as_image_fn,
1367
+ inputs=[kb_img_password_tb],
1368
+ outputs=[hidden_downloader],
1369
  show_progress="full"
1370
+ )
1371
+ # 2. When the hidden_downloader's value changes, it triggers the JavaScript function for download.
1372
+ hidden_downloader.change(
1373
+ fn=None, # No python function needed here
1374
+ inputs=[hidden_downloader],
1375
+ outputs=None,
1376
+ js=js_download_func
1377
+ )
1378
+
1379
+ upload_kb_img_fobj.upload(
1380
+ fn=ui_upload_kb_from_image_fn,
1381
+ inputs=[upload_kb_img_fobj, kb_img_password_tb],
1382
+ outputs=[kb_img_status_tb],
1383
+ show_progress="full"
1384
+ ).then(
1385
+ fn=ui_refresh_rules_display_fn, outputs=rules_disp_ta
1386
+ ).then(
1387
+ fn=ui_refresh_memories_display_fn, outputs=mems_disp_json
1388
+ )
1389
 
 
1390
  if MEMORY_STORAGE_BACKEND == "RAM" and 'save_faiss_sidebar_btn' in locals():
1391
  def save_faiss_action_with_feedback_sidebar_fn():
1392
+ try: save_faiss_indices_to_disk(); gr.Info("Attempted to save FAISS indices to disk.")
1393
+ except Exception as e: logger.error(f"Error saving FAISS indices: {e}", exc_info=True); gr.Error(f"Error saving FAISS indices: {e}")
1394
  save_faiss_sidebar_btn.click(fn=save_faiss_action_with_feedback_sidebar_fn, inputs=None, outputs=None, show_progress=False)
1395
 
1396
+ app_load_outputs = [agent_stat_tb, rules_disp_ta, mems_disp_json, detect_out_md, fmt_report_tb, dl_report_btn]
1397
  demo.load(fn=app_load_fn, inputs=None, outputs=app_load_outputs, show_progress="full")
1398
 
1399
 
1400
  if __name__ == "__main__":
1401
+ logger.info(f"Starting Gradio AI Research Mega Agent (v9.1 - Correct 1-Click JS Download, Memory: {MEMORY_STORAGE_BACKEND})...")
1402
  app_port = int(os.getenv("GRADIO_PORT", 7860))
1403
  app_server = os.getenv("GRADIO_SERVER_NAME", "127.0.0.1")
1404
  app_debug = os.getenv("GRADIO_DEBUG", "False").lower() == "true"
1405
  app_share = os.getenv("GRADIO_SHARE", "False").lower() == "true"
1406
  logger.info(f"Launching Gradio server: http://{app_server}:{app_port}. Debug: {app_debug}, Share: {app_share}")
1407
+ demo.queue().launch(server_name=app_server, server_port=app_port, debug=app_debug, share=app_share)
1408
  logger.info("Gradio application shut down.")