Update app.py
app.py CHANGED
@@ -200,6 +200,7 @@ Each operation object in the JSON list must have these keys and string values:
 - Any literal double quote (`"`) character *within* the string content MUST be escaped as `\\"`.
 - Any literal backslash (`\\`) character *within* the string content MUST be escaped as `\\\\`.
 - Any newline characters *within* the string content MUST be escaped as `\\n`. Avoid literal newlines in JSON string values; use `\\n` instead.
+THIS IS ESPECIALLY IMPORTANT FOR THE 'insight' FIELD, as the insight text itself can be multi-line. Each internal newline in the original text MUST be represented as `\\n` in the JSON string value.
 *Example of correctly escaped insight string in JSON:*
 `"insight": "[RESPONSE_PRINCIPLE|0.8] User prefers concise answers, stating: \\"Just the facts!\\". Avoid verbose explanations unless asked.\\nFollow up with a question if appropriate."`
 **Your Reflection Process (Consider each step and generate operations accordingly):**
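For reference, the escaping rules in this hunk are what `json.dumps` already produces, so it can be used to sanity-check them; a minimal sketch with an illustrative insight string:

```python
import json

# Illustrative only: json.dumps applies the escaping the prompt demands
# (\" for quotes, \\ for backslashes, \n for newlines inside strings).
insight = '[RESPONSE_PRINCIPLE|0.8] User prefers concise answers, stating: "Just the facts!".\nFollow up with a question if appropriate.'
print(json.dumps({"insight": insight}))
# -> {"insight": "[RESPONSE_PRINCIPLE|0.8] User prefers concise answers, stating: \"Just the facts!\".\nFollow up with a question if appropriate."}
```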
@@ -238,6 +239,7 @@ Task: Based on your three-step reflection process (Core Identity, New Learnings,
 2. **Add New Learnings:** Identify and "add" any distinct new facts, skills, or important user preferences learned from the "Interaction Summary".
 3. **Update Existing Principles:** "Update" any non-core principles from "Potentially Relevant Existing Rules" if the "Interaction Summary" provided a clear refinement.
 Combine all findings into a single JSON list of operations. If there are multiple distinct changes based on the interaction and existing rules, ensure your list reflects all of them. Output JSON only, adhering to all specified formatting rules.
+**ULTRA-IMPORTANT FINAL REMINDER: YOUR ENTIRE RESPONSE MUST BE A SINGLE, VALID JSON LIST. DOUBLE-CHECK ALL STRING VALUES FOR CORRECTLY ESCAPED NEWLINES (\\n) AND QUOTES (\\\") BEFORE OUTPUTTING. INVALID JSON WILL BE REJECTED.**
 """
 insight_msgs = [{"role":"system", "content":insight_sys_prompt}, {"role":"user", "content":insight_user_prompt}]
 insight_prov, insight_model_disp = provider, model_disp_name
@@ -250,44 +252,80 @@ Combine all findings into a single JSON list of operations. If there are multipl
         raw_ops_json = "".join(list(call_model_stream(provider=insight_prov, model_display_name=insight_model_disp, messages=insight_msgs, api_key_override=api_key_override, temperature=0.05, max_tokens=2000))).strip()
         ops, processed_count = [], 0
         json_match_ops = re.search(r"```json\s*(\[.*?\])\s*```", raw_ops_json, re.DOTALL|re.I) or re.search(r"(\[.*?\])", raw_ops_json, re.DOTALL)
+
         if json_match_ops:
+            try:
+                ops_json_str = json_match_ops.group(1)
+                ops = json.loads(ops_json_str)
+            except Exception as e:
+                logger.error(f"DEFERRED [{task_id}]: JSON ops parse error: {e}. Raw content parsed: {ops_json_str[:500]}")
+        else:
+            logger.info(f"DEFERRED [{task_id}]: No JSON list structure found in LLM output. Raw output (first 500 chars): {raw_ops_json[:500]}")
+
         if isinstance(ops, list) and ops:
             logger.info(f"DEFERRED [{task_id}]: LLM provided {len(ops)} insight ops.")
+            for op_idx, op in enumerate(ops):
+                if not isinstance(op, dict):
+                    logger.warning(f"DEFERRED [{task_id}]: Op {op_idx}: Skipped non-dict item in ops list: {op}")
+                    continue
+
+                action = op.get("action","").lower()
+                insight_text = op.get("insight","") # Keep as is initially for logging
+                old_insight = op.get("old_insight_to_replace","") # Keep as is initially
+
+                if not isinstance(insight_text, str):
+                    logger.warning(f"DEFERRED [{task_id}]: Op {op_idx}: Skipped op due to non-string insight_text: {op}")
+                    continue
+                insight_text = insight_text.strip() # Now strip
+
+                if not isinstance(old_insight, str) and old_insight is not None: # Allow None for old_insight
+                    logger.warning(f"DEFERRED [{task_id}]: Op {op_idx}: Skipped op due to non-string old_insight_to_replace: {op}")
+                    continue
+                if old_insight is not None:
+                    old_insight = old_insight.strip() # Strip if not None
+
+                if not insight_text or not re.match(r"\[(CORE_RULE|RESPONSE_PRINCIPLE|BEHAVIORAL_ADJUSTMENT|GENERAL_LEARNING)\|([\d\.]+?)\]", insight_text, re.I|re.DOTALL):
+                    logger.warning(f"DEFERRED [{task_id}]: Op {op_idx}: Skipped op due to invalid/empty insight_text format: '{insight_text[:100]}...' from op: {op}")
+                    continue
+
                 if action == "add":
+                    success, status_msg = add_rule_entry(insight_text)
                     if success: processed_count +=1
+                    else: logger.warning(f"DEFERRED [{task_id}]: Op {op_idx} (add): Failed to add rule '{insight_text[:50]}...'. Status: {status_msg}")
                 elif action == "update":
+                    if old_insight and old_insight != insight_text:
+                        remove_success = remove_rule_entry(old_insight)
+                        if not remove_success:
+                            logger.warning(f"DEFERRED [{task_id}]: Op {op_idx} (update): Failed to remove old rule '{old_insight[:50]}...' before adding new.")
+                    success, status_msg = add_rule_entry(insight_text)
                     if success: processed_count +=1
+                    else: logger.warning(f"DEFERRED [{task_id}]: Op {op_idx} (update): Failed to add updated rule '{insight_text[:50]}...'. Status: {status_msg}")
+                else:
+                    logger.warning(f"DEFERRED [{task_id}]: Op {op_idx}: Skipped op due to unknown action '{action}': {op}")
+
+            logger.info(f"DEFERRED [{task_id}]: Processed {processed_count} insight ops out of {len(ops)} received.")
+        elif not json_match_ops: # Already logged if no JSON structure found
+            pass
+        else: # json_match_ops was true, but ops is empty or not a list (e.g. a parsing error left it empty)
+            logger.info(f"DEFERRED [{task_id}]: No valid list of insight ops from LLM after parsing. Raw match (first 500 chars): {json_match_ops.group(1)[:500] if json_match_ops else 'N/A'}")
+
+    except Exception as e: logger.error(f"DEFERRED [{task_id}]: CRITICAL ERROR in deferred task: {e}", exc_info=True)
     logger.info(f"DEFERRED [{task_id}]: END. Total: {time.time() - start_time:.2f}s")

 def handle_gradio_chat_submit(user_msg_txt: str, gr_hist_list: list, sel_prov_name: str, sel_model_disp_name: str, ui_api_key: str|None, cust_sys_prompt: str):
     global current_chat_session_history
     cleared_input, updated_gr_hist, status_txt = "", list(gr_hist_list), "Initializing..."
+    def_detect_out_md = gr.Markdown(visible=False)
+    def_fmt_out_txt = gr.Textbox(value="*Waiting...*", interactive=True)
     def_dl_btn = gr.DownloadButton(interactive=False, value=None, visible=False)

     if not user_msg_txt.strip():
         status_txt = "Error: Empty message."
         updated_gr_hist.append((user_msg_txt or "(Empty)", status_txt))
         yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn)
         return

     updated_gr_hist.append((user_msg_txt, "<i>Thinking...</i>"))
     yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn)

     internal_hist = list(current_chat_session_history); internal_hist.append({"role": "user", "content": user_msg_txt})
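For reference, a minimal ops payload that would pass the extraction and per-op validation in the hunk above; the keys ("action", "insight", "old_insight_to_replace") and the `[TYPE|score]` prefix mirror the checks in the code, while the rule texts themselves are invented for illustration:

```python
import json, re

example_ops = [
    {"action": "add",
     "insight": "[GENERAL_LEARNING|0.7] User works primarily in Python."},
    {"action": "update",
     "old_insight_to_replace": "[RESPONSE_PRINCIPLE|0.5] Keep answers short.",
     "insight": "[RESPONSE_PRINCIPLE|0.8] Keep answers short unless detail is requested."},
]
# Same prefix check the deferred task applies to each op's insight text.
for op in example_ops:
    assert re.match(r"\[(CORE_RULE|RESPONSE_PRINCIPLE|BEHAVIORAL_ADJUSTMENT|GENERAL_LEARNING)\|([\d\.]+?)\]", op["insight"])
print(json.dumps(example_ops))  # the single JSON list the prompt demands
```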
@@ -317,7 +355,7 @@ def handle_gradio_chat_submit(user_msg_txt: str, gr_hist_list: list, sel_prov_na
                 if updated_gr_hist and updated_gr_hist[-1][0] == user_msg_txt:
                     updated_gr_hist[-1] = (user_msg_txt, curr_bot_disp_msg or "(No text)")

+                def_fmt_out_txt = gr.Textbox(value=curr_bot_disp_msg, interactive=True, show_copy_button=True)

                 if curr_bot_disp_msg and not curr_bot_disp_msg.startswith("Error:"):
                     with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".md", encoding='utf-8') as tmpfile:
@@ -328,7 +366,7 @@ def handle_gradio_chat_submit(user_msg_txt: str, gr_hist_list: list, sel_prov_na
                 def_dl_btn = gr.DownloadButton(interactive=False, value=None, visible=False)

                 insights_md_content = "### Insights Considered:\n" + ("\n".join([f"- **[{i.get('type','N/A')}|{i.get('score','N/A')}]** {i.get('text','N/A')[:100]}..." for i in insights_used_parsed[:3]]) if insights_used_parsed else "*None specific.*")
+                def_detect_out_md = gr.Markdown(value=insights_md_content, visible=True if insights_used_parsed else False)

                 yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn)
                 if upd_type == "final_response_and_insights": break
@@ -339,9 +377,9 @@ def handle_gradio_chat_submit(user_msg_txt: str, gr_hist_list: list, sel_prov_na
             updated_gr_hist[-1] = (user_msg_txt, error_message_for_chat)
         else:
             updated_gr_hist.append((user_msg_txt, error_message_for_chat))
+        def_fmt_out_txt = gr.Textbox(value=error_message_for_chat, interactive=True)
+        def_dl_btn = gr.DownloadButton(interactive=False, value=None, visible=False)
+        def_detect_out_md = gr.Markdown(value="*Error processing request.*", visible=True)
         yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn)
         return
@@ -356,14 +394,13 @@ def handle_gradio_chat_submit(user_msg_txt: str, gr_hist_list: list, sel_prov_na
         status_txt = "Response complete. Background learning initiated."
     else:
         status_txt = "Processing finished; no valid response or error occurred."
+        if final_bot_resp_acc.startswith("Error:"):
             status_txt = final_bot_resp_acc
             if updated_gr_hist and updated_gr_hist[-1][0] == user_msg_txt:
+                updated_gr_hist[-1] = (user_msg_txt, final_bot_resp_acc)
                 def_fmt_out_txt = gr.Textbox(value=final_bot_resp_acc, interactive=True)
                 def_dl_btn = gr.DownloadButton(interactive=False, value=None, visible=False)

     yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn)

     if temp_dl_file_path and os.path.exists(temp_dl_file_path):
@@ -371,19 +408,16 @@ def handle_gradio_chat_submit(user_msg_txt: str, gr_hist_list: list, sel_prov_na
         except Exception as e_unlink: logger.error(f"Error deleting temp download file {temp_dl_file_path}: {e_unlink}")

 # --- UI Functions for Rules and Memories ---
+def ui_refresh_rules_display_fn(): return "\n\n---\n\n".join(get_all_rules_cached()) or "No rules found."

 def ui_download_rules_action_fn():
     rules_content = "\n\n---\n\n".join(get_all_rules_cached())
     if not rules_content.strip():
         gr.Warning("No rules to download.")
         return gr.DownloadButton(value=None, interactive=False, label="No Rules")
     try:
         with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".txt", encoding='utf-8') as tmpfile:
             tmpfile.write(rules_content)
             return tmpfile.name
     except Exception as e:
         logger.error(f"Error creating rules download file: {e}")
@@ -413,30 +447,26 @@ def ui_upload_rules_action_fn(uploaded_file_obj, progress=gr.Progress()):
     msg = f"Rules Upload: Processed {total_to_process}. Added: {added_count}, Skipped (duplicates): {skipped_count}, Errors/Invalid: {error_count}."
     logger.info(msg); return msg

+def ui_refresh_memories_display_fn(): return get_all_memories_cached() or []

 def ui_download_memories_action_fn():
+    memories = get_all_memories_cached()
     if not memories:
         gr.Warning("No memories to download.")
         return gr.DownloadButton(value=None, interactive=False, label="No Memories")

     jsonl_content = ""
     for mem_dict in memories:
+        try: jsonl_content += json.dumps(mem_dict) + "\n"
+        except Exception as e: logger.error(f"Error serializing memory for download: {mem_dict}, Error: {e}")

     if not jsonl_content.strip():
         gr.Warning("No valid memories to serialize for download.")
         return gr.DownloadButton(value=None, interactive=False, label="No Data")
     try:
         with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".jsonl", encoding='utf-8') as tmpfile:
             tmpfile.write(jsonl_content)
+            return tmpfile.name
     except Exception as e:
         logger.error(f"Error creating memories download file: {e}")
         gr.Error(f"Failed to prepare memories for download: {e}")
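For reference, one line of the .jsonl file the download handler above writes, and that the upload handler below requires; the three keys come from the upload validation, while the metrics shape shown is an assumption for illustration:

```python
import json

# "user_input", "bot_response" and "metrics" are the keys the upload handler
# checks for; the metrics contents here are illustrative, not the app's schema.
mem = {"user_input": "What is FAISS?",
       "bot_response": "FAISS is a library for efficient vector similarity search.",
       "metrics": {"takeaway": "explained FAISS", "response_success_score": 0.9}}
line = json.dumps(mem)           # one line of the downloaded .jsonl file
assert json.loads(line) == mem   # round-trips cleanly
```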
@@ -451,25 +481,37 @@ def ui_upload_memories_action_fn(uploaded_file_obj, progress=gr.Progress()):
     added_count, format_error_count, save_error_count = 0,0,0
     memory_objects_to_process = []
     try:
+        # Try parsing the whole file as a single JSON value first
         parsed_json = json.loads(content)
+        if isinstance(parsed_json, list):
+            memory_objects_to_process = parsed_json
+        else: # A single object: wrap it in a list
+            memory_objects_to_process = [parsed_json]
+    except json.JSONDecodeError: # Not a single JSON value, so try JSONL
         for line in content.splitlines():
             if line.strip():
                 try: memory_objects_to_process.append(json.loads(line))
+                except json.JSONDecodeError: format_error_count += 1
+
+    if not memory_objects_to_process and format_error_count > 0 and not content.strip().startswith("["):
+        # Format errors on every line and no JSON-array prefix: probably JSONL with no parsable lines
+        return "Memories Upload: File does not seem to be a valid JSON array or JSONL. Format errors on all lines attempted for JSONL."
+    elif not memory_objects_to_process:
+        return "No valid memory objects found in the uploaded file."

     total_to_process = len(memory_objects_to_process)
     if total_to_process == 0: return "No memory objects to process (after parsing)."

     progress(0, desc="Starting memories upload...")
     for idx, mem_data in enumerate(memory_objects_to_process):
         if isinstance(mem_data, dict) and all(k in mem_data for k in ["user_input", "bot_response", "metrics"]):
             success, _ = add_memory_entry(mem_data["user_input"], mem_data["metrics"], mem_data["bot_response"])
             if success: added_count += 1
             else: save_error_count += 1
+        else: format_error_count += 1 # Structure errors count as format errors too
         progress((idx + 1) / total_to_process, desc=f"Processed {idx+1}/{total_to_process} memories...")
+
+    msg = f"Memories Upload: Processed {total_to_process} objects. Added: {added_count}, Format Errors (incl. structure issues): {format_error_count}, Save Errors: {save_error_count}."
     logger.info(msg); return msg
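A standalone sketch of the dual-format parse above: try the whole file as one JSON value first, then fall back to line-by-line JSONL. The helper name is hypothetical; app.py does this inline in the upload handler:

```python
import json

def parse_memory_file(content: str) -> list:
    try:
        parsed = json.loads(content)
        return parsed if isinstance(parsed, list) else [parsed]
    except json.JSONDecodeError:
        objects = []
        for line in content.splitlines():
            if line.strip():
                try:
                    objects.append(json.loads(line))
                except json.JSONDecodeError:
                    pass  # counted as a format error in the real handler
        return objects

assert parse_memory_file('[{"a": 1}]') == [{"a": 1}]           # JSON array
assert parse_memory_file('{"a": 1}\n{"b": 2}') == [{"a": 1}, {"b": 2}]  # JSONL
```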
@@ -556,30 +598,31 @@ with gr.Blocks(
                     scale=7, lines=1, max_lines=3
                 )
                 send_btn = gr.Button("Send", variant="primary", scale=1, min_width=100)
+            with gr.Accordion("📝 Detailed Response & Insights", open=False):
                 fmt_report_tb = gr.Textbox(
                     label="Full AI Response", lines=8, interactive=True, show_copy_button=True
                 )
                 dl_report_btn = gr.DownloadButton(
+                    "Download Report", value=None, interactive=False, visible=False
                 )
+                detect_out_md = gr.Markdown(visible=False)

         with gr.TabItem("🧠 Knowledge Base"):
             with gr.Row(equal_height=True):
                 with gr.Column():
                     gr.Markdown("### 📜 Rules Management")
                     rules_disp_ta = gr.TextArea(
+                        label="Current Rules", lines=10,
+                        placeholder="Rules will appear here. Use 'Save Edited Text' or 'Upload File' to modify.",
+                        interactive=True # Editable so text can be copied or changed; saving happens via the button below
                     )
+                    gr.Markdown("To edit rules, modify the text above and click 'Save Edited Text', or upload a new file.")
+                    save_edited_rules_btn = gr.Button("💾 Save Edited Text", variant="primary")
                     with gr.Row(variant="compact"):
+                        dl_rules_btn = gr.DownloadButton("⬇️ Download Rules", value=None)
+                        clear_rules_btn = gr.Button("🗑️ Clear All Rules", variant="stop")
                     upload_rules_fobj = gr.File(
+                        label="Upload Rules File (.txt with '---' separators, or .jsonl of rule strings)",
                         file_types=[".txt", ".jsonl"]
                     )
                     rules_stat_tb = gr.Textbox(
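For reference, the rules file format the upload and download handlers above use: entries separated by `"\n\n---\n\n"`, each starting with the `[TYPE|score]` prefix validated elsewhere in app.py (the rule texts here are illustrative):

```python
# Illustrative .txt rules file content; the separator matches the one used by
# ui_download_rules_action_fn and save_edited_rules_action_fn.
example_rules_txt = (
    "[CORE_RULE|1.0] Identify as an AI research assistant."
    "\n\n---\n\n"
    "[RESPONSE_PRINCIPLE|0.8] Prefer concise answers unless detail is requested."
)
print(example_rules_txt.split("\n\n---\n\n"))  # -> two rule strings
```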
@@ -589,16 +632,15 @@ with gr.Blocks(
                 with gr.Column():
                     gr.Markdown("### 📚 Memories Management")
                     mems_disp_json = gr.JSON(
+                        label="Current Memories (Read-only)", value=[]
                     )
+                    gr.Markdown("To add memories, upload a .jsonl or .json file.")
                     with gr.Row(variant="compact"):
+                        dl_mems_btn = gr.DownloadButton("⬇️ Download Memories", value=None)
+                        clear_mems_btn = gr.Button("🗑️ Clear All Memories", variant="stop")
                     upload_mems_fobj = gr.File(
+                        label="Upload Memories File (.jsonl of memory objects, or .json array of objects)",
+                        file_types=[".jsonl", ".json"]
                     )
                     mems_stat_tb = gr.Textbox(
                         label="Memories Status", interactive=False, lines=1, elem_classes=["status-text"]
@@ -614,84 +656,106 @@ with gr.Blocks(
     chat_ins = [user_msg_tb, main_chat_disp, prov_sel_dd, model_sel_dd, api_key_tb, sys_prompt_tb]
     chat_outs = [user_msg_tb, main_chat_disp, agent_stat_tb, detect_out_md, fmt_report_tb, dl_report_btn]

     chat_event_args = {"fn": handle_gradio_chat_submit, "inputs": chat_ins, "outputs": chat_outs}

     send_btn_click_event = send_btn.click(**chat_event_args)
     user_msg_submit_event = user_msg_tb.submit(**chat_event_args)

     for event in [send_btn_click_event, user_msg_submit_event]:
+        event.then(fn=ui_refresh_rules_display_fn, inputs=None, outputs=rules_disp_ta)
+        event.then(fn=ui_refresh_memories_display_fn, inputs=None, outputs=mems_disp_json)

     # Rules Management events
     dl_rules_btn.click(fn=ui_download_rules_action_fn, inputs=None, outputs=dl_rules_btn)

     def save_edited_rules_action_fn(edited_rules_text: str, progress=gr.Progress()):
         if not edited_rules_text.strip():
             return "No rules text to save."
+
+        # add_rule_entry skips exact duplicates, so repeated saves are safe.
+        # Clearing all rules and re-adding would make the editor the source of
+        # truth, but is destructive if the user only wanted to change a few.
+
         potential_rules = edited_rules_text.split("\n\n---\n\n")
+        if len(potential_rules) == 1 and "\n" in edited_rules_text: # Fallback for simple newline separation
             potential_rules = [r.strip() for r in edited_rules_text.splitlines() if r.strip()]

         if not potential_rules:
             return "No rules found to process from editor."

         added, skipped, errors = 0, 0, 0
         total = len(potential_rules)
         progress(0, desc=f"Saving {total} rules from editor...")
+
+        # Process unique rules only, to prevent duplicates from the text area itself.
+        unique_rules_to_process = sorted(list(set(filter(None, [r.strip() for r in potential_rules]))))
+
+        # Note: "Save Edited Text" adds rules that are new; it does not remove
+        # rules that are no longer present in the text area. A strict
+        # source-of-truth save would also remove (rules in system) - (rules in
+        # text area); to remove rules, use "Clear All Rules" or upload a
+        # definitive list.
+
+        for idx, rule_text in enumerate(unique_rules_to_process):
+            success, status_msg = add_rule_entry(rule_text) # add_rule_entry handles duplicates
+            if success: added += 1
+            elif status_msg == "duplicate": skipped += 1
+            else: errors += 1
+            progress((idx + 1) / len(unique_rules_to_process), desc=f"Processed {idx+1}/{len(unique_rules_to_process)} rules...")
+
+        return f"Editor Save: Added: {added}, Skipped (duplicates): {skipped}, Errors/Invalid: {errors} from {len(unique_rules_to_process)} unique rules in text."

     save_edited_rules_btn.click(
         fn=save_edited_rules_action_fn,
         inputs=[rules_disp_ta],
         outputs=[rules_stat_tb],
         show_progress="full"
+    ).then(fn=ui_refresh_rules_display_fn, outputs=rules_disp_ta)

     upload_rules_fobj.upload(
         fn=ui_upload_rules_action_fn,
         inputs=[upload_rules_fobj],
         outputs=[rules_stat_tb],
         show_progress="full"
+    ).then(fn=ui_refresh_rules_display_fn, outputs=rules_disp_ta)

     clear_rules_btn.click(
         fn=lambda: ("All rules cleared." if clear_all_rules_data_backend() else "Error clearing rules."),
         outputs=rules_stat_tb
+    ).then(fn=ui_refresh_rules_display_fn, outputs=rules_disp_ta)

     # Memories Management events
     dl_mems_btn.click(fn=ui_download_memories_action_fn, inputs=None, outputs=dl_mems_btn)

     upload_mems_fobj.upload(
         fn=ui_upload_memories_action_fn,
         inputs=[upload_mems_fobj],
         outputs=[mems_stat_tb],
         show_progress="full"
+    ).then(fn=ui_refresh_memories_display_fn, outputs=mems_disp_json)

     clear_mems_btn.click(
         fn=lambda: ("All memories cleared." if clear_all_memory_data_backend() else "Error clearing memories."),
         outputs=mems_stat_tb
+    ).then(fn=ui_refresh_memories_display_fn, outputs=mems_disp_json)

     if MEMORY_STORAGE_BACKEND == "RAM" and 'save_faiss_sidebar_btn' in locals():
         def save_faiss_action_with_feedback_sidebar_fn():
             save_faiss_indices_to_disk()
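The comments in `save_edited_rules_action_fn` above describe, but do not implement, a strict "text area as source of truth" save. A minimal sketch of that set-difference sync, assuming app.py's `get_all_rules_cached`, `add_rule_entry` and `remove_rule_entry` helpers (the function itself is hypothetical, not part of the commit):

```python
def sync_rules_from_editor(edited_rules_text: str) -> str:
    # Hypothetical helper: make stored rules exactly mirror the editor text.
    desired = {r.strip() for r in edited_rules_text.split("\n\n---\n\n") if r.strip()}
    current = set(get_all_rules_cached())  # rules currently in the system
    removed = current - desired            # no longer present in the editor
    added = desired - current              # newly written in the editor
    for rule in removed:
        remove_rule_entry(rule)
    for rule in added:
        add_rule_entry(rule)
    return f"Editor Sync: Added: {len(added)}, Removed: {len(removed)}."
```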
@@ -702,14 +766,33 @@ with gr.Blocks(
         initialize_memory_system()
         logger.info("App loaded. Memory system initialized.")
         backend_status = "AI Systems Initialized. Ready."
+        rules_on_load = ui_refresh_rules_display_fn()
+        mems_on_load = ui_refresh_memories_display_fn()
+        # Initial population of the display areas, plus reset values for the chat widgets.
+        return backend_status, rules_on_load, mems_on_load, gr.Markdown(visible=False), gr.Textbox(value="*Waiting...*", interactive=True), gr.DownloadButton(interactive=False, value=None, visible=False)
+
+    # app_load_fn returns one value per component below, in the same order:
+    # agent_stat_tb, rules_disp_ta and mems_disp_json, then detect_out_md,
+    # fmt_report_tb and dl_report_btn from chat_outs.
+    initial_load_outputs = [
+        agent_stat_tb,
+        rules_disp_ta,
+        mems_disp_json,
+        detect_out_md, # from chat_outs
+        fmt_report_tb, # from chat_outs
+        dl_report_btn # from chat_outs
+    ]
+    demo.load(fn=app_load_fn, inputs=None, outputs=initial_load_outputs)

 if __name__ == "__main__":
+    logger.info(f"Starting Gradio AI Research Mega Agent (v6.0 - JSON Fix & UI Upload Simplification, Memory: {MEMORY_STORAGE_BACKEND})...")
     app_port = int(os.getenv("GRADIO_PORT", 7860))
     app_server = os.getenv("GRADIO_SERVER_NAME", "127.0.0.1")
     app_debug = os.getenv("GRADIO_DEBUG", "False").lower() == "true"