Update app.py
app.py (CHANGED)
@@ -3,7 +3,7 @@ import os
 import json
 import re
 import logging
-import threading
+# import threading # No longer needed for the learning task
 from datetime import datetime
 from dotenv import load_dotenv
 import gradio as gr
@@ -178,13 +178,15 @@ def process_user_interaction_gradio(user_input: str, provider_name: str, model_d
     logger.info(f"PUI_GRADIO [{request_id}]: Finished. Total: {time.time() - process_start_time:.2f}s. Resp len: {len(final_bot_text)}")
     yield "final_response_and_insights", {"response": final_bot_text, "insights_used": parsed_initial_insights_list}

-
-
-
+# Renamed from deferred_learning_and_memory_task
+def perform_post_interaction_learning(user_input: str, bot_response: str, provider: str, model_disp_name: str, insights_reflected: list[dict], api_key_override: str = None):
+    task_id = os.urandom(4).hex() # Keep task_id for logging clarity
+    logger.info(f"POST_INTERACTION_LEARNING [{task_id}]: START User='{user_input[:40]}...', Bot='{bot_response[:40]}...'")
+    learning_start_time = time.time()
     try:
         metrics = generate_interaction_metrics(user_input, bot_response, provider, model_disp_name, api_key_override)
-        logger.info(f"
-        add_memory_entry(user_input, metrics, bot_response)
+        logger.info(f"POST_INTERACTION_LEARNING [{task_id}]: Metrics: {metrics}")
+        add_memory_entry(user_input, metrics, bot_response)

         summary = f"User:\"{user_input}\"\nAI:\"{bot_response}\"\nMetrics(takeaway):{metrics.get('takeaway','N/A')},Success:{metrics.get('response_success_score','N/A')}"
         existing_rules_ctx = "\n".join([f"- \"{r}\"" for r in retrieve_rules_semantic(f"{summary}\n{user_input}", k=10)]) or "No existing rules context."
@@ -239,7 +241,7 @@ Combine all findings into a single, valid XML structure as specified in the syst
         i_p, i_id = insight_env_model.split('/', 1)
         i_d_n = next((dn for dn, mid in MODELS_BY_PROVIDER.get(i_p.lower(), {}).get("models", {}).items() if mid == i_id), None)
         if i_d_n: insight_prov, insight_model_disp = i_p, i_d_n
-        logger.info(f"
+        logger.info(f"POST_INTERACTION_LEARNING [{task_id}]: Generating insights with {insight_prov}/{insight_model_disp} (expecting XML)")

         raw_ops_xml_full = "".join(list(call_model_stream(provider=insight_prov, model_display_name=insight_model_disp, messages=insight_msgs, api_key_override=api_key_override, temperature=0.0, max_tokens=3500))).strip()
@@ -269,57 +271,57 @@ Combine all findings into a single, valid XML structure as specified in the syst
                                 "old_insight_to_replace": old_insight_text
                             })
                         else:
-                            logger.warning(f"
+                            logger.warning(f"POST_INTERACTION_LEARNING [{task_id}]: Skipped XML operation due to missing action or insight text. Action: {action}, Insight: {insight_text}")
                 else:
-                    logger.error(f"
+                    logger.error(f"POST_INTERACTION_LEARNING [{task_id}]: XML root tag is not <operations_list>. Found: {root.tag}. XML content:\n{xml_content_str}")
             except ET.ParseError as e:
-                logger.error(f"
+                logger.error(f"POST_INTERACTION_LEARNING [{task_id}]: XML parsing error: {e}. XML content that failed:\n{xml_content_str}")
             except Exception as e_xml_proc:
-                logger.error(f"
+                logger.error(f"POST_INTERACTION_LEARNING [{task_id}]: Error processing parsed XML: {e_xml_proc}. XML content:\n{xml_content_str}")
         else:
-            logger.info(f"
+            logger.info(f"POST_INTERACTION_LEARNING [{task_id}]: No <operations_list> XML structure found in LLM output. Full raw output:\n{raw_ops_xml_full}")

         if ops_data_list:
-            logger.info(f"
+            logger.info(f"POST_INTERACTION_LEARNING [{task_id}]: LLM provided {len(ops_data_list)} insight ops from XML.")
             for op_idx, op_data in enumerate(ops_data_list):
                 action = op_data["action"]
                 insight_text = op_data["insight"]
                 old_insight = op_data["old_insight_to_replace"]

                 if not re.match(r"\[(CORE_RULE|RESPONSE_PRINCIPLE|BEHAVIORAL_ADJUSTMENT|GENERAL_LEARNING)\|([\d\.]+?)\]", insight_text, re.I|re.DOTALL):
-                    logger.warning(f"
+                    logger.warning(f"POST_INTERACTION_LEARNING [{task_id}]: Op {op_idx}: Skipped op due to invalid insight_text format from XML: '{insight_text[:100]}...'")
                     continue

                 if action == "add":
-                    success, status_msg = add_rule_entry(insight_text)
+                    success, status_msg = add_rule_entry(insight_text)
                     if success: processed_count +=1
-                    else: logger.warning(f"
+                    else: logger.warning(f"POST_INTERACTION_LEARNING [{task_id}]: Op {op_idx} (add from XML): Failed to add rule '{insight_text[:50]}...'. Status: {status_msg}")
                 elif action == "update":
                     if old_insight:
                         if old_insight != insight_text:
-                            remove_success = remove_rule_entry(old_insight)
+                            remove_success = remove_rule_entry(old_insight)
                             if not remove_success:
-                                logger.warning(f"
+                                logger.warning(f"POST_INTERACTION_LEARNING [{task_id}]: Op {op_idx} (update from XML): Failed to remove old rule '{old_insight[:50]}...' before adding new.")
                         else:
-                            logger.info(f"
+                            logger.info(f"POST_INTERACTION_LEARNING [{task_id}]: Op {op_idx} (update from XML): Old insight is identical to new insight. Skipping removal.")

-                        success, status_msg = add_rule_entry(insight_text)
+                        success, status_msg = add_rule_entry(insight_text)
                         if success: processed_count +=1
-                        else: logger.warning(f"
+                        else: logger.warning(f"POST_INTERACTION_LEARNING [{task_id}]: Op {op_idx} (update from XML): Failed to add/update rule '{insight_text[:50]}...'. Status: {status_msg}")
                     else:
-                        logger.warning(f"
+                        logger.warning(f"POST_INTERACTION_LEARNING [{task_id}]: Op {op_idx}: Skipped op due to unknown action '{action}' from XML.")

-            logger.info(f"
+            logger.info(f"POST_INTERACTION_LEARNING [{task_id}]: Processed {processed_count} insight ops out of {len(ops_data_list)} received from XML.")
         else:
-            logger.info(f"
+            logger.info(f"POST_INTERACTION_LEARNING [{task_id}]: No valid insight operations derived from LLM's XML output.")

-    except Exception as e: logger.error(f"
-    logger.info(f"
+    except Exception as e: logger.error(f"POST_INTERACTION_LEARNING [{task_id}]: CRITICAL ERROR in learning task: {e}", exc_info=True)
+    logger.info(f"POST_INTERACTION_LEARNING [{task_id}]: END. Total: {time.time() - learning_start_time:.2f}s")
+    # This function now runs synchronously, so its completion means data is updated.

 def handle_gradio_chat_submit(user_msg_txt: str, gr_hist_list: list, sel_prov_name: str, sel_model_disp_name: str, ui_api_key: str|None, cust_sys_prompt: str):
     global current_chat_session_history
     cleared_input, updated_gr_hist, status_txt = "", list(gr_hist_list), "Initializing..."
-    # Ensure these are initialized with their correct Gradio update types
     def_detect_out_md = gr.Markdown(visible=False)
     def_fmt_out_txt = gr.Textbox(value="*Waiting...*", interactive=True)
     def_dl_btn = gr.DownloadButton(interactive=False, value=None, visible=False)
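
A note on the payload this hunk parses: only the <operations_list> root tag, the action / insight / old_insight_to_replace fields, and the [TYPE|score] insight prefix are visible in the code above. The sketch below shows one plausible shape for that XML; the <operation> element layout and the sample values are assumptions, not taken from the app.

    # Hedged sketch: a plausible <operations_list> payload for the parser above.
    # Root tag, field names, and the [TYPE|score] regex are from the hunk; the
    # <operation> element layout and sample values are illustrative guesses.
    import re
    import xml.etree.ElementTree as ET

    sample_ops_xml = """
    <operations_list>
      <operation>
        <action>add</action>
        <insight>[CORE_RULE|0.9] Cite sources when making factual claims.</insight>
      </operation>
      <operation>
        <action>update</action>
        <old_insight_to_replace>[RESPONSE_PRINCIPLE|0.7] Keep answers brief.</old_insight_to_replace>
        <insight>[RESPONSE_PRINCIPLE|0.8] Keep answers brief unless detail is requested.</insight>
      </operation>
    </operations_list>
    """

    root = ET.fromstring(sample_ops_xml)
    assert root.tag == "operations_list"  # same root-tag check as the hunk above
    for op in root.findall("operation"):
        action = op.findtext("action")
        insight_text = op.findtext("insight")
        # Same format gate as the hunk: every insight needs a [TYPE|score] prefix.
        assert re.match(r"\[(CORE_RULE|RESPONSE_PRINCIPLE|BEHAVIORAL_ADJUSTMENT|GENERAL_LEARNING)\|([\d\.]+?)\]", insight_text, re.I | re.DOTALL)
        print(action, insight_text[:60])
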
@@ -355,7 +357,7 @@ def handle_gradio_chat_submit(user_msg_txt: str, gr_hist_list: list, sel_prov_na
                 updated_gr_hist[-1] = (user_msg_txt, curr_bot_disp_msg)
             elif upd_type == "final_response_and_insights":
                 final_bot_resp_acc, insights_used_parsed = upd_data["response"], upd_data["insights_used"]
-                status_txt = "Response
+                status_txt = "Response generated. Processing learning..." # Status update
                 if not curr_bot_disp_msg and final_bot_resp_acc : curr_bot_disp_msg = final_bot_resp_acc
                 if updated_gr_hist and updated_gr_hist[-1][0] == user_msg_txt:
                     updated_gr_hist[-1] = (user_msg_txt, curr_bot_disp_msg or "(No text)")
@@ -370,14 +372,16 @@ def handle_gradio_chat_submit(user_msg_txt: str, gr_hist_list: list, sel_prov_na
             else:
                 def_dl_btn = gr.DownloadButton(interactive=False, value=None, visible=False)

-            insights_md_content = "### Insights Considered:\n" + ("\n".join([f"- **[{i.get('type','N/A')}|{i.get('score','N/A')}]** {i.get('text','N/A')[:100]}..." for i in insights_used_parsed[:3]]) if insights_used_parsed else "*None specific.*")
+            insights_md_content = "### Insights Considered (Pre-Response):\n" + ("\n".join([f"- **[{i.get('type','N/A')}|{i.get('score','N/A')}]** {i.get('text','N/A')[:100]}..." for i in insights_used_parsed[:3]]) if insights_used_parsed else "*None specific.*")
             def_detect_out_md = gr.Markdown(value=insights_md_content, visible=True if insights_used_parsed else False)

+            # Yield intermediate updates for response streaming
             yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn)
-            if upd_type == "final_response_and_insights": break
+            if upd_type == "final_response_and_insights": break # Exit loop after main response processing
+
     except Exception as e:
-        logger.error(f"Chat handler error: {e}", exc_info=True); status_txt = f"Error: {str(e)[:100]}"
-        error_message_for_chat = f"Sorry, an error occurred: {str(e)[:100]}"
+        logger.error(f"Chat handler error during main processing: {e}", exc_info=True); status_txt = f"Error: {str(e)[:100]}"
+        error_message_for_chat = f"Sorry, an error occurred during response generation: {str(e)[:100]}"
         if updated_gr_hist and updated_gr_hist[-1][0] == user_msg_txt:
             updated_gr_hist[-1] = (user_msg_txt, error_message_for_chat)
         else:
@@ -388,6 +392,7 @@ def handle_gradio_chat_submit(user_msg_txt: str, gr_hist_list: list, sel_prov_na
         yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn)
         return

+    # --- Post-Interaction Learning (Synchronous within this handler) ---
     if final_bot_resp_acc and not final_bot_resp_acc.startswith("Error:"):
         current_chat_session_history.extend([{"role": "user", "content": user_msg_txt}, {"role": "assistant", "content": final_bot_resp_acc}])
         hist_len_check = MAX_HISTORY_TURNS * 2
@@ -395,32 +400,37 @@ def handle_gradio_chat_submit(user_msg_txt: str, gr_hist_list: list, sel_prov_na
         if len(current_chat_session_history) > hist_len_check:
             current_chat_session_history = ([current_chat_session_history[0]] if current_chat_session_history[0]["role"] == "system" else []) + current_chat_session_history[-(MAX_HISTORY_TURNS * 2):]

-        #
-
-
-
-
-
-
-
-
-
-
-
-
-
+        # Call learning function synchronously
+        status_txt = "<i>[Performing post-interaction learning...]</i>"
+        yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn) # Update status
+
+        try:
+            perform_post_interaction_learning(
+                user_input=user_msg_txt,
+                bot_response=final_bot_resp_acc,
+                provider=sel_prov_name,
+                model_disp_name=sel_model_disp_name,
+                insights_reflected=insights_used_parsed,
+                api_key_override=ui_api_key.strip() if ui_api_key else None
+            )
+            status_txt = "Response & Learning Complete."
+        except Exception as e_learn:
+            logger.error(f"Error during post-interaction learning: {e_learn}", exc_info=True)
+            status_txt = "Response complete. Error during learning."
+
+    elif final_bot_resp_acc.startswith("Error:"):
+        status_txt = final_bot_resp_acc
+        if updated_gr_hist and updated_gr_hist[-1][0] == user_msg_txt:
+            updated_gr_hist[-1] = (user_msg_txt, final_bot_resp_acc)
+        def_fmt_out_txt = gr.Textbox(value=final_bot_resp_acc, interactive=True)
+        def_dl_btn = gr.DownloadButton(interactive=False, value=None, visible=False)
+    else:
+        status_txt = "Processing finished; no valid response or error occurred during main phase."
+
+
+    # Final yield for this handler. The .then() clauses will execute after this.
     yield (cleared_input, updated_gr_hist, status_txt, def_detect_out_md, def_fmt_out_txt, def_dl_btn)

-    # After the main response is fully yielded, and the deferred task thread is started,
-    # we can add a small delay here before the .then() clauses trigger the UI refresh.
-    # This gives the deferred task's initial, fast in-memory updates (like add_memory_entry)
-    # a better chance to complete before the UI tries to read that data.
-    # The LLM-based rule generation within the deferred task will still take longer.
-    if 'deferred_task_thread' in locals() and deferred_task_thread.is_alive():
-        # Wait a very short period for synchronous parts of deferred task
-        # This is a heuristic. The value might need tuning.
-        time.sleep(0.2) # e.g., 200 milliseconds
-
     if temp_dl_file_path and os.path.exists(temp_dl_file_path):
         try: os.unlink(temp_dl_file_path)
         except Exception as e_unlink: logger.error(f"Error deleting temp download file {temp_dl_file_path}: {e_unlink}")
@@ -718,23 +728,14 @@ with gr.Blocks(
     chat_ins = [user_msg_tb, main_chat_disp, prov_sel_dd, model_sel_dd, api_key_tb, sys_prompt_tb]
     chat_outs = [user_msg_tb, main_chat_disp, agent_stat_tb, detect_out_md, fmt_report_tb, dl_report_btn]

-    # Define a dummy function to introduce a delay if needed for UI refresh.
-    # For now, we will try without an explicit Gradio-level delay here,
-    # relying on the small time.sleep in handle_gradio_chat_submit's final part.
-    # def delayed_refresh_trigger():
-    #     time.sleep(0.2) # Small delay
-    #     return True # Or any value, just to trigger the next .then()
-
-    # chat_event_args is a dictionary containing common arguments for click/submit
     chat_event_args = {"fn": handle_gradio_chat_submit, "inputs": chat_ins, "outputs": chat_outs}

-    # Setup events for chat submission
     send_btn_click_event = send_btn.click(**chat_event_args)
     user_msg_submit_event = user_msg_tb.submit(**chat_event_args)

-    #
-    #
-    #
+    # The .then() calls will execute after handle_gradio_chat_submit fully completes (all yields).
+    # Since perform_post_interaction_learning is now synchronous within handle_gradio_chat_submit,
+    # the data will be updated before these .then() calls run.
     for event in [send_btn_click_event, user_msg_submit_event]:
         event.then(fn=ui_refresh_rules_display_fn, inputs=None, outputs=rules_disp_ta, show_progress=False)
         event.then(fn=ui_refresh_memories_display_fn, inputs=None, outputs=mems_disp_json, show_progress=False)
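
The restored comments above are the crux of this commit: Gradio runs a .then() chain only after the event handler has fully completed, and for a generator handler that means after its last yield. Below is a minimal, self-contained sketch of that ordering guarantee; the component and function names (slow_chat, refresh_panel, etc.) are illustrative, not the app's.

    # Sketch: .then() fires only after the generator handler's final yield,
    # so blocking work done inside the handler is finished before refresh runs.
    import time
    import gradio as gr

    def slow_chat(msg):
        yield "thinking..."      # streamed to the UI immediately
        time.sleep(1.0)          # stand-in for the synchronous learning step
        yield f"echo: {msg}"     # final yield; only now is the handler complete

    def refresh_panel():
        # Runs strictly after slow_chat has finished all of its yields.
        return "panel refreshed after handler completed"

    with gr.Blocks() as demo:
        msg_tb = gr.Textbox(label="message")
        reply_tb = gr.Textbox(label="reply")
        panel_tb = gr.Textbox(label="side panel")
        evt = msg_tb.submit(fn=slow_chat, inputs=msg_tb, outputs=reply_tb)
        evt.then(fn=refresh_panel, inputs=None, outputs=panel_tb, show_progress=False)

    demo.launch()
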
@@ -788,7 +789,7 @@ with gr.Blocks(
     clear_rules_btn.click(
         fn=lambda: ("All rules cleared." if clear_all_rules_data_backend() else "Error clearing rules."),
         outputs=rules_stat_tb,
-        show_progress=False
+        show_progress=False
     ).then(fn=ui_refresh_rules_display_fn, outputs=rules_disp_ta, show_progress=False)

     # Memories Management events
@@ -804,7 +805,7 @@ with gr.Blocks(
     clear_mems_btn.click(
         fn=lambda: ("All memories cleared." if clear_all_memory_data_backend() else "Error clearing memories."),
         outputs=mems_stat_tb,
-        show_progress=False
+        show_progress=False
     ).then(fn=ui_refresh_memories_display_fn, outputs=mems_disp_json, show_progress=False)

     if MEMORY_STORAGE_BACKEND == "RAM" and 'save_faiss_sidebar_btn' in locals():
@@ -840,7 +841,7 @@ with gr.Blocks(


 if __name__ == "__main__":
-    logger.info(f"Starting Gradio AI Research Mega Agent (v6.
+    logger.info(f"Starting Gradio AI Research Mega Agent (v6.4 - Synchronous Learning for UI Refresh, Memory: {MEMORY_STORAGE_BACKEND})...")
     app_port = int(os.getenv("GRADIO_PORT", 7860))
     app_server = os.getenv("GRADIO_SERVER_NAME", "127.0.0.1")
     app_debug = os.getenv("GRADIO_DEBUG", "False").lower() == "true"
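
The __main__ block reads its network settings from the environment. As a usage note, the sketch below shows how those values would typically feed gr.Blocks.launch(); the diff cuts off before the actual launch call, so that wiring is an assumption.

    # Hedged sketch: env-driven launch settings as read above; the launch()
    # call itself is not shown in this diff, so this wiring is an assumption.
    import os

    app_port = int(os.getenv("GRADIO_PORT", 7860))
    app_server = os.getenv("GRADIO_SERVER_NAME", "127.0.0.1")
    app_debug = os.getenv("GRADIO_DEBUG", "False").lower() == "true"
    # demo.launch(server_name=app_server, server_port=app_port, debug=app_debug)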