GuglielmoTor committed on
Commit
f1d603c
·
verified ·
1 Parent(s): 5a483f8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +45 -63
app.py CHANGED
@@ -332,13 +332,10 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky"),
332
  action_col_visible_update, insights_chatbot_visible_update, chatbot_content_update,
333
  insights_chat_input_visible_update, insights_suggestions_row_visible_update,
334
  s1_upd, s2_upd, s3_upd, formula_display_visible_update, formula_content_update,
335
- formula_close_hint_md, # This is the component for the hint's visibility, should be formula_close_hint_visible_update
336
  new_active_action_state_to_set, new_current_chat_plot_id, updated_chat_histories,
337
  new_explored_plot_id_to_set
338
  ]
339
- # Correcting the hint update:
340
- final_updates[10] = formula_close_hint_visible_update
341
-
342
 
343
  final_updates.extend(generated_panel_vis_updates)
344
  final_updates.extend(generated_bomb_btn_updates)
@@ -654,113 +651,98 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky"),
654
  eb_agent_instance_dict = {"agent": None} # To store agent instance across calls if needed, or re-init
655
 
656
async def handle_eb_agent_chat(user_message: str, chat_history_list: list, current_token_state: dict):
    """Handle one chat turn with the Employer Branding AI agent.

    Async generator wired to five Gradio outputs:
    (chatbot, chat-history state, input textbox, status markdown, schema markdown).
    Every code path yields exactly five values. NOTE: `return <value>` inside an
    async generator is a SyntaxError, so early exits yield once then bare-return.
    """
    if not EB_AGENT_AVAILABLE or not os.getenv('GEMINI_API_KEY'):
        no_key_msg = "L'Agente AI non è disponibile. Assicurati che GEMINI_API_KEY sia configurata."
        chat_history_list.append([user_message, no_key_msg])
        yield chat_history_list, chat_history_list, gr.update(value=""), gr.update(value=no_key_msg), gr.update()
        return

    if not user_message.strip():
        yield chat_history_list, chat_history_list, gr.update(value=""), gr.update(value="Stato Agente: Per favore, inserisci una domanda."), gr.update()
        return

    # Echo the user's message immediately while the agent works.
    status_update_msg = "Stato Agente: Elaborazione della tua richiesta..."
    pending_history = chat_history_list + [[user_message, None]]
    yield pending_history, pending_history, gr.update(value=""), gr.update(value=status_update_msg), gr.update()

    # DataFrames the agent may query; empty frames get a sentinel column so the
    # schema text still names them.
    df_follower_stats = current_token_state.get("bubble_follower_stats_df", pd.DataFrame())
    df_posts = current_token_state.get("bubble_posts_df", pd.DataFrame())
    df_post_stats = current_token_state.get("bubble_post_stats_df", pd.DataFrame())
    df_mentions = current_token_state.get("bubble_mentions_df", pd.DataFrame())

    dataframes_for_agent = {
        "follower_stats": df_follower_stats.copy() if not df_follower_stats.empty else pd.DataFrame(columns=['no_data_follower_stats']),
        "posts": df_posts.copy() if not df_posts.empty else pd.DataFrame(columns=['no_data_posts']),
        "post_stats": df_post_stats.copy() if not df_post_stats.empty else pd.DataFrame(columns=['no_data_post_stats']),
        "mentions": df_mentions.copy() if not df_mentions.empty else pd.DataFrame(columns=['no_data_mentions'])
    }

    # Human-readable schema summary shown next to the chat, truncated for the UI.
    from eb_agent_module import get_all_schemas_representation
    schemas_text = "Schemi DataFrames inviati all'Agente:\n\n"
    schemas_text += get_all_schemas_representation(dataframes_for_agent)
    max_schema_display_len = 1500
    if len(schemas_text) > max_schema_display_len:
        schemas_text = schemas_text[:max_schema_display_len] + "\n...(schemi troncati per la visualizzazione)"

    # Stateless per turn: re-initialize the agent and replay prior turns below.
    current_agent = EmployerBrandingAgent(
        llm_model_name=EB_AGENT_LLM_MODEL,
        generation_config_params=EB_AGENT_GEN_CONFIG,
        safety_settings=EB_AGENT_SAFETY_SETTINGS,
        all_dataframes=dataframes_for_agent,
        rag_documents_df=eb_agent_default_rag_docs.copy(),  # copy: keep defaults pristine
        embedding_model_name=EB_AGENT_EMBEDDING_MODEL,
        force_sandbox=True  # True so the agent returns Python code
    )

    # Mirror Gradio's [user, assistant] pair history into role/content dicts.
    agent_internal_history = []
    for user_q, ai_r in chat_history_list:
        if user_q: agent_internal_history.append({"role": "user", "content": user_q})
        if ai_r: agent_internal_history.append({"role": "assistant", "content": ai_r})
    current_agent.chat_history = agent_internal_history

    try:
        logging.info(f"Sending to EB Agent. User: '{user_message}'. DF Keys: {list(dataframes_for_agent.keys())}")
        # process_query appends both the query and the response to the agent's
        # own chat_history.
        ai_response = await current_agent.process_query(user_query=user_message)

        # Rebuild Gradio's pair history from the agent's flat history.
        updated_gradio_history = []
        temp_hist = current_agent.chat_history
        for i in range(0, len(temp_hist), 2):
            u_msg = temp_hist[i]['content']
            a_msg = temp_hist[i + 1]['content'] if i + 1 < len(temp_hist) else "Thinking..."
            updated_gradio_history.append([u_msg, a_msg])

        status_update_msg = "Stato Agente: Risposta ricevuta."
        yield updated_gradio_history, updated_gradio_history, gr.update(value=""), gr.update(value=status_update_msg), gr.update(value=schemas_text)

    except Exception as e:
        logging.error(f"Error during EB Agent processing: {e}", exc_info=True)
        error_msg = f"# Errore dell'Agente AI:\n{type(e).__name__}: {str(e)}"
        # Attach the error to the pending [user_message, None] turn instead of
        # appending a duplicate user entry.
        if pending_history and pending_history[-1][0] == user_message and pending_history[-1][1] is None:
            pending_history[-1][1] = error_msg
        else:
            pending_history.append([user_message, error_msg])
        status_update_msg = f"Stato Agente: Errore - {type(e).__name__}"
        yield pending_history, pending_history, gr.update(value=""), gr.update(value=status_update_msg), gr.update(value=schemas_text)
 
755
 
756
 
757
def clear_eb_agent_chat_history():
    """Reset the Employer Branding agent chat UI.

    Returns (chatbot value, history state, status text). The agent itself is
    re-initialized on every turn, so clearing the UI state is sufficient; the
    previously commented-out agent-side clearing was dead code and is removed.
    """
    initial_msg = "Ciao! Sono il tuo Agente AI per l'Employer Branding. Come posso aiutarti?" if EB_AGENT_AVAILABLE else "Agente AI non disponibile."
    return [[None, initial_msg]], [[None, initial_msg]], "Stato Agente: Chat resettata."
 
 
332
  action_col_visible_update, insights_chatbot_visible_update, chatbot_content_update,
333
  insights_chat_input_visible_update, insights_suggestions_row_visible_update,
334
  s1_upd, s2_upd, s3_upd, formula_display_visible_update, formula_content_update,
335
+ formula_close_hint_visible_update, # Corrected from formula_close_hint_md
336
  new_active_action_state_to_set, new_current_chat_plot_id, updated_chat_histories,
337
  new_explored_plot_id_to_set
338
  ]
 
 
 
339
 
340
  final_updates.extend(generated_panel_vis_updates)
341
  final_updates.extend(generated_bomb_btn_updates)
 
651
  eb_agent_instance_dict = {"agent": None} # To store agent instance across calls if needed, or re-init
652
 
653
async def handle_eb_agent_chat(user_message: str, chat_history_list: list, current_token_state: dict):
    """Handle one chat turn with the Employer Branding AI agent.

    Async generator wired to five Gradio outputs:
    (chatbot, chat-history state, input textbox, status markdown, schema markdown).
    Every code path yields exactly five values.
    """
    if not EB_AGENT_AVAILABLE or not os.getenv('GEMINI_API_KEY'):
        no_key_msg = "L'Agente AI non è disponibile. Assicurati che GEMINI_API_KEY sia configurata."
        chat_history_list.append([user_message, no_key_msg])
        yield chat_history_list, chat_history_list, gr.update(value=""), gr.update(value=no_key_msg), gr.update(value="Nessuno schema disponibile.")
        return

    if not user_message.strip():
        yield chat_history_list, chat_history_list, gr.update(value=""), gr.update(value="Stato Agente: Per favore, inserisci una domanda."), gr.update()
        return

    # Echo the user's message immediately while the agent works. NOTE:
    # `chat_history_list` itself is NOT mutated here — `pending_history` is a
    # new list; the error handler below relies on that.
    status_update_msg = "Stato Agente: Elaborazione della tua richiesta..."
    pending_history = chat_history_list + [[user_message, None]]
    yield pending_history, pending_history, gr.update(value=""), gr.update(value=status_update_msg), gr.update()

    # DataFrames the agent may query; empty frames get a sentinel column so the
    # schema text still names them.
    df_follower_stats = current_token_state.get("bubble_follower_stats_df", pd.DataFrame())
    df_posts = current_token_state.get("bubble_posts_df", pd.DataFrame())
    df_post_stats = current_token_state.get("bubble_post_stats_df", pd.DataFrame())
    df_mentions = current_token_state.get("bubble_mentions_df", pd.DataFrame())

    dataframes_for_agent = {
        "follower_stats": df_follower_stats.copy() if not df_follower_stats.empty else pd.DataFrame(columns=['no_data_follower_stats']),
        "posts": df_posts.copy() if not df_posts.empty else pd.DataFrame(columns=['no_data_posts']),
        "post_stats": df_post_stats.copy() if not df_post_stats.empty else pd.DataFrame(columns=['no_data_post_stats']),
        "mentions": df_mentions.copy() if not df_mentions.empty else pd.DataFrame(columns=['no_data_mentions'])
    }

    # Human-readable schema summary shown next to the chat, truncated for the UI.
    from eb_agent_module import get_all_schemas_representation
    schemas_text_for_display = "Schemi DataFrames inviati all'Agente:\n\n"
    schemas_text_for_display += get_all_schemas_representation(dataframes_for_agent)
    max_schema_display_len = 1500
    if len(schemas_text_for_display) > max_schema_display_len:
        schemas_text_for_display = schemas_text_for_display[:max_schema_display_len] + "\n...(schemi troncati per la visualizzazione)"

    # Stateless per turn: re-initialize the agent and replay prior turns below.
    current_agent = EmployerBrandingAgent(
        llm_model_name=EB_AGENT_LLM_MODEL,
        generation_config_params=EB_AGENT_GEN_CONFIG,
        safety_settings=EB_AGENT_SAFETY_SETTINGS,
        all_dataframes=dataframes_for_agent,
        rag_documents_df=eb_agent_default_rag_docs.copy(),  # copy: keep defaults pristine
        embedding_model_name=EB_AGENT_EMBEDDING_MODEL,
        force_sandbox=True  # True so the agent returns Python code
    )

    # Mirror Gradio's [user, assistant] pair history into role/content dicts.
    agent_internal_history = []
    for user_q, ai_r in chat_history_list:
        if user_q: agent_internal_history.append({"role": "user", "content": user_q})
        if ai_r: agent_internal_history.append({"role": "assistant", "content": ai_r})
    current_agent.chat_history = agent_internal_history

    try:
        logging.info(f"Sending to EB Agent. User: '{user_message}'. DF Keys: {list(dataframes_for_agent.keys())}")
        # process_query appends both the query and the response to the agent's
        # own chat_history.
        ai_response = await current_agent.process_query(user_query=user_message)

        # Rebuild Gradio's pair history from the agent's flat history.
        updated_gradio_history = []
        temp_hist = current_agent.chat_history
        for i in range(0, len(temp_hist), 2):
            u_msg = temp_hist[i]['content']
            a_msg = temp_hist[i + 1]['content'] if i + 1 < len(temp_hist) else "Thinking..."
            updated_gradio_history.append([u_msg, a_msg])

        status_update_msg = "Stato Agente: Risposta ricevuta."
        yield updated_gradio_history, updated_gradio_history, gr.update(value=""), gr.update(value=status_update_msg), gr.update(value=schemas_text_for_display)

    except Exception as e:
        logging.error(f"Error during EB Agent processing: {e}", exc_info=True)
        error_msg = f"# Errore dell'Agente AI:\n{type(e).__name__}: {str(e)}"
        # Attach the error to the pending [user_message, None] turn (the list
        # last shown to the user), avoiding a duplicated user entry.
        if pending_history and pending_history[-1][0] == user_message and pending_history[-1][1] is None:
            pending_history[-1][1] = error_msg
        else:
            pending_history.append([user_message, error_msg])
        status_update_msg = f"Stato Agente: Errore - {type(e).__name__}"
        yield pending_history, pending_history, gr.update(value=""), gr.update(value=status_update_msg), gr.update(value=schemas_text_for_display)
743
 
744
 
745
def clear_eb_agent_chat_history():
    """Reset the agent chat, returning (chatbot value, history state, status text)."""
    if EB_AGENT_AVAILABLE:
        greeting = "Ciao! Sono il tuo Agente AI per l'Employer Branding. Come posso aiutarti?"
    else:
        greeting = "Agente AI non disponibile."
    return [[None, greeting]], [[None, greeting]], "Stato Agente: Chat resettata."
748