GuglielmoTor committed (verified)
Commit 5044bff · 1 Parent(s): c784e5f

Update app.py

Files changed (1)
  1. app.py +62 -79
app.py CHANGED
Old version (removed lines are prefixed with -):

@@ -650,28 +650,23 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky"),
     eb_agent_instance_dict = {"agent": None} # To store agent instance across calls if needed, or re-init

     async def handle_eb_agent_chat(user_message: str, chat_history_list: list, current_token_state: dict):
-        # Expected outputs: [eb_agent_chatbot_ui, eb_agent_chat_history_st, eb_agent_chat_input_ui, eb_agent_status_md, eb_agent_schema_display_md]
-
         if not EB_AGENT_AVAILABLE or not os.getenv('GEMINI_API_KEY'):
-            no_key_msg = "L'Agente AI non è disponibile. Assicurati che GEMINI_API_KEY sia configurata."
-            # Ensure chat_history_list is mutable if it comes from gr.State
-            current_chat_history = list(chat_history_list) if chat_history_list else []
             current_chat_history.append([user_message, no_key_msg])
-            yield current_chat_history, current_chat_history, gr.update(value=""), gr.update(value=no_key_msg), gr.update(value="Nessuno schema disponibile.")
-            return
-
-        current_chat_history = list(chat_history_list) if chat_history_list else []
-
         if not user_message.strip():
-            yield current_chat_history, current_chat_history, gr.update(value=""), gr.update(value="Stato Agente: Per favore, inserisci una domanda."), gr.update() # No change to schema display
-            return
-
         status_update_msg = "Stato Agente: Elaborazione della tua richiesta..."
-        # Show user message immediately, update status
-        # Add user message to current history before yielding
         pending_history = current_chat_history + [[user_message, None]]
         yield pending_history, pending_history, gr.update(value=""), gr.update(value=status_update_msg), gr.update()
-
         # Prepare DataFrames for the agent
         df_follower_stats = current_token_state.get("bubble_follower_stats_df", pd.DataFrame())
         df_posts = current_token_state.get("bubble_posts_df", pd.DataFrame())
@@ -686,18 +681,20 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky"),
         }

         schemas_text_for_display = "Schemi DataFrames inviati all'Agente:\n\n"
-        from eb_agent_module import get_all_schemas_representation # Assuming this is correctly imported in your main file
-        schemas_text_for_display += get_all_schemas_representation(dataframes_for_agent) # Using the mock or your actual function
         max_schema_display_len = 1500
         if len(schemas_text_for_display) > max_schema_display_len:
             schemas_text_for_display = schemas_text_for_display[:max_schema_display_len] + "\n...(schemi troncati per la visualizzazione)"

-        current_agent = EmployerBrandingAgent( # Using the mock or your actual class
-            llm_model_name=EB_AGENT_LLM_MODEL,
-            generation_config_dict=EB_AGENT_GEN_CONFIG,
-            safety_settings_list=EB_AGENT_SAFETY_SETTINGS,
             all_dataframes=dataframes_for_agent,
-            embedding_model_name=EB_AGENT_EMBEDDING_MODEL
         )

         agent_internal_history = []
@@ -721,81 +718,67 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky"),
             init_success = await current_agent.initialize()
             if not init_success:
                 error_msg = "Errore: Impossibile inizializzare l'agente AI."
-                updated_history = current_chat_history + [[user_message, error_msg]]
-                yield updated_history, updated_history, gr.update(value=""), gr.update(value="Stato Agente: Errore di inizializzazione"), gr.update(value=schemas_text_for_display)
-                return
-
             logging.info(f"Sending to EB Agent. User: '{user_message}'. DF Keys: {list(dataframes_for_agent.keys())}")
-            # ai_response_dict is what the agent returns. Based on error, it's {'text': 'blob...'}
-            ai_response_dict = await current_agent.process_query(user_query=user_message)

             bot_message_for_display = "Error: Agent returned an unexpected response." # Default
-
             if isinstance(ai_response_dict, dict):
-                combined_message_blob = ai_response_dict.get("text")
-
-                if isinstance(combined_message_blob, str):
-                    text_part = combined_message_blob
-                    image_data_url = None
-
-                    # Attempt to parse image data URL from the combined_message_blob
-                    # This assumes the image data URL, if present, is on its own line or at the end.
-                    lines = combined_message_blob.splitlines()
-                    if lines:
-                        possible_image_prefixes = [
-                            "data:image/png;base64,",
-                            "data:image/jpeg;base64,",
-                            "data:image/gif;base64,",
-                            "data:image/webp;base64,"
-                        ]
-                        # Check lines from the end, as plot is likely at the end of the message
-                        for i in range(len(lines) - 1, -1, -1):
-                            current_line = lines[i].strip()
-                            for prefix in possible_image_prefixes:
-                                if current_line.startswith(prefix):
-                                    # Basic validation: check for typical base64 characters and some length
-                                    # This is a heuristic to ensure it's likely a valid base64 data string
-                                    if len(current_line) > len(prefix) + 20 and \
-                                       all(c in "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=" for c in current_line[len(prefix):]):
-                                        image_data_url = current_line
-                                        # Reconstruct text_part from lines *before* this image line
-                                        text_part = "\n".join(lines[:i]).strip()
-                                        break # Found image prefix
-                            if image_data_url:
-                                break # Found image line
-
-                    if image_data_url:
-                        # If text_part became empty after extracting image, use None for text in tuple
-                        bot_message_for_display = (text_part if text_part else None, image_data_url)
-                    else:
-                        # No image found or parsing failed, treat the whole blob as text
-                        bot_message_for_display = combined_message_blob
                 else:
-                    bot_message_for_display = "Agent returned a dictionary, but the 'text' field was not a string or was missing."
-                    logging.warning(f"AI response dict 'text' field issue. Dict: {ai_response_dict}")

-            elif isinstance(ai_response_dict, str): # Agent returned a plain string
                 bot_message_for_display = ai_response_dict
-            else: # Fallback for other unexpected types
                 bot_message_for_display = f"Error: Agent returned an unexpected data type: {type(ai_response_dict)}."
                 logging.error(f"Unexpected AI response type: {type(ai_response_dict)}, content: {ai_response_dict}")
-
-            updated_history = current_chat_history + [[user_message, bot_message_for_display]]

             status_update_msg = "Stato Agente: Risposta ricevuta."
-            yield updated_history, updated_history, gr.update(value=""), gr.update(value=status_update_msg), gr.update(value=schemas_text_for_display)
-
         except Exception as e:
             logging.error(f"Error during EB Agent processing: {e}", exc_info=True)
             error_msg_for_chat = f"# Errore dell'Agente AI:\n{type(e).__name__}: {str(e)}"
-            updated_history = current_chat_history + [[user_message, error_msg_for_chat]]
             status_update_msg = f"Stato Agente: Errore - {type(e).__name__}"
-            yield updated_history, updated_history, gr.update(value=""), gr.update(value=status_update_msg), gr.update(value=schemas_text_for_display)


     def clear_eb_agent_chat_history():
         initial_msg = "Ciao! Sono il tuo Agente AI per l'Employer Branding. Come posso aiutarti?" if EB_AGENT_AVAILABLE else "Agente AI non disponibile."
-        return [[None, initial_msg]], [[None, initial_msg]], "Stato Agente: Chat resettata."

     # Connect UI to Handler for EB Agent
     eb_agent_submit_btn.click(
 
New version (added lines are prefixed with +):

@@ -650,28 +650,23 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky"),
     eb_agent_instance_dict = {"agent": None} # To store agent instance across calls if needed, or re-init

     async def handle_eb_agent_chat(user_message: str, chat_history_list: list, current_token_state: dict):
+        current_chat_history = list(chat_history_list) if chat_history_list else []
+
         if not EB_AGENT_AVAILABLE or not os.getenv('GEMINI_API_KEY'):
+            no_key_msg = "L'Agente AI non è disponibile. Assicurati che GEMINI_API_KEY sia configurata e che l'agente sia caricato."
             current_chat_history.append([user_message, no_key_msg])
+            # Ensure all outputs are updated
+            return current_chat_history, current_chat_history, gr.update(value=""), gr.update(value=no_key_msg), gr.update(value="Nessuno schema disponibile.")
+
         if not user_message.strip():
+            # Ensure all outputs are updated even for empty message
+            return current_chat_history, current_chat_history, gr.update(value=""), gr.update(value="Stato Agente: Per favore, inserisci una domanda."), gr.update()
+
         status_update_msg = "Stato Agente: Elaborazione della tua richiesta..."
         pending_history = current_chat_history + [[user_message, None]]
+        # Yield intermediate state to show user message immediately
         yield pending_history, pending_history, gr.update(value=""), gr.update(value=status_update_msg), gr.update()
+
         # Prepare DataFrames for the agent
         df_follower_stats = current_token_state.get("bubble_follower_stats_df", pd.DataFrame())
         df_posts = current_token_state.get("bubble_posts_df", pd.DataFrame())
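
The updated handler still works as a Gradio generator: each yield pushes a fresh value to every output component, which is how the user's message and the "Elaborazione..." status appear before the agent call finishes. Below is a minimal, self-contained sketch of that pattern; it is illustrative only, not code from this repository, and the component and function names are made up.

import asyncio
import gradio as gr

async def echo_with_status(user_message: str, history: list):
    # An async-generator event handler: every `yield` updates the outputs once.
    history = list(history) if history else []
    history.append([user_message, None])   # show the user's message right away
    yield history, "Working..."            # first update: pending state
    await asyncio.sleep(1)                 # stand-in for a slow agent call
    history[-1][1] = f"You said: {user_message}"
    yield history, "Done."                 # second update: final state

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    status = gr.Markdown("Idle")
    msg_box = gr.Textbox()
    msg_box.submit(echo_with_status, inputs=[msg_box, chatbot], outputs=[chatbot, status])

Each yield has to supply one value per output component, in the same order as the outputs list, which is why the handler above always yields the full five-element tuple.
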
 
@@ -686,18 +681,20 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky"),
         }

         schemas_text_for_display = "Schemi DataFrames inviati all'Agente:\n\n"
+        schemas_text_for_display += get_all_schemas_representation(dataframes_for_agent)
         max_schema_display_len = 1500
         if len(schemas_text_for_display) > max_schema_display_len:
             schemas_text_for_display = schemas_text_for_display[:max_schema_display_len] + "\n...(schemi troncati per la visualizzazione)"

+        # Update schema display before agent call
+        yield pending_history, pending_history, gr.update(value=""), gr.update(value=status_update_msg), gr.update(value=schemas_text_for_display)
+
+        current_agent = EmployerBrandingAgent(
+            llm_model_name="gemini-2.5-flash-preview-05-20", # Replace with your actual EB_AGENT_LLM_MODEL
+            generation_config_dict={}, # Replace with your actual EB_AGENT_GEN_CONFIG
+            safety_settings_list=[], # Replace with your actual EB_AGENT_SAFETY_SETTINGS
             all_dataframes=dataframes_for_agent,
+            embedding_model_name="gemini-embedding-exp-03-07" # Replace with your actual EB_AGENT_EMBEDDING_MODEL
         )

         agent_internal_history = []
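
get_all_schemas_representation lives in eb_agent_module, which this commit does not touch, so its exact output is not shown here. As a rough, hypothetical sketch of what such a helper might produce (row counts, column names and dtypes per DataFrame), assuming nothing beyond pandas:

import pandas as pd

def describe_dataframes(dataframes: dict) -> str:
    """Hypothetical stand-in for get_all_schemas_representation."""
    parts = []
    for name, df in dataframes.items():
        if df is None or df.empty:
            parts.append(f"- {name}: empty")
            continue
        cols = ", ".join(f"{col} ({df[col].dtype})" for col in df.columns)
        parts.append(f"- {name}: {len(df)} rows; columns: {cols}")
    return "\n".join(parts)

The same string can then be embedded in the agent prompt and shown in the schema panel, which is what the handler above does with schemas_text_for_display.
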
 
@@ -721,81 +718,67 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky"),
             init_success = await current_agent.initialize()
             if not init_success:
                 error_msg = "Errore: Impossibile inizializzare l'agente AI."
+                # Use pending_history which already has the user message
+                updated_history = pending_history[:-1] + [[user_message, error_msg]] # Replace None with error
+                return updated_history, updated_history, gr.update(value=""), gr.update(value="Stato Agente: Errore di inizializzazione"), gr.update(value=schemas_text_for_display)
+
             logging.info(f"Sending to EB Agent. User: '{user_message}'. DF Keys: {list(dataframes_for_agent.keys())}")
+            ai_response_dict = await current_agent.process_query(user_query=user_message)

             bot_message_for_display = "Error: Agent returned an unexpected response." # Default
+            agent_text_response = None
+            agent_image_path = None
+
             if isinstance(ai_response_dict, dict):
+                agent_text_response = ai_response_dict.get("text")
+                agent_image_path = ai_response_dict.get("image_path")
+
+                logging.info(f"Agent response: text='{str(agent_text_response)[:100]}...', image_path='{agent_image_path}'")
+
+                if agent_image_path and os.path.exists(agent_image_path):
+                    # If there's an image, display it. Text can accompany it or be None.
+                    # Ensure text_response is a string or None for the tuple.
+                    text_for_display = agent_text_response if isinstance(agent_text_response, str) else None
+                    if not text_for_display and agent_text_response is not None: # e.g. if text_response was empty string
+                        text_for_display = str(agent_text_response)
+
+                    bot_message_for_display = (text_for_display, agent_image_path)
+                    logging.info(f"Displaying image: {agent_image_path} with text: {text_for_display}")
+                elif agent_text_response is not None: # Text only, or image path was invalid
+                    bot_message_for_display = agent_text_response
+                    if agent_image_path: # Log if image path was given but not valid
+                        logging.warning(f"Agent provided image_path '{agent_image_path}' but it does not exist or was not used.")
                 else:
+                    bot_message_for_display = "L'agente ha risposto, ma il contenuto non è visualizzabile."
+                    logging.warning(f"AI response dict issue. Text: {agent_text_response}, Image Path: {agent_image_path}")

+            elif isinstance(ai_response_dict, str):
                 bot_message_for_display = ai_response_dict
+                logging.warning(f"AI response was a plain string: {ai_response_dict}")
+            else:
                 bot_message_for_display = f"Error: Agent returned an unexpected data type: {type(ai_response_dict)}."
                 logging.error(f"Unexpected AI response type: {type(ai_response_dict)}, content: {ai_response_dict}")
+
+            # Use pending_history which already has the user message with None for bot response
+            updated_history = pending_history[:-1] + [[user_message, bot_message_for_display]]

             status_update_msg = "Stato Agente: Risposta ricevuta."
+            return updated_history, updated_history, gr.update(value=""), gr.update(value=status_update_msg), gr.update(value=schemas_text_for_display)
+
         except Exception as e:
             logging.error(f"Error during EB Agent processing: {e}", exc_info=True)
             error_msg_for_chat = f"# Errore dell'Agente AI:\n{type(e).__name__}: {str(e)}"
+            # Use pending_history which already has the user message
+            updated_history = pending_history[:-1] + [[user_message, error_msg_for_chat]]
             status_update_msg = f"Stato Agente: Errore - {type(e).__name__}"
+            return updated_history, updated_history, gr.update(value=""), gr.update(value=status_update_msg), gr.update(value=schemas_text_for_display)


     def clear_eb_agent_chat_history():
         initial_msg = "Ciao! Sono il tuo Agente AI per l'Employer Branding. Come posso aiutarti?" if EB_AGENT_AVAILABLE else "Agente AI non disponibile."
+        # Ensure all outputs are updated
+        return [[None, initial_msg]], [[None, initial_msg]], "Stato Agente: Chat resettata.", gr.update(value="Gli schemi dei dati verranno mostrati qui...")
+

     # Connect UI to Handler for EB Agent
     eb_agent_submit_btn.click(
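
For reference, the reply handling added in this commit boils down to mapping the agent's {'text': ..., 'image_path': ...} dictionary (or a plain string) onto something gr.Chatbot can render. The following compact sketch shows one way to express that mapping; it is illustrative only, not part of app.py, and assumes Gradio's tuple message format in which a (file_path, alt_text) pair displays the file.

import os

def to_chatbot_message(reply):
    """Map an agent reply (dict with 'text'/'image_path', or a plain string)
    to a value a tuples-format gr.Chatbot can display."""
    if isinstance(reply, str):
        return reply
    if isinstance(reply, dict):
        text = reply.get("text")
        image_path = reply.get("image_path")
        if image_path and os.path.exists(image_path):
            # A (file_path, alt_text) tuple renders the image in the chat bubble.
            return (image_path, text or "Grafico generato dall'agente")
        if text is not None:
            return text
    return f"Errore: risposta dell'agente non visualizzabile ({type(reply).__name__})."
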