GuglielmoTor commited on
Commit
6267007
·
verified ·
1 Parent(s): a8848c7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +86 -31
app.py CHANGED
@@ -652,28 +652,31 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky"),
652
 
653
  async def handle_eb_agent_chat(user_message: str, chat_history_list: list, current_token_state: dict):
654
  # Expected outputs: [eb_agent_chatbot_ui, eb_agent_chat_history_st, eb_agent_chat_input_ui, eb_agent_status_md, eb_agent_schema_display_md]
655
- # (5 components)
656
-
657
  if not EB_AGENT_AVAILABLE or not os.getenv('GEMINI_API_KEY'):
658
  no_key_msg = "L'Agente AI non è disponibile. Assicurati che GEMINI_API_KEY sia configurata."
659
- chat_history_list.append([user_message, no_key_msg])
660
- # Yield updates for all 5 components
661
- yield chat_history_list, chat_history_list, gr.update(value=""), gr.update(value=no_key_msg), gr.update(value="Nessuno schema disponibile.")
 
662
  return
663
 
 
 
664
  if not user_message.strip():
665
- # Yield updates for all 5 components
666
- yield chat_history_list, chat_history_list, gr.update(value=""), gr.update(value="Stato Agente: Per favore, inserisci una domanda."), gr.update() # No change to schema display
667
  return
668
 
669
  status_update_msg = "Stato Agente: Elaborazione della tua richiesta..."
670
  # Show user message immediately, update status
671
- yield chat_history_list + [[user_message, None]], chat_history_list + [[user_message, None]], gr.update(value=""), gr.update(value=status_update_msg), gr.update()
 
 
672
 
673
  # Prepare DataFrames for the agent
674
  df_follower_stats = current_token_state.get("bubble_follower_stats_df", pd.DataFrame())
675
- df_posts = current_token_state.get("bubble_posts_df", pd.DataFrame())
676
- df_post_stats = current_token_state.get("bubble_post_stats_df", pd.DataFrame())
677
  df_mentions = current_token_state.get("bubble_mentions_df", pd.DataFrame())
678
 
679
  dataframes_for_agent = {
@@ -684,13 +687,13 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky"),
684
  }
685
 
686
  schemas_text_for_display = "Schemi DataFrames inviati all'Agente:\n\n"
687
- from eb_agent_module import get_all_schemas_representation
688
- schemas_text_for_display += get_all_schemas_representation(dataframes_for_agent)
689
  max_schema_display_len = 1500
690
  if len(schemas_text_for_display) > max_schema_display_len:
691
  schemas_text_for_display = schemas_text_for_display[:max_schema_display_len] + "\n...(schemi troncati per la visualizzazione)"
692
 
693
- current_agent = EmployerBrandingAgent(
694
  llm_model_name=EB_AGENT_LLM_MODEL,
695
  generation_config_dict=EB_AGENT_GEN_CONFIG,
696
  safety_settings_list=EB_AGENT_SAFETY_SETTINGS,
@@ -699,44 +702,96 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky"),
699
  embedding_model_name=EB_AGENT_EMBEDDING_MODEL
700
  )
701
 
702
- # Initialize the agent's chat history with previous conversation
703
  agent_internal_history = []
704
- for user_q, ai_r in chat_history_list:
705
  if user_q: agent_internal_history.append({"role": "user", "content": user_q})
706
- if ai_r: agent_internal_history.append({"role": "model", "content": ai_r})
707
-
708
- # ADD THE CURRENT USER MESSAGE TO THE AGENT'S HISTORY - THIS WAS MISSING!
 
 
 
 
 
 
 
 
709
  agent_internal_history.append({"role": "user", "content": user_message})
710
  current_agent.chat_history = agent_internal_history
711
-
712
  try:
713
- # Initialize the agent first
714
  init_success = await current_agent.initialize()
715
  if not init_success:
716
  error_msg = "Errore: Impossibile inizializzare l'agente AI."
717
- updated_history = chat_history_list + [[user_message, error_msg]]
718
  yield updated_history, updated_history, gr.update(value=""), gr.update(value="Stato Agente: Errore di inizializzazione"), gr.update(value=schemas_text_for_display)
719
  return
720
 
721
  logging.info(f"Sending to EB Agent. User: '{user_message}'. DF Keys: {list(dataframes_for_agent.keys())}")
722
- ai_response = await current_agent.process_query(user_query=user_message)
 
723
 
724
- # Update the chat history with the AI response
725
- updated_history = chat_history_list + [[user_message, ai_response]]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
726
 
727
  status_update_msg = "Stato Agente: Risposta ricevuta."
728
- # Yield final updates for all 5 components
729
  yield updated_history, updated_history, gr.update(value=""), gr.update(value=status_update_msg), gr.update(value=schemas_text_for_display)
730
 
731
  except Exception as e:
732
  logging.error(f"Error during EB Agent processing: {e}", exc_info=True)
733
- error_msg = f"# Errore dell'Agente AI:\n{type(e).__name__}: {str(e)}"
734
-
735
- # Update history with error message
736
- updated_history = chat_history_list + [[user_message, error_msg]]
737
-
738
  status_update_msg = f"Stato Agente: Errore - {type(e).__name__}"
739
- # Yield error updates for all 5 components
740
  yield updated_history, updated_history, gr.update(value=""), gr.update(value=status_update_msg), gr.update(value=schemas_text_for_display)
741
 
742
 
 
652
 
653
  async def handle_eb_agent_chat(user_message: str, chat_history_list: list, current_token_state: dict):
654
  # Expected outputs: [eb_agent_chatbot_ui, eb_agent_chat_history_st, eb_agent_chat_input_ui, eb_agent_status_md, eb_agent_schema_display_md]
655
+
 
656
  if not EB_AGENT_AVAILABLE or not os.getenv('GEMINI_API_KEY'):
657
  no_key_msg = "L'Agente AI non è disponibile. Assicurati che GEMINI_API_KEY sia configurata."
658
+ # Copy chat_history_list so we never mutate the shared gr.State value in place
659
+ current_chat_history = list(chat_history_list) if chat_history_list else []
660
+ current_chat_history.append([user_message, no_key_msg])
661
+ yield current_chat_history, current_chat_history, gr.update(value=""), gr.update(value=no_key_msg), gr.update(value="Nessuno schema disponibile.")
662
  return
663
 
664
+ current_chat_history = list(chat_history_list) if chat_history_list else []
665
+
666
  if not user_message.strip():
667
+ yield current_chat_history, current_chat_history, gr.update(value=""), gr.update(value="Stato Agente: Per favore, inserisci una domanda."), gr.update() # No change to schema display
 
668
  return
669
 
670
  status_update_msg = "Stato Agente: Elaborazione della tua richiesta..."
671
  # Show user message immediately, update status
672
+ # Add user message to current history before yielding
673
+ pending_history = current_chat_history + [[user_message, None]]
674
+ yield pending_history, pending_history, gr.update(value=""), gr.update(value=status_update_msg), gr.update()
675
 
676
  # Prepare DataFrames for the agent
677
  df_follower_stats = current_token_state.get("bubble_follower_stats_df", pd.DataFrame())
678
+ df_posts = current_token_state.get("bubble_posts_df", pd.DataFrame())
679
+ df_post_stats = current_token_state.get("bubble_post_stats_df", pd.DataFrame())
680
  df_mentions = current_token_state.get("bubble_mentions_df", pd.DataFrame())
681
 
682
  dataframes_for_agent = {
 
687
  }
688
 
689
  schemas_text_for_display = "Schemi DataFrames inviati all'Agente:\n\n"
690
+ # from eb_agent_module import get_all_schemas_representation  # NOTE: this import must already exist at the top of the file
691
+ schemas_text_for_display += get_all_schemas_representation(dataframes_for_agent) # Using the mock or your actual function
692
  max_schema_display_len = 1500
693
  if len(schemas_text_for_display) > max_schema_display_len:
694
  schemas_text_for_display = schemas_text_for_display[:max_schema_display_len] + "\n...(schemi troncati per la visualizzazione)"
695
 
696
+ current_agent = EmployerBrandingAgent( # Using the mock or your actual class
697
  llm_model_name=EB_AGENT_LLM_MODEL,
698
  generation_config_dict=EB_AGENT_GEN_CONFIG,
699
  safety_settings_list=EB_AGENT_SAFETY_SETTINGS,
 
702
  embedding_model_name=EB_AGENT_EMBEDDING_MODEL
703
  )
704
 
 
705
  agent_internal_history = []
706
+ for user_q, ai_r_obj in current_chat_history: # Iterate over the current history being built
707
  if user_q: agent_internal_history.append({"role": "user", "content": user_q})
708
+ # ai_r_obj could be string, tuple (text, image_url), or None
709
+ if ai_r_obj:
710
+ if isinstance(ai_r_obj, tuple):
711
+ # If it's a (text, image_url) tuple, take the text part for agent's history
712
+ # Or combine them if your agent can handle it. For simplicity, just text.
713
+ text_for_agent_history = ai_r_obj[0] if ai_r_obj[0] else "Visual media displayed."
714
+ agent_internal_history.append({"role": "model", "content": text_for_agent_history})
715
+ elif isinstance(ai_r_obj, str):
716
+ agent_internal_history.append({"role": "model", "content": ai_r_obj})
717
+
718
+ # Append the current user message to the agent's internal history (previously missing)
719
  agent_internal_history.append({"role": "user", "content": user_message})
720
  current_agent.chat_history = agent_internal_history
721
+
722
  try:
 
723
  init_success = await current_agent.initialize()
724
  if not init_success:
725
  error_msg = "Errore: Impossibile inizializzare l'agente AI."
726
+ updated_history = current_chat_history + [[user_message, error_msg]]
727
  yield updated_history, updated_history, gr.update(value=""), gr.update(value="Stato Agente: Errore di inizializzazione"), gr.update(value=schemas_text_for_display)
728
  return
729
 
730
  logging.info(f"Sending to EB Agent. User: '{user_message}'. DF Keys: {list(dataframes_for_agent.keys())}")
731
+ # ai_response_dict is what the agent returns. Based on error, it's {'text': 'blob...'}
732
+ ai_response_dict = await current_agent.process_query(user_query=user_message)
733
 
734
+ bot_message_for_display = "Error: Agent returned an unexpected response." # Default
735
+
736
+ if isinstance(ai_response_dict, dict):
737
+ combined_message_blob = ai_response_dict.get("text")
738
+
739
+ if isinstance(combined_message_blob, str):
740
+ text_part = combined_message_blob
741
+ image_data_url = None
742
+
743
+ # Attempt to parse image data URL from the combined_message_blob
744
+ # This assumes the image data URL, if present, is on its own line or at the end.
745
+ lines = combined_message_blob.splitlines()
746
+ if lines:
747
+ possible_image_prefixes = [
748
+ "data:image/png;base64,",
749
+ "data:image/jpeg;base64,",
750
+ "data:image/gif;base64,",
751
+ "data:image/webp;base64,"
752
+ ]
753
+ # Check lines from the end, as plot is likely at the end of the message
754
+ for i in range(len(lines) - 1, -1, -1):
755
+ current_line = lines[i].strip()
756
+ for prefix in possible_image_prefixes:
757
+ if current_line.startswith(prefix):
758
+ # Basic validation: check for typical base64 characters and some length
759
+ # This is a heuristic to ensure it's likely a valid base64 data string
760
+ if len(current_line) > len(prefix) + 20 and \
761
+ all(c in "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=" for c in current_line[len(prefix):]):
762
+ image_data_url = current_line
763
+ # Reconstruct text_part from lines *before* this image line
764
+ text_part = "\n".join(lines[:i]).strip()
765
+ break # Found image prefix
766
+ if image_data_url:
767
+ break # Found image line
768
+
769
+ if image_data_url:
770
+ # If text_part became empty after extracting image, use None for text in tuple
771
+ bot_message_for_display = (text_part if text_part else None, image_data_url)
772
+ else:
773
+ # No image found or parsing failed, treat the whole blob as text
774
+ bot_message_for_display = combined_message_blob
775
+ else:
776
+ bot_message_for_display = "Agent returned a dictionary, but the 'text' field was not a string or was missing."
777
+ logging.warning(f"AI response dict 'text' field issue. Dict: {ai_response_dict}")
778
+
779
+ elif isinstance(ai_response_dict, str): # Agent returned a plain string
780
+ bot_message_for_display = ai_response_dict
781
+ else: # Fallback for other unexpected types
782
+ bot_message_for_display = f"Error: Agent returned an unexpected data type: {type(ai_response_dict)}."
783
+ logging.error(f"Unexpected AI response type: {type(ai_response_dict)}, content: {ai_response_dict}")
784
+
785
+ updated_history = current_chat_history + [[user_message, bot_message_for_display]]
786
 
787
  status_update_msg = "Stato Agente: Risposta ricevuta."
 
788
  yield updated_history, updated_history, gr.update(value=""), gr.update(value=status_update_msg), gr.update(value=schemas_text_for_display)
789
 
790
  except Exception as e:
791
  logging.error(f"Error during EB Agent processing: {e}", exc_info=True)
792
+ error_msg_for_chat = f"# Errore dell'Agente AI:\n{type(e).__name__}: {str(e)}"
793
+ updated_history = current_chat_history + [[user_message, error_msg_for_chat]]
 
 
 
794
  status_update_msg = f"Stato Agente: Errore - {type(e).__name__}"
 
795
  yield updated_history, updated_history, gr.update(value=""), gr.update(value=status_update_msg), gr.update(value=schemas_text_for_display)
796
 
797