GuglielmoTor committed (verified)
Commit 65551e2 · 1 Parent(s): f458876

Update app.py

Files changed (1)
  1. app.py +4 -249
app.py CHANGED
@@ -1,6 +1,4 @@
 # app.py
-# (Showing relevant parts that need modification)
-
 import gradio as gr
 import pandas as pd
 import os
@@ -12,7 +10,7 @@ import time # For profiling if needed
 from datetime import datetime, timedelta # Added timedelta
 import numpy as np
 from collections import OrderedDict # To maintain section order
-import asyncio # For async operations with the new agent
+import asyncio # For async operations

 # --- Module Imports ---
 from gradio_utils import get_url_user_token
@@ -38,37 +36,13 @@ from chatbot_prompts import get_initial_insight_prompt_and_suggestions # MODIFIE
 from chatbot_handler import generate_llm_response
 # --- END EXISTING CHATBOT MODULE IMPORTS ---

-# --- NEW EMPLOYER BRANDING AGENT MODULE IMPORTS ---
-try:
-    from eb_agent_module import (
-        EmployerBrandingAgent,
-        GENERATION_CONFIG_PARAMS as EB_AGENT_GEN_CONFIG, # Rename to avoid conflict
-        LLM_MODEL_NAME as EB_AGENT_LLM_MODEL, # Rename
-        GEMINI_EMBEDDING_MODEL_NAME as EB_AGENT_EMBEDDING_MODEL, # Rename
-        DEFAULT_SAFETY_SETTINGS as EB_AGENT_SAFETY_SETTINGS, # Import safety settings
-        get_all_schemas_representation
-    )
-    EB_AGENT_AVAILABLE = True
-    logging.info("Successfully imported EmployerBrandingAgent module.")
-except ImportError as e:
-    logging.error(f"Failed to import EmployerBrandingAgent module: {e}", exc_info=True)
-    EB_AGENT_AVAILABLE = False
-    # Define dummy classes/variables if import fails, so app can still run
-    class EmployerBrandingAgent:
-        def __init__(self, *args, **kwargs): logging.error("EB Agent Dummy Class Initialized")
-        async def process_query(self, query, **kwargs): return "# Error: Employer Branding Agent module not loaded."
-        def update_dataframes(self, dfs): pass
-        def clear_chat_history(self): pass
-    EB_AGENT_GEN_CONFIG, EB_AGENT_LLM_MODEL, EB_AGENT_EMBEDDING_MODEL, EB_AGENT_SAFETY_SETTINGS = {}, None, None, pd.DataFrame(), {}
-
-
 # Configure logging
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(module)s - %(message)s')


 # --- Gradio UI Blocks ---
 with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky"),
-               title="LinkedIn Organization Dashboard") as app:
+               title="LinkedIn Organization Dashboard") as app:
     token_state = gr.State(value={
         "token": None, "client_id": None, "org_urn": None,
         "bubble_posts_df": pd.DataFrame(), "bubble_post_stats_df": pd.DataFrame(),
@@ -84,11 +58,6 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky"),
     current_chat_plot_id_st = gr.State(None)
     plot_data_for_chatbot_st = gr.State({})

-    # --- NEW: States for Employer Branding Agent Tab ---
-    eb_agent_chat_history_st = gr.State([])
-    # The agent instance itself will be created on-the-fly or managed if complex state is needed.
-    # For now, we'll re-initialize it with fresh data in the handler.
-
     gr.Markdown("# 🚀 LinkedIn Organization Dashboard")
     url_user_token_display = gr.Textbox(label="User Token (Nascosto)", interactive=False, visible=False)
     status_box = gr.Textbox(label="Stato Generale Token LinkedIn", interactive=False, value="Inizializzazione...")
@@ -337,8 +306,8 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky"),
             new_explored_plot_id_to_set
         ]

-        final_updates.extend(generated_panel_vis_updates)
-        final_updates.extend(generated_bomb_btn_updates)
+        final_updates.extend(generated_panel_vis_updates)
+        final_updates.extend(generated_bomb_btn_updates)
         final_updates.extend(generated_formula_btn_updates)
         final_updates.extend(generated_explore_btn_updates)
         final_updates.extend(section_title_vis_updates)
@@ -610,214 +579,6 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky"),
                 show_progress="full"
             )

-        # --- NEW: Tab 5 for Employer Branding Agent ---
-        with gr.TabItem("5️⃣ Agente AI Employer Branding", id="tab_eb_agent"):
-            gr.Markdown("## 🤖 Interagisci con l'Agente AI per l'Employer Branding")
-
-            if not EB_AGENT_AVAILABLE:
-                gr.Markdown("<p style='color:red;font-weight:bold;'>Attenzione: Il modulo dell'Agente AI per l'Employer Branding non è stato caricato correttamente. Controllare i log e l'installazione della libreria `google-generativeai` e la variabile d'ambiente `GEMINI_API_KEY`.</p>")
-            elif not os.getenv('GEMINI_API_KEY'):
-                gr.Markdown("<p style='color:orange;font-weight:bold;'>Attenzione: La variabile d'ambiente `GEMINI_API_KEY` non è impostata. Le funzionalità dell'Agente AI saranno limitate o non funzioneranno.</p>")
-
-
-            gr.Markdown(
-                "Fai domande sui tuoi dati LinkedIn (statistiche follower, post e menzioni) per ottenere insights e codice Pandas per analizzarli. "
-                "L'agente utilizza i dati attualmente disponibili nello stato dell'applicazione."
-            )
-            with gr.Row():
-                with gr.Column(scale=2):
-                    eb_agent_chatbot_ui = gr.Chatbot(
-                        label="Chat con Agente AI EB",
-                        value=[[None, "Ciao! Sono il tuo Agente AI per l'Employer Branding. Come posso aiutarti ad analizzare i tuoi dati LinkedIn oggi? Chiedimi di generare codice Pandas o di fornire insights."]] if EB_AGENT_AVAILABLE else [[None, "Agente AI non disponibile."]],
-                        bubble_full_width=False,
-                        height=500,
-                        placeholder="L'Agente AI è pronto. Chiedi pure..."
-                    )
-                    eb_agent_chat_input_ui = gr.Textbox(
-                        label="La tua domanda:",
-                        placeholder="Es: 'Mostrami le aziende dei miei follower nel settore tecnologico' o 'Qual è il sentiment medio delle mie menzioni?'",
-                        lines=3,
-                        interactive=EB_AGENT_AVAILABLE # Disable if agent not available
-                    )
-                    with gr.Row():
-                        eb_agent_submit_btn = gr.Button("💬 Invia Messaggio", variant="primary", interactive=EB_AGENT_AVAILABLE)
-                        eb_agent_clear_btn = gr.Button("🗑️ Cancella Chat", variant="stop", interactive=EB_AGENT_AVAILABLE)
-                with gr.Column(scale=1):
-                    gr.Markdown("#### Schemi Dati Disponibili per l'Agente:")
-                    eb_agent_schema_display_md = gr.Markdown("Gli schemi dei dati (follower, post, menzioni) verranno mostrati qui quando l'agente viene inizializzato con una query.")
-                    eb_agent_status_md = gr.Markdown("Stato Agente: In attesa di input...")
-
-            # --- NEW: Handler for Employer Branding Agent Chat ---
-            eb_agent_instance_dict = {"agent": None} # To store agent instance across calls if needed, or re-init
-
-            async def handle_eb_agent_chat(user_message: str, chat_history_list: list, current_token_state: dict):
-                current_chat_history = list(chat_history_list) if chat_history_list else []
-
-                if not EB_AGENT_AVAILABLE or not os.getenv('GEMINI_API_KEY'):
-                    no_key_msg = "L'Agente AI non è disponibile. Assicurati che GEMINI_API_KEY sia configurata e che l'agente sia caricato."
-                    current_chat_history.append([user_message, no_key_msg])
-                    # Ensure all outputs are updated
-                    yield current_chat_history, current_chat_history, gr.update(value=""), gr.update(value=no_key_msg), gr.update(value="Nessuno schema disponibile.")
-                    return
-
-                if not user_message.strip():
-                    # Ensure all outputs are updated even for empty message
-                    yield current_chat_history, current_chat_history, gr.update(value=""), gr.update(value="Stato Agente: Per favore, inserisci una domanda."), gr.update()
-                    return
-
-                status_update_msg = "Stato Agente: Elaborazione della tua richiesta..."
-                pending_history = current_chat_history + [[user_message, None]]
-                # Yield intermediate state to show user message immediately
-                yield pending_history, pending_history, gr.update(value=""), gr.update(value=status_update_msg), gr.update()
-
-                # Prepare DataFrames for the agent
-                df_follower_stats = current_token_state.get("bubble_follower_stats_df", pd.DataFrame())
-                df_posts = current_token_state.get("bubble_posts_df", pd.DataFrame())
-                df_post_stats = current_token_state.get("bubble_post_stats_df", pd.DataFrame())
-                df_mentions = current_token_state.get("bubble_mentions_df", pd.DataFrame())
-
-                dataframes_for_agent = {
-                    "follower_stats": df_follower_stats.copy() if not df_follower_stats.empty else pd.DataFrame(columns=['no_data_follower_stats']),
-                    "posts": df_posts.copy() if not df_posts.empty else pd.DataFrame(columns=['no_data_posts']),
-                    "post_stats": df_post_stats.copy() if not df_post_stats.empty else pd.DataFrame(columns=['no_data_post_stats']),
-                    "mentions": df_mentions.copy() if not df_mentions.empty else pd.DataFrame(columns=['no_data_mentions'])
-                }
-
-                schemas_text_for_display = "Schemi DataFrames inviati all'Agente:\n\n"
-                schemas_text_for_display += get_all_schemas_representation(dataframes_for_agent)
-                max_schema_display_len = 1500
-                if len(schemas_text_for_display) > max_schema_display_len:
-                    schemas_text_for_display = schemas_text_for_display[:max_schema_display_len] + "\n...(schemi troncati per la visualizzazione)"
-
-                # Update schema display before agent call
-                yield pending_history, pending_history, gr.update(value=""), gr.update(value=status_update_msg), gr.update(value=schemas_text_for_display)
-
-                current_agent = EmployerBrandingAgent(
-                    llm_model_name="gemini-2.5-flash-preview-05-20", # Replace with your actual EB_AGENT_LLM_MODEL
-                    generation_config_dict={}, # Replace with your actual EB_AGENT_GEN_CONFIG
-                    safety_settings_list=[], # Replace with your actual EB_AGENT_SAFETY_SETTINGS
-                    all_dataframes=dataframes_for_agent,
-                    embedding_model_name="gemini-embedding-exp-03-07" # Replace with your actual EB_AGENT_EMBEDDING_MODEL
-                )
-
-                agent_internal_history = []
-                for user_q, ai_r_obj in current_chat_history: # Iterate over the current history being built
-                    if user_q: agent_internal_history.append({"role": "user", "content": user_q})
-                    # ai_r_obj could be string, tuple (text, image_url), or None
-                    if ai_r_obj:
-                        if isinstance(ai_r_obj, tuple):
-                            # If it's a (text, image_url) tuple, take the text part for agent's history
-                            # Or combine them if your agent can handle it. For simplicity, just text.
-                            text_for_agent_history = ai_r_obj[0] if ai_r_obj[0] else "Visual media displayed."
-                            agent_internal_history.append({"role": "model", "content": text_for_agent_history})
-                        elif isinstance(ai_r_obj, str):
-                            agent_internal_history.append({"role": "model", "content": ai_r_obj})
-
-                # ADD THE CURRENT USER MESSAGE TO THE AGENT'S HISTORY
-                agent_internal_history.append({"role": "user", "content": user_message})
-                current_agent.chat_history = agent_internal_history
-
-                try:
-                    init_success = await current_agent.initialize()
-                    if not init_success:
-                        error_msg = "Errore: Impossibile inizializzare l'agente AI."
-                        # Use pending_history which already has the user message
-                        updated_history = pending_history[:-1] + [[user_message, error_msg]] # Replace None with error
-                        yield updated_history, updated_history, gr.update(value=""), gr.update(value="Stato Agente: Errore di inizializzazione"), gr.update(value=schemas_text_for_display)
-                        return
-
-                    logging.info(f"Sending to EB Agent. User: '{user_message}'. DF Keys: {list(dataframes_for_agent.keys())}")
-                    ai_response_dict = await current_agent.process_query(user_query=user_message)
-
-                    bot_message_for_display = "Error: Agent returned an unexpected response." # Default
-                    agent_text_response = None
-                    agent_image_path = None
-                    bot_response_list_for_history = []
-
-                    if isinstance(ai_response_dict, dict):
-                        agent_text_response = ai_response_dict.get("text")
-                        agent_image_path = ai_response_dict.get("image_path")
-
-                        logging.info(f"Agent response: text='{str(agent_text_response)[:100]}...', image_path='{agent_image_path}'")
-
-                        if agent_image_path and os.path.exists(agent_image_path):
-                            text_for_display = str(agent_text_response) if agent_text_response is not None else ""
-
-                            if text_for_display.strip(): # If there's actual text, add it as the first bot message
-                                bot_response_list_for_history.append(text_for_display)
-                                logging.info(f"Adding text message to chat: '{text_for_display[:100]}...'")
-
-                            # Add the image as a second bot message (or first if no text)
-                            # Using a simple caption for the image tuple
-                            bot_response_list_for_history.append(("📊 Generated Chart", agent_image_path))
-                            logging.info(f"Adding image message to chat: {agent_image_path}")
-                            status_update_msg = "Stato Agente: Risposta con grafico ricevuta."
-
-                        elif agent_text_response is not None:
-                            bot_response_list_for_history.append(str(agent_text_response))
-                            if agent_image_path:
-                                logging.warning(f"Agent provided image_path '{agent_image_path}' but it does not exist or was not used.")
-                            status_update_msg = "Stato Agente: Risposta testuale ricevuta."
-                        else:
-                            bot_response_list_for_history.append("L'agente ha risposto, ma il contenuto non è visualizzabile.")
-                            logging.warning(f"AI response dict issue. Text: {agent_text_response}, Image Path: {agent_image_path}")
-                            status_update_msg = "Stato Agente: Risposta con problemi."
-
-                    elif isinstance(ai_response_dict, str):
-                        bot_response_list_for_history.append(ai_response_dict)
-                        logging.warning(f"AI response was a plain string: {ai_response_dict}")
-                        status_update_msg = "Stato Agente: Risposta testuale ricevuta."
-                    else:
-                        bot_response_list_for_history.append(f"Error: Agent returned an unexpected data type: {type(ai_response_dict)}.")
-                        logging.error(f"Unexpected AI response type: {type(ai_response_dict)}, content: {ai_response_dict}")
-                        status_update_msg = "Stato Agente: Errore nella risposta."

-                    # Construct the final history
-                    # pending_history already has [user_message, None]
-                    # We replace the None with the first bot response, then append any subsequent bot responses
-                    final_updated_history = pending_history[:-1] # Remove the [user_message, None]
-                    final_updated_history.append([user_message, bot_response_list_for_history[0] if bot_response_list_for_history else "No response content."])
-                    if len(bot_response_list_for_history) > 1:
-                        for bot_msg_item in bot_response_list_for_history[1:]:
-                            final_updated_history.append([None, bot_msg_item]) # Subsequent messages from bot have None for user part
-
-                    yield final_updated_history, final_updated_history, gr.update(value=""), gr.update(value=status_update_msg), gr.update(value=schemas_text_for_display)
-                    return
-
-                except Exception as e:
-                    logging.error(f"Error during EB Agent processing: {e}", exc_info=True)
-                    error_msg_for_chat = f"# Errore dell'Agente AI:\n{type(e).__name__}: {str(e)}"
-                    updated_history = pending_history[:-1] + [[user_message, error_msg_for_chat]]
-                    status_update_msg = f"Stato Agente: Errore - {type(e).__name__}"
-                    yield updated_history, updated_history, gr.update(value=""), gr.update(value=status_update_msg), gr.update(value=schemas_text_for_display)
-                    return
-
-            def clear_eb_agent_chat_history():
-                initial_msg = "Ciao! Sono il tuo Agente AI per l'Employer Branding. Come posso aiutarti?" if EB_AGENT_AVAILABLE else "Agente AI non disponibile."
-                # Ensure all outputs are updated
-                return [[None, initial_msg]], [[None, initial_msg]], "Stato Agente: Chat resettata.", gr.update(value="Gli schemi dei dati verranno mostrati qui...")
-
-
-            # Connect UI to Handler for EB Agent
-            eb_agent_submit_btn.click(
-                fn=handle_eb_agent_chat,
-                inputs=[eb_agent_chat_input_ui, eb_agent_chat_history_st, token_state],
-                outputs=[eb_agent_chatbot_ui, eb_agent_chat_history_st, eb_agent_chat_input_ui, eb_agent_status_md, eb_agent_schema_display_md],
-                api_name="eb_agent_chat_submit"
-            )
-            eb_agent_chat_input_ui.submit(
-                fn=handle_eb_agent_chat,
-                inputs=[eb_agent_chat_input_ui, eb_agent_chat_history_st, token_state],
-                outputs=[eb_agent_chatbot_ui, eb_agent_chat_history_st, eb_agent_chat_input_ui, eb_agent_status_md, eb_agent_schema_display_md],
-                api_name="eb_agent_chat_enter"
-            )
-            eb_agent_clear_btn.click(
-                fn=clear_eb_agent_chat_history,
-                inputs=[],
-                outputs=[eb_agent_chatbot_ui, eb_agent_chat_history_st, eb_agent_status_md],
-                api_name="eb_agent_clear_chat"
-            )
-

    # --- Sync Events (at the end of the app's 'with gr.Blocks()' context) ---
    sync_event_part1 = sync_data_btn.click(fn=sync_all_linkedin_data_orchestrator, inputs=[token_state], outputs=[sync_status_html_output, token_state], show_progress="full")
@@ -836,13 +597,7 @@ if __name__ == "__main__":
     if not all(os.environ.get(var) for var in [BUBBLE_APP_NAME_ENV_VAR, BUBBLE_API_KEY_PRIVATE_ENV_VAR, BUBBLE_API_ENDPOINT_ENV_VAR]):
         logging.warning("ATTENZIONE: Variabili Bubble non impostate.")

-    if not EB_AGENT_AVAILABLE:
-        logging.error("L'Agente AI per l'Employer Branding non è disponibile a causa di errori di importazione.")
-    elif not os.getenv('GEMINI_API_KEY'):
-        logging.warning("ATTENZIONE: GEMINI_API_KEY non è impostata. L'Agente AI per l'Employer Branding potrebbe non funzionare.")
-
     try: logging.info(f"Matplotlib: {matplotlib.__version__}, Backend: {matplotlib.get_backend()}")
     except ImportError: logging.warning("Matplotlib non trovato.")

     app.launch(server_name="0.0.0.0", server_port=7860, debug=True)
-
 