# app.py import gradio as gr import pandas as pd import os import logging import matplotlib matplotlib.use('Agg') # Set backend for Matplotlib to avoid GUI conflicts with Gradio # import time # No longer directly used here for profiling from datetime import datetime, timedelta # import numpy as np # No longer directly used here # from collections import OrderedDict, defaultdict # Moved or not needed directly # --- Module Imports --- from utils.gradio_utils import get_url_user_token from config import ( LINKEDIN_CLIENT_ID_ENV_VAR, BUBBLE_APP_NAME_ENV_VAR, BUBBLE_API_KEY_PRIVATE_ENV_VAR, BUBBLE_API_ENDPOINT_ENV_VAR) # PLOT_ID_TO_FORMULA_KEY_MAP moved from services.state_manager import process_and_store_bubble_token from services.sync_logic import sync_all_linkedin_data_orchestrator from ui.ui_generators import display_main_dashboard # Other UI generators moved or used internally by new modules # --- NEW UI MODULE IMPORTS --- from ui import analytics_tab from ui import agentic_module # --- EXISTING CHATBOT MODULE IMPORTS (used by analytics_tab) --- # from features.chatbot.chatbot_prompts import get_initial_insight_prompt_and_suggestions # Used in analytics_tab # from features.chatbot.chatbot_handler import generate_llm_response # Used in analytics_tab # --- AGENTIC PIPELINE IMPORTS (used by agentic_module) --- # AGENTIC_MODULES_LOADED is handled within agentic_module.py # Configure logging logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(module)s - %(message)s') # 1. Set Vertex AI usage preference (if applicable) os.environ["GOOGLE_GENAI_USE_VERTEXAI"] = "False" # 2. Get your API key user_provided_api_key = os.environ.get("GEMINI_API_KEY") if user_provided_api_key: os.environ["GOOGLE_API_KEY"] = user_provided_api_key logging.info("GOOGLE_API_KEY environment variable has been set from GEMINI_API_KEY.") else: logging.error(f"CRITICAL ERROR: The API key environment variable 'GEMINI_API_KEY' was not found.") # --- Gradio UI Blocks --- with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky"), title="LinkedIn Organization Dashboard") as app: # --- Core States --- token_state = gr.State(value={ "token": None, "client_id": None, "org_urn": None, "bubble_posts_df": pd.DataFrame(), "bubble_post_stats_df": pd.DataFrame(), "bubble_mentions_df": pd.DataFrame(), "bubble_follower_stats_df": pd.DataFrame(), "fetch_count_for_api": 0, "url_user_token_temp_storage": None, "config_date_col_posts": "published_at", "config_date_col_mentions": "date", "config_date_col_followers": "date", "config_media_type_col": "media_type", "config_eb_labels_col": "li_eb_label" }) # States for analytics tab chatbot (passed to analytics_tab module) chat_histories_st = gr.State({}) current_chat_plot_id_st = gr.State(None) plot_data_for_chatbot_st = gr.State({}) # Populated by analytics_tab.handle_refresh_analytics_graphs active_panel_action_state = gr.State(None) # For insights/formula panel explored_plot_id_state = gr.State(None) # For explore plot view # States for Agentic Pipeline (passed to agentic_module) orchestration_raw_results_st = gr.State(None) key_results_for_selection_st = gr.State([]) # Stores the list of dicts for choices selected_key_result_ids_st = gr.State([]) # Stores the selected unique_kr_ids # --- Top Level UI --- gr.Markdown("# 🚀 LinkedIn Organization Dashboard") url_user_token_display = gr.Textbox(label="User Token (Nascosto)", interactive=False, visible=False) status_box = gr.Textbox(label="Stato Generale Token LinkedIn", interactive=False, value="Inizializzazione...") org_urn_display = gr.Textbox(label="URN Organizzazione (Nascosto)", interactive=False, visible=False) # Load URL parameters on app load app.load(fn=get_url_user_token, inputs=None, outputs=[url_user_token_display, org_urn_display], api_name="get_url_params", show_progress=False) # --- Tabs --- with gr.Tabs() as tabs: # --- Tab 1: Dashboard & Sync --- with gr.TabItem("1️⃣ Dashboard & Sync", id="tab_dashboard_sync"): gr.Markdown("Il sistema controlla i dati esistenti da Bubble. 'Sincronizza' si attiva se sono necessari nuovi dati.") sync_data_btn = gr.Button("🔄 Sincronizza Dati LinkedIn", variant="primary", visible=False, interactive=False) sync_status_html_output = gr.HTML("

Stato sincronizzazione...

") dashboard_display_html = gr.HTML("

Caricamento dashboard...

") # --- Tab 2: Analisi Grafici --- with gr.TabItem("2️⃣ Analisi Grafici", id="tab_analytics"): # Build UI and wire internal events within analytics_tab module (apply_filter_btn_analytics, date_filter_selector_analytics, custom_start_date_picker_analytics, custom_end_date_picker_analytics, analytics_status_md_ref, # Reference to the status markdown in analytics tab analytics_refresh_outputs_components, # list of components for refresh handler output analytics_refresh_outputs_plus_states # list of components + states for refresh handler output ) = analytics_tab.build_and_wire_tab( token_state, chat_histories_st, current_chat_plot_id_st, plot_data_for_chatbot_st, active_panel_action_state, explored_plot_id_state ) # --- Tabs 3 & 4: Agentic Pipeline --- # build_and_wire_tabs will create TabItems internally agentic_pipeline_output_components = agentic_module.build_and_wire_tabs( orchestration_raw_results_st, key_results_for_selection_st, selected_key_result_ids_st ) # --- Event Chaining & Orchestration --- # Initial Load Sequence (Simplified: direct calls, complex logic in handlers) def initial_load_sequence_wrapper(url_token, org_urn_val, current_state): # This function is primarily for the first tab's initial state. status_msg, new_state, btn_update = process_and_store_bubble_token(url_token, org_urn_val, current_state) dashboard_content = display_main_dashboard(new_state) # From ui_generators return status_msg, new_state, btn_update, dashboard_content # Outputs for the agentic pipeline handler # Order: report_display, key_results_cbg, okr_detail_display, # orchestration_raw_results_st, selected_key_result_ids_st, key_results_for_selection_st, # agentic_pipeline_status_md agentic_pipeline_full_outputs_list = agentic_pipeline_output_components[:3] + \ [orchestration_raw_results_st, selected_key_result_ids_st, key_results_for_selection_st] + \ [agentic_pipeline_output_components[3]] initial_load_event = org_urn_display.change( fn=initial_load_sequence_wrapper, inputs=[url_user_token_display, org_urn_display, token_state], outputs=[status_box, token_state, sync_data_btn, dashboard_display_html], show_progress="full" ) # After initial load, refresh analytics graphs initial_load_event.then( fn=analytics_tab.handle_refresh_analytics_graphs, inputs=[token_state, date_filter_selector_analytics, custom_start_date_picker_analytics, custom_end_date_picker_analytics, chat_histories_st], outputs=analytics_refresh_outputs_plus_states, # Use the list from analytics_tab show_progress="full" ).then( # Then run agentic pipeline fn=agentic_module.handle_run_agentic_pipeline, inputs=[token_state, orchestration_raw_results_st, key_results_for_selection_st, selected_key_result_ids_st], # Pass states outputs=agentic_pipeline_full_outputs_list, show_progress="minimal" ) # Sync Data Event Chain sync_event_part1 = sync_data_btn.click( fn=sync_all_linkedin_data_orchestrator, inputs=[token_state], outputs=[sync_status_html_output, token_state], # token_state is updated here show_progress="full" ) # After sync, re-process token and update dashboard display (Tab 1) sync_event_part2 = sync_event_part1.then( fn=process_and_store_bubble_token, # This updates token_state again inputs=[url_user_token_display, org_urn_display, token_state], # Pass the updated token_state outputs=[status_box, token_state, sync_data_btn], # token_state updated again show_progress=False ) # After token processing, re-run agentic pipeline with potentially new data sync_event_part2.then( fn=agentic_module.handle_run_agentic_pipeline, inputs=[token_state, orchestration_raw_results_st, key_results_for_selection_st, selected_key_result_ids_st], # Pass the latest token_state outputs=agentic_pipeline_full_outputs_list, show_progress="minimal" ) # Then, update the main dashboard display on Tab 1 sync_event_part3 = sync_event_part2.then( fn=display_main_dashboard, inputs=[token_state], # Use the latest token_state outputs=[dashboard_display_html], show_progress=False ) # Finally, refresh analytics graphs on Tab 2 sync_event_graphs_after_sync = sync_event_part3.then( fn=analytics_tab.handle_refresh_analytics_graphs, inputs=[token_state, date_filter_selector_analytics, custom_start_date_picker_analytics, custom_end_date_picker_analytics, chat_histories_st], outputs=analytics_refresh_outputs_plus_states, # Use the list from analytics_tab show_progress="full" ) # --- App Launch --- if __name__ == "__main__": if not os.environ.get(LINKEDIN_CLIENT_ID_ENV_VAR): logging.warning(f"ATTENZIONE: '{LINKEDIN_CLIENT_ID_ENV_VAR}' non impostata.") if not all(os.environ.get(var) for var in [BUBBLE_APP_NAME_ENV_VAR, BUBBLE_API_KEY_PRIVATE_ENV_VAR, BUBBLE_API_ENDPOINT_ENV_VAR]): logging.warning("ATTENZIONE: Variabili Bubble non impostate.") # AGENTIC_MODULES_LOADED is now checked within agentic_module.py, log from there if needed. # We can add a check here based on the import success if desired for app startup. if not agentic_module.AGENTIC_MODULES_LOADED: # Check the flag from the module logging.warning("CRITICAL: Agentic pipeline modules failed to load. Tabs 3 and 4 will be non-functional.") if not os.environ.get("GEMINI_API_KEY") and agentic_module.AGENTIC_MODULES_LOADED: logging.warning("ATTENZIONE: 'GEMINI_API_KEY' non impostata. La pipeline AI per le tab 3 e 4 potrebbe non funzionare.") try: logging.info(f"Matplotlib: {matplotlib.__version__}, Backend: {matplotlib.get_backend()}") except ImportError: logging.warning("Matplotlib non trovato.") app.launch(server_name="0.0.0.0", server_port=7860, debug=True)