# app.py
import gradio as gr
import pandas as pd
import os
import logging
import matplotlib
matplotlib.use('Agg') # Set backend for Matplotlib to avoid GUI conflicts with Gradio
# import time # No longer directly used here for profiling
from datetime import datetime, timedelta
# import numpy as np # No longer directly used here
# from collections import OrderedDict, defaultdict # Moved or not needed directly
# --- Module Imports ---
from utils.gradio_utils import get_url_user_token
from config import (
    LINKEDIN_CLIENT_ID_ENV_VAR, BUBBLE_APP_NAME_ENV_VAR,
    BUBBLE_API_KEY_PRIVATE_ENV_VAR, BUBBLE_API_ENDPOINT_ENV_VAR)  # PLOT_ID_TO_FORMULA_KEY_MAP moved
from services.state_manager import process_and_store_bubble_token
from services.sync_logic import sync_all_linkedin_data_orchestrator
from ui.ui_generators import display_main_dashboard # Other UI generators moved or used internally by new modules
# --- NEW UI MODULE IMPORTS ---
from ui import analytics_tab
from ui import agentic_module
# --- EXISTING CHATBOT MODULE IMPORTS (used by analytics_tab) ---
# from features.chatbot.chatbot_prompts import get_initial_insight_prompt_and_suggestions # Used in analytics_tab
# from features.chatbot.chatbot_handler import generate_llm_response # Used in analytics_tab
# --- AGENTIC PIPELINE IMPORTS (used by agentic_module) ---
# AGENTIC_MODULES_LOADED is handled within agentic_module.py
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(module)s - %(message)s')
# 1. Set Vertex AI usage preference (if applicable)
os.environ["GOOGLE_GENAI_USE_VERTEXAI"] = "False"
# 2. Get your API key
user_provided_api_key = os.environ.get("GEMINI_API_KEY")
if user_provided_api_key:
    os.environ["GOOGLE_API_KEY"] = user_provided_api_key
    logging.info("GOOGLE_API_KEY environment variable has been set from GEMINI_API_KEY.")
else:
    logging.error("CRITICAL ERROR: The API key environment variable 'GEMINI_API_KEY' was not found.")
# --- Gradio UI Blocks ---
with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky"),
               title="LinkedIn Organization Dashboard") as app:
    # --- Core States ---
    token_state = gr.State(value={
        "token": None, "client_id": None, "org_urn": None,
        "bubble_posts_df": pd.DataFrame(), "bubble_post_stats_df": pd.DataFrame(),
        "bubble_mentions_df": pd.DataFrame(), "bubble_follower_stats_df": pd.DataFrame(),
        "fetch_count_for_api": 0, "url_user_token_temp_storage": None,
        "config_date_col_posts": "published_at", "config_date_col_mentions": "date",
        "config_date_col_followers": "date", "config_media_type_col": "media_type",
        "config_eb_labels_col": "li_eb_label"
    })
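    # Handlers receive this state as a plain dict. A hypothetical handler would
    # read the config keys rather than hard-coding column names, e.g.:
    #
    #     def my_handler(state):
    #         posts_df = state["bubble_posts_df"]
    #         date_col = state["config_date_col_posts"]  # "published_at"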
    # States for analytics tab chatbot (passed to analytics_tab module)
    chat_histories_st = gr.State({})
    current_chat_plot_id_st = gr.State(None)
    plot_data_for_chatbot_st = gr.State({})  # Populated by analytics_tab.handle_refresh_analytics_graphs
    active_panel_action_state = gr.State(None)  # For insights/formula panel
    explored_plot_id_state = gr.State(None)  # For explore plot view
    # States for Agentic Pipeline (passed to agentic_module)
    orchestration_raw_results_st = gr.State(None)
    key_results_for_selection_st = gr.State([])  # Stores the list of dicts for choices
    selected_key_result_ids_st = gr.State([])  # Stores the selected unique_kr_ids
    # --- Top-Level UI ---
    gr.Markdown("# 🚀 LinkedIn Organization Dashboard")
    url_user_token_display = gr.Textbox(label="User Token (Hidden)", interactive=False, visible=False)
    status_box = gr.Textbox(label="Overall LinkedIn Token Status", interactive=False, value="Initializing...")
    org_urn_display = gr.Textbox(label="Organization URN (Hidden)", interactive=False, visible=False)
    # Load URL parameters on app load
    app.load(fn=get_url_user_token, inputs=None, outputs=[url_user_token_display, org_urn_display], api_name="get_url_params", show_progress=False)
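    # get_url_user_token is expected (an assumption based on its name) to read
    # the request's query parameters and return the user token and org URN as
    # two strings, which populate the hidden textboxes above.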
    # --- Tabs ---
    with gr.Tabs() as tabs:
        # --- Tab 1: Dashboard & Sync ---
        with gr.TabItem("1️⃣ Dashboard & Sync", id="tab_dashboard_sync"):
            gr.Markdown("The system checks existing data from Bubble. 'Sync' becomes active only if new data is needed.")
            sync_data_btn = gr.Button("🔄 Sync LinkedIn Data", variant="primary", visible=False, interactive=False)
            sync_status_html_output = gr.HTML("<p style='text-align:center;'>Sync status...</p>")
            dashboard_display_html = gr.HTML("<p style='text-align:center;'>Loading dashboard...</p>")
        # --- Tab 2: Chart Analysis ---
        with gr.TabItem("2️⃣ Chart Analysis", id="tab_analytics"):
            # Build the UI and wire internal events within the analytics_tab module
            (apply_filter_btn_analytics, date_filter_selector_analytics,
             custom_start_date_picker_analytics, custom_end_date_picker_analytics,
             analytics_status_md_ref,  # Reference to the status markdown in the analytics tab
             analytics_refresh_outputs_components,  # List of components for the refresh handler output
             analytics_refresh_outputs_plus_states  # List of components + states for the refresh handler output
             ) = analytics_tab.build_and_wire_tab(
                token_state, chat_histories_st, current_chat_plot_id_st,
                plot_data_for_chatbot_st, active_panel_action_state, explored_plot_id_state
            )
# --- Tabs 3 & 4: Agentic Pipeline ---
# build_and_wire_tabs will create TabItems internally
agentic_pipeline_output_components = agentic_module.build_and_wire_tabs(
orchestration_raw_results_st,
key_results_for_selection_st,
selected_key_result_ids_st
)
    # --- Event Chaining & Orchestration ---
    # Initial load sequence (simplified: direct calls, complex logic in handlers)
    def initial_load_sequence_wrapper(url_token, org_urn_val, current_state):
        # This function is primarily for the first tab's initial state.
        status_msg, new_state, btn_update = process_and_store_bubble_token(url_token, org_urn_val, current_state)
        dashboard_content = display_main_dashboard(new_state)  # From ui_generators
        return status_msg, new_state, btn_update, dashboard_content
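    # The return order above must match the `outputs` list of the
    # org_urn_display.change event registered below.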
    # Outputs for the agentic pipeline handler.
    # Order: report_display, key_results_cbg, okr_detail_display,
    # orchestration_raw_results_st, selected_key_result_ids_st, key_results_for_selection_st,
    # agentic_pipeline_status_md
    agentic_pipeline_full_outputs_list = agentic_pipeline_output_components[:3] + \
        [orchestration_raw_results_st, selected_key_result_ids_st, key_results_for_selection_st] + \
        [agentic_pipeline_output_components[3]]
    initial_load_event = org_urn_display.change(
        fn=initial_load_sequence_wrapper,
        inputs=[url_user_token_display, org_urn_display, token_state],
        outputs=[status_box, token_state, sync_data_btn, dashboard_display_html],
        show_progress="full"
    )
    # After the initial load, refresh the analytics graphs
    initial_load_event.then(
        fn=analytics_tab.handle_refresh_analytics_graphs,
        inputs=[token_state, date_filter_selector_analytics, custom_start_date_picker_analytics, custom_end_date_picker_analytics, chat_histories_st],
        outputs=analytics_refresh_outputs_plus_states,  # Use the list from analytics_tab
        show_progress="full"
    ).then(  # Then run the agentic pipeline
        fn=agentic_module.handle_run_agentic_pipeline,
        inputs=[token_state, orchestration_raw_results_st, key_results_for_selection_st, selected_key_result_ids_st],  # Pass states
        outputs=agentic_pipeline_full_outputs_list,
        show_progress="minimal"
    )
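    # Gradio's .then() only fires after the preceding step completes, so the
    # analytics refresh and the agentic pipeline always see the token_state
    # produced by the initial load.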
    # Sync Data Event Chain
    sync_event_part1 = sync_data_btn.click(
        fn=sync_all_linkedin_data_orchestrator,
        inputs=[token_state],
        outputs=[sync_status_html_output, token_state],  # token_state is updated here
        show_progress="full"
    )
    # After sync, re-process the token and update the dashboard display (Tab 1)
    sync_event_part2 = sync_event_part1.then(
        fn=process_and_store_bubble_token,  # This updates token_state again
        inputs=[url_user_token_display, org_urn_display, token_state],  # Pass the updated token_state
        outputs=[status_box, token_state, sync_data_btn],  # token_state updated again
        show_progress=False
    )
    # After token processing, re-run the agentic pipeline with potentially new data
    sync_event_part2.then(
        fn=agentic_module.handle_run_agentic_pipeline,
        inputs=[token_state, orchestration_raw_results_st, key_results_for_selection_st, selected_key_result_ids_st],  # Pass the latest token_state
        outputs=agentic_pipeline_full_outputs_list,
        show_progress="minimal"
    )
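    # Note: this step and sync_event_part3 below both chain off sync_event_part2,
    # so they run as independent listeners once part 2 completes; the chain does
    # not strictly sequence them relative to each other.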
    # Then, update the main dashboard display on Tab 1
    sync_event_part3 = sync_event_part2.then(
        fn=display_main_dashboard,
        inputs=[token_state],  # Use the latest token_state
        outputs=[dashboard_display_html],
        show_progress=False
    )
    # Finally, refresh the analytics graphs on Tab 2
    sync_event_graphs_after_sync = sync_event_part3.then(
        fn=analytics_tab.handle_refresh_analytics_graphs,
        inputs=[token_state, date_filter_selector_analytics, custom_start_date_picker_analytics, custom_end_date_picker_analytics, chat_histories_st],
        outputs=analytics_refresh_outputs_plus_states,  # Use the list from analytics_tab
        show_progress="full"
    )
# --- App Launch ---
if __name__ == "__main__":
    if not os.environ.get(LINKEDIN_CLIENT_ID_ENV_VAR):
        logging.warning(f"WARNING: '{LINKEDIN_CLIENT_ID_ENV_VAR}' is not set.")
    if not all(os.environ.get(var) for var in [BUBBLE_APP_NAME_ENV_VAR, BUBBLE_API_KEY_PRIVATE_ENV_VAR, BUBBLE_API_ENDPOINT_ENV_VAR]):
        logging.warning("WARNING: Bubble environment variables are not set.")
    # AGENTIC_MODULES_LOADED is now checked within agentic_module.py; log from there if needed.
    # We can also check here, based on import success, for app startup.
    if not agentic_module.AGENTIC_MODULES_LOADED:  # Check the flag from the module
        logging.warning("CRITICAL: Agentic pipeline modules failed to load. Tabs 3 and 4 will be non-functional.")
    if not os.environ.get("GEMINI_API_KEY") and agentic_module.AGENTIC_MODULES_LOADED:
        logging.warning("WARNING: 'GEMINI_API_KEY' is not set. The AI pipeline for tabs 3 and 4 may not work.")
    # Matplotlib was imported at module level, so an ImportError cannot occur here;
    # guard only against unexpected runtime errors while reporting the version.
    try:
        logging.info(f"Matplotlib: {matplotlib.__version__}, Backend: {matplotlib.get_backend()}")
    except Exception:
        logging.warning("Could not determine the Matplotlib version/backend.")
    app.launch(server_name="0.0.0.0", server_port=7860, debug=True)
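    # If long-running handlers block concurrent users, enabling Gradio's request
    # queue before launch may help (a sketch; max_size is an arbitrary choice):
    #
    #     app.queue(max_size=20)
    #     app.launch(server_name="0.0.0.0", server_port=7860, debug=True)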