# app.py
# (Showing relevant parts that need modification)
import gradio as gr
import pandas as pd
import os
import logging
import matplotlib
matplotlib.use('Agg')  # Set backend for Matplotlib to avoid GUI conflicts with Gradio
import matplotlib.pyplot as plt
import time  # For profiling if needed
from datetime import datetime, timedelta  # Added timedelta
import numpy as np
from collections import OrderedDict  # To maintain section order
import asyncio  # For async operations with the new agent
# --- Module Imports ---
from gradio_utils import get_url_user_token

# Functions from newly created/refactored modules
from config import (
    LINKEDIN_CLIENT_ID_ENV_VAR, BUBBLE_APP_NAME_ENV_VAR,
    BUBBLE_API_KEY_PRIVATE_ENV_VAR, BUBBLE_API_ENDPOINT_ENV_VAR,
    PLOT_ID_TO_FORMULA_KEY_MAP)
from state_manager import process_and_store_bubble_token
from sync_logic import sync_all_linkedin_data_orchestrator
from ui_generators import (
    display_main_dashboard,
    run_mentions_tab_display,
    run_follower_stats_tab_display,
    build_analytics_tab_plot_area,  # EXPECTED TO RETURN: plot_ui_objects, section_titles_map
    BOMB_ICON, EXPLORE_ICON, FORMULA_ICON, ACTIVE_ICON
)
from analytics_plot_generator import update_analytics_plots_figures, create_placeholder_plot
from formulas import PLOT_FORMULAS

# --- EXISTING CHATBOT MODULE IMPORTS ---
from chatbot_prompts import get_initial_insight_prompt_and_suggestions  # MODIFIED IMPORT
from chatbot_handler import generate_llm_response
# --- END EXISTING CHATBOT MODULE IMPORTS ---
# --- NEW EMPLOYER BRANDING AGENT MODULE IMPORTS ---
try:
    from eb_agent_module import (
        EmployerBrandingAgent,
        GENERATION_CONFIG_PARAMS as EB_AGENT_GEN_CONFIG,  # Rename to avoid conflict
        LLM_MODEL_NAME as EB_AGENT_LLM_MODEL,  # Rename
        GEMINI_EMBEDDING_MODEL_NAME as EB_AGENT_EMBEDDING_MODEL,  # Rename
        DEFAULT_SAFETY_SETTINGS as EB_AGENT_SAFETY_SETTINGS  # Import safety settings
    )
    EB_AGENT_AVAILABLE = True
    logging.info("Successfully imported EmployerBrandingAgent module.")
except ImportError as e:
    logging.error(f"Failed to import EmployerBrandingAgent module: {e}", exc_info=True)
    EB_AGENT_AVAILABLE = False
    # Define dummy classes/variables if import fails, so the app can still run.
    class EmployerBrandingAgent:
        def __init__(self, *args, **kwargs): logging.error("EB Agent Dummy Class Initialized")
        async def process_query(self, query, **kwargs): return "# Error: Employer Branding Agent module not loaded."
        def update_dataframes(self, dfs): pass
        def clear_chat_history(self): pass

    # Placeholder config values used when the module is unavailable.
    EB_AGENT_GEN_CONFIG, EB_AGENT_LLM_MODEL, EB_AGENT_EMBEDDING_MODEL, EB_AGENT_SAFETY_SETTINGS = {}, None, None, []
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(module)s - %(message)s')

# --- Gradio UI Blocks ---
with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky"),
               title="LinkedIn Organization Dashboard") as app:

    token_state = gr.State(value={
        "token": None, "client_id": None, "org_urn": None,
        "bubble_posts_df": pd.DataFrame(), "bubble_post_stats_df": pd.DataFrame(),
        "bubble_mentions_df": pd.DataFrame(), "bubble_follower_stats_df": pd.DataFrame(),
        "fetch_count_for_api": 0, "url_user_token_temp_storage": None,
        "config_date_col_posts": "published_at", "config_date_col_mentions": "date",
        "config_date_col_followers": "date", "config_media_type_col": "media_type",
        "config_eb_labels_col": "li_eb_label"
    })
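    # The "config_*" keys name the DataFrame columns (post/mention/follower dates, media type,
    # EB labels) that the display and plotting modules are expected to read from the Bubble data.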
    # States for existing analytics tab chatbot
    chat_histories_st = gr.State({})
    current_chat_plot_id_st = gr.State(None)
    plot_data_for_chatbot_st = gr.State({})

    # --- NEW: States for Employer Branding Agent Tab ---
    eb_agent_chat_history_st = gr.State([])
    # The agent instance itself will be created on-the-fly or managed if complex state is needed.
    # For now, we'll re-initialize it with fresh data in the handler.

    gr.Markdown("# 🚀 LinkedIn Organization Dashboard")
    url_user_token_display = gr.Textbox(label="User Token (Nascosto)", interactive=False, visible=False)
    status_box = gr.Textbox(label="Stato Generale Token LinkedIn", interactive=False, value="Inizializzazione...")
    org_urn_display = gr.Textbox(label="URN Organizzazione (Nascosto)", interactive=False, visible=False)
    app.load(fn=get_url_user_token, inputs=None, outputs=[url_user_token_display, org_urn_display], api_name="get_url_params", show_progress=False)

    def initial_load_sequence(url_token, org_urn_val, current_state):
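        """Process the token from the URL, store it in state, and render the initial dashboard.

        Returns the status message, the updated token state, the sync-button update and the
        dashboard HTML, in the order expected by the org_urn_display.change() wiring below.
        """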
        status_msg, new_state, btn_update = process_and_store_bubble_token(url_token, org_urn_val, current_state)
        dashboard_content = display_main_dashboard(new_state)
        return status_msg, new_state, btn_update, dashboard_content

    with gr.Tabs() as tabs:
        with gr.TabItem("1️⃣ Dashboard & Sync", id="tab_dashboard_sync"):
            gr.Markdown("Il sistema controlla i dati esistenti da Bubble. 'Sincronizza' si attiva se sono necessari nuovi dati.")
            sync_data_btn = gr.Button("🔄 Sincronizza Dati LinkedIn", variant="primary", visible=False, interactive=False)
            sync_status_html_output = gr.HTML("<p style='text-align:center;'>Stato sincronizzazione...</p>")
            dashboard_display_html = gr.HTML("<p style='text-align:center;'>Caricamento dashboard...</p>")

            org_urn_display.change(
                fn=initial_load_sequence,
                inputs=[url_user_token_display, org_urn_display, token_state],
                outputs=[status_box, token_state, sync_data_btn, dashboard_display_html],
                show_progress="full"
            )
with gr.TabItem("2️⃣ Analisi", id="tab_analytics"): | |
gr.Markdown("## 📈 Analisi Performance LinkedIn") | |
gr.Markdown("Seleziona un intervallo di date. Clicca i pulsanti (💣 Insights, ƒ Formula, 🧭 Esplora) su un grafico per azioni.") | |
analytics_status_md = gr.Markdown("Stato analisi...") | |
with gr.Row(): | |
date_filter_selector = gr.Radio( | |
["Sempre", "Ultimi 7 Giorni", "Ultimi 30 Giorni", "Intervallo Personalizzato"], | |
label="Seleziona Intervallo Date", value="Sempre", scale=3 | |
) | |
with gr.Column(scale=2): | |
custom_start_date_picker = gr.DateTime(label="Data Inizio", visible=False, include_time=False, type="datetime") | |
custom_end_date_picker = gr.DateTime(label="Data Fine", visible=False, include_time=False, type="datetime") | |
apply_filter_btn = gr.Button("🔍 Applica Filtro & Aggiorna Analisi", variant="primary") | |
def toggle_custom_date_pickers(selection): | |
is_custom = selection == "Intervallo Personalizzato" | |
return gr.update(visible=is_custom), gr.update(visible=is_custom) | |
date_filter_selector.change( | |
fn=toggle_custom_date_pickers, | |
inputs=[date_filter_selector], | |
outputs=[custom_start_date_picker, custom_end_date_picker] | |
) | |
            plot_configs = [
                {"label": "Numero di Follower nel Tempo", "id": "followers_count", "section": "Dinamiche dei Follower"},
                {"label": "Tasso di Crescita Follower", "id": "followers_growth_rate", "section": "Dinamiche dei Follower"},
                {"label": "Follower per Località", "id": "followers_by_location", "section": "Demografia Follower"},
                {"label": "Follower per Ruolo (Funzione)", "id": "followers_by_role", "section": "Demografia Follower"},
                {"label": "Follower per Settore", "id": "followers_by_industry", "section": "Demografia Follower"},
                {"label": "Follower per Anzianità", "id": "followers_by_seniority", "section": "Demografia Follower"},
                {"label": "Tasso di Engagement nel Tempo", "id": "engagement_rate", "section": "Approfondimenti Performance Post"},
                {"label": "Copertura nel Tempo", "id": "reach_over_time", "section": "Approfondimenti Performance Post"},
                {"label": "Visualizzazioni nel Tempo", "id": "impressions_over_time", "section": "Approfondimenti Performance Post"},
                {"label": "Reazioni (Like) nel Tempo", "id": "likes_over_time", "section": "Approfondimenti Performance Post"},
                {"label": "Click nel Tempo", "id": "clicks_over_time", "section": "Engagement Dettagliato Post nel Tempo"},
                {"label": "Condivisioni nel Tempo", "id": "shares_over_time", "section": "Engagement Dettagliato Post nel Tempo"},
                {"label": "Commenti nel Tempo", "id": "comments_over_time", "section": "Engagement Dettagliato Post nel Tempo"},
                {"label": "Ripartizione Commenti per Sentiment", "id": "comments_sentiment", "section": "Engagement Dettagliato Post nel Tempo"},
                {"label": "Frequenza Post", "id": "post_frequency_cs", "section": "Analisi Strategia Contenuti"},
                {"label": "Ripartizione Contenuti per Formato", "id": "content_format_breakdown_cs", "section": "Analisi Strategia Contenuti"},
                {"label": "Ripartizione Contenuti per Argomenti", "id": "content_topic_breakdown_cs", "section": "Analisi Strategia Contenuti"},
                {"label": "Volume Menzioni nel Tempo (Dettaglio)", "id": "mention_analysis_volume", "section": "Analisi Menzioni (Dettaglio)"},
                {"label": "Ripartizione Menzioni per Sentiment (Dettaglio)", "id": "mention_analysis_sentiment", "section": "Analisi Menzioni (Dettaglio)"}
            ]
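            # Each entry drives one plot: "id" is the key used to look up the UI objects returned by
            # build_analytics_tab_plot_area and the per-plot chat summaries, "label" is the
            # user-facing title, and "section" groups plots under a shared section heading.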
            assert len(plot_configs) == 19, "Mancata corrispondenza in plot_configs e grafici attesi."
            unique_ordered_sections = list(OrderedDict.fromkeys(pc["section"] for pc in plot_configs))
            num_unique_sections = len(unique_ordered_sections)

            active_panel_action_state = gr.State(None)
            explored_plot_id_state = gr.State(None)

            plot_ui_objects = {}
            section_titles_map = {}

            with gr.Row(equal_height=False):
                with gr.Column(scale=8) as plots_area_col:
                    ui_elements_tuple = build_analytics_tab_plot_area(plot_configs)
                    if isinstance(ui_elements_tuple, tuple) and len(ui_elements_tuple) == 2:
                        plot_ui_objects, section_titles_map = ui_elements_tuple
                        if not all(sec_name in section_titles_map for sec_name in unique_ordered_sections):
                            logging.error("section_titles_map from build_analytics_tab_plot_area is incomplete.")
                            for sec_name in unique_ordered_sections:
                                if sec_name not in section_titles_map:
                                    section_titles_map[sec_name] = gr.Markdown(f"### {sec_name} (Error Placeholder)")
                    else:
                        logging.error("build_analytics_tab_plot_area did not return a tuple of (plot_ui_objects, section_titles_map).")
                        plot_ui_objects = ui_elements_tuple if isinstance(ui_elements_tuple, dict) else {}
                        for sec_name in unique_ordered_sections:
                            section_titles_map[sec_name] = gr.Markdown(f"### {sec_name} (Error Placeholder)")
                with gr.Column(scale=4, visible=False) as global_actions_column_ui:
                    gr.Markdown("### 💡 Azioni Contestuali Grafico")
                    insights_chatbot_ui = gr.Chatbot(
                        label="Chat Insights", type="messages", height=450,
                        bubble_full_width=False, visible=False, show_label=False,
                        placeholder="L'analisi AI del grafico apparirà qui. Fai domande di approfondimento!"
                    )
                    insights_chat_input_ui = gr.Textbox(
                        label="La tua domanda:", placeholder="Chiedi all'AI riguardo a questo grafico...",
                        lines=2, visible=False, show_label=False
                    )
                    with gr.Row(visible=False) as insights_suggestions_row_ui:
                        insights_suggestion_1_btn = gr.Button(value="Suggerimento 1", size="sm", min_width=50)
                        insights_suggestion_2_btn = gr.Button(value="Suggerimento 2", size="sm", min_width=50)
                        insights_suggestion_3_btn = gr.Button(value="Suggerimento 3", size="sm", min_width=50)
                    formula_display_markdown_ui = gr.Markdown(
                        "I dettagli sulla formula/metodologia appariranno qui.", visible=False
                    )
                    formula_close_hint_md = gr.Markdown(  # Component for the hint's visibility
                        "<p style='font-size:0.9em; text-align:center; margin-top:10px;'><em>Click the active ƒ button on the plot again to close this panel.</em></p>",
                        visible=False
                    )
            # --- ASYNC HANDLERS FOR ANALYTICS TAB ---
            async def handle_panel_action(
                plot_id_clicked: str, action_type: str, current_active_action_from_state: dict,
                current_chat_histories: dict, current_chat_plot_id: str,
                current_plot_data_for_chatbot: dict, current_explored_plot_id: str
            ):
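                """Toggle the insights/formula side panel for the clicked plot.

                Returns a flat list of updates whose order must match action_panel_outputs_list:
                11 panel-UI updates, 4 state values (active action, chat plot id, chat histories,
                explored plot id), then per-plot updates for panel visibility, 💣 buttons, ƒ buttons
                and 🧭 buttons, and finally one visibility update per section title.
                """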
logging.info(f"Panel Action: '{action_type}' for plot '{plot_id_clicked}'. Active: {current_active_action_from_state}, Explored: {current_explored_plot_id}") | |
clicked_plot_config = next((p for p in plot_configs if p["id"] == plot_id_clicked), None) | |
if not clicked_plot_config: | |
logging.error(f"Config not found for plot_id {plot_id_clicked}") | |
num_plots = len(plot_configs) | |
error_list_len = 15 + (4 * num_plots) + num_unique_sections | |
error_list = [gr.update()] * error_list_len | |
error_list[11] = current_active_action_from_state | |
error_list[12] = current_chat_plot_id | |
error_list[13] = current_chat_histories | |
error_list[14] = current_explored_plot_id | |
return error_list | |
clicked_plot_label = clicked_plot_config["label"] | |
clicked_plot_section = clicked_plot_config["section"] | |
hypothetical_new_active_state = {"plot_id": plot_id_clicked, "type": action_type} | |
is_toggling_off = current_active_action_from_state == hypothetical_new_active_state | |
action_col_visible_update = gr.update(visible=False) | |
insights_chatbot_visible_update, insights_chat_input_visible_update, insights_suggestions_row_visible_update = gr.update(visible=False), gr.update(visible=False), gr.update(visible=False) | |
formula_display_visible_update = gr.update(visible=False) | |
formula_close_hint_visible_update = gr.update(visible=False) | |
chatbot_content_update, s1_upd, s2_upd, s3_upd, formula_content_update = gr.update(), gr.update(), gr.update(), gr.update(), gr.update() | |
new_active_action_state_to_set, new_current_chat_plot_id = None, current_chat_plot_id | |
updated_chat_histories, new_explored_plot_id_to_set = current_chat_histories, current_explored_plot_id | |
generated_panel_vis_updates = [] | |
generated_bomb_btn_updates = [] | |
generated_formula_btn_updates = [] | |
generated_explore_btn_updates = [] | |
section_title_vis_updates = [gr.update()] * num_unique_sections | |
                if is_toggling_off:
                    new_active_action_state_to_set = None
                    action_col_visible_update = gr.update(visible=False)
                    logging.info(f"Toggling OFF panel {action_type} for {plot_id_clicked}.")
                    for _ in plot_configs:
                        generated_bomb_btn_updates.append(gr.update(value=BOMB_ICON))
                        generated_formula_btn_updates.append(gr.update(value=FORMULA_ICON))
                    if current_explored_plot_id:
                        explored_cfg = next((p for p in plot_configs if p["id"] == current_explored_plot_id), None)
                        explored_sec = explored_cfg["section"] if explored_cfg else None
                        for i, sec_name in enumerate(unique_ordered_sections):
                            section_title_vis_updates[i] = gr.update(visible=(sec_name == explored_sec))
                        for cfg in plot_configs:
                            is_exp = (cfg["id"] == current_explored_plot_id)
                            generated_panel_vis_updates.append(gr.update(visible=is_exp))
                            generated_explore_btn_updates.append(gr.update(value=ACTIVE_ICON if is_exp else EXPLORE_ICON))
                    else:
                        for i in range(num_unique_sections): section_title_vis_updates[i] = gr.update(visible=True)
                        for _ in plot_configs:
                            generated_panel_vis_updates.append(gr.update(visible=True))
                            generated_explore_btn_updates.append(gr.update(value=EXPLORE_ICON))
                    if action_type == "insights": new_current_chat_plot_id = None
                else:  # Toggling ON a new action or switching actions
                    new_active_action_state_to_set = hypothetical_new_active_state
                    action_col_visible_update = gr.update(visible=True)
                    new_explored_plot_id_to_set = None
                    logging.info(f"Toggling ON panel {action_type} for {plot_id_clicked}. Cancelling explore view if any.")
                    for i, sec_name in enumerate(unique_ordered_sections):
                        section_title_vis_updates[i] = gr.update(visible=(sec_name == clicked_plot_section))
                    for cfg in plot_configs:
                        generated_panel_vis_updates.append(gr.update(visible=(cfg["id"] == plot_id_clicked)))
                        generated_explore_btn_updates.append(gr.update(value=EXPLORE_ICON))
                    for cfg_btn in plot_configs:
                        is_act_ins = new_active_action_state_to_set == {"plot_id": cfg_btn["id"], "type": "insights"}
                        is_act_for = new_active_action_state_to_set == {"plot_id": cfg_btn["id"], "type": "formula"}
                        generated_bomb_btn_updates.append(gr.update(value=ACTIVE_ICON if is_act_ins else BOMB_ICON))
                        generated_formula_btn_updates.append(gr.update(value=ACTIVE_ICON if is_act_for else FORMULA_ICON))
                    if action_type == "insights":
                        insights_chatbot_visible_update, insights_chat_input_visible_update, insights_suggestions_row_visible_update = gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)
                        new_current_chat_plot_id = plot_id_clicked
                        history = current_chat_histories.get(plot_id_clicked, [])
                        summary = current_plot_data_for_chatbot.get(plot_id_clicked, f"No summary for '{clicked_plot_label}'.")
                        if not history:
                            prompt, sugg = get_initial_insight_prompt_and_suggestions(plot_id_clicked, clicked_plot_label, summary)
                            llm_hist = [{"role": "user", "content": prompt}]
                            resp = await generate_llm_response(prompt, plot_id_clicked, clicked_plot_label, llm_hist, summary)  # Existing LLM call for the initial plot insight
                            history = [{"role": "assistant", "content": resp}]
                            updated_chat_histories = {**current_chat_histories, plot_id_clicked: history}
                        else:
                            _, sugg = get_initial_insight_prompt_and_suggestions(plot_id_clicked, clicked_plot_label, summary)
                        chatbot_content_update = gr.update(value=history)
                        s1_upd, s2_upd, s3_upd = gr.update(value=sugg[0] if sugg else "N/A"), gr.update(value=sugg[1] if len(sugg) > 1 else "N/A"), gr.update(value=sugg[2] if len(sugg) > 2 else "N/A")
elif action_type == "formula": | |
formula_display_visible_update = gr.update(visible=True) | |
formula_close_hint_visible_update = gr.update(visible=True) | |
f_key = PLOT_ID_TO_FORMULA_KEY_MAP.get(plot_id_clicked) | |
f_text = f"**Formula/Methodology for: {clicked_plot_label}** (ID: `{plot_id_clicked}`)\n\n" | |
if f_key and f_key in PLOT_FORMULAS: | |
f_data = PLOT_FORMULAS[f_key] | |
f_text += f"### {f_data['title']}\n\n{f_data['description']}\n\n**Calculation:**\n" + "\n".join([f"- {s}" for s in f_data['calculation_steps']]) | |
else: f_text += "(No detailed formula information found.)" | |
formula_content_update = gr.update(value=f_text) | |
new_current_chat_plot_id = None | |
final_updates = [ | |
action_col_visible_update, insights_chatbot_visible_update, chatbot_content_update, | |
insights_chat_input_visible_update, insights_suggestions_row_visible_update, | |
s1_upd, s2_upd, s3_upd, formula_display_visible_update, formula_content_update, | |
formula_close_hint_visible_update, # Corrected from formula_close_hint_md | |
new_active_action_state_to_set, new_current_chat_plot_id, updated_chat_histories, | |
new_explored_plot_id_to_set | |
] | |
final_updates.extend(generated_panel_vis_updates) | |
final_updates.extend(generated_bomb_btn_updates) | |
final_updates.extend(generated_formula_btn_updates) | |
final_updates.extend(generated_explore_btn_updates) | |
final_updates.extend(section_title_vis_updates) | |
logging.debug(f"handle_panel_action returning {len(final_updates)} updates. Expected {15 + 4*len(plot_configs) + num_unique_sections}.") | |
return final_updates | |
            async def handle_chat_message_submission(user_message: str, current_plot_id: str, chat_histories: dict, current_plot_data_for_chatbot: dict):
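                """Stream one chat turn for the currently selected plot.

                Yields (chatbot messages, input-box update, chat_histories state): first with the
                user message appended, then again once the LLM response has been received.
                """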
                if not current_plot_id or not user_message.strip():
                    current_history_for_plot = chat_histories.get(current_plot_id, [])
                    if not isinstance(current_history_for_plot, list): current_history_for_plot = []
                    yield current_history_for_plot, gr.update(value=""), chat_histories
                    return
                cfg = next((p for p in plot_configs if p["id"] == current_plot_id), None)
                lbl = cfg["label"] if cfg else "Selected Plot"
                summary = current_plot_data_for_chatbot.get(current_plot_id, f"No summary for '{lbl}'.")
                hist_for_plot = chat_histories.get(current_plot_id, [])
                if not isinstance(hist_for_plot, list): hist_for_plot = []
                hist = hist_for_plot.copy() + [{"role": "user", "content": user_message}]
                yield hist, gr.update(value=""), chat_histories
                resp = await generate_llm_response(user_message, current_plot_id, lbl, hist, summary)  # Existing LLM call
                hist.append({"role": "assistant", "content": resp})
                updated_chat_histories = {**chat_histories, current_plot_id: hist}
                yield hist, "", updated_chat_histories
            async def handle_suggested_question_click(suggestion_text: str, current_plot_id: str, chat_histories: dict, current_plot_data_for_chatbot: dict):
                if not current_plot_id or not suggestion_text.strip() or suggestion_text == "N/A":
                    current_history_for_plot = chat_histories.get(current_plot_id, [])
                    if not isinstance(current_history_for_plot, list): current_history_for_plot = []
                    yield current_history_for_plot, gr.update(value=""), chat_histories
                    return
                async for update_chunk in handle_chat_message_submission(suggestion_text, current_plot_id, chat_histories, current_plot_data_for_chatbot):
                    yield update_chunk
            def handle_explore_click(plot_id_clicked, current_explored_plot_id_from_state, current_active_panel_action_state):
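                """Toggle single-plot "explore" mode.

                Returns updates in the order expected by explore_outputs_list: explored-plot state,
                actions-column visibility, active-panel state, formula-hint visibility, then per-plot
                panel visibility, 🧭, 💣 and ƒ button updates, and one update per section title.
                """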
                # Synchronous handler.
                logging.info(f"Explore Click: Plot '{plot_id_clicked}'. Current Explored: {current_explored_plot_id_from_state}. Active Panel: {current_active_panel_action_state}")
                num_plots = len(plot_configs)
                if not plot_ui_objects:
                    logging.error("plot_ui_objects not populated for handle_explore_click.")
                    error_list_len = 4 + (4 * num_plots) + num_unique_sections
                    error_list = [gr.update()] * error_list_len
                    error_list[0] = current_explored_plot_id_from_state
                    error_list[2] = current_active_panel_action_state
                    return error_list

                new_explored_id_to_set = None
                is_toggling_off_explore = (plot_id_clicked == current_explored_plot_id_from_state)
                action_col_upd = gr.update()
                new_active_panel_state_upd = current_active_panel_action_state
                formula_hint_upd = gr.update(visible=False)
                panel_vis_updates = []
                explore_btns_updates = []
                bomb_btns_updates = []
                formula_btns_updates = []
                section_title_vis_updates = [gr.update()] * num_unique_sections
                clicked_cfg = next((p for p in plot_configs if p["id"] == plot_id_clicked), None)
                sec_of_clicked = clicked_cfg["section"] if clicked_cfg else None

                if is_toggling_off_explore:
                    new_explored_id_to_set = None
                    logging.info(f"Stopping explore for {plot_id_clicked}. All plots/sections to be visible.")
                    for i in range(num_unique_sections): section_title_vis_updates[i] = gr.update(visible=True)
                    for _ in plot_configs:
                        panel_vis_updates.append(gr.update(visible=True))
                        explore_btns_updates.append(gr.update(value=EXPLORE_ICON))
                        bomb_btns_updates.append(gr.update())
                        formula_btns_updates.append(gr.update())
                else:
                    new_explored_id_to_set = plot_id_clicked
                    logging.info(f"Exploring {plot_id_clicked}. Hiding other plots/sections.")
                    for i, sec_name in enumerate(unique_ordered_sections):
                        section_title_vis_updates[i] = gr.update(visible=(sec_name == sec_of_clicked))
                    for cfg in plot_configs:
                        is_target = (cfg["id"] == new_explored_id_to_set)
                        panel_vis_updates.append(gr.update(visible=is_target))
                        explore_btns_updates.append(gr.update(value=ACTIVE_ICON if is_target else EXPLORE_ICON))
                    if current_active_panel_action_state:
                        logging.info("Closing active insight/formula panel due to explore click.")
                        action_col_upd = gr.update(visible=False)
                        new_active_panel_state_upd = None
                        formula_hint_upd = gr.update(visible=False)
                        for _ in plot_configs:
                            bomb_btns_updates.append(gr.update(value=BOMB_ICON))
                            formula_btns_updates.append(gr.update(value=FORMULA_ICON))
                    else:
                        for _ in plot_configs:
                            bomb_btns_updates.append(gr.update())
                            formula_btns_updates.append(gr.update())

                final_explore_updates = [
                    new_explored_id_to_set, action_col_upd, new_active_panel_state_upd, formula_hint_upd
                ]
                final_explore_updates.extend(panel_vis_updates)
                final_explore_updates.extend(explore_btns_updates)
                final_explore_updates.extend(bomb_btns_updates)
                final_explore_updates.extend(formula_btns_updates)
                final_explore_updates.extend(section_title_vis_updates)
                logging.debug(f"handle_explore_click returning {len(final_explore_updates)} updates. Expected {4 + 4*len(plot_configs) + num_unique_sections}.")
                return final_explore_updates
            _base_action_panel_ui_outputs = [
                global_actions_column_ui, insights_chatbot_ui, insights_chatbot_ui,
                insights_chat_input_ui, insights_suggestions_row_ui,
                insights_suggestion_1_btn, insights_suggestion_2_btn, insights_suggestion_3_btn,
                formula_display_markdown_ui, formula_display_markdown_ui,
                formula_close_hint_md
            ]
            _action_panel_state_outputs = [active_panel_action_state, current_chat_plot_id_st, chat_histories_st, explored_plot_id_state]
            action_panel_outputs_list = _base_action_panel_ui_outputs + _action_panel_state_outputs
            action_panel_outputs_list.extend([plot_ui_objects.get(pc["id"], {}).get("panel_component", gr.update()) for pc in plot_configs])
            action_panel_outputs_list.extend([plot_ui_objects.get(pc["id"], {}).get("bomb_button", gr.update()) for pc in plot_configs])
            action_panel_outputs_list.extend([plot_ui_objects.get(pc["id"], {}).get("formula_button", gr.update()) for pc in plot_configs])
            action_panel_outputs_list.extend([plot_ui_objects.get(pc["id"], {}).get("explore_button", gr.update()) for pc in plot_configs])
            action_panel_outputs_list.extend([section_titles_map.get(s_name, gr.update()) for s_name in unique_ordered_sections])

            _explore_base_outputs = [explored_plot_id_state, global_actions_column_ui, active_panel_action_state, formula_close_hint_md]
            explore_outputs_list = _explore_base_outputs
            explore_outputs_list.extend([plot_ui_objects.get(pc["id"], {}).get("panel_component", gr.update()) for pc in plot_configs])
            explore_outputs_list.extend([plot_ui_objects.get(pc["id"], {}).get("explore_button", gr.update()) for pc in plot_configs])
            explore_outputs_list.extend([plot_ui_objects.get(pc["id"], {}).get("bomb_button", gr.update()) for pc in plot_configs])
            explore_outputs_list.extend([plot_ui_objects.get(pc["id"], {}).get("formula_button", gr.update()) for pc in plot_configs])
            explore_outputs_list.extend([section_titles_map.get(s_name, gr.update()) for s_name in unique_ordered_sections])
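            # The component order in these output lists must mirror the order of the values returned
            # by handle_panel_action and handle_explore_click respectively; the logging.debug calls
            # in those handlers report the expected counts.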
            action_click_inputs = [active_panel_action_state, chat_histories_st, current_chat_plot_id_st, plot_data_for_chatbot_st, explored_plot_id_state]
            explore_click_inputs = [explored_plot_id_state, active_panel_action_state]

            def create_panel_action_handler(p_id, action_type_str):
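                """Bind the plot id and action type at definition time, so each button's callback
                calls handle_panel_action for its own plot rather than the loop variable."""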
                async def _handler(curr_active_val, curr_chats_val, curr_chat_pid, curr_plot_data, curr_explored_id):
                    return await handle_panel_action(p_id, action_type_str, curr_active_val, curr_chats_val, curr_chat_pid, curr_plot_data, curr_explored_id)
                return _handler

            for config_item in plot_configs:
                plot_id = config_item["id"]
                if plot_id in plot_ui_objects:
                    ui_obj = plot_ui_objects[plot_id]
                    if ui_obj.get("bomb_button"):
                        ui_obj["bomb_button"].click(fn=create_panel_action_handler(plot_id, "insights"), inputs=action_click_inputs, outputs=action_panel_outputs_list, api_name=f"action_insights_{plot_id}")
                    if ui_obj.get("formula_button"):
                        ui_obj["formula_button"].click(fn=create_panel_action_handler(plot_id, "formula"), inputs=action_click_inputs, outputs=action_panel_outputs_list, api_name=f"action_formula_{plot_id}")
                    if ui_obj.get("explore_button"):
                        # handle_explore_click is synchronous, so a plain lambda (with plot_id bound via a default argument) is sufficient here.
                        ui_obj["explore_button"].click(
                            fn=lambda current_explored_val, current_active_panel_val, p_id=plot_id: handle_explore_click(p_id, current_explored_val, current_active_panel_val),
                            inputs=explore_click_inputs,
                            outputs=explore_outputs_list,
                            api_name=f"action_explore_{plot_id}"
                        )  # If handle_explore_click ever becomes async, this wrapper would need to await it.
                else:
                    logging.warning(f"UI object for plot_id '{plot_id}' not found for click handlers.")

            chat_submission_outputs = [insights_chatbot_ui, insights_chat_input_ui, chat_histories_st]
            chat_submission_inputs = [insights_chat_input_ui, current_chat_plot_id_st, chat_histories_st, plot_data_for_chatbot_st]
            insights_chat_input_ui.submit(fn=handle_chat_message_submission, inputs=chat_submission_inputs, outputs=chat_submission_outputs, api_name="submit_chat_message")
            suggestion_click_inputs_base = [current_chat_plot_id_st, chat_histories_st, plot_data_for_chatbot_st]
            insights_suggestion_1_btn.click(fn=handle_suggested_question_click, inputs=[insights_suggestion_1_btn] + suggestion_click_inputs_base, outputs=chat_submission_outputs, api_name="click_suggestion_1")
            insights_suggestion_2_btn.click(fn=handle_suggested_question_click, inputs=[insights_suggestion_2_btn] + suggestion_click_inputs_base, outputs=chat_submission_outputs, api_name="click_suggestion_2")
            insights_suggestion_3_btn.click(fn=handle_suggested_question_click, inputs=[insights_suggestion_3_btn] + suggestion_click_inputs_base, outputs=chat_submission_outputs, api_name="click_suggestion_3")
            def refresh_all_analytics_ui_elements(current_token_state_val, date_filter_val, custom_start_val, custom_end_val, current_chat_histories_val):
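                """Regenerate every analytics figure for the selected date range and reset the
                action panels, chat state and explore mode.

                The returned list must line up with apply_filter_and_sync_outputs_list: status
                message, one figure per plot, the panel/chat UI resets, the state resets, per-plot
                button and panel resets, the explored-plot state, and the section titles.
                """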
                # Synchronous handler.
                logging.info("Refreshing all analytics UI elements and resetting actions/chat.")
                plot_gen_results = update_analytics_plots_figures(current_token_state_val, date_filter_val, custom_start_val, custom_end_val, plot_configs)
                status_msg, gen_figs, new_summaries = plot_gen_results[0], plot_gen_results[1:-1], plot_gen_results[-1]

                all_updates = [status_msg]
                all_updates.extend(gen_figs if len(gen_figs) == len(plot_configs) else [create_placeholder_plot("Error", f"Fig missing {i}") for i in range(len(plot_configs))])
                all_updates.extend([
                    gr.update(visible=False),             # global actions column
                    gr.update(value=[], visible=False),   # insights chatbot
                    gr.update(value="", visible=False),   # insights chat input
                    gr.update(visible=False),             # suggestions row
                    gr.update(value="S1"), gr.update(value="S2"), gr.update(value="S3"),
                    gr.update(value="Formula details here.", visible=False),
                    gr.update(visible=False)              # formula close hint
                ])
                all_updates.extend([
                    None,           # active_panel_action_state
                    None,           # current_chat_plot_id_st
                    {},             # chat_histories_st
                    new_summaries   # plot_data_for_chatbot_st
                ])
                for _ in plot_configs:
                    all_updates.extend([
                        gr.update(value=BOMB_ICON),
                        gr.update(value=FORMULA_ICON),
                        gr.update(value=EXPLORE_ICON),
                        gr.update(visible=True)
                    ])
                all_updates.append(None)  # explored_plot_id_state
                all_updates.extend([gr.update(visible=True)] * num_unique_sections)
                logging.info(f"Prepared {len(all_updates)} updates for analytics refresh. Expected {15 + 5*len(plot_configs) + num_unique_sections}.")
                return all_updates
            apply_filter_and_sync_outputs_list = [analytics_status_md]
            apply_filter_and_sync_outputs_list.extend([plot_ui_objects.get(pc["id"], {}).get("plot_component", gr.update()) for pc in plot_configs])
            _ui_resets_for_filter = [
                global_actions_column_ui, insights_chatbot_ui, insights_chat_input_ui,
                insights_suggestions_row_ui, insights_suggestion_1_btn, insights_suggestion_2_btn, insights_suggestion_3_btn,
                formula_display_markdown_ui, formula_close_hint_md
            ]
            apply_filter_and_sync_outputs_list.extend(_ui_resets_for_filter)
            _state_resets_for_filter = [active_panel_action_state, current_chat_plot_id_st, chat_histories_st, plot_data_for_chatbot_st]
            apply_filter_and_sync_outputs_list.extend(_state_resets_for_filter)
            for pc in plot_configs:
                pid = pc["id"]
                apply_filter_and_sync_outputs_list.extend([
                    plot_ui_objects.get(pid, {}).get("bomb_button", gr.update()),
                    plot_ui_objects.get(pid, {}).get("formula_button", gr.update()),
                    plot_ui_objects.get(pid, {}).get("explore_button", gr.update()),
                    plot_ui_objects.get(pid, {}).get("panel_component", gr.update())
                ])
            apply_filter_and_sync_outputs_list.append(explored_plot_id_state)
            apply_filter_and_sync_outputs_list.extend([section_titles_map.get(s_name, gr.update()) for s_name in unique_ordered_sections])

            apply_filter_btn.click(
                fn=refresh_all_analytics_ui_elements,
                inputs=[token_state, date_filter_selector, custom_start_date_picker, custom_end_date_picker, chat_histories_st],
                outputs=apply_filter_and_sync_outputs_list, show_progress="full"
            )
with gr.TabItem("3️⃣ Menzioni", id="tab_mentions"): | |
refresh_mentions_display_btn = gr.Button("🔄 Aggiorna Visualizzazione Menzioni", variant="secondary") | |
mentions_html = gr.HTML("Dati menzioni...") | |
mentions_sentiment_dist_plot = gr.Plot(label="Distribuzione Sentiment Menzioni") | |
refresh_mentions_display_btn.click( | |
fn=run_mentions_tab_display, inputs=[token_state], | |
outputs=[mentions_html, mentions_sentiment_dist_plot], | |
show_progress="full" | |
) | |
with gr.TabItem("4️⃣ Statistiche Follower", id="tab_follower_stats"): | |
refresh_follower_stats_btn = gr.Button("🔄 Aggiorna Visualizzazione Statistiche Follower", variant="secondary") | |
follower_stats_html = gr.HTML("Statistiche follower...") | |
with gr.Row(): | |
fs_plot_monthly_gains = gr.Plot(label="Guadagni Mensili Follower") | |
with gr.Row(): | |
fs_plot_seniority = gr.Plot(label="Follower per Anzianità (Top 10 Organici)") | |
fs_plot_industry = gr.Plot(label="Follower per Settore (Top 10 Organici)") | |
refresh_follower_stats_btn.click( | |
fn=run_follower_stats_tab_display, inputs=[token_state], | |
outputs=[follower_stats_html, fs_plot_monthly_gains, fs_plot_seniority, fs_plot_industry], | |
show_progress="full" | |
) | |
        # --- NEW: Tab 5 for Employer Branding Agent ---
        with gr.TabItem("5️⃣ Agente AI Employer Branding", id="tab_eb_agent"):
            gr.Markdown("## 🤖 Interagisci con l'Agente AI per l'Employer Branding")
            if not EB_AGENT_AVAILABLE:
                gr.Markdown("<p style='color:red;font-weight:bold;'>Attenzione: Il modulo dell'Agente AI per l'Employer Branding non è stato caricato correttamente. Controllare i log e l'installazione della libreria `google-generativeai` e la variabile d'ambiente `GEMINI_API_KEY`.</p>")
            elif not os.getenv('GEMINI_API_KEY'):
                gr.Markdown("<p style='color:orange;font-weight:bold;'>Attenzione: La variabile d'ambiente `GEMINI_API_KEY` non è impostata. Le funzionalità dell'Agente AI saranno limitate o non funzioneranno.</p>")
            gr.Markdown(
                "Fai domande sui tuoi dati LinkedIn (statistiche follower, post e menzioni) per ottenere insights e codice Pandas per analizzarli. "
                "L'agente utilizza i dati attualmente disponibili nello stato dell'applicazione."
            )
            with gr.Row():
                with gr.Column(scale=2):
                    eb_agent_chatbot_ui = gr.Chatbot(
                        label="Chat con Agente AI EB",
                        value=[[None, "Ciao! Sono il tuo Agente AI per l'Employer Branding. Come posso aiutarti ad analizzare i tuoi dati LinkedIn oggi? Chiedimi di generare codice Pandas o di fornire insights."]] if EB_AGENT_AVAILABLE else [[None, "Agente AI non disponibile."]],
                        bubble_full_width=False,
                        height=500,
                        placeholder="L'Agente AI è pronto. Chiedi pure..."
                    )
                    eb_agent_chat_input_ui = gr.Textbox(
                        label="La tua domanda:",
                        placeholder="Es: 'Mostrami le aziende dei miei follower nel settore tecnologico' o 'Qual è il sentiment medio delle mie menzioni?'",
                        lines=3,
                        interactive=EB_AGENT_AVAILABLE  # Disable if agent not available
                    )
                    with gr.Row():
                        eb_agent_submit_btn = gr.Button("💬 Invia Messaggio", variant="primary", interactive=EB_AGENT_AVAILABLE)
                        eb_agent_clear_btn = gr.Button("🗑️ Cancella Chat", variant="stop", interactive=EB_AGENT_AVAILABLE)
                with gr.Column(scale=1):
                    gr.Markdown("#### Schemi Dati Disponibili per l'Agente:")
                    eb_agent_schema_display_md = gr.Markdown("Gli schemi dei dati (follower, post, menzioni) verranno mostrati qui quando l'agente viene inizializzato con una query.")
                    eb_agent_status_md = gr.Markdown("Stato Agente: In attesa di input...")
            # --- NEW: Handler for Employer Branding Agent Chat ---
            eb_agent_instance_dict = {"agent": None}  # To store agent instance across calls if needed, or re-init

            async def handle_eb_agent_chat(user_message: str, chat_history_list: list, current_token_state: dict):
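                """Stream one Employer Branding Agent turn.

                A fresh agent is built on every call from the DataFrames currently held in
                token_state, seeded with the visible chat history, then asked to process the query.
                Yields intermediate and final updates for the chatbot, the chat-history state, the
                input box, the status line and the schema display.
                """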
                # Expected outputs: [eb_agent_chatbot_ui, eb_agent_chat_history_st, eb_agent_chat_input_ui, eb_agent_status_md, eb_agent_schema_display_md]
                if not EB_AGENT_AVAILABLE or not os.getenv('GEMINI_API_KEY'):
                    no_key_msg = "L'Agente AI non è disponibile. Assicurati che GEMINI_API_KEY sia configurata."
                    # Ensure chat_history_list is mutable if it comes from gr.State
                    current_chat_history = list(chat_history_list) if chat_history_list else []
                    current_chat_history.append([user_message, no_key_msg])
                    yield current_chat_history, current_chat_history, gr.update(value=""), gr.update(value=no_key_msg), gr.update(value="Nessuno schema disponibile.")
                    return

                current_chat_history = list(chat_history_list) if chat_history_list else []
                if not user_message.strip():
                    yield current_chat_history, current_chat_history, gr.update(value=""), gr.update(value="Stato Agente: Per favore, inserisci una domanda."), gr.update()  # No change to schema display
                    return

                status_update_msg = "Stato Agente: Elaborazione della tua richiesta..."
                # Show the user message immediately and update the status before the agent runs.
                pending_history = current_chat_history + [[user_message, None]]
                yield pending_history, pending_history, gr.update(value=""), gr.update(value=status_update_msg), gr.update()
                # Prepare DataFrames for the agent
                df_follower_stats = current_token_state.get("bubble_follower_stats_df", pd.DataFrame())
                df_posts = current_token_state.get("bubble_posts_df", pd.DataFrame())
                df_post_stats = current_token_state.get("bubble_post_stats_df", pd.DataFrame())
                df_mentions = current_token_state.get("bubble_mentions_df", pd.DataFrame())
                dataframes_for_agent = {
                    "follower_stats": df_follower_stats.copy() if not df_follower_stats.empty else pd.DataFrame(columns=['no_data_follower_stats']),
                    "posts": df_posts.copy() if not df_posts.empty else pd.DataFrame(columns=['no_data_posts']),
                    "post_stats": df_post_stats.copy() if not df_post_stats.empty else pd.DataFrame(columns=['no_data_post_stats']),
                    "mentions": df_mentions.copy() if not df_mentions.empty else pd.DataFrame(columns=['no_data_mentions'])
                }

                schemas_text_for_display = "Schemi DataFrames inviati all'Agente:\n\n"
                from eb_agent_module import get_all_schemas_representation  # Local import: only reached when EB_AGENT_AVAILABLE is True
                schemas_text_for_display += get_all_schemas_representation(dataframes_for_agent)
                max_schema_display_len = 1500
                if len(schemas_text_for_display) > max_schema_display_len:
                    schemas_text_for_display = schemas_text_for_display[:max_schema_display_len] + "\n...(schemi troncati per la visualizzazione)"

                current_agent = EmployerBrandingAgent(
                    llm_model_name=EB_AGENT_LLM_MODEL,
                    generation_config_dict=EB_AGENT_GEN_CONFIG,
                    safety_settings_list=EB_AGENT_SAFETY_SETTINGS,
                    all_dataframes=dataframes_for_agent,
                    embedding_model_name=EB_AGENT_EMBEDDING_MODEL
                )
                # Rebuild the agent's internal history from the visible chat history.
                agent_internal_history = []
                for user_q, ai_r_obj in current_chat_history:
                    if user_q:
                        agent_internal_history.append({"role": "user", "content": user_q})
                    # ai_r_obj can be a string, a (text, image_url) tuple, or None.
                    if ai_r_obj:
                        if isinstance(ai_r_obj, tuple):
                            # For (text, image_url) tuples, keep only the text part for the agent's history.
                            text_for_agent_history = ai_r_obj[0] if ai_r_obj[0] else "Visual media displayed."
                            agent_internal_history.append({"role": "model", "content": text_for_agent_history})
                        elif isinstance(ai_r_obj, str):
                            agent_internal_history.append({"role": "model", "content": ai_r_obj})
                # Append the current user message to the agent's history.
                agent_internal_history.append({"role": "user", "content": user_message})
                current_agent.chat_history = agent_internal_history
                try:
                    init_success = await current_agent.initialize()
                    if not init_success:
                        error_msg = "Errore: Impossibile inizializzare l'agente AI."
                        updated_history = current_chat_history + [[user_message, error_msg]]
                        yield updated_history, updated_history, gr.update(value=""), gr.update(value="Stato Agente: Errore di inizializzazione"), gr.update(value=schemas_text_for_display)
                        return

                    logging.info(f"Sending to EB Agent. User: '{user_message}'. DF Keys: {list(dataframes_for_agent.keys())}")
                    # The agent is expected to return a dict of the form {'text': '...'}.
                    ai_response_dict = await current_agent.process_query(user_query=user_message)

                    bot_message_for_display = "Error: Agent returned an unexpected response."  # Default
                    if isinstance(ai_response_dict, dict):
                        combined_message_blob = ai_response_dict.get("text")
                        if isinstance(combined_message_blob, str):
                            text_part = combined_message_blob
                            image_data_url = None
                            # Attempt to parse an image data URL out of the combined message blob.
                            # This assumes the image data URL, if present, is on its own line or at the end.
                            lines = combined_message_blob.splitlines()
                            if lines:
                                possible_image_prefixes = [
                                    "data:image/png;base64,",
                                    "data:image/jpeg;base64,",
                                    "data:image/gif;base64,",
                                    "data:image/webp;base64,"
                                ]
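                                # Illustrative only: a matching line would look like
                                # "data:image/png;base64,iVBORw0KGgoAAAANSUhEUg..." (base64-encoded PNG bytes).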
                                # Check lines from the end, as a plot image is most likely appended at the end of the message.
                                for i in range(len(lines) - 1, -1, -1):
                                    current_line = lines[i].strip()
                                    for prefix in possible_image_prefixes:
                                        if current_line.startswith(prefix):
                                            # Basic validation: require typical base64 characters and a minimum length,
                                            # as a heuristic that this is really a base64 data string.
                                            if len(current_line) > len(prefix) + 20 and \
                                               all(c in "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=" for c in current_line[len(prefix):]):
                                                image_data_url = current_line
                                                # Reconstruct text_part from the lines *before* the image line.
                                                text_part = "\n".join(lines[:i]).strip()
                                                break  # Found image prefix
                                    if image_data_url:
                                        break  # Found image line
                            if image_data_url:
                                # If text_part became empty after extracting the image, use None for the text in the tuple.
                                bot_message_for_display = (text_part if text_part else None, image_data_url)
                            else:
                                # No image found or parsing failed: treat the whole blob as text.
                                bot_message_for_display = combined_message_blob
                        else:
                            bot_message_for_display = "Agent returned a dictionary, but the 'text' field was not a string or was missing."
                            logging.warning(f"AI response dict 'text' field issue. Dict: {ai_response_dict}")
                    elif isinstance(ai_response_dict, str):  # Agent returned a plain string
                        bot_message_for_display = ai_response_dict
                    else:  # Fallback for other unexpected types
                        bot_message_for_display = f"Error: Agent returned an unexpected data type: {type(ai_response_dict)}."
                        logging.error(f"Unexpected AI response type: {type(ai_response_dict)}, content: {ai_response_dict}")

                    updated_history = current_chat_history + [[user_message, bot_message_for_display]]
                    status_update_msg = "Stato Agente: Risposta ricevuta."
                    yield updated_history, updated_history, gr.update(value=""), gr.update(value=status_update_msg), gr.update(value=schemas_text_for_display)

                except Exception as e:
                    logging.error(f"Error during EB Agent processing: {e}", exc_info=True)
                    error_msg_for_chat = f"# Errore dell'Agente AI:\n{type(e).__name__}: {str(e)}"
                    updated_history = current_chat_history + [[user_message, error_msg_for_chat]]
                    status_update_msg = f"Stato Agente: Errore - {type(e).__name__}"
                    yield updated_history, updated_history, gr.update(value=""), gr.update(value=status_update_msg), gr.update(value=schemas_text_for_display)
            def clear_eb_agent_chat_history():
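                """Reset the agent chat to its greeting message and clear the stored history."""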
                initial_msg = "Ciao! Sono il tuo Agente AI per l'Employer Branding. Come posso aiutarti?" if EB_AGENT_AVAILABLE else "Agente AI non disponibile."
                return [[None, initial_msg]], [[None, initial_msg]], "Stato Agente: Chat resettata."

            # Connect UI to handlers for the EB Agent
            eb_agent_submit_btn.click(
                fn=handle_eb_agent_chat,
                inputs=[eb_agent_chat_input_ui, eb_agent_chat_history_st, token_state],
                outputs=[eb_agent_chatbot_ui, eb_agent_chat_history_st, eb_agent_chat_input_ui, eb_agent_status_md, eb_agent_schema_display_md],
                api_name="eb_agent_chat_submit"
            )
            eb_agent_chat_input_ui.submit(
                fn=handle_eb_agent_chat,
                inputs=[eb_agent_chat_input_ui, eb_agent_chat_history_st, token_state],
                outputs=[eb_agent_chatbot_ui, eb_agent_chat_history_st, eb_agent_chat_input_ui, eb_agent_status_md, eb_agent_schema_display_md],
                api_name="eb_agent_chat_enter"
            )
            eb_agent_clear_btn.click(
                fn=clear_eb_agent_chat_history,
                inputs=[],
                outputs=[eb_agent_chatbot_ui, eb_agent_chat_history_st, eb_agent_status_md],
                api_name="eb_agent_clear_chat"
            )
    # --- Sync Events (at the end of the app's 'with gr.Blocks()' context) ---
    sync_event_part1 = sync_data_btn.click(fn=sync_all_linkedin_data_orchestrator, inputs=[token_state], outputs=[sync_status_html_output, token_state], show_progress="full")
    sync_event_part2 = sync_event_part1.then(fn=process_and_store_bubble_token, inputs=[url_user_token_display, org_urn_display, token_state], outputs=[status_box, token_state, sync_data_btn], show_progress=False)
    sync_event_part3 = sync_event_part2.then(fn=display_main_dashboard, inputs=[token_state], outputs=[dashboard_display_html], show_progress=False)
    sync_event_final = sync_event_part3.then(
        fn=refresh_all_analytics_ui_elements,  # This is synchronous
        inputs=[token_state, date_filter_selector, custom_start_date_picker, custom_end_date_picker, chat_histories_st],
        outputs=apply_filter_and_sync_outputs_list,
        show_progress="full"
    )
if __name__ == "__main__":
    if not os.environ.get(LINKEDIN_CLIENT_ID_ENV_VAR): logging.warning(f"ATTENZIONE: '{LINKEDIN_CLIENT_ID_ENV_VAR}' non impostata.")
    if not all(os.environ.get(var) for var in [BUBBLE_APP_NAME_ENV_VAR, BUBBLE_API_KEY_PRIVATE_ENV_VAR, BUBBLE_API_ENDPOINT_ENV_VAR]):
        logging.warning("ATTENZIONE: Variabili Bubble non impostate.")
    if not EB_AGENT_AVAILABLE:
        logging.error("L'Agente AI per l'Employer Branding non è disponibile a causa di errori di importazione.")
    elif not os.getenv('GEMINI_API_KEY'):
        logging.warning("ATTENZIONE: GEMINI_API_KEY non è impostata. L'Agente AI per l'Employer Branding potrebbe non funzionare.")
    try: logging.info(f"Matplotlib: {matplotlib.__version__}, Backend: {matplotlib.get_backend()}")
    except ImportError: logging.warning("Matplotlib non trovato.")

    app.launch(server_name="0.0.0.0", server_port=7860, debug=True)