# -*- coding: utf-8 -*-
import gradio as gr
import json
import os # Added to access environment variables
# Assuming these custom modules exist in your project directory or Python path
from Data_Fetching_and_Rendering import fetch_and_render_dashboard
from analytics_fetch_and_rendering import fetch_and_render_analytics
from mentions_dashboard import generate_mentions_dashboard
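# Note: based on how they are called below, these helpers are expected to accept
# (client_id, token) and return, respectively, an HTML string, an 8-tuple of
# (markdown, plots/metric figures), and an (html, figure) pair.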
# Import the function from your utils file
from gradio_utils import get_url_user_token
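# For reference, a minimal sketch of what such a helper could look like (the
# query-parameter name "user_token" is an assumption, not necessarily what
# gradio_utils actually uses):
#
#     def get_url_user_token(request: gr.Request):
#         # Gradio injects the incoming request when a parameter is annotated
#         # with gr.Request; query_params exposes the URL's query string.
#         return request.query_params.get("user_token", "User token not found in URL")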
# Import the Bubble API call function (ensure filename matches: Bubble_API_Calls.py)
from Bubble_API_Calls import fetch_linkedin_token_from_bubble
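# On success this is expected to return a dict containing at least an
# "access_token" key (see the check in process_and_store_bubble_token below);
# anything else is treated as a failed fetch.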
# --- Session State dependent functions ---
def check_token_status(current_token_state):
"""Checks if a token exists in the session state."""
# Only check for the presence of the token
if current_token_state and current_token_state.get("token"):
return "β
Token available"
return "β Token not available" # Changed message for clarity
# --- Function to process and store token from Bubble ---
def process_and_store_bubble_token(url_user_token_str, current_token_state):
"""
Fetches token from Bubble, loads client_id from env, updates session state,
and returns UI update values.
Args:
url_user_token_str: The token string extracted from the URL.
current_token_state: The current session state for the token.
Returns:
Tuple: (bubble_api_status_msg, overall_token_status_msg, updated_token_state)
"""
bubble_api_status_msg = "Waiting for URL token..."
# Initialize new_token_state, removing the 'status' field
new_token_state = current_token_state.copy() if current_token_state else {"token": None, "client_id": None}
new_token_state["token"] = None # Assume no token until successfully fetched
# Attempt to load Linkedin_client_id from environment variable
linkedin_client_id_from_env = os.environ.get("Linkedin_client_id")
if not linkedin_client_id_from_env:
bubble_api_status_msg = "β CRITICAL ERROR: 'Linkedin_client_id' environment variable not set."
print(bubble_api_status_msg)
new_token_state["client_id"] = "ENV VAR MISSING" # Indicate error in state
# Return values: bubble_api_status, overall_token_status, new_token_state
return check_token_status(new_token_state), new_token_state
# Store client_id from env in the state, regardless of token outcome (if env var exists)
new_token_state["client_id"] = linkedin_client_id_from_env
if not url_user_token_str or "not found" in url_user_token_str or "Could not access" in url_user_token_str:
bubble_api_status_msg = f"βΉοΈ No valid user token from URL to query Bubble. ({url_user_token_str})"
# Client ID is known (if env var was found), but no token to fetch
return check_token_status(new_token_state), new_token_state
print(f"Attempting to fetch token from Bubble with user token: {url_user_token_str}") # Changed "state" to "user token" for clarity
parsed_token_dict = fetch_linkedin_token_from_bubble(url_user_token_str)
if parsed_token_dict and isinstance(parsed_token_dict, dict) and "access_token" in parsed_token_dict:
new_token_state["token"] = parsed_token_dict # Store the actual token
bubble_api_status_msg = f"β
Token successfully fetched from Bubble for user token: {url_user_token_str}. Client ID loaded."
print(bubble_api_status_msg)
else:
bubble_api_status_msg = f"β Failed to fetch a valid token from Bubble for user token: {url_user_token_str}. Check console logs from Bubble_API_Calls.py."
print(bubble_api_status_msg)
# Token fetch failed, token remains None. Client_id is already set if env var was found.
# Return values: bubble_api_status, overall_token_status, new_token_state
return check_token_status(new_token_state), new_token_state
# --- Guarded fetch functions (now use token_state, checking only for token presence) ---
def guarded_fetch_dashboard(current_token_state):
    # Check only for the presence of the token
    if not (current_token_state and current_token_state.get("token")):
        return "<p style='color:red; text-align:center;'>❌ Access denied. No token available.</p>"
    html = fetch_and_render_dashboard(
        current_token_state.get("client_id"),  # Use .get for safety
        current_token_state.get("token")
    )
    return html

def guarded_fetch_analytics(current_token_state):
    # Check only for the presence of the token
    if not (current_token_state and current_token_state.get("token")):
        return (
            "<p style='color:red; text-align:center;'>❌ Access denied. No token available.</p>",
            None, None, None, None, None, None, None
        )
    client_id = current_token_state.get("client_id")
    token_data = current_token_state.get("token")
    count_md, plot, growth_plot, avg_post_eng_rate, interaction_metrics, eb_metrics, mentions_vol_metrics, mentions_sentiment_metrics = fetch_and_render_analytics(
        client_id,
        token_data
    )
    return count_md, plot, growth_plot, avg_post_eng_rate, interaction_metrics, eb_metrics, mentions_vol_metrics, mentions_sentiment_metrics

def run_mentions_and_load(current_token_state):
    # Check only for the presence of the token
    if not (current_token_state and current_token_state.get("token")):
        return ("<p style='color:red; text-align:center;'>❌ Access denied. No token available.</p>", None)
    html, fig = generate_mentions_dashboard(
        current_token_state.get("client_id"),
        current_token_state.get("token")
    )
    return html, fig
# --- Build the Gradio UI ---
with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky"),
title="LinkedIn Post Viewer & Analytics") as app:
# Modified token_state: removed 'status', client_id will be populated from env
token_state = gr.State(value={"token": None, "client_id": None})
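    # gr.State keeps a separate copy of this dict per browser session, so one
    # user's token is never shared with another session.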
gr.Markdown("# π LinkedIn Organization Post Viewer & Analytics")
gr.Markdown("Token is supplied via URL parameter for Bubble.io lookup. Then explore dashboard and analytics.")
url_user_token_display = gr.Textbox(
label="User Token (from URL - Hidden)",
interactive=False,
placeholder="Attempting to load from URL...",
visible=False # Kept hidden as per original logic
)
status_box = gr.Textbox(label="Overall Token Status", interactive=False, placeholder="Waiting for token check...") # Added placeholder
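    # Flow: on page load, get_url_user_token reads the user token from the URL into
    # the hidden textbox; that change then triggers the Bubble lookup below, which
    # updates both the status box and the session token_state.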
    app.load(
        fn=get_url_user_token,
        inputs=None,
        outputs=[url_user_token_display]
    )

    # Modified outputs for process_and_store_bubble_token
    url_user_token_display.change(
        fn=process_and_store_bubble_token,
        inputs=[url_user_token_display, token_state],
        outputs=[status_box, token_state]  # Removed client_display
    )

    # app.load to initialize status_box based on initial token_state
    app.load(fn=check_token_status, inputs=[token_state], outputs=status_box)
    # Removed app.load for get_active_client_id and client_display
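    # Poll every 5 seconds so the status box reflects the current token state.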
    timer = gr.Timer(5.0)
    timer.tick(fn=check_token_status, inputs=[token_state], outputs=status_box)
    # Removed timer.tick for get_active_client_id and client_display
    with gr.Tabs():
        with gr.TabItem("1️⃣ Dashboard"):
            gr.Markdown("View your organization's recent posts and their engagement statistics.")
            fetch_dashboard_btn = gr.Button("Fetch Posts & Stats", variant="primary")
            dashboard_html = gr.HTML(value="<p style='text-align: center; color: #555;'>Waiting for token...</p>")
            fetch_dashboard_btn.click(
                fn=guarded_fetch_dashboard,
                inputs=[token_state],
                outputs=[dashboard_html]
            )

        with gr.TabItem("2️⃣ Analytics"):
            gr.Markdown("View follower count and monthly gains for your organization.")
            fetch_analytics_btn = gr.Button("Fetch Follower Analytics", variant="primary")
            follower_count = gr.Markdown("<p style='text-align: center; color: #555;'>Waiting for token...</p>")
            with gr.Row():
                follower_plot = gr.Plot(visible=True)
                growth_rate_plot = gr.Plot(visible=True)
            with gr.Row():
                post_eng_rate_plot = gr.Plot(visible=True)
            with gr.Row():
                interaction_data = gr.Plot(visible=True)
            with gr.Row():
                eb_data = gr.Plot(visible=True)
            with gr.Row():
                mentions_vol_data = gr.Plot(visible=True)
                mentions_sentiment_data = gr.Plot(visible=True)
            fetch_analytics_btn.click(
                fn=guarded_fetch_analytics,
                inputs=[token_state],
                outputs=[follower_count, follower_plot, growth_rate_plot, post_eng_rate_plot, interaction_data, eb_data, mentions_vol_data, mentions_sentiment_data]
            )

        with gr.TabItem("3️⃣ Mentions"):
            gr.Markdown("Analyze sentiment of recent posts that mention your organization.")
            fetch_mentions_btn = gr.Button("Fetch Mentions & Sentiment", variant="primary")
            mentions_html = gr.HTML(value="<p style='text-align: center; color: #555;'>Waiting for token...</p>")
            mentions_plot = gr.Plot(visible=True)
            fetch_mentions_btn.click(
                fn=run_mentions_and_load,
                inputs=[token_state],
                outputs=[mentions_html, mentions_plot]
            )
if __name__ == "__main__":
    # Ensure the Linkedin_client_id environment variable is set before launching.
    if not os.environ.get("Linkedin_client_id"):
        print("WARNING: The 'Linkedin_client_id' environment variable is not set. The application may not function correctly.")
    app.launch(server_name="0.0.0.0", server_port=7860, share=True)