# -*- coding: utf-8 -*-
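"""
Gradio app for the LinkedIn Organization Post Viewer & Analytics Space.

A user token is supplied via a URL parameter, looked up in Bubble.io to obtain a
LinkedIn access token, and stored in per-session state that gates the Dashboard,
Analytics, and Mentions tabs below.
"""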
import gradio as gr
import json
import os # Added to access environment variables
# Assuming these custom modules exist in your project directory or Python path
from Data_Fetching_and_Rendering import fetch_and_render_dashboard
from analytics_fetch_and_rendering import fetch_and_render_analytics
from mentions_dashboard import generate_mentions_dashboard
# Import the function from your utils file
from gradio_utils import get_url_user_token
# Import the Bubble API call function (ensure filename matches: Bubble_API_Calls.py)
from Bubble_API_Calls import fetch_linkedin_token_from_bubble
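# Configuration note: process_and_store_bubble_token() below reads the LinkedIn
# client id from the 'Linkedin_client_id' environment variable; without it the app
# reports a critical error instead of loading data. A typical way to provide it
# (assumed deployment detail, e.g. a Space secret or shell export) would be:
#   export Linkedin_client_id="<your-client-id>"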
# --- Session State dependent functions ---
def check_token_status(current_token_state):
    """Checks if a valid token exists in the session state."""
    if current_token_state and current_token_state.get("token") and current_token_state.get("status"):
        return "✅ Token available"
    return "❌ Waiting for token…"

def get_active_client_id(current_token_state):
    """Gets the client_id from the session state if a token is available."""
    if current_token_state and current_token_state.get("token") and current_token_state.get("status"):
        return current_token_state.get("client_id", "Client ID not set")
    return ""

# --- Function to process and store token from Bubble ---
def process_and_store_bubble_token(url_user_token_str, current_token_state):
    """
    Fetches token from Bubble, loads client_id from env, updates session state,
    and returns UI update values.

    Args:
        url_user_token_str: The token string extracted from the URL.
        current_token_state: The current session state for the token.

    Returns:
        Tuple: (bubble_api_status_msg, overall_status, client_id_display, updated_token_state)
    """
    bubble_api_status_msg = "Waiting for URL token..."

    # Ensure new_token_state is a new dictionary, not a reference to current_token_state
    new_token_state = current_token_state.copy() if current_token_state else {"status": False, "token": None, "client_id": None}

    # Assume failure until success
    new_token_state["status"] = False
    new_token_state["token"] = None
    # new_token_state["client_id"] will be set or cleared based on the env var

    # Attempt to load Linkedin_client_id from environment variable
    linkedin_client_id_from_env = os.environ.get("Linkedin_client_id")
    if not linkedin_client_id_from_env:
        bubble_api_status_msg = "❌ CRITICAL ERROR: 'Linkedin_client_id' environment variable not set."
        print(bubble_api_status_msg)
        new_token_state["client_id"] = "ENV VAR MISSING"  # Indicate error in state
        return bubble_api_status_msg, check_token_status(new_token_state), get_active_client_id(new_token_state), new_token_state

    if not url_user_token_str or "not found" in url_user_token_str or "Could not access" in url_user_token_str:
        bubble_api_status_msg = f"ℹ️ No valid user token from URL to query Bubble. ({url_user_token_str})"
        new_token_state["client_id"] = linkedin_client_id_from_env  # Client ID is known, but no token
        return bubble_api_status_msg, check_token_status(new_token_state), get_active_client_id(new_token_state), new_token_state

    print(f"Attempting to fetch token from Bubble with state: {url_user_token_str}")
    parsed_token_dict = fetch_linkedin_token_from_bubble(url_user_token_str)

    if parsed_token_dict and isinstance(parsed_token_dict, dict) and "access_token" in parsed_token_dict:
        new_token_state["status"] = True
        new_token_state["token"] = parsed_token_dict
        new_token_state["client_id"] = linkedin_client_id_from_env  # Use client_id from env var
        bubble_api_status_msg = f"✅ Token successfully fetched from Bubble for state: {url_user_token_str}. Client ID loaded."
        print(bubble_api_status_msg)
    else:
        bubble_api_status_msg = f"❌ Failed to fetch a valid token from Bubble for state: {url_user_token_str}. Check console logs from Bubble_API_Calls.py."
        print(bubble_api_status_msg)
        # Token fetch failed, status remains False, token remains None
        new_token_state["client_id"] = linkedin_client_id_from_env  # Client ID is known, but no token

    return bubble_api_status_msg, check_token_status(new_token_state), get_active_client_id(new_token_state), new_token_state

# --- Guarded fetch functions (now use token_state) ---
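# Each guarded function first checks token_state and returns an "Access denied"
# placeholder when no token is present. The denial tuple returned by
# guarded_fetch_analytics (one message plus seven Nones) deliberately matches the
# eight outputs wired to fetch_analytics_btn.click() further down.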
def guarded_fetch_dashboard(current_token_state):
    if not (current_token_state and current_token_state.get("status") and current_token_state.get("token")):
        return "<p style='color:red; text-align:center;'>❌ Access denied. No token available.</p>"
    html = fetch_and_render_dashboard(
        current_token_state.get("client_id"),  # Use .get for safety
        current_token_state.get("token")
    )
    return html

def guarded_fetch_analytics(current_token_state):
    if not (current_token_state and current_token_state.get("status") and current_token_state.get("token")):
        return (
            "<p style='color:red; text-align:center;'>❌ Access denied. No token available.</p>",
            None, None, None, None, None, None, None
        )
    client_id = current_token_state.get("client_id")
    token_data = current_token_state.get("token")
    count_md, plot, growth_plot, avg_post_eng_rate, interaction_metrics, eb_metrics, mentions_vol_metrics, mentions_sentiment_metrics = fetch_and_render_analytics(
        client_id,
        token_data
    )
    return count_md, plot, growth_plot, avg_post_eng_rate, interaction_metrics, eb_metrics, mentions_vol_metrics, mentions_sentiment_metrics

def run_mentions_and_load(current_token_state):
    if not (current_token_state and current_token_state.get("status") and current_token_state.get("token")):
        return ("<p style='color:red; text-align:center;'>❌ Access denied. No token available.</p>", None)
    html, fig = generate_mentions_dashboard(
        current_token_state.get("client_id"),
        current_token_state.get("token")
    )
    return html, fig

# --- Build the Gradio UI ---
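# Wiring overview: token_state is per-session gr.State; app.load() fills the hidden
# URL-token textbox via get_url_user_token, whose .change event triggers the Bubble
# lookup, and a 5-second gr.Timer keeps the status and client-id displays refreshed.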
with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky"),
               title="LinkedIn Post Viewer & Analytics") as app:

    token_state = gr.State(value={"status": False, "token": None, "client_id": None})

    gr.Markdown("# LinkedIn Organization Post Viewer & Analytics")
    gr.Markdown("The token is supplied via a URL parameter for Bubble.io lookup. Then explore the dashboard and analytics.")

    url_user_token_display = gr.Textbox(
        label="User Token (from URL - Hidden)",
        interactive=False,
        placeholder="Attempting to load from URL...",
        visible=False
    )
    bubble_api_status_display = gr.Textbox(label="Bubble API Call Status", interactive=False, placeholder="Waiting for URL token...")
    status_box = gr.Textbox(label="Overall Token Status", interactive=False)
    client_display = gr.Textbox(label="Client ID (Active)", interactive=False)

    app.load(
        fn=get_url_user_token,
        inputs=None,
        outputs=[url_user_token_display]
    )
    url_user_token_display.change(
        fn=process_and_store_bubble_token,
        inputs=[url_user_token_display, token_state],
        outputs=[bubble_api_status_display, status_box, client_display, token_state]
    )

    app.load(fn=check_token_status, inputs=[token_state], outputs=status_box)
    app.load(fn=get_active_client_id, inputs=[token_state], outputs=client_display)

    timer = gr.Timer(5.0)
    timer.tick(fn=check_token_status, inputs=[token_state], outputs=status_box)
    timer.tick(fn=get_active_client_id, inputs=[token_state], outputs=client_display)

    with gr.Tabs():
        with gr.TabItem("1️⃣ Dashboard"):
            gr.Markdown("View your organization's recent posts and their engagement statistics.")
            fetch_dashboard_btn = gr.Button("Fetch Posts & Stats", variant="primary")
            dashboard_html = gr.HTML(value="<p style='text-align: center; color: #555;'>Waiting for token...</p>")
            fetch_dashboard_btn.click(
                fn=guarded_fetch_dashboard,
                inputs=[token_state],
                outputs=[dashboard_html]
            )

        with gr.TabItem("2️⃣ Analytics"):
            gr.Markdown("View follower count and monthly gains for your organization.")
            fetch_analytics_btn = gr.Button("Fetch Follower Analytics", variant="primary")
            follower_count = gr.Markdown("<p style='text-align: center; color: #555;'>Waiting for token...</p>")
            with gr.Row():
                follower_plot = gr.Plot(visible=True)
                growth_rate_plot = gr.Plot(visible=True)
            with gr.Row():
                post_eng_rate_plot = gr.Plot(visible=True)
            with gr.Row():
                interaction_data = gr.Plot(visible=True)
            with gr.Row():
                eb_data = gr.Plot(visible=True)
            with gr.Row():
                mentions_vol_data = gr.Plot(visible=True)
                mentions_sentiment_data = gr.Plot(visible=True)
            fetch_analytics_btn.click(
                fn=guarded_fetch_analytics,
                inputs=[token_state],
                outputs=[follower_count, follower_plot, growth_rate_plot, post_eng_rate_plot, interaction_data, eb_data, mentions_vol_data, mentions_sentiment_data]
            )

        with gr.TabItem("3️⃣ Mentions"):
            gr.Markdown("Analyze sentiment of recent posts that mention your organization.")
            fetch_mentions_btn = gr.Button("Fetch Mentions & Sentiment", variant="primary")
            mentions_html = gr.HTML(value="<p style='text-align: center; color: #555;'>Waiting for token...</p>")
            mentions_plot = gr.Plot(visible=True)
            fetch_mentions_btn.click(
                fn=run_mentions_and_load,
                inputs=[token_state],
                outputs=[mentions_html, mentions_plot]
            )

if __name__ == "__main__":
    # share=True exposes a temporary public Gradio link in addition to the local server on port 7860.
    app.launch(server_name="0.0.0.0", server_port=7860, share=True)